diff --git a/.github/workflows/ci-prover-e2e.yml b/.github/workflows/ci-prover-e2e.yml new file mode 100644 index 000000000000..105ae1f1485d --- /dev/null +++ b/.github/workflows/ci-prover-e2e.yml @@ -0,0 +1,127 @@ +name: Workflow for testing prover component end-to-end +on: + workflow_call: + +jobs: + e2e-test: + runs-on: [ matterlabs-ci-gpu-l4-runner-prover-tests ] + env: + RUNNER_COMPOSE_FILE: "docker-compose-gpu-runner-cuda-12-0.yml" + + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + with: + submodules: "recursive" + fetch-depth: 0 + + - name: Setup environment + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo IN_DOCKER=1 >> .env + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env + + mkdir -p prover_logs + + - name: Start services + run: | + run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull + mkdir -p ./volumes/postgres ./volumes/reth/data + docker-compose -f ${RUNNER_COMPOSE_FILE} --profile runner up -d --wait + ci_run sccache --start-server + + - name: Init + run: | + ci_run git config --global --add safe.directory "*" + ci_run chmod -R +x ./bin + + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g --local + + ci_run zkstack chain create \ + --chain-name proving_chain \ + --chain-id sequential \ + --prover-mode gpu \ + --wallet-creation localhost \ + --l1-batch-commit-data-generator-mode rollup \ + --base-token-address 0x0000000000000000000000000000000000000001 \ + --base-token-price-nominator 1 \ + --base-token-price-denominator 1 \ + --set-as-default true \ + --ignore-prerequisites + + ci_run zkstack ecosystem init --dev --verbose + ci_run zkstack prover init --dev --verbose + + echo "URL=$(grep "http_url" 
./chains/proving_chain/configs/general.yaml | awk '{ print $2 }')" >> $GITHUB_ENV + - name: Build prover binaries + run: | + ci_run cargo build --release --workspace --manifest-path=prover/Cargo.toml + - name: Prepare prover subsystem + run: | + ci_run zkstack prover init-bellman-cuda --clone --verbose + ci_run zkstack prover setup-keys --mode=download --region=us --verbose + - name: Run server + run: | + ci_run zkstack server --uring --chain=proving_chain --components=api,tree,eth,state_keeper,commitment_generator,proof_data_handler,vm_runner_protective_reads,vm_runner_bwip &>prover_logs/server.log & + - name: Run Gateway + run: | + ci_run zkstack prover run --component=gateway --docker=false &>prover_logs/gateway.log & + - name: Run Prover Job Monitor + run: | + ci_run zkstack prover run --component=prover-job-monitor --docker=false &>prover_logs/prover-job-monitor.log & + - name: Wait for batch to be passed through gateway + env: + DATABASE_URL: postgres://postgres:notsecurepassword@localhost:5432/zksync_prover_localhost_proving_chain + BATCH_NUMBER: 1 + INTERVAL: 30 + TIMEOUT: 300 + run: | + PASSED_ENV_VARS="DATABASE_URL,BATCH_NUMBER,INTERVAL,TIMEOUT" \ + ci_run ./bin/prover_checkers/batch_availability_checker + - name: Run Witness Generator + run: | + ci_run zkstack prover run --component=witness-generator --round=all-rounds --docker=false &>prover_logs/witness-generator.log & + - name: Run Circuit Prover + run: | + ci_run zkstack prover run --component=circuit-prover --witness-vector-generator-count=10 --docker=false &>prover_logs/circuit_prover.log & + - name: Wait for prover jobs to finish + env: + DATABASE_URL: postgres://postgres:notsecurepassword@localhost:5432/zksync_prover_localhost_proving_chain + BATCH_NUMBER: 1 + INTERVAL: 30 + TIMEOUT: 1200 + run: | + PASSED_ENV_VARS="DATABASE_URL,BATCH_NUMBER,INTERVAL,TIMEOUT" \ + ci_run ./bin/prover_checkers/prover_jobs_status_checker + + - name: Kill prover & start compressor + run: | + sudo 
./bin/prover_checkers/kill_prover + + ci_run zkstack prover run --component=compressor --docker=false &>prover_logs/compressor.log & + - name: Wait for batch to be executed on L1 + env: + DATABASE_URL: postgres://postgres:notsecurepassword@localhost:5432/zksync_prover_localhost_proving_chain + BATCH_NUMBER: 1 + INTERVAL: 30 + TIMEOUT: 600 + run: | + PASSED_ENV_VARS="BATCH_NUMBER,DATABASE_URL,URL,INTERVAL,TIMEOUT" \ + ci_run ./bin/prover_checkers/batch_l1_status_checker + + - name: Upload logs + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 + if: always() + with: + name: prover_logs + path: prover_logs + + - name: Show sccache logs + if: always() + run: | + ci_run sccache --show-stats || true + ci_run cat /tmp/sccache_log.txt || true diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fd9dedf8af4e..47ae3c517517 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -94,6 +94,12 @@ jobs: name: CI for Prover Components uses: ./.github/workflows/ci-prover-reusable.yml + e2e-for-prover: + name: E2E Test for Prover Components + needs: changed_files + if: ${{(needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }} + uses: ./.github/workflows/ci-prover-e2e.yml + ci-for-docs: needs: changed_files if: needs.changed_files.outputs.docs == 'true' diff --git a/.github/workflows/zk-environment-publish.yml b/.github/workflows/zk-environment-publish.yml index 73303d15cb30..b9321c8f5d6c 100644 --- a/.github/workflows/zk-environment-publish.yml +++ b/.github/workflows/zk-environment-publish.yml @@ -49,10 +49,10 @@ jobs: - docker/zk-environment/Dockerfile - .github/workflows/zk-environment-publish.yml zk_env_cuda_11_8: - - docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile + - docker/zk-environment/22.04_amd64_cuda_11_8.Dockerfile - .github/workflows/zk-environment-publish.yml zk_env_cuda_12: - - 
docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile + - docker/zk-environment/22.04_amd64_cuda_12_0.Dockerfile - .github/workflows/zk-environment-publish.yml get_short_sha: @@ -245,7 +245,7 @@ jobs: if: ${{ (steps.condition.outputs.should_run == 'true') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: docker/build-push-action@5176d81f87c23d6fc96624dfdbcd9f3830bbe445 # v6.5.0 with: - file: docker/zk-environment/20.04_amd64_cuda_${{ matrix.cuda_version }}.Dockerfile + file: docker/zk-environment/22.04_amd64_cuda_${{ matrix.cuda_version }}.Dockerfile push: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/main' ) || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} tags: | us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zk-environment-cuda-${{ matrix.cuda_version }}:latest diff --git a/bin/prover_checkers/batch_availability_checker b/bin/prover_checkers/batch_availability_checker new file mode 100644 index 000000000000..ae7aade2f687 --- /dev/null +++ b/bin/prover_checkers/batch_availability_checker @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +set -o errexit +set -o pipefail + +# Configuration +# DATABASE_URL - The URL of the prover database to connect to +# BATCH_NUMBER - The batch number to check availability for +# INTERVAL - Time interval for polling in seconds +# TIMEOUT - Timeout of script in seconds + +# Start timer +START_TIME=$(date +%s) + +# Loop to query periodically +while true; do + # Calculate the elapsed time + CURRENT_TIME=$(date +%s) + ELAPSED_TIME=$((CURRENT_TIME - START_TIME)) + + # Check if the timeout has been reached + if [ $ELAPSED_TIME -ge $TIMEOUT ]; then + echo "Timeout reached. Failing CI..." 
+ exit 1 # Exit with non-zero status to fail CI + fi + + # Run the SQL query and capture the result + RESULT=$(psql $DATABASE_URL -c "SELECT count(*) FROM witness_inputs_fri WHERE l1_batch_number = $BATCH_NUMBER;" -t -A) + + # Check if the result is 1 + if [ "$RESULT" -eq 1 ]; then + echo "Query result is 1. Success!" + exit 0 # Exit with zero status to succeed CI + else + echo "Batch is not available yet. Retrying in $INTERVAL seconds..." + fi + + # Wait for the next interval + sleep $INTERVAL +done diff --git a/bin/prover_checkers/batch_l1_status_checker b/bin/prover_checkers/batch_l1_status_checker new file mode 100755 index 000000000000..24f26e354eac --- /dev/null +++ b/bin/prover_checkers/batch_l1_status_checker @@ -0,0 +1,54 @@ +#!/usr/bin/env bash + +set -o errexit +set -o pipefail + +# Needs following configuration +# URL - URL of the API endpoint +# INTERVAL - Time interval for polling in seconds +# TIMEOUT - Timeout of script in seconds + +# Start timer +START_TIME=$(date +%s) + +echo "URL: $URL" + +# Loop to query periodically +while true; do + # Calculate the elapsed time + CURRENT_TIME=$(date +%s) + ELAPSED_TIME=$((CURRENT_TIME - START_TIME)) + + # Check if the timeout has been reached + if [ $ELAPSED_TIME -ge $TIMEOUT ]; then + echo "Timeout reached. Failing CI..." 
+ exit 1 # Exit with non-zero status to fail CI + fi + + # Run the curl request and capture the response + RESPONSE=$(curl --silent --request POST \ + --url $URL \ + --header 'Content-Type: application/json' \ + --data '{ + "jsonrpc": "2.0", + "id": 1, + "method": "zks_getBlockDetails", + "params": [1] + }') + + # Parse the executedAt field using jq + EXECUTED_AT=$(echo $RESPONSE | jq -r '.result.executedAt') + + # Check if executedAt is not null + if [ "$EXECUTED_AT" != "null" ] && [ -n "$EXECUTED_AT" ]; then + echo "executedAt is not null: $EXECUTED_AT" + echo "true" + exit 0 # Exit with zero status to succeed CI + else + DATABASE_STATUS=$(psql $DATABASE_URL -c "SELECT status FROM proof_compression_jobs_fri WHERE l1_batch_number = $BATCH_NUMBER;" -t -A) + echo "executedAt is null, database status is $DATABASE_STATUS, retrying in $INTERVAL seconds..." + fi + + # Wait for the next interval + sleep $INTERVAL +done diff --git a/bin/prover_checkers/kill_prover b/bin/prover_checkers/kill_prover new file mode 100644 index 000000000000..2a65aea2d673 --- /dev/null +++ b/bin/prover_checkers/kill_prover @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -o errexit +set -o pipefail + +# Use pkill to find and kill processes using circuit prover +if ! pkill -f 'zksync_circuit_prover|zkstack prover run --component=circuit-prover'; then + echo "No processes are currently using the GPU." + exit 0 +fi + +echo "All GPU-related processes have been killed." 
diff --git a/bin/prover_checkers/prover_jobs_status_checker b/bin/prover_checkers/prover_jobs_status_checker new file mode 100755 index 000000000000..6816d9a2d140 --- /dev/null +++ b/bin/prover_checkers/prover_jobs_status_checker @@ -0,0 +1,42 @@ +#!/usr/bin/env bash + +set -o errexit +set -o pipefail + +# Configuration +# DATABASE_URL - The URL of the prover database to connect to +# BATCH_NUMBER - The batch number to check readiness for +# INTERVAL - Time interval for polling in seconds +# TIMEOUT - Timeout of script in seconds + +# Start timer +START_TIME=$(date +%s) + +# Loop to query periodically +while true; do + # Calculate the elapsed time + CURRENT_TIME=$(date +%s) + ELAPSED_TIME=$((CURRENT_TIME - START_TIME)) + + # Check if the timeout has been reached + if [ $ELAPSED_TIME -ge $TIMEOUT ]; then + echo "Timeout reached. Failing CI..." + exit 1 # Exit with non-zero status to fail CI + fi + + # Run the SQL query and capture the result + RESULT=$(psql $DATABASE_URL -c "SELECT count(*) FROM proof_compression_jobs_fri WHERE l1_batch_number = $BATCH_NUMBER AND status = 'queued';" -t -A) + + # Check if the result is 1 + if [ "$RESULT" -eq 1 ]; then + echo "Query result is 1. Success!" + exit 0 # Exit with zero status to succeed CI + else + STATUS=$(psql $DATABASE_URL -c "SELECT COUNT(*), status FROM prover_jobs_fri WHERE l1_batch_number = $BATCH_NUMBER GROUP BY status;" -t -A) + echo "Current status is $STATUS" + echo "Retrying in $INTERVAL seconds..." 
+ fi + + # Wait for the next interval + sleep $INTERVAL +done diff --git a/core/node/proof_data_handler/src/lib.rs b/core/node/proof_data_handler/src/lib.rs index 618a786ea658..51780f03230d 100644 --- a/core/node/proof_data_handler/src/lib.rs +++ b/core/node/proof_data_handler/src/lib.rs @@ -30,7 +30,7 @@ pub async fn run_server( mut stop_receiver: watch::Receiver, ) -> anyhow::Result<()> { let bind_address = SocketAddr::from(([0, 0, 0, 0], config.http_port)); - tracing::debug!("Starting proof data handler server on {bind_address}"); + tracing::info!("Starting proof data handler server on {bind_address}"); let app = create_proof_processing_router(blob_store, connection_pool, config, commitment_mode); let listener = tokio::net::TcpListener::bind(bind_address) diff --git a/docker-compose-gpu-runner-cuda-12-0.yml b/docker-compose-gpu-runner-cuda-12-0.yml index 35a0faeb9620..c930fa376f5e 100644 --- a/docker-compose-gpu-runner-cuda-12-0.yml +++ b/docker-compose-gpu-runner-cuda-12-0.yml @@ -3,6 +3,8 @@ services: reth: restart: always image: "ghcr.io/paradigmxyz/reth:v1.0.6" + ports: + - 127.0.0.1:8545:8545 volumes: - type: bind source: ./volumes/reth/data @@ -12,11 +14,9 @@ services: target: /chaindata command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config - ports: - - 127.0.0.1:8545:8545 zk: - image: ghcr.io/matter-labs/zk-environment:cuda-12-0-latest + image: ghcr.io/matter-labs/zk-environment:cuda-12_0-latest depends_on: - reth - postgres @@ -49,11 +49,18 @@ services: - /dev/nvidia-uvm-tools:/dev/nvidia-uvm-tools env_file: - ./.env + extra_hosts: + - "host:host-gateway" + profiles: + - runner + network_mode: host + pid: host deploy: resources: reservations: devices: - capabilities: [ gpu ] + postgres: image: "postgres:14" command: postgres -c 'max_connections=200' diff --git a/docker-compose-gpu-runner.yml b/docker-compose-gpu-runner.yml index 
f95ae0d5f544..32665eb7010a 100644 --- a/docker-compose-gpu-runner.yml +++ b/docker-compose-gpu-runner.yml @@ -16,7 +16,7 @@ services: - 127.0.0.1:8545:8545 zk: - image: "ghcr.io/matter-labs/zk-environment:cuda-11-8-latest" + image: "ghcr.io/matter-labs/zk-environment:cuda-11_8-latest" container_name: zk depends_on: - reth @@ -40,6 +40,11 @@ services: - GITHUB_WORKSPACE=$GITHUB_WORKSPACE env_file: - ./.env + extra_hosts: + - "host:host-gateway" + profiles: + - runner + network_mode: host deploy: resources: reservations: diff --git a/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile b/docker/zk-environment/22.04_amd64_cuda_11_8.Dockerfile similarity index 95% rename from docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile rename to docker/zk-environment/22.04_amd64_cuda_11_8.Dockerfile index 90f089ba8bd4..fe44d55acbbc 100644 --- a/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile +++ b/docker/zk-environment/22.04_amd64_cuda_11_8.Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:20.04@sha256:3246518d9735254519e1b2ff35f95686e4a5011c90c85344c1f38df7bae9dd37 as base +FROM ubuntu:22.04@sha256:3d1556a8a18cf5307b121e0a98e93f1ddf1f3f8e092f1fddfd941254785b95d7 as base # Link Docker Image with repository # https://docs.github.com/en/packages/learn-github-packages/connecting-a-repository-to-a-package#connecting-a-repository-to-a-container-image-using-the-command-line @@ -16,7 +16,7 @@ RUN apt-get update && apt-get install -y \ git \ openssl \ libssl-dev \ - gcc \ + gcc-10 \ g++ \ curl \ pkg-config \ @@ -31,19 +31,19 @@ RUN apt-get update && apt-get install -y \ wget \ bzip2 \ unzip \ - hub + hub \ + curl \ + gnutls-bin git \ + build-essential \ + clang \ + lldb \ + lld # Install dependencies for RocksDB. 
`liburing` is not available for Ubuntu 20.04, # so we use a PPA with the backport RUN add-apt-repository ppa:savoury1/virtualisation && \ apt-get update && \ apt-get install -y \ - curl \ - gnutls-bin git \ - build-essential \ - clang \ - lldb \ - lld \ liburing-dev \ libclang-dev @@ -83,6 +83,11 @@ RUN rustup default stable RUN cargo install --version=0.8.0 sqlx-cli RUN cargo install cargo-nextest +RUN git clone https://github.com/matter-labs/foundry-zksync +RUN cd foundry-zksync && cargo build --release --bins +RUN mv ./foundry-zksync/target/release/forge /usr/local/cargo/bin/ +RUN mv ./foundry-zksync/target/release/cast /usr/local/cargo/bin/ + # Copy compiler (both solc and zksolc) binaries # Obtain `solc` 0.8.20. RUN wget -c https://github.com/ethereum/solc-bin/raw/gh-pages/linux-amd64/solc-linux-amd64-v0.8.20%2Bcommit.a1b79de6 \ diff --git a/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile b/docker/zk-environment/22.04_amd64_cuda_12_0.Dockerfile similarity index 96% rename from docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile rename to docker/zk-environment/22.04_amd64_cuda_12_0.Dockerfile index b6b023a5b7f4..da041b121816 100644 --- a/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile +++ b/docker/zk-environment/22.04_amd64_cuda_12_0.Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:20.04 as base +FROM ubuntu:22.04@sha256:3d1556a8a18cf5307b121e0a98e93f1ddf1f3f8e092f1fddfd941254785b95d7 as base # Link Docker Image with repository # https://docs.github.com/en/packages/learn-github-packages/connecting-a-repository-to-a-package#connecting-a-repository-to-a-container-image-using-the-command-line @@ -16,7 +16,7 @@ RUN apt-get update && apt-get install -y \ git \ openssl \ libssl-dev \ - gcc \ + gcc-10 \ g++ \ curl \ pkg-config \ @@ -30,18 +30,18 @@ RUN apt-get update && apt-get install -y \ gnupg2 \ postgresql-client \ hub \ - unzip + unzip \ + gnutls-bin \ + build-essential \ + clang \ + lldb\ + lld # Install dependencies for RocksDB. 
`liburing` is not available for Ubuntu 20.04, # so we use a PPA with the backport RUN add-apt-repository ppa:savoury1/virtualisation && \ apt-get update && \ apt-get install -y \ - gnutls-bin \ - build-essential \ - clang \ - lldb\ - lld \ liburing-dev \ libclang-dev @@ -81,6 +81,11 @@ RUN rustup default stable RUN cargo install --version=0.8.0 sqlx-cli RUN cargo install cargo-nextest +RUN git clone https://github.com/matter-labs/foundry-zksync +RUN cd foundry-zksync && cargo build --release --bins +RUN mv ./foundry-zksync/target/release/forge /usr/local/cargo/bin/ +RUN mv ./foundry-zksync/target/release/cast /usr/local/cargo/bin/ + # Copy compiler (both solc and zksolc) binaries # Obtain `solc` 0.8.20. RUN wget -c https://github.com/ethereum/solc-bin/raw/gh-pages/linux-amd64/solc-linux-amd64-v0.8.20%2Bcommit.a1b79de6 \ diff --git a/prover/crates/lib/prover_fri_types/src/lib.rs b/prover/crates/lib/prover_fri_types/src/lib.rs index 4a8a1b3e4064..37e004d54ecc 100644 --- a/prover/crates/lib/prover_fri_types/src/lib.rs +++ b/prover/crates/lib/prover_fri_types/src/lib.rs @@ -28,8 +28,8 @@ pub mod keys; pub mod queue; // THESE VALUES SHOULD BE UPDATED ON ANY PROTOCOL UPGRADE OF PROVERS -pub const PROVER_PROTOCOL_VERSION: ProtocolVersionId = ProtocolVersionId::Version24; -pub const PROVER_PROTOCOL_PATCH: VersionPatch = VersionPatch(2); +pub const PROVER_PROTOCOL_VERSION: ProtocolVersionId = ProtocolVersionId::Version25; +pub const PROVER_PROTOCOL_PATCH: VersionPatch = VersionPatch(0); pub const PROVER_PROTOCOL_SEMANTIC_VERSION: ProtocolSemanticVersion = ProtocolSemanticVersion { minor: PROVER_PROTOCOL_VERSION, patch: PROVER_PROTOCOL_PATCH, diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs b/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs index 37ee2e076ab9..82986d9b41ae 100644 --- a/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs @@ -57,6 +57,18 @@ pub 
async fn init_configs( } let mut general_config = chain_config.get_general_config()?; + + if general_config.proof_data_handler_config.is_some() && general_config.prover_gateway.is_some() + { + let proof_data_handler_config = general_config.proof_data_handler_config.clone().unwrap(); + let mut prover_gateway = general_config.prover_gateway.clone().unwrap(); + + prover_gateway.api_url = + format!("http://127.0.0.1:{}", proof_data_handler_config.http_port); + + general_config.prover_gateway = Some(prover_gateway); + } + let mut consensus_config = general_config .consensus_config .context(MSG_CONSENSUS_CONFIG_MISSING_ERR)?; diff --git a/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs index 94fea1389d28..280b5b2e91d8 100644 --- a/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs @@ -33,6 +33,9 @@ use crate::{ #[derive(Debug, Clone, Parser, Default)] pub struct ProverInitArgs { + #[clap(long)] + pub dev: bool, + // Proof store object #[clap(long)] pub proof_store_dir: Option, @@ -228,6 +231,10 @@ impl ProverInitArgs { ) -> anyhow::Result { logger::info(MSG_GETTING_PROOF_STORE_CONFIG); + if self.dev { + return Ok(self.handle_file_backed_config(Some(DEFAULT_PROOF_STORE_DIR.to_string()))); + } + if self.proof_store_dir.is_some() { return Ok(self.handle_file_backed_config(self.proof_store_dir.clone())); } @@ -277,6 +284,11 @@ impl ProverInitArgs { shell: &Shell, ) -> anyhow::Result> { logger::info(MSG_GETTING_PUBLIC_STORE_CONFIG); + + if self.dev { + return Ok(None); + } + let shall_save_to_public_bucket = self .shall_save_to_public_bucket .unwrap_or_else(|| PromptConfirm::new(MSG_SAVE_TO_PUBLIC_BUCKET_PROMPT).ask()); @@ -345,6 +357,12 @@ impl ProverInitArgs { &self, default_path: &str, ) -> Option { + if self.dev { + return Some(CompressorKeysArgs { + path: Some(default_path.to_string()), + }); + } + let download_key = 
self.clone().setup_compressor_keys.unwrap_or_else(|| { PromptConfirm::new(MSG_DOWNLOAD_SETUP_COMPRESSOR_KEY_PROMPT) .default(false) @@ -363,6 +381,9 @@ impl ProverInitArgs { } fn fill_setup_keys_values_with_prompt(&self) -> Option { + if self.dev { + return None; + } let args = self.setup_keys_args.clone(); if self.setup_keys.unwrap_or_else(|| { @@ -475,6 +496,10 @@ impl ProverInitArgs { } fn fill_bellman_cuda_values_with_prompt(&self) -> Option { + if self.dev { + return None; + } + let args = self.bellman_cuda_config.clone(); if self.bellman_cuda.unwrap_or_else(|| { PromptConfirm::new(MSG_INITIALIZE_BELLMAN_CUDA_PROMPT) @@ -488,6 +513,10 @@ impl ProverInitArgs { } fn get_cloud_type_with_prompt(&self) -> CloudConnectionMode { + if self.dev { + return CloudConnectionMode::Local; + } + let cloud_type = self.cloud_type.clone().unwrap_or_else(|| { PromptSelect::new( MSG_CLOUD_TYPE_PROMPT, @@ -503,25 +532,32 @@ impl ProverInitArgs { &self, config: &ChainConfig, ) -> Option { - let setup_database = self - .setup_database - .unwrap_or_else(|| PromptConfirm::new("Do you want to setup the database?").ask()); + let setup_database = self.dev + || self + .setup_database + .unwrap_or_else(|| PromptConfirm::new("Do you want to setup the database?").ask()); if setup_database { let DBNames { prover_name, .. 
} = generate_db_names(config); let chain_name = config.name.clone(); - let dont_drop = self.dont_drop.unwrap_or_else(|| { - !PromptConfirm::new("Do you want to drop the database?") - .default(true) - .ask() - }); + let dont_drop = if !self.dev { + self.dont_drop.unwrap_or_else(|| { + !PromptConfirm::new("Do you want to drop the database?") + .default(true) + .ask() + }) + } else { + false + }; - if self.use_default.unwrap_or_else(|| { - PromptConfirm::new(MSG_USE_DEFAULT_DATABASES_HELP) - .default(true) - .ask() - }) { + if self.dev + || self.use_default.unwrap_or_else(|| { + PromptConfirm::new(MSG_USE_DEFAULT_DATABASES_HELP) + .default(true) + .ask() + }) + { Some(ProverDatabaseConfig { database_config: DatabaseConfig::new(DATABASE_PROVER_URL.clone(), prover_name), dont_drop, diff --git a/zkstack_cli/crates/zkstack/src/commands/prover/args/init_bellman_cuda.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/init_bellman_cuda.rs index ba204b0be9e9..98a5c78be2a6 100644 --- a/zkstack_cli/crates/zkstack/src/commands/prover/args/init_bellman_cuda.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/args/init_bellman_cuda.rs @@ -10,7 +10,9 @@ use crate::messages::{ #[derive(Debug, Clone, Parser, Default, Serialize, Deserialize)] pub struct InitBellmanCudaArgs { - #[clap(long)] + #[clap(long, conflicts_with_all(["bellman_cuda_dir"]))] + pub clone: bool, + #[clap(long, conflicts_with_all(["clone"]))] pub bellman_cuda_dir: Option, } @@ -31,19 +33,26 @@ impl std::fmt::Display for BellmanCudaPathSelection { impl InitBellmanCudaArgs { pub fn fill_values_with_prompt(self) -> InitBellmanCudaArgs { - let bellman_cuda_dir = self.bellman_cuda_dir.unwrap_or_else(|| { - match PromptSelect::new( - MSG_BELLMAN_CUDA_ORIGIN_SELECT, - BellmanCudaPathSelection::iter(), - ) - .ask() - { - BellmanCudaPathSelection::Clone => "".to_string(), - BellmanCudaPathSelection::Path => Prompt::new(MSG_BELLMAN_CUDA_DIR_PROMPT).ask(), - } - }); + let bellman_cuda_dir = if self.clone { + 
"".to_string() + } else { + self.bellman_cuda_dir.unwrap_or_else(|| { + match PromptSelect::new( + MSG_BELLMAN_CUDA_ORIGIN_SELECT, + BellmanCudaPathSelection::iter(), + ) + .ask() + { + BellmanCudaPathSelection::Clone => "".to_string(), + BellmanCudaPathSelection::Path => { + Prompt::new(MSG_BELLMAN_CUDA_DIR_PROMPT).ask() + } + } + }) + }; InitBellmanCudaArgs { + clone: self.clone, bellman_cuda_dir: Some(bellman_cuda_dir), } } diff --git a/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs index 59a82152f1ff..d7600ba2d31f 100644 --- a/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs @@ -8,7 +8,8 @@ use strum::{EnumIter, IntoEnumIterator}; use crate::{ consts::{ - COMPRESSOR_BINARY_NAME, COMPRESSOR_DOCKER_IMAGE, PROVER_BINARY_NAME, PROVER_DOCKER_IMAGE, + CIRCUIT_PROVER_BINARY_NAME, CIRCUIT_PROVER_DOCKER_IMAGE, COMPRESSOR_BINARY_NAME, + COMPRESSOR_DOCKER_IMAGE, PROVER_BINARY_NAME, PROVER_DOCKER_IMAGE, PROVER_GATEWAY_BINARY_NAME, PROVER_GATEWAY_DOCKER_IMAGE, PROVER_JOB_MONITOR_BINARY_NAME, PROVER_JOB_MONITOR_DOCKER_IMAGE, WITNESS_GENERATOR_BINARY_NAME, WITNESS_GENERATOR_DOCKER_IMAGE, WITNESS_VECTOR_GENERATOR_BINARY_NAME, @@ -30,6 +31,8 @@ pub struct ProverRunArgs { pub witness_vector_generator_args: WitnessVectorGeneratorArgs, #[clap(flatten)] pub fri_prover_args: FriProverRunArgs, + #[clap(flatten)] + pub circuit_prover_args: CircuitProverArgs, #[clap(long)] pub docker: Option, } @@ -46,6 +49,8 @@ pub enum ProverComponent { WitnessVectorGenerator, #[strum(to_string = "Prover")] Prover, + #[strum(to_string = "CircuitProver")] + CircuitProver, #[strum(to_string = "Compressor")] Compressor, #[strum(to_string = "ProverJobMonitor")] @@ -59,6 +64,7 @@ impl ProverComponent { Self::WitnessGenerator => WITNESS_GENERATOR_DOCKER_IMAGE, Self::WitnessVectorGenerator => WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE, Self::Prover => 
PROVER_DOCKER_IMAGE, + Self::CircuitProver => CIRCUIT_PROVER_DOCKER_IMAGE, Self::Compressor => COMPRESSOR_DOCKER_IMAGE, Self::ProverJobMonitor => PROVER_JOB_MONITOR_DOCKER_IMAGE, } @@ -70,6 +76,7 @@ impl ProverComponent { Self::WitnessGenerator => WITNESS_GENERATOR_BINARY_NAME, Self::WitnessVectorGenerator => WITNESS_VECTOR_GENERATOR_BINARY_NAME, Self::Prover => PROVER_BINARY_NAME, + Self::CircuitProver => CIRCUIT_PROVER_BINARY_NAME, Self::Compressor => COMPRESSOR_BINARY_NAME, Self::ProverJobMonitor => PROVER_JOB_MONITOR_BINARY_NAME, } @@ -78,10 +85,10 @@ impl ProverComponent { pub fn get_application_args(&self, in_docker: bool) -> anyhow::Result> { let mut application_args = vec![]; - if self == &Self::Prover || self == &Self::Compressor { + if self == &Self::Prover || self == &Self::Compressor || self == &Self::CircuitProver { if in_docker { application_args.push("--gpus=all".to_string()); - } else { + } else if self != &Self::CircuitProver { application_args.push("--features=gpu".to_string()); } } @@ -160,6 +167,26 @@ impl ProverComponent { )); }; } + Self::CircuitProver => { + if args.circuit_prover_args.max_allocation.is_some() { + additional_args.push(format!( + "--max-allocation={}", + args.circuit_prover_args.max_allocation.unwrap() + )); + }; + if args + .circuit_prover_args + .witness_vector_generator_count + .is_some() + { + additional_args.push(format!( + "--witness-vector-generator-count={}", + args.circuit_prover_args + .witness_vector_generator_count + .unwrap() + )); + }; + } _ => {} }; @@ -211,6 +238,37 @@ impl WitnessVectorGeneratorArgs { } } +#[derive(Debug, Clone, Parser, Default)] +pub struct CircuitProverArgs { + #[clap(long)] + pub witness_vector_generator_count: Option, + #[clap(long)] + pub max_allocation: Option, +} + +impl CircuitProverArgs { + pub fn fill_values_with_prompt( + self, + component: ProverComponent, + ) -> anyhow::Result { + if component != ProverComponent::CircuitProver { + return Ok(Self::default()); + } + + let 
witness_vector_generator_count = + self.witness_vector_generator_count.unwrap_or_else(|| { + Prompt::new("Number of WVG jobs to run in parallel") + .default("1") + .ask() + }); + + Ok(CircuitProverArgs { + witness_vector_generator_count: Some(witness_vector_generator_count), + max_allocation: self.max_allocation, + }) + } +} + #[derive(Debug, Clone, Parser, Default)] pub struct FriProverRunArgs { /// Memory allocation limit in bytes (for prover component) @@ -232,6 +290,10 @@ impl ProverRunArgs { .witness_vector_generator_args .fill_values_with_prompt(component)?; + let circuit_prover_args = self + .circuit_prover_args + .fill_values_with_prompt(component)?; + let docker = self.docker.unwrap_or_else(|| { Prompt::new("Do you want to run Docker image for the component?") .default("false") @@ -243,6 +305,7 @@ impl ProverRunArgs { witness_generator_args, witness_vector_generator_args, fri_prover_args: self.fri_prover_args, + circuit_prover_args, docker: Some(docker), }) } diff --git a/zkstack_cli/crates/zkstack/src/commands/prover/run.rs b/zkstack_cli/crates/zkstack/src/commands/prover/run.rs index ed2f5b41a86a..863816b9ae69 100644 --- a/zkstack_cli/crates/zkstack/src/commands/prover/run.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/run.rs @@ -8,7 +8,8 @@ use xshell::{cmd, Shell}; use super::args::run::{ProverComponent, ProverRunArgs}; use crate::messages::{ MSG_BELLMAN_CUDA_DIR_ERR, MSG_CHAIN_NOT_FOUND_ERR, MSG_MISSING_COMPONENT_ERR, - MSG_RUNNING_COMPRESSOR, MSG_RUNNING_COMPRESSOR_ERR, MSG_RUNNING_PROVER, MSG_RUNNING_PROVER_ERR, + MSG_RUNNING_CIRCUIT_PROVER, MSG_RUNNING_CIRCUIT_PROVER_ERR, MSG_RUNNING_COMPRESSOR, + MSG_RUNNING_COMPRESSOR_ERR, MSG_RUNNING_PROVER, MSG_RUNNING_PROVER_ERR, MSG_RUNNING_PROVER_GATEWAY, MSG_RUNNING_PROVER_GATEWAY_ERR, MSG_RUNNING_PROVER_JOB_MONITOR, MSG_RUNNING_PROVER_JOB_MONITOR_ERR, MSG_RUNNING_WITNESS_GENERATOR, MSG_RUNNING_WITNESS_GENERATOR_ERR, MSG_RUNNING_WITNESS_VECTOR_GENERATOR, @@ -49,6 +50,12 @@ pub(crate) async fn 
run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() } (MSG_RUNNING_PROVER, MSG_RUNNING_PROVER_ERR) } + ProverComponent::CircuitProver => { + if !in_docker { + check_prerequisites(shell, &GPU_PREREQUISITES, false); + } + (MSG_RUNNING_CIRCUIT_PROVER, MSG_RUNNING_CIRCUIT_PROVER_ERR) + } ProverComponent::Compressor => { if !in_docker { check_prerequisites(shell, &GPU_PREREQUISITES, false); diff --git a/zkstack_cli/crates/zkstack/src/consts.rs b/zkstack_cli/crates/zkstack/src/consts.rs index df27d2f02d2c..ba00af77b5a6 100644 --- a/zkstack_cli/crates/zkstack/src/consts.rs +++ b/zkstack_cli/crates/zkstack/src/consts.rs @@ -22,6 +22,7 @@ pub const WITNESS_GENERATOR_DOCKER_IMAGE: &str = "matterlabs/witness-generator:l pub const WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE: &str = "matterlabs/witness-vector-generator:latest2.0"; pub const PROVER_DOCKER_IMAGE: &str = "matterlabs/prover-gpu-fri:latest2.0"; +pub const CIRCUIT_PROVER_DOCKER_IMAGE: &str = "matterlabs/circuit-prover-gpu:latest2.0"; pub const COMPRESSOR_DOCKER_IMAGE: &str = "matterlabs/proof-fri-gpu-compressor:latest2.0"; pub const PROVER_JOB_MONITOR_DOCKER_IMAGE: &str = "matterlabs/prover-job-monitor:latest2.0"; @@ -29,6 +30,7 @@ pub const PROVER_GATEWAY_BINARY_NAME: &str = "zksync_prover_fri_gateway"; pub const WITNESS_GENERATOR_BINARY_NAME: &str = "zksync_witness_generator"; pub const WITNESS_VECTOR_GENERATOR_BINARY_NAME: &str = "zksync_witness_vector_generator"; pub const PROVER_BINARY_NAME: &str = "zksync_prover_fri"; +pub const CIRCUIT_PROVER_BINARY_NAME: &str = "zksync_circuit_prover"; pub const COMPRESSOR_BINARY_NAME: &str = "zksync_proof_fri_compressor"; pub const PROVER_JOB_MONITOR_BINARY_NAME: &str = "zksync_prover_job_monitor"; diff --git a/zkstack_cli/crates/zkstack/src/messages.rs b/zkstack_cli/crates/zkstack/src/messages.rs index d1d86db83989..6d6a1ceb566f 100644 --- a/zkstack_cli/crates/zkstack/src/messages.rs +++ b/zkstack_cli/crates/zkstack/src/messages.rs @@ -351,6 +351,7 @@ pub(super) const 
MSG_RUNNING_PROVER_JOB_MONITOR: &str = "Running prover job moni pub(super) const MSG_RUNNING_WITNESS_GENERATOR: &str = "Running witness generator"; pub(super) const MSG_RUNNING_WITNESS_VECTOR_GENERATOR: &str = "Running witness vector generator"; pub(super) const MSG_RUNNING_PROVER: &str = "Running prover"; +pub(super) const MSG_RUNNING_CIRCUIT_PROVER: &str = "Running circuit prover"; pub(super) const MSG_RUNNING_COMPRESSOR: &str = "Running compressor"; pub(super) const MSG_RUN_COMPONENT_PROMPT: &str = "What component do you want to run?"; pub(super) const MSG_RUNNING_PROVER_GATEWAY_ERR: &str = "Failed to run prover gateway"; @@ -359,6 +360,7 @@ pub(super) const MSG_RUNNING_WITNESS_VECTOR_GENERATOR_ERR: &str = "Failed to run witness vector generator"; pub(super) const MSG_RUNNING_COMPRESSOR_ERR: &str = "Failed to run compressor"; pub(super) const MSG_RUNNING_PROVER_ERR: &str = "Failed to run prover"; +pub(super) const MSG_RUNNING_CIRCUIT_PROVER_ERR: &str = "Failed to run circuit prover"; pub(super) const MSG_PROOF_STORE_CONFIG_PROMPT: &str = "Select where you would like to store the proofs"; pub(super) const MSG_PROOF_STORE_DIR_PROMPT: &str =