From 9f2ab39968ff696c8b760315d64ffa3e9b599aed Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 26 Aug 2022 15:48:44 +0000
Subject: [PATCH 1/5] build(deps): bump tj-actions/changed-files from 29.0.0
 to 29.0.1 (#4959)

Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 29.0.0 to 29.0.1.
- [Release notes](https://github.com/tj-actions/changed-files/releases)
- [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md)
- [Commits](https://github.com/tj-actions/changed-files/compare/v29.0.0...v29.0.1)

---
updated-dependencies:
- dependency-name: tj-actions/changed-files
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 .github/workflows/lint.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 9f1be10a969..668c3229a22 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -32,7 +32,7 @@ jobs:
       - name: Rust files
         id: changed-files-rust
-        uses: tj-actions/changed-files@v29.0.0
+        uses: tj-actions/changed-files@v29.0.1
         with:
           files: |
             **/*.rs
@@ -44,7 +44,7 @@ jobs:
       - name: Workflow files
         id: changed-files-workflows
-        uses: tj-actions/changed-files@v29.0.0
+        uses: tj-actions/changed-files@v29.0.1
         with:
           files: |
             .github/workflows/*.yml

From 326ae04b0f2fb28e9484e58a2936ac02d4565fb0 Mon Sep 17 00:00:00 2001
From: Gustavo Valverde
Date: Fri, 26 Aug 2022 14:06:32 -0400
Subject: [PATCH 2/5] ci(test): run build and test jobs on cargo and clippy
 config changes (#4941)

Previous behavior:

If warnings or errors that could cause CI failures are added in
`.cargo/config.toml` or `clippy.toml`, we wouldn't catch them early,
because the pipelines are not run when these files are changed.

Expected behavior:

If warnings or errors are added in `.cargo/config.toml` or `clippy.toml`,
run all the build and test jobs which also track a `Cargo.toml`.

Solution:

Add `.cargo/config.toml` and `clippy.toml` as paths to all the required
jobs which need to be triggered when these files change.
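For reference, the trigger filters added to each workflow take roughly the
shape sketched below (a minimal fragment, not a complete workflow). The
leading `**/` is what lets a pattern match in any directory, while the bare
`.cargo/config.toml` only matches the file at the repository root:

    on:
      push:
        paths:
          # dependencies
          - '**/Cargo.toml'
          - '**/Cargo.lock'
          # configuration files
          - '.cargo/config.toml'
          - '**/clippy.toml'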
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
---
 .github/workflows/build-crates-individually.patch.yml    | 3 +++
 .github/workflows/build-crates-individually.yml          | 6 ++++++
 .github/workflows/continous-integration-docker.patch.yml | 3 +++
 .github/workflows/continous-integration-docker.yml       | 3 +++
 .github/workflows/continous-integration-os.patch.yml     | 2 ++
 .github/workflows/continous-integration-os.yml           | 3 +++
 .github/workflows/coverage.patch.yml                     | 3 +++
 .github/workflows/coverage.yml                           | 3 +++
 8 files changed, 26 insertions(+)

diff --git a/.github/workflows/build-crates-individually.patch.yml b/.github/workflows/build-crates-individually.patch.yml
index f78e69a2731..2fdc44905ac 100644
--- a/.github/workflows/build-crates-individually.patch.yml
+++ b/.github/workflows/build-crates-individually.patch.yml
@@ -10,6 +10,9 @@ on:
       # dependencies
       - '**/Cargo.toml'
       - '**/Cargo.lock'
+      # configuration files
+      - '.cargo/config.toml'
+      - '**/clippy.toml'
       # workflow definitions
       - '.github/workflows/build-crates-individually.yml'

diff --git a/.github/workflows/build-crates-individually.yml b/.github/workflows/build-crates-individually.yml
index 501bd904761..57b25707fe1 100644
--- a/.github/workflows/build-crates-individually.yml
+++ b/.github/workflows/build-crates-individually.yml
@@ -11,6 +11,9 @@ on:
       # dependencies
       - '**/Cargo.toml'
       - '**/Cargo.lock'
+      # configuration files
+      - '.cargo/config.toml'
+      - '**/clippy.toml'
       # workflow definitions
       - '.github/workflows/build-crates-individually.yml'
   pull_request:
@@ -20,6 +23,9 @@ on:
       # dependencies
       - '**/Cargo.toml'
       - '**/Cargo.lock'
+      # configuration files
+      - '.cargo/config.toml'
+      - '**/clippy.toml'
       # workflow definitions
       - '.github/workflows/build-crates-individually.yml'

diff --git a/.github/workflows/continous-integration-docker.patch.yml b/.github/workflows/continous-integration-docker.patch.yml
index b96f03e6b01..3f4bd9b3897 100644
--- a/.github/workflows/continous-integration-docker.patch.yml
+++ b/.github/workflows/continous-integration-docker.patch.yml
@@ -14,6 +14,9 @@ on:
       # dependencies
       - '**/Cargo.toml'
       - '**/Cargo.lock'
+      # configuration files
+      - '.cargo/config.toml'
+      - '**/clippy.toml'
       # workflow definitions
       - 'docker/**'
       - '.github/workflows/continous-integration-docker.yml'

diff --git a/.github/workflows/continous-integration-docker.yml b/.github/workflows/continous-integration-docker.yml
index e15a46896b1..6fc73bf7773 100644
--- a/.github/workflows/continous-integration-docker.yml
+++ b/.github/workflows/continous-integration-docker.yml
@@ -33,6 +33,9 @@ on:
       # dependencies
       - '**/Cargo.toml'
       - '**/Cargo.lock'
+      # configuration files
+      - '.cargo/config.toml'
+      - '**/clippy.toml'
       # workflow definitions
       - 'docker/**'
       - '.github/workflows/continous-integration-docker.yml'

diff --git a/.github/workflows/continous-integration-os.patch.yml b/.github/workflows/continous-integration-os.patch.yml
index ef965a6433a..8f9018b52d2 100644
--- a/.github/workflows/continous-integration-os.patch.yml
+++ b/.github/workflows/continous-integration-os.patch.yml
@@ -9,6 +9,8 @@ on:
       - '**/Cargo.toml'
       - '**/Cargo.lock'
       - '**/deny.toml'
+      - '.cargo/config.toml'
+      - '**/clippy.toml'
       - '.github/workflows/continous-integration-os.yml'

 jobs:

diff --git a/.github/workflows/continous-integration-os.yml b/.github/workflows/continous-integration-os.yml
index 3389fd22906..0bf90b8daab 100644
--- a/.github/workflows/continous-integration-os.yml
+++ b/.github/workflows/continous-integration-os.yml
@@ -17,6 +17,9 @@ on:
       # dependencies
       - '**/Cargo.toml'
       - '**/Cargo.lock'
+      # configuration files
+      - '.cargo/config.toml'
+      - '**/clippy.toml'
       # workflow definitions
       - '.github/workflows/ci.yml'
   pull_request:

diff --git a/.github/workflows/coverage.patch.yml b/.github/workflows/coverage.patch.yml
index fdf9a5f6a7a..241f92e73e5 100644
--- a/.github/workflows/coverage.patch.yml
+++ b/.github/workflows/coverage.patch.yml
@@ -8,6 +8,9 @@ on:
       - '**/*.snap'
       - '**/Cargo.toml'
       - '**/Cargo.lock'
+      # configuration files
+      - '.cargo/config.toml'
+      - '**/clippy.toml'
       - 'codecov.yml'
       - '.github/workflows/coverage.yml'

diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml
index ae28188d5a4..230afd6847e 100644
--- a/.github/workflows/coverage.yml
+++ b/.github/workflows/coverage.yml
@@ -15,6 +15,9 @@ on:
       # dependencies
       - '**/Cargo.toml'
       - '**/Cargo.lock'
+      # configuration files
+      - '.cargo/config.toml'
+      - '**/clippy.toml'
       # workflow definitions
       - 'codecov.yml'
       - '.github/workflows/coverage.yml'

From 1d861b0d20b79cb9d2e9f66e54cd0f0fd5860a50 Mon Sep 17 00:00:00 2001
From: teor
Date: Sun, 28 Aug 2022 05:42:20 +1000
Subject: [PATCH 3/5] fix(ci): Increase full sync timeouts for longer syncs
 (#4961)

* Increase full sync timeout to 24 hours

Expected sync time is ~21 hours as of August 2022.

* Split final checkpoint job into two smaller jobs to avoid timeouts

Also make regexes easier to read.

* Fix a job name typo
---
 .github/workflows/deploy-gcp-tests.yml | 86 +++++++++++++++++++++++---
 zebrad/tests/common/sync.rs            |  4 +-
 2 files changed, 81 insertions(+), 9 deletions(-)

diff --git a/.github/workflows/deploy-gcp-tests.yml b/.github/workflows/deploy-gcp-tests.yml
index f21739d239e..e8da122c6a2 100644
--- a/.github/workflows/deploy-gcp-tests.yml
+++ b/.github/workflows/deploy-gcp-tests.yml
@@ -547,7 +547,12 @@ jobs:
           ${{ inputs.test_id }} | \
           tee --output-error=exit /dev/stderr | \
           grep --max-count=1 --extended-regexp --color=always \
-          '(estimated progress.*network_upgrade.*=.*Sapling)|(estimated progress.*network_upgrade.*=.*Blossom)|(estimated progress.*network_upgrade.*=.*Heartwood)|(estimated progress.*network_upgrade.*=.*Canopy)|(estimated progress.*network_upgrade.*=.*Nu5)|(test result:.*finished in)' \
+          -e 'estimated progress.*network_upgrade.*=.*Sapling' \
+          -e 'estimated progress.*network_upgrade.*=.*Blossom' \
+          -e 'estimated progress.*network_upgrade.*=.*Heartwood' \
+          -e 'estimated progress.*network_upgrade.*=.*Canopy' \
+          -e 'estimated progress.*network_upgrade.*=.*Nu5' \
+          -e 'test result:.*finished in' \
           "

   # follow the logs of the test we just launched, up to Canopy activation (or the test finishing)
@@ -602,7 +607,9 @@ jobs:
           ${{ inputs.test_id }} | \
           tee --output-error=exit /dev/stderr | \
           grep --max-count=1 --extended-regexp --color=always \
-          '(estimated progress.*network_upgrade.*=.*Canopy)|(estimated progress.*network_upgrade.*=.*Nu5)|(test result:.*finished in)' \
+          -e 'estimated progress.*network_upgrade.*=.*Canopy' \
+          -e 'estimated progress.*network_upgrade.*=.*Nu5' \
+          -e 'test result:.*finished in' \
           "

   # follow the logs of the test we just launched, up to NU5 activation (or the test finishing)
@@ -657,14 +664,14 @@ jobs:
           ${{ inputs.test_id }} | \
           tee --output-error=exit /dev/stderr | \
           grep --max-count=1 --extended-regexp --color=always \
-          '(estimated progress.*network_upgrade.*=.*Nu5)|(test result:.*finished in)' \
+          -e 'estimated progress.*network_upgrade.*=.*Nu5' \
+          -e 'test result:.*finished in' \
           "

   # follow the logs of the test we just launched, up to block 1,740,000 or later
   # (or the test finishing)
   #
   # We chose this height because it was about 5 hours into the NU5 sync, at the end of July 2022.
-  # This is a temporary workaround until we improve sync speeds.
   logs-1740k:
     name: Log ${{ inputs.test_id }} test (1740k)
     needs: [ logs-canopy ]
@@ -716,13 +723,77 @@ jobs:
           ${{ inputs.test_id }} | \
           tee --output-error=exit /dev/stderr | \
           grep --max-count=1 --extended-regexp --color=always \
-          '(estimated progress.*current_height.*=.*17[4-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks)|(estimated progress.*current_height.*=.*1[8-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks)|(estimated progress.*current_height.*=.*2[0-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks)|(test result:.*finished in)' \
+          -e 'estimated progress.*current_height.*=.*17[4-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks' \
+          -e 'estimated progress.*current_height.*=.*1[8-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks' \
+          -e 'estimated progress.*current_height.*=.*2[0-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks' \
+          -e 'test result:.*finished in' \
           "
+
+  # follow the logs of the test we just launched, up to block 1,760,000 or later
+  # (or the test finishing)
+  #
+  # We chose this height because it was about 9 hours into the NU5 sync, at the end of August 2022.
+  logs-1760k:
+    name: Log ${{ inputs.test_id }} test (1760k)
+    needs: [ logs-1740k ]
+    # If the previous job fails, we still want to show the logs.
+    if: ${{ !cancelled() }}
+    runs-on: ubuntu-latest
+    permissions:
+      contents: 'read'
+      id-token: 'write'
+    steps:
+      - uses: actions/checkout@v3.0.2
+        with:
+          persist-credentials: false
+          fetch-depth: '2'
+
+      - name: Inject slug/short variables
+        uses: rlespinasse/github-slug-action@v4
+        with:
+          short-length: 7
+
+      - name: Downcase network name for disks
+        run: |
+          NETWORK_CAPS=${{ inputs.network }}
+          echo "NETWORK=${NETWORK_CAPS,,}" >> $GITHUB_ENV
+
+      # Setup gcloud CLI
+      - name: Authenticate to Google Cloud
+        id: auth
+        uses: google-github-actions/auth@v0.8.0
+        with:
+          retries: '3'
+          workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc'
+          service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com'
+          token_format: 'access_token'
+
+      # Show recent logs, following until block 1,760,000 (or the test finishes)
+      - name: Show logs for ${{ inputs.test_id }} test (1760k)
+        run: |
+          gcloud compute ssh \
+          ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
+          --zone ${{ env.ZONE }} \
+          --quiet \
+          --ssh-flag="-o ServerAliveInterval=5" \
+          --command \
+          "\
+          docker logs \
+          --tail all \
+          --follow \
+          ${{ inputs.test_id }} | \
+          tee --output-error=exit /dev/stderr | \
+          grep --max-count=1 --extended-regexp --color=always \
+          -e 'estimated progress.*current_height.*=.*17[6-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks' \
+          -e 'estimated progress.*current_height.*=.*1[8-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks' \
+          -e 'estimated progress.*current_height.*=.*2[0-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks' \
+          -e 'test result:.*finished in' \
+          "

   # follow the logs of the test we just launched, up to the last checkpoint (or the test finishing)
   logs-checkpoint:
     name: Log ${{ inputs.test_id }} test (checkpoint)
-    needs: [ logs-1740k ]
+    needs: [ logs-1760k ]
     # If the previous job fails, we still want to show the logs.
     if: ${{ !cancelled() }}
     runs-on: ubuntu-latest
@@ -773,7 +844,8 @@ jobs:
           ${{ inputs.test_id }} | \
           tee --output-error=exit /dev/stderr | \
           grep --max-count=1 --extended-regexp --color=always \
-          '(verified final checkpoint)|(test result:.*finished in)' \
+          -e 'verified final checkpoint' \
+          -e 'test result:.*finished in' \
           "

   # follow the logs of the test we just launched, until it finishes

diff --git a/zebrad/tests/common/sync.rs b/zebrad/tests/common/sync.rs
index a9a8d283bd8..494f79ddce8 100644
--- a/zebrad/tests/common/sync.rs
+++ b/zebrad/tests/common/sync.rs
@@ -363,8 +363,8 @@ pub fn create_cached_database_height(
 ) -> Result<()> {
     eprintln!("creating cached database");

-    // 20 hours
-    let timeout = Duration::from_secs(60 * 60 * 20);
+    // 24 hours
+    let timeout = Duration::from_secs(24 * 60 * 60);

     // Use a persistent state, so we can handle large syncs
     let mut config = cached_mandatory_checkpoint_test_config()?;

From 156fc2b93dae893a5286863a7fa0ec806e25cf89 Mon Sep 17 00:00:00 2001
From: teor
Date: Sun, 28 Aug 2022 09:12:45 +1000
Subject: [PATCH 4/5] Update disk usage based on recent data (#4963)

---
 README.md | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/README.md b/README.md
index 7c50fd1a7bc..fe669b66f89 100644
--- a/README.md
+++ b/README.md
@@ -103,10 +103,10 @@ cargo install --features= ...
 ### System Requirements

 The recommended requirements for compiling and running `zebrad` are:
-- 4+ CPU cores
-- 16+ GB RAM
-- 300 GB+ available disk space for building binaries and storing cached chain state
-- 100+ Mbps network connection, with 100+ GB of uploads and downloads per month
+- 4 CPU cores
+- 16 GB RAM
+- 300 GB available disk space for building binaries and storing cached chain state
+- 100 Mbps network connection, with 300 GB of uploads and downloads per month

 We continuously test that our builds and tests pass on:
@@ -157,10 +157,11 @@
 If this is a problem for you, please
 [open a ticket.](https://github.com/ZcashFoundation/zebra/issues/new/choose)

 `zebrad`'s typical mainnet network usage is:
-- Initial sync: 50 GB download, we expect the initial download to grow to hundreds of gigabytes over time
-- Ongoing updates: 10 MB - 1 GB upload and download per day, depending on user-created transaction size, and peer requests
+- Initial sync: 100 GB download, we expect the initial download to grow to hundreds of gigabytes over time
+- Ongoing updates: 10 MB - 10 GB upload and download per day, depending on user-created transaction size and peer requests

-Zebra also performs an initial sync every time its internal database version changes.
+Zebra performs an initial sync every time its internal database version changes,
+so some version upgrades might require a full download of the whole chain.

 For more detailed information, refer to the [documentation](https://zebra.zfnd.org/user/run.html).
@@ -177,7 +178,7 @@ See our [roadmap](#future-work) for details.

 Zebra uses around 100 GB of space for cached mainnet data,
 and 10 GB of space for cached testnet data.
 We expect disk usage to grow over time, so we recommend reserving at least 300 GB for mainnet nodes.

-RocksDB cleans up outdated data periodically, and when the database is closed and re-opened.
+Zebra's database cleans up outdated data periodically, and when Zebra is shut down and restarted.
 #### Disk Troubleshooting

From 6fd3cdb3dab7eb3bab715f190039268938e07c04 Mon Sep 17 00:00:00 2001
From: teor
Date: Sun, 28 Aug 2022 19:47:42 +1000
Subject: [PATCH 5/5] fix(ci): Expand cached state disks before running tests
 (#4962)

* Expand cached state disks before running tests

* Install partition management tool

* There isn't actually a partition on the cached state image

* Make e2fsck non-interactive

* Limit the length of image names to 63 characters

* Ignore possibly long branch names when matching images, just match the commit
---
 .github/workflows/deploy-gcp-tests.yml | 61 +++++++++++++++++++-------
 1 file changed, 45 insertions(+), 16 deletions(-)

diff --git a/.github/workflows/deploy-gcp-tests.yml b/.github/workflows/deploy-gcp-tests.yml
index e8da122c6a2..18d2261ad34 100644
--- a/.github/workflows/deploy-gcp-tests.yml
+++ b/.github/workflows/deploy-gcp-tests.yml
@@ -148,6 +148,9 @@ jobs:
           --zone ${{ env.ZONE }}
           sleep 60

+      # Create a docker volume with the new disk we just created.
+      #
+      # SSH into the just created VM, and create a docker volume with the newly created disk.
       - name: Create ${{ inputs.test_id }} Docker volume
         run: |
           gcloud compute ssh \
           ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
           --zone ${{ env.ZONE }} \
           --quiet \
           --ssh-flag="-o ServerAliveInterval=5" \
           --command \
           "\
-          sudo mkfs.ext4 /dev/sdb \
+          sudo mkfs.ext4 -v /dev/sdb \
           && \
           docker volume create --driver local --opt type=ext4 --opt device=/dev/sdb \
           ${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \
           "
@@ -285,11 +288,16 @@ jobs:
             DISK_PREFIX=${{ inputs.zebra_state_dir || inputs.disk_prefix }}
           fi

-          # Try to find an image generated from this branch and commit
-          # Fields are listed in the "Create image from state disk" step
-          COMMIT_DISK_PREFIX="${DISK_PREFIX}-${GITHUB_REF_SLUG_URL}-${GITHUB_SHA_SHORT}-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}"
+          # Try to find an image generated from a previous step or run of this commit.
+          # Fields are listed in the "Create image from state disk" step.
+          #
+          # We can't match the full branch name here,
+          # because it might have been shortened for the image.
+          #
+          # The probability of two matching short commit hashes within the same month is very low.
+          COMMIT_DISK_PREFIX="${DISK_PREFIX}-.+-${{ env.GITHUB_SHA_SHORT }}-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}"
           COMMIT_CACHED_DISK_NAME=$(gcloud compute images list --filter="name~${COMMIT_DISK_PREFIX}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1)
           echo "${GITHUB_REF_SLUG_URL}-${{ env.GITHUB_SHA_SHORT }} Disk: $COMMIT_CACHED_DISK_NAME"
           if [[ -n "$COMMIT_CACHED_DISK_NAME" ]]; then
             echo "Description: $(gcloud compute images describe $COMMIT_CACHED_DISK_NAME --format='value(DESCRIPTION)')"
           fi
@@ -351,7 +359,10 @@ jobs:
       # Create a docker volume with the selected cached state.
       #
-      # SSH into the just created VM, and create a docker volume with the recently attached disk.
+      # SSH into the just created VM, expand the partition and filesystem to fill the entire disk,
+      # then create a docker volume with the recently attached disk.
+      # (The cached state and disk are usually the same size,
+      # but the cached state can be smaller if we just increased the disk size.)
       - name: Create ${{ inputs.test_id }} Docker volume
         run: |
           gcloud compute ssh \
           ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
           --zone ${{ env.ZONE }} \
           --quiet \
           --ssh-flag="-o ServerAliveInterval=5" \
           --command \
           "\
+          sudo e2fsck -v -f -p /dev/sdb \
+          && \
+          sudo resize2fs -p /dev/sdb \
+          && \
           docker volume create --driver local --opt type=ext4 --opt device=/dev/sdb \
           ${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \
           "
@@ -1000,14 +1015,22 @@ jobs:
         with:
           short-length: 7

+      # Performs formatting on disk name components.
+      #
       # Disk images in GCP are required to be in lowercase, but the blockchain network
-      # uses sentence case, so we need to downcase ${{ inputs.network }}
+      # uses sentence case, so we need to downcase ${{ inputs.network }}.
       #
-      # Passes ${{ inputs.network }} to subsequent steps using $NETWORK env variable
-      - name: Downcase network name for disks
+      # Disk image names in GCP are limited to 63 characters, so we need to limit
+      # branch names to 13 characters.
+      #
+      # Passes ${{ inputs.network }} to subsequent steps using $NETWORK env variable.
+      # Passes ${{ env.GITHUB_REF_SLUG_URL }} to subsequent steps using $SHORT_GITHUB_REF env variable.
+      - name: Format network name and branch name for disks
         run: |
           NETWORK_CAPS=${{ inputs.network }}
           echo "NETWORK=${NETWORK_CAPS,,}" >> $GITHUB_ENV
+          LONG_GITHUB_REF=${{ env.GITHUB_REF_SLUG_URL }}
+          echo "SHORT_GITHUB_REF=${LONG_GITHUB_REF:0:13}" >> $GITHUB_ENV

       # Setup gcloud CLI
       - name: Authenticate to Google Cloud
@@ -1052,7 +1075,7 @@ jobs:
           SYNC_HEIGHT=$(echo $DOCKER_LOGS | grep -oE '${{ inputs.height_grep_text }}\([0-9]+\)' | grep -oE '[0-9]+' | tail -1 || [[ $? == 1 ]])
           echo "SYNC_HEIGHT=$SYNC_HEIGHT" >> $GITHUB_ENV

       # Sets the $UPDATE_SUFFIX env var to "-u" if using cached state,
       # and the empty string otherwise.
       #
       # Also sets a unique date and time suffix $TIME_SUFFIX.
         run: |
           UPDATE_SUFFIX=""
           if [[ "${{ inputs.needs_zebra_state }}" == "true" ]]; then
-            UPDATE_SUFFIX="-update"
+            UPDATE_SUFFIX="-u"
           fi

-          TIME_SUFFIX=$(date '+%Y-%m-%d-%H-%M-%S' --utc)
+          # We're going to delete old images after a month, so we don't need the year here
+          TIME_SUFFIX=$(date '+%m%d%H%M%S' --utc)
           echo "UPDATE_SUFFIX=$UPDATE_SUFFIX" >> $GITHUB_ENV
           echo "TIME_SUFFIX=$TIME_SUFFIX" >> $GITHUB_ENV

       # Create an image from disk that will be used for following/other tests.
       #
       # This image can contain:
       # - Zebra cached state
       # - Zebra + lightwalletd cached state
       # Which cached state is being saved to the disk is defined by ${{ inputs.disk_prefix }}.
       #
       # The image name must be unique, and be 63 characters or less.
       # The timestamp makes images from the same commit unique,
       # as long as they don't finish in the same second.
       #
       # Force the image creation (--force) as the disk is still attached
       # even though it is not being used by the container.
       - name: Create image from state disk
         run: |
           gcloud compute images create \
-            "${{ inputs.disk_prefix }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-v${{ env.STATE_VERSION }}-${{ env.NETWORK }}-${{ inputs.disk_suffix }}$UPDATE_SUFFIX-$TIME_SUFFIX" \
+            "${{ inputs.disk_prefix }}-${SHORT_GITHUB_REF}-${{ env.GITHUB_SHA_SHORT }}-v${{ env.STATE_VERSION }}-${{ env.NETWORK }}-${{ inputs.disk_suffix }}${UPDATE_SUFFIX}-${TIME_SUFFIX}" \
             --force \
             --source-disk=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \
             --source-disk-zone=${{ env.ZONE }} \