diff --git a/.github/workflows/build-crates-individually.patch.yml b/.github/workflows/build-crates-individually.patch.yml index f78e69a2731..2fdc44905ac 100644 --- a/.github/workflows/build-crates-individually.patch.yml +++ b/.github/workflows/build-crates-individually.patch.yml @@ -10,6 +10,9 @@ on: # dependencies - '**/Cargo.toml' - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' # workflow definitions - '.github/workflows/build-crates-individually.yml' diff --git a/.github/workflows/build-crates-individually.yml b/.github/workflows/build-crates-individually.yml index 501bd904761..57b25707fe1 100644 --- a/.github/workflows/build-crates-individually.yml +++ b/.github/workflows/build-crates-individually.yml @@ -11,6 +11,9 @@ on: # dependencies - '**/Cargo.toml' - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' # workflow definitions - '.github/workflows/build-crates-individually.yml' pull_request: @@ -20,6 +23,9 @@ on: # dependencies - '**/Cargo.toml' - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' # workflow definitions - '.github/workflows/build-crates-individually.yml' diff --git a/.github/workflows/build-docker-image.yml b/.github/workflows/build-docker-image.yml index 9b337a1a29d..665a3fee61b 100644 --- a/.github/workflows/build-docker-image.yml +++ b/.github/workflows/build-docker-image.yml @@ -37,12 +37,19 @@ on: required: false type: string default: info + outputs: + image_digest: + description: 'The image digest to be used on a caller workflow' + value: ${{ jobs.build.outputs.image_digest }} jobs: build: name: Build images timeout-minutes: 210 runs-on: ubuntu-latest + outputs: + image_digest: ${{ steps.docker_build.outputs.digest }} + image_name: ${{ fromJSON(steps.docker_build.outputs.metadata)['image.name'] }} permissions: contents: 'read' id-token: 'write' @@ -67,12 +74,12 @@ jobs: # generate Docker tags based on the following events/attributes tags: | type=schedule + type=sha type=ref,event=branch type=ref,event=pr type=semver,pattern={{version}} type=semver,pattern={{major}}.{{minor}} type=semver,pattern={{major}} - type=sha # Setup Docker Buildx to allow use of docker cache layers from GH - name: Set up Docker Buildx diff --git a/.github/workflows/continous-delivery.yml b/.github/workflows/continous-delivery.yml index 51401205f83..b896d1ce2aa 100644 --- a/.github/workflows/continous-delivery.yml +++ b/.github/workflows/continous-delivery.yml @@ -14,6 +14,9 @@ on: push: branches: - main + release: + types: + - published env: NETWORK: Mainnet @@ -23,6 +26,36 @@ env: MACHINE_TYPE: c2-standard-4 jobs: + # If a release was made we want to extract the first part of the semver from the + # tag_name + # + # Generate the following output to pass to subsequent jobs + # - If our semver is `v1.3.0` the resulting output from this job would be `v1` + # + # Note: We just use the first part of the version to replace old instances, and change + # it when a major version is released, to keep a segregation between new and old + # versions. 
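The `versioning` job below implements this with `actions/github-script`, taking the first two characters of the release tag. A minimal Rust sketch of the same truncation logic, assuming a `v`-prefixed tag with a single-digit major version (a hypothetical helper, not code from this diff):

```rust
/// Returns the major-version prefix of a release tag, e.g. "v1" for "v1.3.0".
///
/// Mirrors the workflow's `tag_name.substring(0,2)` call: it assumes the tag starts
/// with `v` and the major version is a single digit, so "v10.0.0" would also yield "v1".
fn major_version_prefix(tag_name: &str) -> &str {
    &tag_name[..tag_name.len().min(2)]
}

fn main() {
    assert_eq!(major_version_prefix("v1.3.0"), "v1");
}
```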
+ versioning: + name: Versioning + runs-on: ubuntu-latest + outputs: + major_version: ${{ steps.set.outputs.major_version }} + steps: + - name: Getting Zebrad Version + id: get + uses: actions/github-script@v6.1.0 + with: + result-encoding: string + script: | + return context.payload.release.tag_name.substring(0,2) + - name: Setting API Version + id: set + run: echo "::set-output name=major_version::${{ steps.get.outputs.result }}" + + # Each time this workflow is executed, a build will be triggered to create a new image + # with the corresponding tags using information from Git + # + # The image will be commonly named `zebrad:` build: uses: ./.github/workflows/build-docker-image.yml with: @@ -35,15 +68,26 @@ jobs: zebra_skip_ipv6_tests: '1' rust_log: info + # This job handles the deployment of a Managed Instance Group (MiG) with 2 nodes in + # the us-central1 region. Two different groups of MiGs are deployed: one for pushes to + # the main branch and another for version releases of Zebra + # + # Once this workflow is triggered, the previous MiG is replaced: on pushes to main it's + # always replaced, and with releases it's only replaced if the same major version is + # being deployed, otherwise a new major version is deployed + # + # Runs: + # - on every push/merge to the `main` branch + # - on every release, when it's published deploy-nodes: name: Deploy Mainnet nodes - needs: build + needs: [ build, versioning ] runs-on: ubuntu-latest timeout-minutes: 30 permissions: contents: 'read' id-token: 'write' - if: ${{ github.event_name == 'push' && github.ref_name == 'main' }} + if: ${{ (github.event_name == 'push' && github.ref_name == 'main') || github.event_name == 'release' }} steps: - name: Inject slug/short variables @@ -63,9 +107,9 @@ jobs: - name: Create instance template run: | - gcloud compute instance-templates create-with-container zebrad-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ + gcloud compute instance-templates create-with-container zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ --boot-disk-type=pd-ssd \ - --container-image ${{ env.GAR_BASE }}/${{ env.GITHUB_REF_SLUG_URL }}:${{ env.GITHUB_SHA_SHORT }} \ + --container-image ${{ env.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} \ --create-disk name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }},auto-delete=yes,size=100GB,type=pd-ssd \ --container-mount-disk mount-path="/zebrad-cache",name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }} \ --machine-type ${{ env.MACHINE_TYPE }} \ @@ -77,15 +121,15 @@ jobs: id: does-group-exist continue-on-error: true run: | - gcloud compute instance-groups list | grep "zebrad-${{ env.GITHUB_REF_SLUG_URL }}" | grep "${{ env.REGION }}" + gcloud compute instance-groups list | grep "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}" | grep "${{ env.REGION }}" # Deploy new managed instance group using the new instance template - name: Create managed instance group if: steps.does-group-exist.outcome == 'failure' run: | gcloud compute instance-groups managed create \ - "zebrad-${{ env.GITHUB_REF_SLUG_URL }}" \ - --template "zebrad-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \ + "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}" \ + --template "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \ --health-check zebrad-tracing-filter \ --initial-delay 30 \ --region "${{ env.REGION }}" \ @@ -96,10
+140,17 @@ jobs: if: steps.does-group-exist.outcome == 'success' run: | gcloud compute instance-groups managed rolling-action start-update \ - "zebrad-${{ env.GITHUB_REF_SLUG_URL }}" \ - --version template="zebrad-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \ + "zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}" \ + --version template="zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \ --region "${{ env.REGION }}" + # This job handles the deployment of a single node (1) in the us-central1-a zone + # when an instance is required to test a specific commit + # + # Runs: + # - on request, using workflow_dispatch with regenerate-disks + # + # Note: these instances are not automatically replaced or deleted deploy-instance: name: Deploy single instance needs: build @@ -134,7 +185,7 @@ jobs: --boot-disk-type=pd-ssd \ --container-stdin \ --container-tty \ - --container-image ${{ env.GAR_BASE }}/${{ env.GITHUB_REF_SLUG_URL }}:${{ env.GITHUB_SHA_SHORT }} \ + --container-image ${{ env.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} \ --create-disk name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }},auto-delete=yes,size=100GB,type=pd-ssd \ --container-mount-disk mount-path='/zebrad-cache',name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }} \ --machine-type ${{ env.MACHINE_TYPE }} \ diff --git a/.github/workflows/continous-integration-docker.patch.yml b/.github/workflows/continous-integration-docker.patch.yml index b96f03e6b01..3f4bd9b3897 100644 --- a/.github/workflows/continous-integration-docker.patch.yml +++ b/.github/workflows/continous-integration-docker.patch.yml @@ -14,6 +14,9 @@ on: # dependencies - '**/Cargo.toml' - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' # workflow definitions - 'docker/**' - '.github/workflows/continous-integration-docker.yml' diff --git a/.github/workflows/continous-integration-docker.yml b/.github/workflows/continous-integration-docker.yml index e15a46896b1..6fc73bf7773 100644 --- a/.github/workflows/continous-integration-docker.yml +++ b/.github/workflows/continous-integration-docker.yml @@ -33,6 +33,9 @@ on: # dependencies - '**/Cargo.toml' - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' # workflow definitions - 'docker/**' - '.github/workflows/continous-integration-docker.yml' diff --git a/.github/workflows/continous-integration-os.patch.yml b/.github/workflows/continous-integration-os.patch.yml index ef965a6433a..8f9018b52d2 100644 --- a/.github/workflows/continous-integration-os.patch.yml +++ b/.github/workflows/continous-integration-os.patch.yml @@ -9,6 +9,8 @@ on: - '**/Cargo.toml' - '**/Cargo.lock' - '**/deny.toml' + - '.cargo/config.toml' + - '**/clippy.toml' - '.github/workflows/continous-integration-os.yml' jobs: diff --git a/.github/workflows/continous-integration-os.yml b/.github/workflows/continous-integration-os.yml index 3389fd22906..0bf90b8daab 100644 --- a/.github/workflows/continous-integration-os.yml +++ b/.github/workflows/continous-integration-os.yml @@ -17,6 +17,9 @@ on: # dependencies - '**/Cargo.toml' - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' # workflow definitions - '.github/workflows/ci.yml' pull_request: diff --git a/.github/workflows/coverage.patch.yml b/.github/workflows/coverage.patch.yml index fdf9a5f6a7a..241f92e73e5 100644 --- a/.github/workflows/coverage.patch.yml +++ b/.github/workflows/coverage.patch.yml
@@ -8,6 +8,9 @@ on: - '**/*.snap' - '**/Cargo.toml' - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' - 'codecov.yml' - '.github/workflows/coverage.yml' diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index ae28188d5a4..230afd6847e 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -15,6 +15,9 @@ on: # dependencies - '**/Cargo.toml' - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' # workflow definitions - 'codecov.yml' - '.github/workflows/coverage.yml' diff --git a/.github/workflows/deploy-gcp-tests.yml b/.github/workflows/deploy-gcp-tests.yml index f21739d239e..2a062cbf709 100644 --- a/.github/workflows/deploy-gcp-tests.yml +++ b/.github/workflows/deploy-gcp-tests.yml @@ -148,6 +148,9 @@ jobs: --zone ${{ env.ZONE }} sleep 60 + # Create a docker volume with the new disk we just created. + # + # SSH into the just created VM, and create a docker volume with the newly created disk. - name: Create ${{ inputs.test_id }} Docker volume run: | gcloud compute ssh \ @@ -157,7 +160,7 @@ jobs: --ssh-flag="-o ServerAliveInterval=5" \ --command \ "\ - sudo mkfs.ext4 /dev/sdb \ + sudo mkfs.ext4 -v /dev/sdb \ && \ docker volume create --driver local --opt type=ext4 --opt device=/dev/sdb \ ${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ @@ -285,11 +288,16 @@ jobs: DISK_PREFIX=${{ inputs.zebra_state_dir || inputs.disk_prefix }} fi - # Try to find an image generated from this branch and commit - # Fields are listed in the "Create image from state disk" step - COMMIT_DISK_PREFIX="${DISK_PREFIX}-${GITHUB_REF_SLUG_URL}-${GITHUB_SHA_SHORT}-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}" + # Try to find an image generated from a previous step or run of this commit. + # Fields are listed in the "Create image from state disk" step. + # + # We can't match the full branch name here, + # because it might have been shortened for the image. + # + # The probability of two matching short commit hashes within the same month is very low. + COMMIT_DISK_PREFIX="${DISK_PREFIX}-.+-${{ env.GITHUB_SHA_SHORT }}-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}" COMMIT_CACHED_DISK_NAME=$(gcloud compute images list --filter="name~${COMMIT_DISK_PREFIX}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) - echo "${GITHUB_REF_SLUG_URL}-${GITHUB_SHA_SHORT} Disk: $COMMIT_CACHED_DISK_NAME" + echo "${GITHUB_REF_SLUG_URL}-${{ env.GITHUB_SHA_SHORT }} Disk: $COMMIT_CACHED_DISK_NAME" if [[ -n "$COMMIT_CACHED_DISK_NAME" ]]; then echo "Description: $(gcloud compute images describe $COMMIT_CACHED_DISK_NAME --format='value(DESCRIPTION)')" fi @@ -351,7 +359,10 @@ jobs: # Create a docker volume with the selected cached state. # - # SSH into the just created VM, and create a docker volume with the recently attached disk. + # SSH into the just created VM, expand the partition and filesystem to fill the entire disk, + # then create a docker volume with the recently attached disk. + # (The cached state and disk are usually the same size, + # but the cached state can be smaller if we just increased the disk size.) 
- name: Create ${{ inputs.test_id }} Docker volume run: | gcloud compute ssh \ @@ -361,6 +372,10 @@ jobs: --ssh-flag="-o ServerAliveInterval=5" \ --command \ "\ + sudo e2fsck -v -f -p /dev/sdb \ + && \ + sudo resize2fs -p /dev/sdb \ + && \ docker volume create --driver local --opt type=ext4 --opt device=/dev/sdb \ ${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ " @@ -531,6 +546,9 @@ jobs: # following until Sapling activation (or the test finishes). # # The log pipeline ignores the exit status of `docker logs`. + # It also ignores the expected 'broken pipe' error from `tee`, + # which happens when `grep` finds a matching output and moves on to the next job. + # # Errors in the tests are caught by the final test status job. - name: Show logs for ${{ inputs.test_id }} test (sprout) run: | @@ -547,7 +565,12 @@ jobs: ${{ inputs.test_id }} | \ tee --output-error=exit /dev/stderr | \ grep --max-count=1 --extended-regexp --color=always \ - '(estimated progress.*network_upgrade.*=.*Sapling)|(estimated progress.*network_upgrade.*=.*Blossom)|(estimated progress.*network_upgrade.*=.*Heartwood)|(estimated progress.*network_upgrade.*=.*Canopy)|(estimated progress.*network_upgrade.*=.*Nu5)|(test result:.*finished in)' \ + -e 'estimated progress.*network_upgrade.*=.*Sapling' \ + -e 'estimated progress.*network_upgrade.*=.*Blossom' \ + -e 'estimated progress.*network_upgrade.*=.*Heartwood' \ + -e 'estimated progress.*network_upgrade.*=.*Canopy' \ + -e 'estimated progress.*network_upgrade.*=.*Nu5' \ + -e 'test result:.*finished in' \ " # follow the logs of the test we just launched, up to Canopy activation (or the test finishing) @@ -602,7 +625,9 @@ jobs: ${{ inputs.test_id }} | \ tee --output-error=exit /dev/stderr | \ grep --max-count=1 --extended-regexp --color=always \ - '(estimated progress.*network_upgrade.*=.*Canopy)|(estimated progress.*network_upgrade.*=.*Nu5)|(test result:.*finished in)' \ + -e 'estimated progress.*network_upgrade.*=.*Canopy' \ + -e 'estimated progress.*network_upgrade.*=.*Nu5' \ + -e 'test result:.*finished in' \ " # follow the logs of the test we just launched, up to NU5 activation (or the test finishing) @@ -657,14 +682,14 @@ jobs: ${{ inputs.test_id }} | \ tee --output-error=exit /dev/stderr | \ grep --max-count=1 --extended-regexp --color=always \ - '(estimated progress.*network_upgrade.*=.*Nu5)|(test result:.*finished in)' \ + -e 'estimated progress.*network_upgrade.*=.*Nu5' \ + -e 'test result:.*finished in' \ " # follow the logs of the test we just launched, up to block 1,740,000 or later # (or the test finishing) # # We chose this height because it was about 5 hours into the NU5 sync, at the end of July 2022. - # This is a temporary workaround until we improve sync speeds. 
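In the log-following steps above, the single quoted alternation passed to `grep --extended-regexp` is split into one `-e` flag per pattern; `grep` treats multiple `-e` patterns as alternatives, so the matching behaviour is unchanged. A small sketch of that equivalence using the Rust `regex` crate (a hypothetical illustration, not code from this diff):

```rust
// A set of patterns matches a line if any one pattern matches,
// just like passing several `-e` flags to `grep -E`.
use regex::RegexSet;

fn main() {
    let patterns = RegexSet::new([
        r"estimated progress.*network_upgrade.*=.*Canopy",
        r"estimated progress.*network_upgrade.*=.*Nu5",
        r"test result:.*finished in",
    ])
    .expect("all patterns are valid regexes");

    let log_line = "estimated progress to tip, network_upgrade = Nu5";
    assert!(patterns.is_match(log_line));
}
```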
logs-1740k: name: Log ${{ inputs.test_id }} test (1740k) needs: [ logs-canopy ] @@ -716,13 +741,77 @@ jobs: ${{ inputs.test_id }} | \ tee --output-error=exit /dev/stderr | \ grep --max-count=1 --extended-regexp --color=always \ - '(estimated progress.*current_height.*=.*17[4-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks)|(estimated progress.*current_height.*=.*1[8-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks)|(estimated progress.*current_height.*=.*2[0-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks)|(test result:.*finished in)' \ + -e 'estimated progress.*current_height.*=.*17[4-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks' \ + -e 'estimated progress.*current_height.*=.*1[8-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks' \ + -e 'estimated progress.*current_height.*=.*2[0-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks' \ + -e 'test result:.*finished in' \ + " + + # follow the logs of the test we just launched, up to block 1,760,000 or later + # (or the test finishing) + # + # We chose this height because it was about 9 hours into the NU5 sync, at the end of August 2022. + logs-1760k: + name: Log ${{ inputs.test_id }} test (1760k) + needs: [ logs-1740k ] + # If the previous job fails, we still want to show the logs. + if: ${{ !cancelled() }} + runs-on: ubuntu-latest + permissions: + contents: 'read' + id-token: 'write' + steps: + - uses: actions/checkout@v3.0.2 + with: + persist-credentials: false + fetch-depth: '2' + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + - name: Downcase network name for disks + run: | + NETWORK_CAPS=${{ inputs.network }} + echo "NETWORK=${NETWORK_CAPS,,}" >> $GITHUB_ENV + + # Setup gcloud CLI + - name: Authenticate to Google Cloud + id: auth + uses: google-github-actions/auth@v0.8.0 + with: + retries: '3' + workload_identity_provider: 'projects/143793276228/locations/global/workloadIdentityPools/github-actions/providers/github-oidc' + service_account: 'github-service-account@zealous-zebra.iam.gserviceaccount.com' + token_format: 'access_token' + + # Show recent logs, following until block 1,760,000 (or the test finishes) + - name: Show logs for ${{ inputs.test_id }} test (1760k) + run: | + gcloud compute ssh \ + ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ + --zone ${{ env.ZONE }} \ + --quiet \ + --ssh-flag="-o ServerAliveInterval=5" \ + --command \ + "\ + docker logs \ + --tail all \ + --follow \ + ${{ inputs.test_id }} | \ + tee --output-error=exit /dev/stderr | \ + grep --max-count=1 --extended-regexp --color=always \ + -e 'estimated progress.*current_height.*=.*17[6-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks' \ + -e 'estimated progress.*current_height.*=.*1[8-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks' \ + -e 'estimated progress.*current_height.*=.*2[0-9][0-9][0-9][0-9][0-9][0-9].*remaining_sync_blocks' \ + -e 'test result:.*finished in' \ " # follow the logs of the test we just launched, up to the last checkpoint (or the test finishing) logs-checkpoint: name: Log ${{ inputs.test_id }} test (checkpoint) - needs: [ logs-1740k ] + needs: [ logs-1760k ] # If the previous job fails, we still want to show the logs. 
if: ${{ !cancelled() }} runs-on: ubuntu-latest @@ -773,7 +862,8 @@ jobs: ${{ inputs.test_id }} | \ tee --output-error=exit /dev/stderr | \ grep --max-count=1 --extended-regexp --color=always \ - '(verified final checkpoint)|(test result:.*finished in)' \ + -e 'verified final checkpoint' \ + -e 'test result:.*finished in' \ " # follow the logs of the test we just launched, until it finishes @@ -832,7 +922,7 @@ jobs: " - # check the results of the test + # check the results of the test, and show all of the test logs test-result: # TODO: update the job name here, and in the branch protection rules name: Run ${{ inputs.test_id }} test @@ -872,7 +962,7 @@ jobs: # Check that the container executed at least 1 Rust test harness test, and that all tests passed. # Then wait for the container to finish, and exit with the test's exit status. - # Also shows recent test logs. + # Also shows all the test logs. # # If the container has already finished, `docker wait` should return its status. # But sometimes this doesn't work, so we use `docker inspect` as a fallback. @@ -890,7 +980,7 @@ jobs: --command=' \ set -e; docker logs \ - --tail ${{ env.EXTRA_LOG_LINES }} \ + --tail all \ ${{ inputs.test_id }} | \ tee --output-error=exit /dev/stderr | \ grep --max-count=1 --extended-regexp --color=always \ @@ -928,14 +1018,22 @@ jobs: with: short-length: 7 + # Performs formatting on disk name components. + # # Disk images in GCP are required to be in lowercase, but the blockchain network - # uses sentence case, so we need to downcase ${{ inputs.network }} + # uses sentence case, so we need to downcase ${{ inputs.network }}. # - # Passes ${{ inputs.network }} to subsequent steps using $NETWORK env variable - - name: Downcase network name for disks + # Disk image names in GCP are limited to 63 characters, so we need to limit + # branch names to 13 characters. + # + # Passes ${{ inputs.network }} to subsequent steps using $NETWORK env variable. + # Passes ${{ env.GITHUB_REF_SLUG_URL }} to subsequent steps using $SHORT_GITHUB_REF env variable. + - name: Format network name and branch name for disks run: | NETWORK_CAPS=${{ inputs.network }} echo "NETWORK=${NETWORK_CAPS,,}" >> $GITHUB_ENV + LONG_GITHUB_REF=${{ env.GITHUB_REF_SLUG_URL }} + echo "SHORT_GITHUB_REF=${LONG_GITHUB_REF:0:13}" >> $GITHUB_ENV # Setup gcloud CLI - name: Authenticate to Google Cloud @@ -980,7 +1078,7 @@ jobs: SYNC_HEIGHT=$(echo $DOCKER_LOGS | grep -oE '${{ inputs.height_grep_text }}\([0-9]+\)' | grep -oE '[0-9]+' | tail -1 || [[ $? == 1 ]]) echo "SYNC_HEIGHT=$SYNC_HEIGHT" >> $GITHUB_ENV - # Sets the $UPDATE_SUFFIX env var to "-update" if using cached state, + # Sets the $UPDATE_SUFFIX env var to "-u" if using cached state, # and the empty string otherwise. # # Also sets a unique date and time suffix $TIME_SUFFIX. @@ -989,26 +1087,32 @@ jobs: UPDATE_SUFFIX="" if [[ "${{ inputs.needs_zebra_state }}" == "true" ]]; then - UPDATE_SUFFIX="-update" + UPDATE_SUFFIX="-u" fi - TIME_SUFFIX=$(date '+%Y-%m-%d-%H-%M-%S' --utc) + # We're going to delete old images after a month, so we don't need the year here + TIME_SUFFIX=$(date '+%m%d%H%M%S' --utc) echo "UPDATE_SUFFIX=$UPDATE_SUFFIX" >> $GITHUB_ENV echo "TIME_SUFFIX=$TIME_SUFFIX" >> $GITHUB_ENV - # Create an image from disk that will be used for following/other tests + # Create an image from disk that will be used for following/other tests. 
+ # # This image can contain: # - Zebra cached state # - Zebra + lightwalletd cached state - # Which cached state is being saved to the disk is defined by ${{ inputs.disk_prefix }} + # Which cached state is being saved to the disk is defined by ${{ inputs.disk_prefix }}. + # + # The image name must be unique, and be 63 characters or less. + # The timestamp makes images from the same commit unique, + # as long as they don't finish in the same second. # # Force the image creation (--force) as the disk is still attached even though is not being - # used by the container + # used by the container. - name: Create image from state disk run: | gcloud compute images create \ - "${{ inputs.disk_prefix }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-v${{ env.STATE_VERSION }}-${{ env.NETWORK }}-${{ inputs.disk_suffix }}$UPDATE_SUFFIX-$TIME_SUFFIX" \ + "${{ inputs.disk_prefix }}-${SHORT_GITHUB_REF}-${{ env.GITHUB_SHA_SHORT }}-v${{ env.STATE_VERSION }}-${{ env.NETWORK }}-${{ inputs.disk_suffix }}${UPDATE_SUFFIX}-${TIME_SUFFIX}" \ --force \ --source-disk=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ --source-disk-zone=${{ env.ZONE }} \ diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 9f1be10a969..668c3229a22 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -32,7 +32,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v29.0.0 + uses: tj-actions/changed-files@v29.0.1 with: files: | **/*.rs @@ -44,7 +44,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v29.0.0 + uses: tj-actions/changed-files@v29.0.1 with: files: | .github/workflows/*.yml diff --git a/Cargo.lock b/Cargo.lock index c1943aea5a7..ff6668db39b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -740,15 +740,16 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.19" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +checksum = "6127248204b9aba09a362f6c930ef6a78f2c1b2215f8a7b398c06e1083f17af0" dependencies = [ - "libc", + "js-sys", "num-integer", "num-traits", "serde", "time 0.1.44", + "wasm-bindgen", "winapi", ] diff --git a/README.md b/README.md index 7c50fd1a7bc..fe669b66f89 100644 --- a/README.md +++ b/README.md @@ -103,10 +103,10 @@ cargo install --features= ... 
### System Requirements The recommended requirements for compiling and running `zebrad` are: -- 4+ CPU cores -- 16+ GB RAM -- 300 GB+ available disk space for building binaries and storing cached chain state -- 100+ Mbps network connection, with 100+ GB of uploads and downloads per month +- 4 CPU cores +- 16 GB RAM +- 300 GB available disk space for building binaries and storing cached chain state +- 100 Mbps network connection, with 300 GB of uploads and downloads per month We continuously test that our builds and tests pass on: @@ -157,10 +157,11 @@ If this is a problem for you, please [open a ticket.](https://github.com/ZcashFoundation/zebra/issues/new/choose) `zebrad`'s typical mainnet network usage is: -- Initial sync: 50 GB download, we expect the initial download to grow to hundreds of gigabytes over time -- Ongoing updates: 10 MB - 1 GB upload and download per day, depending on user-created transaction size, and peer requests +- Initial sync: 100 GB download, we expect the initial download to grow to hundreds of gigabytes over time +- Ongoing updates: 10 MB - 10 GB upload and download per day, depending on user-created transaction size and peer requests -Zebra also performs an initial sync every time its internal database version changes. +Zebra performs an initial sync every time its internal database version changes, +so some version upgrades might require a full download of the whole chain. For more detailed information, refer to the [documentation](https://zebra.zfnd.org/user/run.html). @@ -177,7 +178,7 @@ See our [roadmap](#future-work) for details. Zebra uses around 100 GB of space for cached mainnet data, and 10 GB of space for cached testnet data. We expect disk usage to grow over time, so we recommend reserving at least 300 GB for mainnet nodes. -RocksDB cleans up outdated data periodically, and when the database is closed and re-opened. +Zebra's database cleans up outdated data periodically, and when Zebra is shut down and restarted. #### Disk Troubleshooting diff --git a/book/src/dev/continous-delivery.md b/book/src/dev/continous-delivery.md new file mode 100644 index 00000000000..e3592a145b5 --- /dev/null +++ b/book/src/dev/continous-delivery.md @@ -0,0 +1,28 @@ +# Zebra Continuous Delivery + +Zebra has an extension of its continuous integration since it automatically deploys all +code changes to a testing and/or pre-production environment after each PR gets merged +into the `main` branch, and on each Zebra `release`. + +## Triggers + +The continuous delivery pipeline is triggered when: + +* A PR is merged to `main` (technically, a `push` event) +* A new release is published in GitHub + +## Deployments + +On each trigger Zebra is deployed using the branch or version references as part of +the deployment naming convention. Deployments are made using [Managed Instance Groups (MIGs)](https://cloud.google.com/compute/docs/instance-groups#managed_instance_groups) +from Google Cloud Platform, with 2 nodes in the us-central1 region. + +**Note**: These *MIGs* are always replaced when PRs are merged to the `main` branch and +when a release is published. If a new major version is released, a new *MIG* is also +created, keeping the previous major version running until it's no longer needed. + +A single instance can also be deployed on demand, when a +long-lived instance with specific changes needs to be tested on Mainnet with +the same infrastructure used for CI & CD.
+ +Further details of the actual process can be found in our continuous delivery [workflow file](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/continous-delivery.yml). diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index df7aa486ab6..32c2997074b 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -35,7 +35,7 @@ rand_core = "0.6.3" ripemd = "0.1.1" # Matches version used by hdwallet secp256k1 = { version = "0.21.3", features = ["serde"] } -sha2 = { version = "0.9.9", features=["compress"] } +sha2 = { version = "0.9.9", features = ["compress"] } subtle = "2.4.1" uint = "0.9.1" x25519-dalek = { version = "1.2.0", features = ["serde"] } @@ -49,7 +49,7 @@ zcash_note_encryption = "0.1" zcash_primitives = { version = "0.7.0", features = ["transparent-inputs"] } # Time -chrono = { version = "0.4.19", features = ["serde"] } +chrono = { version = "0.4.20", default-features = false, features = ["clock", "std", "serde"] } humantime = "2.1.0" # Error Handling & Formatting diff --git a/zebra-chain/src/chain_tip/network_chain_tip_height_estimator.rs b/zebra-chain/src/chain_tip/network_chain_tip_height_estimator.rs index 3a310524da0..5d4e1f59237 100644 --- a/zebra-chain/src/chain_tip/network_chain_tip_height_estimator.rs +++ b/zebra-chain/src/chain_tip/network_chain_tip_height_estimator.rs @@ -93,7 +93,7 @@ impl NetworkChainTipHeightEstimator { let target_spacing_seconds = self.current_target_spacing.num_seconds(); let time_to_activation = Duration::seconds(remaining_blocks * target_spacing_seconds); - self.current_block_time = self.current_block_time + time_to_activation; + self.current_block_time += time_to_activation; self.current_height = max_height; } } diff --git a/zebra-chain/src/serialization/arbitrary.rs b/zebra-chain/src/serialization/arbitrary.rs index 953e8643c16..9c432475891 100644 --- a/zebra-chain/src/serialization/arbitrary.rs +++ b/zebra-chain/src/serialization/arbitrary.rs @@ -2,7 +2,7 @@ use std::convert::TryInto; -use chrono::{TimeZone, Utc, MAX_DATETIME, MIN_DATETIME}; +use chrono::{DateTime, TimeZone, Utc}; use proptest::{arbitrary::any, prelude::*}; use super::{ @@ -41,7 +41,7 @@ impl Arbitrary for DateTime32 { pub fn datetime_full() -> impl Strategy> ( // TODO: should we be subtracting 1 from the maximum timestamp? - MIN_DATETIME.timestamp()..=MAX_DATETIME.timestamp(), + DateTime::<Utc>::MIN_UTC.timestamp()..=DateTime::<Utc>::MAX_UTC.timestamp(), 0..2_000_000_000_u32, ) .prop_map(|(secs, nsecs)| Utc.timestamp(secs, nsecs)) diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index a0624f98828..c8e55223f77 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -18,7 +18,7 @@ jubjub = "0.9.0" rand = { version = "0.8.5", package = "rand" } rayon = "1.5.3" -chrono = "0.4.19" +chrono = { version = "0.4.20", default-features = false, features = ["clock", "std"] } dirs = "4.0.0" displaydoc = "0.2.3" lazy_static = "1.4.0" diff --git a/zebra-consensus/src/transaction/tests.rs b/zebra-consensus/src/transaction/tests.rs index bae2d326c78..21f579884cd 100644 --- a/zebra-consensus/src/transaction/tests.rs +++ b/zebra-consensus/src/transaction/tests.rs @@ -1,11 +1,9 @@ //! Tests for Zcash transaction consensus checks.
-use std::{ - collections::HashMap, - convert::{TryFrom, TryInto}, - sync::Arc, -}; +use std::{collections::HashMap, sync::Arc}; +use chrono::{DateTime, Utc}; +use color_eyre::eyre::Report; use halo2::pasta::{group::ff::PrimeField, pallas}; use tower::{service_fn, ServiceExt}; @@ -27,10 +25,9 @@ use zebra_chain::{ transparent::{self, CoinbaseData}, }; -use super::{check, Request, Verifier}; - use crate::error::TransactionError; -use color_eyre::eyre::Report; + +use super::{check, Request, Verifier}; #[cfg(test)] mod prop; @@ -264,7 +261,7 @@ async fn v5_transaction_is_rejected_before_nu5_activation() { height: canopy .activation_height(network) .expect("Canopy activation height is specified"), - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -327,7 +324,7 @@ fn v5_transaction_is_accepted_after_nu5_activation_for_network(network: Network) transaction: Arc::new(transaction), known_utxos: Arc::new(HashMap::new()), height: expiry_height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -377,7 +374,7 @@ async fn v4_transaction_with_transparent_transfer_is_accepted() { transaction: Arc::new(transaction), known_utxos: Arc::new(known_utxos), height: transaction_block_height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -416,7 +413,7 @@ async fn v4_transaction_with_last_valid_expiry_height() { transaction: Arc::new(transaction.clone()), known_utxos: Arc::new(known_utxos), height: block_height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -461,7 +458,7 @@ async fn v4_coinbase_transaction_with_low_expiry_height() { transaction: Arc::new(transaction.clone()), known_utxos: Arc::new(HashMap::new()), height: block_height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -503,7 +500,7 @@ async fn v4_transaction_with_too_low_expiry_height() { transaction: Arc::new(transaction.clone()), known_utxos: Arc::new(known_utxos), height: block_height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -548,7 +545,7 @@ async fn v4_transaction_with_exceeding_expiry_height() { transaction: Arc::new(transaction.clone()), known_utxos: Arc::new(known_utxos), height: block_height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -601,7 +598,7 @@ async fn v4_coinbase_transaction_with_exceeding_expiry_height() { transaction: Arc::new(transaction.clone()), known_utxos: Arc::new(HashMap::new()), height: block_height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -652,7 +649,7 @@ async fn v4_coinbase_transaction_is_accepted() { transaction: Arc::new(transaction), known_utxos: Arc::new(HashMap::new()), height: transaction_block_height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -702,7 +699,7 @@ async fn v4_transaction_with_transparent_transfer_is_rejected_by_the_script() { transaction: Arc::new(transaction), known_utxos: Arc::new(known_utxos), height: transaction_block_height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -752,7 +749,7 @@ async fn v4_transaction_with_conflicting_transparent_spend_is_rejected() { transaction: Arc::new(transaction), known_utxos: Arc::new(known_utxos), height: transaction_block_height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -818,7 +815,7 @@ fn v4_transaction_with_conflicting_sprout_nullifier_inside_joinsplit_is_rejected transaction: Arc::new(transaction), known_utxos: Arc::new(HashMap::new()), height:
transaction_block_height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -889,7 +886,7 @@ fn v4_transaction_with_conflicting_sprout_nullifier_across_joinsplits_is_rejecte transaction: Arc::new(transaction), known_utxos: Arc::new(HashMap::new()), height: transaction_block_height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -943,7 +940,7 @@ async fn v5_transaction_with_transparent_transfer_is_accepted() { transaction: Arc::new(transaction), known_utxos: Arc::new(known_utxos), height: transaction_block_height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -983,7 +980,7 @@ async fn v5_transaction_with_last_valid_expiry_height() { transaction: Arc::new(transaction.clone()), known_utxos: Arc::new(known_utxos), height: block_height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -1026,7 +1023,7 @@ async fn v5_coinbase_transaction_expiry_height() { transaction: Arc::new(transaction.clone()), known_utxos: Arc::new(HashMap::new()), height: block_height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -1047,7 +1044,7 @@ async fn v5_coinbase_transaction_expiry_height() { transaction: Arc::new(new_transaction.clone()), known_utxos: Arc::new(HashMap::new()), height: block_height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -1072,7 +1069,7 @@ async fn v5_coinbase_transaction_expiry_height() { transaction: Arc::new(new_transaction.clone()), known_utxos: Arc::new(HashMap::new()), height: block_height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -1099,7 +1096,7 @@ async fn v5_coinbase_transaction_expiry_height() { transaction: Arc::new(new_transaction.clone()), known_utxos: Arc::new(HashMap::new()), height: new_expiry_height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -1141,7 +1138,7 @@ async fn v5_transaction_with_too_low_expiry_height() { transaction: Arc::new(transaction.clone()), known_utxos: Arc::new(known_utxos), height: block_height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -1187,7 +1184,7 @@ async fn v5_transaction_with_exceeding_expiry_height() { transaction: Arc::new(transaction.clone()), known_utxos: Arc::new(known_utxos), height: block_height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -1241,7 +1238,7 @@ async fn v5_coinbase_transaction_is_accepted() { transaction: Arc::new(transaction), known_utxos: Arc::new(known_utxos), height: transaction_block_height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -1293,7 +1290,7 @@ async fn v5_transaction_with_transparent_transfer_is_rejected_by_the_script() { transaction: Arc::new(transaction), known_utxos: Arc::new(known_utxos), height: transaction_block_height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -1345,7 +1342,7 @@ async fn v5_transaction_with_conflicting_transparent_spend_is_rejected() { transaction: Arc::new(transaction), known_utxos: Arc::new(known_utxos), height: transaction_block_height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -1390,7 +1387,7 @@ fn v4_with_signed_sprout_transfer_is_accepted() { transaction, known_utxos: Arc::new(HashMap::new()), height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -1463,7 +1460,7 @@ async fn v4_with_joinsplit_is_rejected_for_modification( transaction, known_utxos: Arc::new(HashMap::new()), height, - time:
chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -1499,7 +1496,7 @@ fn v4_with_sapling_spends() { transaction, known_utxos: Arc::new(HashMap::new()), height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -1542,7 +1539,7 @@ fn v4_with_duplicate_sapling_spends() { transaction, known_utxos: Arc::new(HashMap::new()), height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -1587,7 +1584,7 @@ fn v4_with_sapling_outputs_and_no_spends() { transaction, known_utxos: Arc::new(HashMap::new()), height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -1636,7 +1633,7 @@ fn v5_with_sapling_spends() { transaction: Arc::new(transaction), known_utxos: Arc::new(HashMap::new()), height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -1680,7 +1677,7 @@ fn v5_with_duplicate_sapling_spends() { transaction: Arc::new(transaction), known_utxos: Arc::new(HashMap::new()), height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; @@ -1743,7 +1740,7 @@ fn v5_with_duplicate_orchard_action() { transaction: Arc::new(transaction), known_utxos: Arc::new(HashMap::new()), height, - time: chrono::MAX_DATETIME, + time: DateTime::<Utc>::MAX_UTC, }) .await; diff --git a/zebra-consensus/src/transaction/tests/prop.rs b/zebra-consensus/src/transaction/tests/prop.rs index e243dd4fa98..aaddb3649d6 100644 --- a/zebra-consensus/src/transaction/tests/prop.rs +++ b/zebra-consensus/src/transaction/tests/prop.rs @@ -1,6 +1,6 @@ //! Randomised property tests for transaction verification. -use std::{collections::HashMap, convert::TryInto, sync::Arc}; +use std::{collections::HashMap, sync::Arc}; use chrono::{DateTime, Duration, Utc}; use proptest::{collection::vec, prelude::*}; @@ -14,9 +14,10 @@ use zebra_chain::{ transparent, }; -use super::mock_transparent_transfer; use crate::{error::TransactionError, transaction}; + +use super::mock_transparent_transfer; + /// The maximum number of transparent inputs to include in a mock transaction. const MAX_TRANSPARENT_INPUTS: usize = 10; @@ -204,7 +205,7 @@ proptest!
{ (first_datetime, second_datetime) } else if first_datetime > second_datetime { (second_datetime, first_datetime) - } else if first_datetime == chrono::MAX_DATETIME { + } else if first_datetime == DateTime::<Utc>::MAX_UTC { (first_datetime - Duration::nanoseconds(1), first_datetime) } else { (first_datetime, first_datetime + Duration::nanoseconds(1)) diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index e40e8015ea3..f9c7930e8ff 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -16,7 +16,7 @@ proptest-impl = ["proptest", "proptest-derive", "zebra-chain/proptest-impl"] bitflags = "1.3.2" byteorder = "1.4.3" bytes = "1.2.1" -chrono = "0.4.19" +chrono = { version = "0.4.20", default-features = false, features = ["clock", "std"] } hex = "0.4.3" humantime-serde = "1.1.1" indexmap = { version = "1.9.1", features = ["serde"] } diff --git a/zebra-network/src/protocol/external/codec.rs b/zebra-network/src/protocol/external/codec.rs index f007b73c648..1c6de9b8f05 100644 --- a/zebra-network/src/protocol/external/codec.rs +++ b/zebra-network/src/protocol/external/codec.rs @@ -2,7 +2,6 @@ use std::{ cmp::min, - convert::TryInto, fmt, io::{Cursor, Read, Write}, }; @@ -724,15 +723,18 @@ impl Codec { } } -// XXX replace these interior unit tests with exterior integration tests + proptest +// TODO: +// - move these unit tests to a separate file +// - add exterior integration tests + proptest #[cfg(test)] mod tests { - use super::*; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use chrono::{MAX_DATETIME, MIN_DATETIME}; + use chrono::DateTime; use futures::prelude::*; use lazy_static::lazy_static; - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use super::*; lazy_static! { static ref VERSION_TEST_VECTOR: Message = { @@ -808,8 +810,10 @@ mod tests { deserialize_version_with_time(1620777600).expect("recent time is valid"); deserialize_version_with_time(0).expect("zero time is valid"); - deserialize_version_with_time(MIN_DATETIME.timestamp()).expect("min time is valid"); - deserialize_version_with_time(MAX_DATETIME.timestamp()).expect("max time is valid"); + deserialize_version_with_time(DateTime::<Utc>::MIN_UTC.timestamp()) + .expect("min time is valid"); + deserialize_version_with_time(DateTime::<Utc>::MAX_UTC.timestamp()) + .expect("max time is valid"); } /// Deserialize a `Version` message containing `time`, and return the result.
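The chrono changes above track the 0.4.20 API: the crate-level `MIN_DATETIME`/`MAX_DATETIME` constants are replaced by the `DateTime::<Utc>::MIN_UTC`/`MAX_UTC` associated constants, and the Cargo.toml entries switch to `default-features = false` with only the `clock`, `std`, and (where needed) `serde` features enabled. A stand-alone sketch of the new constants (an illustrative example, not code from this diff):

```rust
// Stand-alone example of the chrono 0.4.20 constants this diff migrates to.
use chrono::{DateTime, Utc};

/// Clamps a timestamp to the range that chrono can represent.
fn clamp_to_supported_range(time: DateTime<Utc>) -> DateTime<Utc> {
    // `MIN_UTC` / `MAX_UTC` replace the older `chrono::MIN_DATETIME` /
    // `chrono::MAX_DATETIME` free constants.
    time.clamp(DateTime::<Utc>::MIN_UTC, DateTime::<Utc>::MAX_UTC)
}

fn main() {
    let now = Utc::now(); // requires the `clock` feature
    assert_eq!(clamp_to_supported_range(now), now);
}
```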
diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index d14f2c613eb..6bd3b1ff83a 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -12,12 +12,7 @@ default = [] proptest-impl = ["proptest", "proptest-derive", "zebra-chain/proptest-impl", "zebra-state/proptest-impl"] [dependencies] -zebra-chain = { path = "../zebra-chain" } -zebra-network = { path = "../zebra-network" } -zebra-node-services = { path = "../zebra-node-services" } -zebra-state = { path = "../zebra-state" } - -chrono = "0.4.19" +chrono = { version = "0.4.20", default-features = false, features = ["clock", "std"] } futures = "0.3.21" # lightwalletd sends JSON-RPC requests over HTTP 1.1 @@ -42,6 +37,11 @@ serde = { version = "1.0.142", features = ["serde_derive"] } proptest = { version = "0.10.1", optional = true } proptest-derive = { version = "0.3.0", optional = true } +zebra-chain = { path = "../zebra-chain" } +zebra-network = { path = "../zebra-network" } +zebra-node-services = { path = "../zebra-node-services" } +zebra-state = { path = "../zebra-state" } + [dev-dependencies] insta = { version = "1.17.1", features = ["redactions"] } proptest = "0.10.1" diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 69345ddc391..82df95dfcc0 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -10,7 +10,7 @@ proptest-impl = ["proptest", "proptest-derive", "zebra-test", "zebra-chain/propt [dependencies] bincode = "1.3.3" -chrono = "0.4.19" +chrono = { version = "0.4.20", default-features = false, features = ["clock", "std"] } dirs = "4.0.0" displaydoc = "0.2.3" futures = "0.3.21" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 6c979d45c26..ba068e3895f 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -77,7 +77,7 @@ zebra-state = { path = "../zebra-state" } abscissa_core = "0.5" gumdrop = "0.7" -chrono = "0.4.19" +chrono = { version = "0.4.20", default-features = false, features = ["clock", "std"] } humantime = "2.1.0" humantime-serde = "1.1.1" indexmap = "1.9.1" diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index 94b042f78cd..58bc9f3b1a0 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -354,6 +354,7 @@ fn misconfigured_ephemeral_missing_directory() -> Result<()> { ) } +#[tracing::instrument] fn ephemeral(cache_dir_config: EphemeralConfig, cache_dir_check: EphemeralCheck) -> Result<()> { use std::io::ErrorKind; @@ -520,6 +521,7 @@ fn config_test() -> Result<()> { } /// Test that `zebrad start` can parse the output from `zebrad generate`. +#[tracing::instrument] fn valid_generated_config(command: &str, expect_stdout_line_contains: &str) -> Result<()> { let _init_guard = zebra_test::init(); @@ -823,6 +825,7 @@ fn sync_large_checkpoints_mempool_mainnet() -> Result<()> { .map(|_tempdir| ()) } +#[tracing::instrument] fn create_cached_database(network: Network) -> Result<()> { let height = network.mandatory_checkpoint_height(); let checkpoint_stop_regex = format!("{}.*CommitFinalized request", STOP_AT_HEIGHT_REGEX); @@ -839,6 +842,7 @@ fn create_cached_database(network: Network) -> Result<()> { ) } +#[tracing::instrument] fn sync_past_mandatory_checkpoint(network: Network) -> Result<()> { let height = network.mandatory_checkpoint_height() + 1200; let full_validation_stop_regex = @@ -862,6 +866,7 @@ fn sync_past_mandatory_checkpoint(network: Network) -> Result<()> { /// `timeout_argument_name` parameter. The value of the environment variable must the number of /// minutes specified as an integer. 
#[allow(clippy::print_stderr)] +#[tracing::instrument] fn full_sync_test(network: Network, timeout_argument_name: &str) -> Result<()> { let timeout_argument: Option = env::var(timeout_argument_name) .ok() @@ -1284,6 +1289,7 @@ async fn lightwalletd_test_suite() -> Result<()> { /// Set `FullSyncFromGenesis { allow_lightwalletd_cached_state: true }` to speed up manual full sync tests. /// /// The random ports in this test can cause [rare port conflicts.](#Note on port conflict) +#[tracing::instrument] fn lightwalletd_integration_test(test_type: LightwalletdTestType) -> Result<()> { let _init_guard = zebra_test::init(); @@ -1686,6 +1692,7 @@ fn zebra_state_conflict() -> Result<()> { /// `second_dir`. Check that the first node's stdout contains /// `first_stdout_regex`, and the second node's stderr contains /// `second_stderr_regex`. +#[tracing::instrument] fn check_config_conflict( first_dir: T, first_stdout_regex: &str, @@ -1693,8 +1700,8 @@ fn check_config_conflict( second_stderr_regex: &str, ) -> Result<()> where - T: ZebradTestDirExt, - U: ZebradTestDirExt, + T: ZebradTestDirExt + std::fmt::Debug, + U: ZebradTestDirExt + std::fmt::Debug, { // Start the first node let mut node1 = first_dir.spawn_child(args!["start"])?; diff --git a/zebrad/tests/common/cached_state.rs b/zebrad/tests/common/cached_state.rs index 8739491d108..3323c3ac716 100644 --- a/zebrad/tests/common/cached_state.rs +++ b/zebrad/tests/common/cached_state.rs @@ -25,6 +25,7 @@ pub type BoxStateService = BoxService; /// Starts a state service using the provided `cache_dir` as the directory with the chain state. +#[tracing::instrument(skip(cache_dir))] pub async fn start_state_service_with_cache_dir( network: Network, cache_dir: impl Into, @@ -47,6 +48,7 @@ pub async fn start_state_service_with_cache_dir( } /// Loads the chain tip height from the state stored in a specified directory. +#[tracing::instrument] pub async fn load_tip_height_from_state_directory( network: Network, state_path: &Path, @@ -87,6 +89,7 @@ pub async fn copy_state_directory(source: impl AsRef) -> Result { /// /// Copies all files from the `directory` into the destination specified by the concatenation of /// the `base_destination_path` and `directory` stripped of its `prefix`. +#[tracing::instrument] async fn copy_directory( directory: &Path, prefix: &Path, diff --git a/zebrad/tests/common/launch.rs b/zebrad/tests/common/launch.rs index 3c27a3039df..490bcd8bece 100644 --- a/zebrad/tests/common/launch.rs +++ b/zebrad/tests/common/launch.rs @@ -206,7 +206,8 @@ where /// /// This prevents it from downloading blocks. Instead, the `zebra_directory` parameter allows /// providing an initial state to the zebrad instance. -pub fn spawn_zebrad_for_rpc_without_initial_peers( +#[tracing::instrument] +pub fn spawn_zebrad_for_rpc_without_initial_peers( network: Network, zebra_directory: P, test_type: LightwalletdTestType, diff --git a/zebrad/tests/common/lightwalletd/send_transaction_test.rs b/zebrad/tests/common/lightwalletd/send_transaction_test.rs index 56639f67141..db638204c67 100644 --- a/zebrad/tests/common/lightwalletd/send_transaction_test.rs +++ b/zebrad/tests/common/lightwalletd/send_transaction_test.rs @@ -145,6 +145,7 @@ pub async fn run() -> Result<()> { /// /// Returns a list of valid transactions that are not in any of the blocks present in the /// original `zebrad_state_path`. 
+#[tracing::instrument] async fn load_transactions_from_a_future_block( network: Network, zebrad_state_path: PathBuf, @@ -179,6 +180,7 @@ async fn load_transactions_from_a_future_block( /// /// If the specified `zebrad_state_path` contains a chain state that's not synchronized to a tip that's /// after `height`. +#[tracing::instrument] async fn load_transactions_from_block_after( height: block::Height, network: Network, @@ -213,6 +215,7 @@ async fn load_transactions_from_block_after( /// Performs a request to the provided read-only `state` service to fetch all transactions from a /// block at the specified `height`. +#[tracing::instrument(skip(state))] async fn load_transactions_from_block( height: block::Height, state: &mut ReadStateService, diff --git a/zebrad/tests/common/lightwalletd/wallet_grpc.rs b/zebrad/tests/common/lightwalletd/wallet_grpc.rs index 78d575f6c9d..edfed7c5b3e 100644 --- a/zebrad/tests/common/lightwalletd/wallet_grpc.rs +++ b/zebrad/tests/common/lightwalletd/wallet_grpc.rs @@ -25,6 +25,7 @@ pub type LightwalletdRpcClient = /// Waits for `lightwalletd` to sync to near the tip, if `wait_for_sync` is true. /// /// Returns the lightwalletd instance and the port number that it is listening for RPC connections. +#[tracing::instrument] pub fn spawn_lightwalletd_with_rpc_server( zebrad_rpc_address: SocketAddr, lightwalletd_state_path: Option, @@ -56,6 +57,7 @@ pub fn spawn_lightwalletd_with_rpc_server( } /// Connect to a lightwalletd RPC instance. +#[tracing::instrument] pub async fn connect_to_lightwalletd(lightwalletd_rpc_port: u16) -> Result { let lightwalletd_rpc_address = format!("http://127.0.0.1:{lightwalletd_rpc_port}"); diff --git a/zebrad/tests/common/sync.rs b/zebrad/tests/common/sync.rs index a9a8d283bd8..495ca341497 100644 --- a/zebrad/tests/common/sync.rs +++ b/zebrad/tests/common/sync.rs @@ -86,6 +86,7 @@ pub const MIN_HEIGHT_FOR_DEFAULT_LOOKAHEAD: Height = Height(3 * sync::DEFAULT_CHECKPOINT_CONCURRENCY_LIMIT as u32); /// What the expected behavior of the mempool is for a test that uses [`sync_until`]. +#[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum MempoolBehavior { /// The mempool should be forced to activate at a certain height, for debug purposes. /// @@ -177,6 +178,7 @@ impl MempoolBehavior { /// On success, returns the associated `TempDir`. Returns an error if /// the child exits or `timeout` elapses before `stop_regex` is found. #[allow(clippy::too_many_arguments)] +#[tracing::instrument(skip(reuse_tempdir))] pub fn sync_until( height: Height, network: Network, @@ -297,6 +299,7 @@ pub fn sync_until( /// The zebrad instance is executed on a copy of the partially synchronized chain state. This copy /// is returned afterwards, containing the fully synchronized chain state. #[allow(dead_code)] +#[tracing::instrument] pub async fn perform_full_sync_starting_from( network: Network, partial_sync_path: &Path, @@ -354,6 +357,7 @@ pub fn cached_mandatory_checkpoint_test_config() -> Result { /// Returns an error if the child exits or the fixed timeout elapses /// before `STOP_AT_HEIGHT_REGEX` is found. 
#[allow(clippy::print_stderr)] +#[tracing::instrument] pub fn create_cached_database_height( network: Network, height: Height, @@ -363,8 +367,8 @@ pub fn create_cached_database_height( ) -> Result<()> { eprintln!("creating cached database"); - // 20 hours - let timeout = Duration::from_secs(60 * 60 * 20); + // 24 hours + let timeout = Duration::from_secs(24 * 60 * 60); // Use a persistent state, so we can handle large syncs let mut config = cached_mandatory_checkpoint_test_config()?;
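Most of the test helpers above gain a `#[tracing::instrument]` attribute. The attribute records every function argument as a span field using its `Debug` implementation, which is why `check_config_conflict` picks up `std::fmt::Debug` bounds and why helpers with non-`Debug` or noisy arguments use `skip(...)`. A minimal sketch of both forms (hypothetical functions, not code from this diff):

```rust
use std::fmt::Debug;

// Every argument is captured as a span field via `Debug`,
// so generic parameters need a `Debug` bound.
#[tracing::instrument]
fn start_test_node<D: Debug>(config_dir: D, network: &str) {
    tracing::info!("starting node");
}

// Arguments without a useful `Debug` impl are left out of the span fields.
#[tracing::instrument(skip(state))]
fn query_tip_height<S>(state: &mut S, height_limit: u32) {
    tracing::info!("querying tip height");
}

fn main() {
    start_test_node("/tmp/zebrad-test", "Mainnet");
    query_tip_height(&mut (), 1_740_000);
}
```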