diff --git a/.circleci/config.yml b/.circleci/config.yml index 45925c249f..0478f5aaae 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -106,24 +106,37 @@ commands: name: Check eslint command: cd integration_tests && npm run lint - run: - name: Pre-build base node + name: Build base node command: cargo build --release --bin tari_base_node - run: - name: Pre-build wallet + name: Build wallet command: cargo build --release --bin tari_console_wallet - run: - name: Pre-build mmproxy + name: Build wallet FFI + command: cargo build --release --package tari_wallet_ffi + - run: + name: Build mmproxy command: cargo build --release --bin tari_merge_mining_proxy + - run: + name: Build mining_node + command: cargo build --release --bin tari_mining_node - run: name: Run cucumber scenarios - command: cd integration_tests && mkdir -p cucumber_output && node_modules/.bin/cucumber-js --tags "not @long-running and not @broken and not @flaky" --format json:cucumber_output/tests.cucumber + command: cd integration_tests && mkdir -p cucumber_output && node_modules/.bin/cucumber-js --tags "not @long-running and not @broken and not @flaky and not @wallet-ffi" --format json:cucumber_output/tests.cucumber --exit - run: name: Generate report command: cd integration_tests && node ./generate_report.js when: always - run: - name: Run flaky/broken cucumber scenarios (Always pass) - command: cd integration_tests && node_modules/.bin/cucumber-js --tags "not @long-running and (@broken or @flaky)" --format json:cucumber_output/broken-tests.cucumber || true + name: Run ffi cucumber scenarios + command: cd integration_tests && mkdir -p cucumber_output && node_modules/.bin/cucumber-js --tags "not @long-running and not @broken and not @flaky and @wallet-ffi" --format json:cucumber_output/tests-ffi.cucumber --exit + - run: + name: Generate report (ffi) + command: cd integration_tests && touch cucumber_output/tests-ffi.cucumber && node ./generate_report.js cucumber_output/tests-ffi.cucumber 
temp/reports/cucumber_ffi_report.html + when: always + # - run: + # name: Run flaky/broken cucumber scenarios (Always pass) + # command: cd integration_tests && node_modules/.bin/cucumber-js --tags "not @long-running and (@broken or @flaky)" --format json:cucumber_output/broken-tests.cucumber || true - store_test_results: path: integration_tests/cucumber_output - store_artifacts: diff --git a/.dockerignore b/.dockerignore index 1f9ca954ba..3d1d7cfbad 100644 --- a/.dockerignore +++ b/.dockerignore @@ -28,7 +28,7 @@ book report # On development branch only. This should be removed for point releases -Cargo.lock +# Cargo.lock *.log # Ignore DataStore, Database and Log files @@ -39,4 +39,4 @@ base_layer/wallet_ffi/build.config /base_layer/wallet_ffi/logs/ base_layer/wallet_ffi/.cargo/config -keys.json \ No newline at end of file +keys.json diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 3c586c7191..159fcfc52e 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,18 +1,9 @@ - +Description +--- -## Description - +Motivation and Context +--- -## Motivation and Context - - +How Has This Been Tested? +--- -## How Has This Been Tested? - - - - -## Checklist: - -* [ ] I'm merging against the `development` branch. -* [ ] I have squashed my commits into a single commit. diff --git a/.github/workflows/base_node_binaries.yml b/.github/workflows/base_node_binaries.yml index 5ac5d3cf09..225b5d2591 100644 --- a/.github/workflows/base_node_binaries.yml +++ b/.github/workflows/base_node_binaries.yml @@ -5,9 +5,9 @@ on: tags: - "v[0-9]+.[0-9]+.[0-9]+" branches: - - leet-* + - ci-* schedule: - - cron: '05 00 01 * *' + - cron: '05 00 * * *' workflow_dispatch: inputs: customTag: @@ -24,31 +24,15 @@ jobs: strategy: fail-fast: false matrix: -# # Breaks build - cmake issue? -# os: [windows-2016] -# # Breaks build - clang build (croaring-sys)? 
-# features: ["safe"] -# os: [ubuntu-latest, macos-latest, windows-latest, self-hosted] -# os: [ubuntu-18.04, ubuntu-20.04, macos-10.15, macos-11.0, windows-2016, windows-2019] - os: [ubuntu-18.04, macos-10.15, windows-2019, self-hosted] + os: [ubuntu-20.04, macos-10.15, windows-2019] # https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html -# features: ["safe", "avx2"] -# target_cpu: ["x86-64", "broadwell", "skylake"] target_cpu: ["haswell"] -# target_release: ["release", "debug"] -# exclude: -# - target_cpu: "x86-64" -# features: "avx2" exclude: - os: "macos-10.15" target_cpu: "haswell" - - os: "self-hosted" - target_cpu: "haswell" include: - os: "macos-10.15" target_cpu: "x86-64" - - os: "self-hosted" - target_cpu: "x86-64" runs-on: ${{ matrix.os }} @@ -75,14 +59,11 @@ jobs: - name: Setup Rust toolchain uses: actions-rs/toolchain@v1 with: -# toolchain: stable toolchain: nightly-2021-05-09 components: rustfmt -# target: ${{ matrix.target }} override: true - name: Install Linux dependencies - Ubuntu -# if: startsWith(matrix.os,'ubuntu') if: startsWith(runner.os,'Linux') run: | sudo apt-get update @@ -104,12 +85,10 @@ jobs: # sudo apt-get -y upgrade - name: Install macOS dependencies -# if: startsWith(matrix.os,'macos') if: startsWith(runner.os,'macOS') run: brew install cmake zip - name: Install Windows dependencies -# if: startsWith(matrix.os,'windows') if: startsWith(runner.os,'Windows') run: | vcpkg.exe install sqlite3:x64-windows zlib:x64-windows @@ -142,15 +121,6 @@ jobs: echo "LIBCLANG_PATH=C:\Program Files\LLVM\bin" >> $GITHUB_ENV echo "C:\Strawberry\perl\bin" >> $GITHUB_PATH - # this is a hack to fix an issue with building libclang in MSVC - # it should be fixed in release 16.9 of MSVC - # issue https://github.com/microsoft/STL/issues/1300 - # temp fix https://github.com/mono/CppSharp/pull/1514/files - - name: fix intrin.h file - Windows - if: startsWith(runner.os,'Windows') - shell: powershell - run: .github/hacks/intrin.ps1 - - name: Caching uses: 
actions/cache@v2 with: @@ -198,6 +168,13 @@ jobs: cp -v "$GITHUB_WORKSPACE/target/release/tari_merge_mining_proxy${TBN_EXT}" . cp -v "$GITHUB_WORKSPACE/target/release/tari_mining_node${TBN_EXT}" . + - name: Build the windows installer + if: startsWith(runner.os,'Windows') + run: | + cd buildtools + "%programfiles(x86)%\Inno Setup 6\iscc.exe" "windows_inno_installer.iss" + shell: cmd + - name: Archive and Sign Binaries shell: bash run: | @@ -245,3 +222,39 @@ jobs: S3OPTIONS: '--recursive --exclude "*" --include "*.zip*"' # S3OPTIONS: '--recursive --exclude "*" --include "*.zip*"' # S3OPTIONS: '--acl public-read' + + - name: Copy tags to latest s3 - Bash +# if: ${{ env.AWS_SECRET_ACCESS_KEY != '' && matrix.os != 'self-hosted' }} + if: ${{ env.AWS_SECRET_ACCESS_KEY != '' && matrix.os != 'self-hosted' && startsWith(github.ref, 'refs/tags/v') }} + shell: bash + run: | + echo "Starting upload ... ${{ env.SOURCE }}" + if [ "$RUNNER_OS" == "Windows" ]; then + echo "No ls for 'D:' on Windows" + else + ls -al ${{ env.SOURCE }} + fi + + aws s3 ${{ env.S3CMD }} --region ${{ secrets.AWS_REGION }} \ + "${{ env.SOURCE }}" \ + s3://${{ secrets.AWS_S3_BUCKET }}/current/${{ env.DEST_DIR }} \ + ${{ env.S3OPTIONS }} + + aws s3 rm --region ${{ secrets.AWS_REGION }} \ + s3://${{ secrets.AWS_S3_BUCKET }}/latest/${{ env.DEST_DIR }}/* + + aws s3 ${{ env.S3CMD }} --region ${{ secrets.AWS_REGION }} \ + "${{ env.SOURCE }}" \ + s3://${{ secrets.AWS_S3_BUCKET }}/latest/${{ env.DEST_DIR }} \ + ${{ env.S3OPTIONS }} + echo "Done - $?" 
+ exit 0 + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + SOURCE: '${{ github.workspace }}${{ env.TBN_DIST }}' + DEST_DIR: '${{ env.S3DESTOVERRIDE }}${{ env.S3DESTDIR }}/' + S3CMD: 'cp' + S3OPTIONS: '--recursive --exclude "*" --include "*.zip*"' + # S3OPTIONS: '--recursive --exclude "*" --include "*.zip*"' + # S3OPTIONS: '--acl public-read' diff --git a/.github/workflows/libwallet.yml b/.github/workflows/libwallet.yml index c406ea0fa0..5785ddb189 100644 --- a/.github/workflows/libwallet.yml +++ b/.github/workflows/libwallet.yml @@ -2,13 +2,13 @@ name: Build libwallet on: push: -# branches: -# - development -# - libwallet + branches: + - "libwallet-*" + # - development tags: - "libwallet-*" jobs: - build-and-upload: + android: runs-on: ubuntu-latest steps: # Checkout the code @@ -16,7 +16,7 @@ jobs: # Build and package the libraries - name: Build libwallet id: build-libwallet - uses: tari-project/action-buildlibs@v0.3.1 + uses: tari-project/action-buildlibs@v0.3.2 with: platforms: "x86_64-linux-android;aarch64-linux-android;armv7-linux-androideabi" level: "24" @@ -24,10 +24,11 @@ jobs: - name: Upload artifacts uses: actions/upload-artifact@v2 with: - name: libwallet + name: libwallet-android path: ${{ github.workspace }}/libwallet/ # Copy tarballs to S3 - name: Sync to S3 + continue-on-error: true # Don't break if s3 upload fails uses: jakejarvis/s3-sync-action@v0.5.1 with: args: --acl public-read --follow-symlinks @@ -35,6 +36,49 @@ jobs: AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }} AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_REGION: 'us-east-1' # optional: defaults to us-east-1 - SOURCE_DIR: '$GITHUB_WORKSPACE/libwallet' - DEST_DIR: 'libwallet' + AWS_REGION: "us-east-1" # optional: defaults to us-east-1 + SOURCE_DIR: "$GITHUB_WORKSPACE/libwallet" + DEST_DIR: "libwallet" + ios: + runs-on: macos-10.15 + steps: + - uses: 
actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + with: + toolchain: nightly-2021-05-09 + target: aarch64-apple-ios + components: rustfmt + override: true + - uses: actions-rs/toolchain@v1 + with: + toolchain: nightly-2021-05-09 + target: x86_64-apple-ios + components: rustfmt + override: true + - name: Install macOS dependencies + run: brew install cmake zip + - name: Build + run: | + mkdir -p MobileWallet/TariLib/ + cd base_layer/wallet_ffi + mv ios.config build.config + ./mobile_build.sh + ls -alht $GITHUB_WORKSPACE/MobileWallet/TariLib/ + - name: Upload artifacts + uses: actions/upload-artifact@v2 + with: + name: libwallet-ios + path: ${{ github.workspace }}/MobileWallet/TariLib/ + + # "Error: Container action is only supported on Linux" + # - name: Sync to S3 + # uses: jakejarvis/s3-sync-action@v0.5.1 + # with: + # args: --acl public-read --follow-symlinks + # env: + # AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }} + # AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + # AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + # AWS_REGION: "us-east-1" # optional: defaults to us-east-1 + # SOURCE_DIR: "$GITHUB_WORKSPACE/MobileWallet/TariLib/" + # DEST_DIR: "libwallet-ios" diff --git a/.github/workflows/pr_title.yml b/.github/workflows/pr_title.yml new file mode 100644 index 0000000000..0ae571681c --- /dev/null +++ b/.github/workflows/pr_title.yml @@ -0,0 +1,20 @@ +name: PR +on: + pull_request: + types: + - opened + - reopened + - edited + - synchronize + +jobs: + check-title: + runs-on: ubuntu-latest + steps: + - name: install + run: | + npm install -g @commitlint/cli @commitlint/config-conventional + echo "module.exports = {extends: ['@commitlint/config-conventional']}" > commitlint.config.js + - name: lint + run: | + echo ${{github.event.pull_request.title}} | commitlint diff --git a/Cargo.lock b/Cargo.lock index 3156483982..f3a00b0139 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "addr2line" -version = 
"0.15.2" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7a2e47a1fbe209ee101dd6d61285226744c6c8d3c21c8dc878ba6cb9f467f3a" +checksum = "3e61f2b7f93d2c7d2b08263acaa4a363b3e276806c68af6134c44f523bf1aacd" dependencies = [ "gimli", ] @@ -28,9 +28,9 @@ dependencies = [ [[package]] name = "aead" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "922b33332f54fc0ad13fa3e514601e8d30fb54e1f3eadc36643f6526db645621" +checksum = "6e3e798aa0c8239776f54415bc06f3d74b1850f3f830b45c35cfc80556973f70" dependencies = [ "generic-array", ] @@ -85,15 +85,15 @@ dependencies = [ [[package]] name = "aes-gcm" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc3be92e19a7ef47457b8e6f90707e12b6ac5d20c6f3866584fa3be0787d839f" +checksum = "b2a930fd487faaa92a30afa92cc9dd1526a5cff67124abbbb1c617ce070f4dcf" dependencies = [ - "aead 0.4.1", + "aead 0.4.2", "aes 0.7.4", "cipher 0.3.0", - "ctr 0.7.0", - "ghash 0.4.2", + "ctr 0.8.0", + "ghash 0.4.3", "subtle", ] @@ -173,9 +173,9 @@ checksum = "34fde25430d87a9388dadbe6e34d7f72a462c8b43ac8d309b42b0a8505d7e2a5" [[package]] name = "anyhow" -version = "1.0.41" +version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15af2628f6890fe2609a3b91bef4c83450512802e59489f9c1cb1fa5df064a61" +checksum = "28ae2b3dec75a406790005a200b1bd89785afc02517a00ca99ecfe093ee9e6cf" [[package]] name = "arc-swap" @@ -183,6 +183,12 @@ version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc4662175ead9cd84451d5c35070517777949a2ed84551764129cedb88384841" +[[package]] +name = "arc-swap" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dabe5a181f83789739c194cbe5a897dde195078fac08568d09221fd6137a7ba8" + [[package]] name = "arrayref" version = "0.3.6" @@ -214,20 +220,20 @@ version = 
"0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25f9db3b38af870bf7e5cc649167533b493928e50744e2c30ae350230b414670" dependencies = [ - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", ] [[package]] name = "async-trait" -version = "0.1.50" +version = "0.1.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b98e84bbb4cbcdd97da190ba0c58a1bb0de2c1fdf67d159e192ed766aeca722" +checksum = "44318e776df68115a881de9a8fd1b9e53368d7a4a5ce4cc48517da3393233a5e" dependencies = [ - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", ] [[package]] @@ -255,9 +261,9 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" -version = "0.3.60" +version = "0.3.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7815ea54e4d821e791162e078acbebfd6d8c8939cd559c9335dceb1c8ca7282" +checksum = "e7a905d892734eea339e896738c14b9afce22b5318f64b951e70bf3844419b01" dependencies = [ "addr2line", "cc", @@ -331,7 +337,7 @@ version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" dependencies = [ - "serde 1.0.126", + "serde 1.0.129", ] [[package]] @@ -340,7 +346,7 @@ version = "0.56.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2da379dbebc0b76ef63ca68d8fc6e71c0f13e59432e0987e508c1820e6ab5239" dependencies = [ - "bitflags 1.2.1", + "bitflags 1.3.2", "cexpr", "clang-sys", "clap", @@ -349,7 +355,7 @@ dependencies = [ "lazycell", "log 0.4.14", "peeking_take_while", - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", "regex", "rustc-hash", @@ -371,9 +377,9 @@ checksum = "4efd02e230a02e18f92fc2735f44597385ed02ad8f831e7c1c1156ee5e1ab3a5" [[package]] name = "bitflags" -version = "1.2.1" +version = "1.3.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitstring" @@ -446,14 +452,14 @@ checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" [[package]] name = "bstr" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a40b47ad93e1a5404e6c18dec46b628214fee441c70f4ab5d6942142cc268a3d" +checksum = "90682c8d613ad3373e66de8c6411e0ae2ab2571e879d2efbf73558cc66f21279" dependencies = [ "lazy_static 1.4.0", "memchr", "regex-automata", - "serde 1.0.126", + "serde 1.0.129", ] [[package]] @@ -466,6 +472,12 @@ dependencies = [ "safemem", ] +[[package]] +name = "bufstream" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" + [[package]] name = "bumpalo" version = "3.7.0" @@ -474,9 +486,9 @@ checksum = "9c59e7af012c713f529e7a3ee57ce9b31ddd858d4b512923602f74608b009631" [[package]] name = "bytemuck" -version = "1.7.0" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9966d2ab714d0f785dbac0a0396251a35280aeb42413281617d0209ab4898435" +checksum = "72957246c41db82b8ef88a5486143830adeb8227ef9837740bdec67724cf2c5b" [[package]] name = "byteorder" @@ -500,7 +512,7 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" dependencies = [ - "serde 1.0.126", + "serde 1.0.129", ] [[package]] @@ -529,11 +541,11 @@ checksum = "df8670b8c7b9dae1793364eafadf7239c40d669904660c5960d74cfd80b46a53" [[package]] name = "cast" -version = "0.2.3" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4b9434b9a5aa1450faa3f9cb14ea0e8c53bb5d2b3c1bfd1ab4fc03e9f33fbfb0" +checksum = "4c24dab4283a142afa2fdca129b80ad2c6284e073930f964c3a1293c225ee39a" dependencies = [ - "rustc_version 0.2.3", + "rustc_version 0.4.0", ] [[package]] @@ -557,20 +569,20 @@ dependencies = [ "heck", "indexmap", "log 0.4.14", - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", - "serde 1.0.126", + "serde 1.0.129", "serde_json", - "syn 1.0.73", + "syn 1.0.75", "tempfile", "toml 0.5.8", ] [[package]] name = "cc" -version = "1.0.68" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a72c244c1ff497a746a7e1fb3d14bd08420ecda70c8f25c7112f2781652d787" +checksum = "e70cc2f62c6ce1868963827bd677764c62d07c3d9a3e1fb1177ee1a9ab199eb2" [[package]] name = "cexpr" @@ -604,9 +616,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chacha20" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fee7ad89dc1128635074c268ee661f90c3f7e83d9fd12910608c36b47d6c3412" +checksum = "ea8756167ea0aca10e066cdbe7813bd71d2f24e69b0bc7b50509590cef2ce0b9" dependencies = [ "cfg-if 1.0.0", "cipher 0.3.0", @@ -616,11 +628,11 @@ dependencies = [ [[package]] name = "chacha20poly1305" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1580317203210c517b6d44794abfbe600698276db18127e37ad3e69bf5e848e5" +checksum = "175a11316f33592cf2b71416ee65283730b5b7849813c4891d02a12906ed9acc" dependencies = [ - "aead 0.4.1", + "aead 0.4.2", "chacha20", "cipher 0.3.0", "poly1305", @@ -642,7 +654,7 @@ dependencies = [ "libc", "num-integer", "num-traits 0.2.14", - "serde 1.0.126", + "serde 1.0.129", "time", "winapi 0.3.9", ] @@ -665,7 +677,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6316c62053228eddd526a5e6deb6344c80bf2bc1e9786e7f90b3083e73197c1" dependencies = [ "bitstring", - 
"serde 1.0.126", + "serde 1.0.129", ] [[package]] @@ -694,9 +706,9 @@ checksum = "b0fc239e0f6cb375d2402d48afb92f76f5404fd1df208a41930ec81eda078bea" [[package]] name = "clang-sys" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "853eda514c284c2287f4bf20ae614f8781f40a81d32ecda6e91449304dfe077c" +checksum = "81cf2cc85830eae84823884db23c5306442a6c3d5bfd3beb2f2a2c829faa1816" dependencies = [ "glob", "libc", @@ -711,7 +723,7 @@ checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" dependencies = [ "ansi_term 0.11.0", "atty", - "bitflags 1.2.1", + "bitflags 1.3.2", "strsim 0.8.0", "textwrap", "unicode-width", @@ -733,7 +745,7 @@ version = "0.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" dependencies = [ - "bitflags 1.2.1", + "bitflags 1.3.2", ] [[package]] @@ -751,7 +763,7 @@ dependencies = [ "lazy_static 1.4.0", "nom 4.2.3", "rust-ini", - "serde 1.0.126", + "serde 1.0.129", "serde-hjson", "serde_json", "toml 0.4.10", @@ -824,7 +836,7 @@ dependencies = [ "rand_xoshiro", "rayon", "rayon-core", - "serde 1.0.126", + "serde 1.0.129", "serde_derive", "serde_json", "tinytemplate", @@ -899,9 +911,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9" +checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" dependencies = [ "cfg-if 1.0.0", "crossbeam-epoch", @@ -957,12 +969,12 @@ version = "0.17.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f4919d60f26ae233e14233cc39746c8c8bb8cd7b05840ace83604917b51b6c7" dependencies = [ - "bitflags 1.2.1", + "bitflags 1.3.2", "crossterm_winapi", "lazy_static 1.4.0", "libc", "mio 0.7.13", - "parking_lot", + 
"parking_lot 0.10.2", "signal-hook", "winapi 0.3.9", ] @@ -1002,7 +1014,7 @@ dependencies = [ "csv-core", "itoa", "ryu", - "serde 1.0.126", + "serde 1.0.129", ] [[package]] @@ -1025,18 +1037,18 @@ dependencies = [ [[package]] name = "ctr" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a232f92a03f37dd7d7dd2adc67166c77e9cd88de5b019b9a9eecfaeaf7bfd481" +checksum = "049bb91fb4aaf0e3c7efa6cd5ef877dbbbd15b39dad06d9948de4ec8a75761ea" dependencies = [ "cipher 0.3.0", ] [[package]] name = "curl-sys" -version = "0.4.44+curl-7.77.0" +version = "0.4.45+curl-7.78.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b6d85e9322b193f117c966e79c2d6929ec08c02f339f950044aba12e20bbaf1" +checksum = "de9e5a72b1c744eb5dd20b2be4d7eb84625070bb5c4ab9b347b70464ab1e62eb" dependencies = [ "cc", "libc", @@ -1049,14 +1061,14 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "639891fde0dbea823fc3d798a0fdf9d2f9440a42d64a78ab3488b0ca025117b3" +checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" dependencies = [ "byteorder", "digest", "rand_core 0.5.1", - "serde 1.0.126", + "serde 1.0.129", "subtle", "zeroize", ] @@ -1071,7 +1083,7 @@ dependencies = [ "digest", "packed_simd_2", "rand_core 0.6.3", - "serde 1.0.126", + "serde 1.0.129", "subtle-ng", "zeroize", ] @@ -1094,10 +1106,10 @@ checksum = "f0c960ae2da4de88a91b2d920c2a7233b400bc33cb28453a2987822d8392519b" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", "strsim 0.9.3", - "syn 1.0.73", + "syn 1.0.75", ] [[package]] @@ -1108,7 +1120,7 @@ checksum = "d9b5a2f4ac4969822c62224815d069952656cadc7084fdca9751e6d959189b72" dependencies = [ "darling_core", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", ] [[package]] @@ -1117,6 +1129,17 @@ version = "2.3.2" source 
= "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2 1.0.28", + "quote 1.0.9", + "syn 1.0.75", +] + [[package]] name = "derive-error" version = "0.0.4" @@ -1136,9 +1159,9 @@ checksum = "a2658621297f2cf68762a6f7dc0bb7e1ff2cfd6583daef8ee0fed6f7ec468ec0" dependencies = [ "darling", "derive_builder_core", - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", ] [[package]] @@ -1148,9 +1171,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2791ea3e372c8495c0bc2033991d76b512cd799d07491fbd6890124db9458bef" dependencies = [ "darling", - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", ] [[package]] @@ -1187,9 +1210,9 @@ version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "45f5098f628d02a7a0f68ddba586fb61e80edec3bdc1be3b921f4ceec60858d3" dependencies = [ - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", ] [[package]] @@ -1211,15 +1234,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "dirs" -version = "3.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30baa043103c9d0c2a57cf537cc2f35623889dc0d405e6c3cccfadbc81c71309" -dependencies = [ - "dirs-sys", -] - [[package]] name = "dirs-next" version = "1.0.2" @@ -1230,17 +1244,6 @@ dependencies = [ "dirs-sys-next", ] -[[package]] -name = "dirs-sys" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03d86534ed367a67548dc68113a0f5db55432fdfbb6e6f9d77704397d95d5780" -dependencies = [ - "libc", - "redox_users", - "winapi 0.3.9", -] - 
[[package]] name = "dirs-sys-next" version = "0.1.2" @@ -1260,9 +1263,9 @@ checksum = "56899898ce76aaf4a0f24d914c97ea6ed976d42fec6ad33fcbb0a1103e07b2b0" [[package]] name = "ed25519" -version = "1.1.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d0860415b12243916284c67a9be413e044ee6668247b99ba26d94b2bc06c8f6" +checksum = "4620d40f6d2601794401d6dd95a5cf69b6c157852539470eeda433a99b3c0efc" dependencies = [ "signature", ] @@ -1276,7 +1279,7 @@ dependencies = [ "curve25519-dalek", "ed25519", "rand 0.7.3", - "serde 1.0.126", + "serde 1.0.129", "sha2", "zeroize", ] @@ -1309,9 +1312,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c5f0096a91d210159eceb2ff5e1c4da18388a170e1e3ce948aac9c8fdbbf595" dependencies = [ "heck", - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", ] [[package]] @@ -1437,7 +1440,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" dependencies = [ - "bitflags 1.2.1", + "bitflags 1.3.2", "fuchsia-zircon-sys", ] @@ -1455,9 +1458,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7e43a803dae2fa37c1f6a8fe121e1f7bf9548b4dfc0522a42f34145dadfc27" +checksum = "1adc00f486adfc9ce99f77d717836f0c5aa84965eb0b4f051f4e83f7cab53f8b" dependencies = [ "futures-channel", "futures-core", @@ -1470,9 +1473,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e682a68b29a882df0545c143dc3646daefe80ba479bcdede94d5a703de2871e2" +checksum = "74ed2411805f6e4e3d9bc904c95d5d423b89b3b25dc0250aa74729de20629ff9" dependencies = [ "futures-core", 
"futures-sink", @@ -1490,9 +1493,9 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0402f765d8a89a26043b889b26ce3c4679d268fa6bb22cd7c6aad98340e179d1" +checksum = "af51b1b4a7fdff033703db39de8802c673eb91855f2e0d47dcf3bf2c0ef01f99" [[package]] name = "futures-core-preview" @@ -1502,9 +1505,9 @@ checksum = "b35b6263fb1ef523c3056565fa67b1d16f0a8604ff12b11b08c25f28a734c60a" [[package]] name = "futures-executor" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "badaa6a909fac9e7236d0620a2f57f7664640c56575b71a7552fbd68deafab79" +checksum = "4d0d535a57b87e1ae31437b892713aee90cd2d7b0ee48727cd11fc72ef54761c" dependencies = [ "futures-core", "futures-task", @@ -1524,9 +1527,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acc499defb3b348f8d8f3f66415835a9131856ff7714bf10dadfc4ec4bdb29a1" +checksum = "0b0e06c393068f3a6ef246c75cdca793d6a46347e75286933e5e75fd2fd11582" [[package]] name = "futures-io-preview" @@ -1536,15 +1539,15 @@ checksum = "f4914ae450db1921a56c91bde97a27846287d062087d4a652efc09bb3a01ebda" [[package]] name = "futures-macro" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c40298486cdf52cc00cd6d6987892ba502c7656a16a4192a9992b1ccedd121" +checksum = "c54913bae956fb8df7f4dc6fc90362aa72e69148e3f39041fbe8742d21e0ac57" dependencies = [ "autocfg 1.0.1", "proc-macro-hack", - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", ] [[package]] @@ -1563,9 +1566,9 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a57bead0ceff0d6dde8f465ecd96c9338121bb7717d3e7b108059531870c4282" +checksum = "c0f30aaa67363d119812743aa5f33c201a7a66329f97d1a887022971feea4b53" [[package]] name = "futures-sink-preview" @@ -1575,15 +1578,15 @@ checksum = "86f148ef6b69f75bb610d4f9a2336d4fc88c4b5b67129d1a340dd0fd362efeec" [[package]] name = "futures-task" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a16bef9fc1a4dddb5bee51c989e3fbba26569cbb0e31f5b303c184e3dd33dae" +checksum = "bbe54a98670017f3be909561f6ad13e810d9a51f3f061b902062ca3da80799f2" [[package]] name = "futures-test" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e771858b95154d86bc76b412e4cea3bc104803a7838179e5a1315d9c8a4c2b6" +checksum = "3a5ac667be097531d74ff9fff9c9da7820dd63afd2312bb9c6f589211ae32080" dependencies = [ "futures-core", "futures-executor", @@ -1592,7 +1595,7 @@ dependencies = [ "futures-sink", "futures-task", "futures-util", - "pin-project 1.0.7", + "pin-project 1.0.8", "pin-utils", ] @@ -1621,9 +1624,9 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feb5c238d27e2bf94ffdfd27b2c29e3df4a68c4193bb6427384259e2bf191967" +checksum = "67eb846bfd58e44a8481a00049e82c43e0ccb5d61f8dc071057cb19249dd4d78" dependencies = [ "autocfg 1.0.1", "futures 0.1.31", @@ -1728,19 +1731,19 @@ dependencies = [ [[package]] name = "ghash" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bbd60caa311237d508927dbba7594b483db3ef05faa55172fcf89b1bcda7853" +checksum = "b442c439366184de619215247d24e908912b175e824a530253845ac4c251a5c1" dependencies = [ "opaque-debug", - "polyval 0.5.1", + "polyval 0.5.2", ] [[package]] name = "gimli" -version = "0.24.0" +version = "0.25.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4075386626662786ddb0ec9081e7c7eeb1ba31951f447ca780ef9f5d568189" +checksum = "f0a01e0497841a3b2db4f8afa483cce65f7e96a3498bd6c541734792aeac8fe7" [[package]] name = "git2" @@ -1748,7 +1751,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7339329bfa14a00223244311560d11f8f489b453fb90092af97f267a6090ab0" dependencies = [ - "bitflags 1.2.1", + "bitflags 1.3.2", "libc", "libgit2-sys", "log 0.4.14", @@ -1777,12 +1780,31 @@ dependencies = [ "http", "indexmap", "slab", - "tokio", + "tokio 0.2.25", "tokio-util 0.3.1", "tracing", "tracing-futures", ] +[[package]] +name = "h2" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7f3675cfef6a30c8031cf9e6493ebdc3bb3272a3fea3923c4210d1830e6a472" +dependencies = [ + "bytes 1.0.1", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap", + "slab", + "tokio 1.10.1", + "tokio-util 0.6.7", + "tracing", +] + [[package]] name = "hashbrown" version = "0.11.2" @@ -1815,9 +1837,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hex-literal" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76505e26b6ca3bbdbbb360b68472abbb80998c5fa5dc43672eca34f28258e138" +checksum = "21e4590e13640f19f249fe3e4eca5113bc4289f2497710378190e7f4bd96f45b" [[package]] name = "http" @@ -1840,11 +1862,22 @@ dependencies = [ "http", ] +[[package]] +name = "http-body" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "399c583b2979440c60be0821a6199eca73bc3c8dcd9d070d75ac726e2c6186e5" +dependencies = [ + "bytes 1.0.1", + "http", + "pin-project-lite 0.2.7", +] + [[package]] name = "httparse" -version = "1.4.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f3a87b616e37e93c22fb19bcd386f02f3af5ea98a25670ad0fce773de23c5e68" +checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" [[package]] name = "httpdate" @@ -1852,6 +1885,12 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" +[[package]] +name = "httpdate" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" + [[package]] name = "humantime" version = "1.3.0" @@ -1896,15 +1935,39 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2", + "h2 0.2.7", "http", - "http-body", + "http-body 0.3.1", "httparse", - "httpdate", + "httpdate 0.3.2", "itoa", - "pin-project 1.0.7", - "socket2", - "tokio", + "pin-project 1.0.8", + "socket2 0.3.19", + "tokio 0.2.25", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "hyper" +version = "0.14.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13f67199e765030fa08fe0bd581af683f0d5bc04ea09c2b1102012c5fb90e7fd" +dependencies = [ + "bytes 1.0.1", + "futures-channel", + "futures-core", + "futures-util", + "h2 0.3.4", + "http", + "http-body 0.4.3", + "httparse", + "httpdate 1.0.1", + "itoa", + "pin-project-lite 0.2.7", + "socket2 0.4.1", + "tokio 1.10.1", "tower-service", "tracing", "want", @@ -1919,10 +1982,23 @@ dependencies = [ "bytes 0.5.6", "hyper 0.13.10", "native-tls", - "tokio", + "tokio 0.2.25", "tokio-tls", ] +[[package]] +name = "hyper-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +dependencies = [ + "bytes 1.0.1", + "hyper 0.14.12", + "native-tls", + "tokio 1.10.1", + "tokio-native-tls", +] + [[package]] name = "ident_case" version = "1.0.1" @@ -1976,12 +2052,20 @@ dependencies = [ ] [[package]] 
-name = "installer" -version = "0.1.0" +name = "instant" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bee0328b1209d157ef001c94dd85b4f8f64139adb0eac2659f4b08382b2f474d" dependencies = [ - "dirs", + "cfg-if 1.0.0", ] +[[package]] +name = "integer-encoding" +version = "1.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48dc51180a9b377fd75814d0cc02199c20f8e99433d6762f650d39cdbbd3b56f" + [[package]] name = "iovec" version = "0.1.4" @@ -2008,15 +2092,15 @@ dependencies = [ [[package]] name = "itoa" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" +checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] name = "js-sys" -version = "0.3.51" +version = "0.3.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83bdfbace3a0e81a4253f73b49e960b053e396a11012cbd49b9b74d6a2b67062" +checksum = "e4bf49d50e2961077d9c99f4b7997d770a1114f087c3c2e0069b36c13fc2979d" dependencies = [ "wasm-bindgen", ] @@ -2028,7 +2112,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "436f3455a8a4e9c7b14de9f1206198ee5d0bdc2db1b560339d2141093d7dd389" dependencies = [ "hyper 0.10.16", - "serde 1.0.126", + "serde 1.0.129", "serde_derive", "serde_json", ] @@ -2078,9 +2162,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.97" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12b8adadd720df158f4d70dfe7ccc6adb0472d7c55ca83445f6a5ab3e36f8fb6" +checksum = "a1fa8cddc8fbbee11227ef194b5317ed014b8acbf15139bd716a18ad3fe99ec5" [[package]] name = "libgit2-sys" @@ -2203,6 +2287,15 @@ dependencies = [ "scopeguard", ] +[[package]] +name = "lock_api" +version = "0.4.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0382880606dff6d15c9476c416d18690b72742aa7b605bb6dd6ec9030fbf07eb" +dependencies = [ + "scopeguard", +] + [[package]] name = "log" version = "0.3.9" @@ -2219,7 +2312,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" dependencies = [ "cfg-if 1.0.0", - "serde 1.0.126", + "serde 1.0.129", ] [[package]] @@ -2235,21 +2328,45 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "100052474df98158c0738a7d3f4249c99978490178b5f9f68cd835ac57adbd1b" dependencies = [ "antidote", - "arc-swap", + "arc-swap 0.3.11", "chrono", - "flate2", "fnv", "humantime 1.3.0", "libc", "log 0.4.14", "log-mdc", - "serde 1.0.126", - "serde-value", + "serde 1.0.129", + "serde-value 0.5.3", "serde_derive", + "serde_yaml", + "thread-id", + "typemap", + "winapi 0.3.9", +] + +[[package]] +name = "log4rs" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1572a880d1115ff867396eee7ae2bc924554225e67a0d3c85c745b3e60ca211" +dependencies = [ + "anyhow", + "arc-swap 0.4.8", + "chrono", + "derivative", + "fnv", + "humantime 2.1.0", + "libc", + "log 0.4.14", + "log-mdc", + "parking_lot 0.11.1", + "regex", + "serde 1.0.129", + "serde-value 0.7.0", "serde_json", "serde_yaml", + "thiserror", "thread-id", - "toml 0.5.8", "typemap", "winapi 0.3.9", ] @@ -2265,9 +2382,9 @@ dependencies = [ [[package]] name = "matches" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" +checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" [[package]] name = "md-5" @@ -2282,9 +2399,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b16bd47d9e329435e309c58469fe0791c2d0d1ba96ec0954152a5ae2b04387dc" +checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" [[package]] name = "memoffset" @@ -2323,9 +2440,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9753f12909fd8d923f75ae5c3258cae1ed3c8ec052e1b38c93c21a6d157f789c" dependencies = [ "migrations_internals", - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", ] [[package]] @@ -2438,7 +2555,7 @@ dependencies = [ "fixed-hash", "hex", "hex-literal", - "serde 1.0.126", + "serde 1.0.129", "serde-big-array", "thiserror", "tiny-keccak", @@ -2463,9 +2580,9 @@ checksum = "424f6e86263cd5294cbd7f1e95746b95aca0e0d66bff31e5a40d6baa87b4aa99" dependencies = [ "proc-macro-crate", "proc-macro-error", - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", "synstructure", ] @@ -2477,9 +2594,9 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "native-tls" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8d96b2e1c8da3957d58100b09f102c6d9cfdfced01b7ec5a8974044bb09dbd4" +checksum = "48ba9f7719b5a0f42f338907614285fb5fd70e53858141f69898a1fb7203b24d" dependencies = [ "lazy_static 1.4.0", "libc", @@ -2522,7 +2639,7 @@ version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83450fe6a6142ddd95fb064b746083fc4ef1705fe81f64a64e1d4b39f54a1055" dependencies = [ - "bitflags 1.2.1", + "bitflags 1.3.2", "cc", "cfg-if 0.1.10", "libc", @@ -2619,7 +2736,7 @@ dependencies = [ "num-iter", "num-traits 0.2.14", "rand 0.7.3", - "serde 1.0.126", + "serde 1.0.129", "smallvec", "zeroize", ] @@ -2639,9 +2756,9 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ - "proc-macro2 
1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", ] [[package]] @@ -2717,9 +2834,9 @@ dependencies = [ [[package]] name = "object" -version = "0.25.3" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a38f2be3697a57b4060074ff41b44c16870d916ad7877c17696e063257482bc7" +checksum = "ee2766204889d09937d00bfbb7fec56bb2a199e2ade963cab19185d8a6104c7c" dependencies = [ "memchr", ] @@ -2738,11 +2855,11 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.35" +version = "0.10.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "549430950c79ae24e6d02e0b7404534ecf311d94cc9f861e9e4020187d13d885" +checksum = "8d9facdb76fec0b73c406f125d44d86fdad818d66fef0531eec9233ca425ff4a" dependencies = [ - "bitflags 1.2.1", + "bitflags 1.3.2", "cfg-if 1.0.0", "foreign-types", "libc", @@ -2758,18 +2875,18 @@ checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" [[package]] name = "openssl-src" -version = "111.15.0+1.1.1k" +version = "111.16.0+1.1.1l" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1a5f6ae2ac04393b217ea9f700cd04fa9bf3d93fae2872069f3d15d908af70a" +checksum = "7ab2173f69416cf3ec12debb5823d244127d23a9b127d5a5189aa97c5fa2859f" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.65" +version = "0.9.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a7907e3bfa08bb85105209cdfcb6c63d109f8f6c1ed6ca318fff5c1853fbc1d" +checksum = "1996d2d305e561b70d1ee0c53f1542833f4e1ac6ce9a6708b6ff2738ca67dc82" dependencies = [ "autocfg 1.0.1", "cc", @@ -2779,6 +2896,49 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "opentelemetry" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1cf9b1c4e9a6c4de793c632496fa490bdc0e1eea73f0c91394f7b6990935d22" +dependencies = [ + 
"async-trait", + "crossbeam-channel 0.5.1", + "futures 0.3.16", + "js-sys", + "lazy_static 1.4.0", + "percent-encoding 2.1.0", + "pin-project 1.0.8", + "rand 0.8.4", + "thiserror", + "tokio 1.10.1", + "tokio-stream", +] + +[[package]] +name = "opentelemetry-jaeger" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db22f492873ea037bc267b35a0e8e4fb846340058cb7c864efe3d0bf23684593" +dependencies = [ + "async-trait", + "lazy_static 1.4.0", + "opentelemetry", + "opentelemetry-semantic-conventions", + "thiserror", + "thrift", + "tokio 1.10.1", +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffeac823339e8b0f27b961f4385057bf9f97f2863bc745bd015fd6091f2270e9" +dependencies = [ + "opentelemetry", +] + [[package]] name = "ordered-float" version = "1.1.1" @@ -2788,6 +2948,15 @@ dependencies = [ "num-traits 0.2.14", ] +[[package]] +name = "ordered-float" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "039f02eb0f69271f26abe3202189275d7aa2258b903cb0281b5de710a2570ff3" +dependencies = [ + "num-traits 0.2.14", +] + [[package]] name = "packed_simd_2" version = "0.3.5" @@ -2810,7 +2979,7 @@ dependencies = [ "data-encoding", "multihash", "percent-encoding 2.1.0", - "serde 1.0.126", + "serde 1.0.129", "static_assertions", "unsigned-varint 0.6.0", "url 2.2.2", @@ -2822,8 +2991,19 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e" dependencies = [ - "lock_api", - "parking_lot_core", + "lock_api 0.3.4", + "parking_lot_core 0.7.2", +] + +[[package]] +name = "parking_lot" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" +dependencies = [ + "instant", + 
"lock_api 0.4.4", + "parking_lot_core 0.8.3", ] [[package]] @@ -2840,6 +3020,20 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "parking_lot_core" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" +dependencies = [ + "cfg-if 1.0.0", + "instant", + "libc", + "redox_syscall 0.2.10", + "smallvec", + "winapi 0.3.9", +] + [[package]] name = "path-clean" version = "0.1.0" @@ -2954,11 +3148,11 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7509cc106041c40a4518d2af7a61530e1eed0e6285296a3d8c5472806ccc4a4" +checksum = "576bc800220cc65dac09e99e97b08b358cfab6e17078de8dc5fee223bd2d0c08" dependencies = [ - "pin-project-internal 1.0.7", + "pin-project-internal 1.0.8", ] [[package]] @@ -2967,20 +3161,20 @@ version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3be26700300be6d9d23264c73211d8190e755b6b5ca7a1b28230025511b52a5e" dependencies = [ - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", ] [[package]] name = "pin-project-internal" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48c950132583b500556b1efd71d45b319029f2b71518d979fcc208e16b42426f" +checksum = "6e8fe8163d14ce7f0cdac2e040116f22eac817edabff0be91e8aff7e9accf389" dependencies = [ - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", ] [[package]] @@ -3009,9 +3203,9 @@ checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" [[package]] name = "poly1305" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fe800695325da85083cd23b56826fccb2e2dc29b218e7811a6f33bc93f414be" +checksum = 
"9fcffab1f78ebbdf4b93b68c1ffebc24037eedf271edaca795732b24e5e4e349" dependencies = [ "cpufeatures", "opaque-debug", @@ -3031,9 +3225,9 @@ dependencies = [ [[package]] name = "polyval" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e597450cbf209787f0e6de80bf3795c6b2356a380ee87837b545aded8dbc1823" +checksum = "a6ba6a405ef63530d6cb12802014b22f9c5751bd17cdcddbe9e46d5c8ae83287" dependencies = [ "cfg-if 1.0.0", "cpufeatures", @@ -3064,9 +3258,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", "version_check 0.9.3", ] @@ -3076,7 +3270,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", "version_check 0.9.3", ] @@ -3104,9 +3298,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.27" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038" +checksum = "5c7ed8b8c7b886ea3ed7dde405212185f423ab44682667c8c6dd14aa1d9f6612" dependencies = [ "unicode-xid 0.2.2", ] @@ -3147,9 +3341,9 @@ checksum = "537aa19b95acde10a12fec4301466386f757403de4cd4e5b4fa78fb5ecb18f72" dependencies = [ "anyhow", "itertools", - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", ] [[package]] @@ -3199,7 +3393,7 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" dependencies = [ - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", ] [[package]] @@ 
-3348,7 +3542,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8da103db7f8022a2646a11e8f58de98d137089f90c3eb0bb54ed18f12ecb73b7" dependencies = [ - "bitflags 1.2.1", + "bitflags 1.3.2", "git2", "libc", "thiserror", @@ -3356,9 +3550,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b0d8e0819fadc20c74ea8373106ead0600e3a67ef1fe8da56e39b9ae7275674" +checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90" dependencies = [ "autocfg 1.0.1", "crossbeam-deque", @@ -3368,9 +3562,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.9.0" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ab346ac5921dc62ffa9f89b7a773907511cdfa5490c572ae9be1be33e8afa4a" +checksum = "d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e" dependencies = [ "crossbeam-channel 0.5.1", "crossbeam-deque", @@ -3396,11 +3590,11 @@ checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" [[package]] name = "redox_syscall" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ab49abadf3f9e1c4bc499e8845e152ad87d2ad2d30371841171169e9d75feee" +checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" dependencies = [ - "bitflags 1.2.1", + "bitflags 1.3.2", ] [[package]] @@ -3410,7 +3604,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" dependencies = [ "getrandom 0.2.3", - "redox_syscall 0.2.9", + "redox_syscall 0.2.10", ] [[package]] @@ -3460,9 +3654,9 @@ dependencies = [ "futures-core", "futures-util", "http", - "http-body", + "http-body 0.3.1", "hyper 0.13.10", - "hyper-tls", + "hyper-tls 0.4.3", "ipnet", "js-sys", "lazy_static 1.4.0", @@ -3472,10 
+3666,10 @@ dependencies = [ "native-tls", "percent-encoding 2.1.0", "pin-project-lite 0.2.7", - "serde 1.0.126", + "serde 1.0.129", "serde_json", "serde_urlencoded", - "tokio", + "tokio 0.2.25", "tokio-tls", "url 2.2.2", "wasm-bindgen", @@ -3484,6 +3678,41 @@ dependencies = [ "winreg", ] +[[package]] +name = "reqwest" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "246e9f61b9bb77df069a947682be06e31ac43ea37862e244a69f177694ea6d22" +dependencies = [ + "base64 0.13.0", + "bytes 1.0.1", + "encoding_rs", + "futures-core", + "futures-util", + "http", + "http-body 0.4.3", + "hyper 0.14.12", + "hyper-tls 0.5.0", + "ipnet", + "js-sys", + "lazy_static 1.4.0", + "log 0.4.14", + "mime 0.3.16", + "native-tls", + "percent-encoding 2.1.0", + "pin-project-lite 0.2.7", + "serde 1.0.129", + "serde_json", + "serde_urlencoded", + "tokio 1.10.1", + "tokio-native-tls", + "url 2.2.2", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "winreg", +] + [[package]] name = "ring" version = "0.16.20" @@ -3528,7 +3757,7 @@ checksum = "011e1d58446e9fa3af7cdc1fb91295b10621d3ac4cb3a85cc86385ee9ca50cd3" dependencies = [ "byteorder", "rmp", - "serde 1.0.126", + "serde 1.0.129", ] [[package]] @@ -3571,9 +3800,9 @@ checksum = "3e52c148ef37f8c375d49d5a73aa70713125b7f19095948a923f80afdeb22ec2" [[package]] name = "rustc-demangle" -version = "0.1.20" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dead70b0b5e03e9c814bcb6b01e03e68f7c57a80aa48c72ec92152ab3e818d49" +checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" [[package]] name = "rustc-hash" @@ -3589,20 +3818,20 @@ checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" [[package]] name = "rustc_version" -version = "0.2.3" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +checksum = 
"f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" dependencies = [ - "semver 0.9.0", + "semver 0.11.0", ] [[package]] name = "rustc_version" -version = "0.3.3" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 0.11.0", + "semver 1.0.4", ] [[package]] @@ -3644,7 +3873,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54a50e29610a5be68d4a586a5cce3bfb572ed2c2a74227e4168444b7bf4e5235" dependencies = [ "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", ] [[package]] @@ -3706,7 +3935,7 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23a2ac85147a3a11d77ecf1bc7166ec0b92febfa4461c37944e180f319ece467" dependencies = [ - "bitflags 1.2.1", + "bitflags 1.3.2", "core-foundation", "core-foundation-sys", "libc", @@ -3723,35 +3952,20 @@ dependencies = [ "libc", ] -[[package]] -name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser 0.7.0", -] - [[package]] name = "semver" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" dependencies = [ - "semver-parser 0.10.2", + "semver-parser", ] [[package]] name = "semver" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f3aac57ee7f3272d8395c6e4f502f434f0e289fcd62876f70daa008c20dcabe" - -[[package]] -name = "semver-parser" -version = "0.7.0" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" +checksum = 
"568a8e6258aa33c13358f81fd834adb854c6f7c9468520910a9b1e8fac068012" [[package]] name = "semver-parser" @@ -3770,9 +3984,9 @@ checksum = "9dad3f759919b92c3068c696c15c3d17238234498bbdcc80f2c469606f948ac8" [[package]] name = "serde" -version = "1.0.126" +version = "1.0.129" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03" +checksum = "d1f72836d2aa753853178eda473a3b9d8e4eefdaf20523b919677e6de489f8f1" dependencies = [ "serde_derive", ] @@ -3783,7 +3997,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "18b20e7752957bbe9661cff4e0bb04d183d0948cdab2ea58cdb9df36a61dfe62" dependencies = [ - "serde 1.0.126", + "serde 1.0.129", "serde_derive", ] @@ -3806,30 +4020,40 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a663f873dedc4eac1a559d4c6bc0d0b2c34dc5ac4702e105014b8281489e44f" dependencies = [ - "ordered-float", - "serde 1.0.126", + "ordered-float 1.1.1", + "serde 1.0.129", +] + +[[package]] +name = "serde-value" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" +dependencies = [ + "ordered-float 2.7.0", + "serde 1.0.129", ] [[package]] name = "serde_derive" -version = "1.0.126" +version = "1.0.129" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43" +checksum = "e57ae87ad533d9a56427558b516d0adac283614e347abf85b0dc0cbbf0a249f3" dependencies = [ - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", ] [[package]] name = "serde_json" -version = "1.0.64" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" +checksum = 
"336b10da19a12ad094b59d870ebde26a45402e5b470add4b5fd03c5048a32127" dependencies = [ "itoa", "ryu", - "serde 1.0.126", + "serde 1.0.129", ] [[package]] @@ -3838,9 +4062,9 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "98d0516900518c29efa217c298fa1f4e6c6ffc85ae29fd7f4ee48f176e1a9ed5" dependencies = [ - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", ] [[package]] @@ -3861,26 +4085,26 @@ dependencies = [ "form_urlencoded", "itoa", "ryu", - "serde 1.0.126", + "serde 1.0.129", ] [[package]] name = "serde_yaml" -version = "0.8.17" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15654ed4ab61726bf918a39cb8d98a2e2995b002387807fa6ba58fdf7f59bb23" +checksum = "6375dbd828ed6964c3748e4ef6d18e7a175d408ffe184bca01698d0c73f915a9" dependencies = [ "dtoa", - "linked-hash-map 0.5.4", - "serde 1.0.126", + "indexmap", + "serde 1.0.129", "yaml-rust", ] [[package]] name = "sha-1" -version = "0.9.6" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c4cfa741c5832d0ef7fab46cabed29c2aae926db0b11bb2069edd8db5e64e16" +checksum = "1a0c8611594e2ab4ebbf06ec7cbbf0a99450b8570e96cbf5188b5d5f6ef18d81" dependencies = [ "block-buffer", "cfg-if 1.0.0", @@ -3916,9 +4140,9 @@ dependencies = [ [[package]] name = "sharded-slab" -version = "0.1.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79c719719ee05df97490f80a45acfc99e5a30ce98a1e4fb67aee422745ae14e3" +checksum = "740223c51853f3145fe7c90360d2d4232f2b62e3449489c207eccde818979982" dependencies = [ "lazy_static 1.4.0", ] @@ -3968,9 +4192,9 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f173ac3d1a7e3b28003f40de0b5ce7fe2710f9b9dc3fc38664cebee46b3b6527" +checksum = 
"c307a32c1c5c437f38c7fd45d753050587732ba8628319fbdf12a7e289ccc590" [[package]] name = "smallvec" @@ -3984,7 +4208,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6142f7c25e94f6fd25a32c3348ec230df9109b463f59c8c7acc4bd34936babb7" dependencies = [ - "aes-gcm 0.9.2", + "aes-gcm 0.9.3", "blake2", "chacha20poly1305", "rand 0.8.4", @@ -4006,6 +4230,16 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "socket2" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "765f090f0e423d2b55843402a07915add955e7d60657db13707a159727326cad" +dependencies = [ + "libc", + "winapi 0.3.9", +] + [[package]] name = "spin" version = "0.5.2" @@ -4068,9 +4302,9 @@ checksum = "7813934aecf5f51a54775e00068c237de98489463968231a51746bbbc03f9c10" dependencies = [ "heck", "proc-macro-error", - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", ] [[package]] @@ -4086,9 +4320,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e6e163a520367c465f59e0a61a23cfae3b10b6546d78b6f672a382be79f7110" dependencies = [ "heck", - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", ] [[package]] @@ -4098,9 +4332,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87c85aa3f8ea653bfd3ddf25f7ee357ee4d204731f6aa9ad04002306f6e2774c" dependencies = [ "heck", - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", ] [[package]] @@ -4110,16 +4344,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e61bb0be289045cb80bfce000512e32d09f8337e54c186725da381377ad1f8d5" dependencies = [ "heck", - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", ] [[package]] name = "subtle" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" +checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "subtle-ng" @@ -4157,11 +4391,11 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.73" +version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f71489ff30030d2ae598524f61326b902466f72a0fb1a8564c001cc63425bcc7" +checksum = "b7f58f7e8eaa0009c5fec437aabf511bd9933e4b2d7407bd05273c01a8906ea7" dependencies = [ - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", "unicode-xid 0.2.2", ] @@ -4177,19 +4411,19 @@ dependencies = [ [[package]] name = "synstructure" -version = "0.12.4" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" +checksum = "474aaa926faa1603c40b7885a9eaea29b444d1cb2850cb7c0e37bb1a4182f4fa" dependencies = [ - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", "unicode-xid 0.2.2", ] [[package]] name = "tari_app_grpc" -version = "0.9.0" +version = "0.9.5" dependencies = [ "chrono", "prost", @@ -4205,11 +4439,11 @@ dependencies = [ [[package]] name = "tari_app_utilities" -version = "0.9.0" +version = "0.9.5" dependencies = [ "config", "dirs-next", - "futures 0.3.15", + "futures 0.3.16", "log 0.4.14", "qrcode", "rand 0.8.4", @@ -4224,21 +4458,22 @@ dependencies = [ "tari_p2p", "tari_wallet", "thiserror", - "tokio", + "tokio 0.2.25", "tonic", ] [[package]] name = "tari_base_node" -version = "0.9.0" +version = "0.9.5" dependencies = [ "anyhow", "bincode", "chrono", "config", - "futures 0.3.15", + "futures 0.3.16", "log 0.4.14", - "log4rs", + "opentelemetry", + "opentelemetry-jaeger", "regex", "rustyline", "rustyline-derive", @@ -4257,8 +4492,11 @@ dependencies = [ "tari_shutdown", "tari_wallet", "thiserror", - "tokio", + "tokio 0.2.25", "tonic", + "tracing", + 
"tracing-opentelemetry", + "tracing-subscriber", ] [[package]] @@ -4274,7 +4512,7 @@ dependencies = [ "merlin", "rand 0.8.4", "rand_core 0.6.3", - "serde 1.0.126", + "serde 1.0.129", "serde_derive", "sha3", "subtle-ng", @@ -4283,7 +4521,7 @@ dependencies = [ [[package]] name = "tari_common" -version = "0.9.0" +version = "0.9.5" dependencies = [ "anyhow", "config", @@ -4291,11 +4529,13 @@ dependencies = [ "get_if_addrs", "git2", "log 0.4.14", - "log4rs", + "log4rs 1.0.0", + "opentelemetry", + "opentelemetry-jaeger", "parity-multiaddr", "path-clean", "prost-build", - "serde 1.0.126", + "serde 1.0.129", "serde_json", "sha2", "structopt", @@ -4303,26 +4543,29 @@ dependencies = [ "tari_test_utils", "tempfile", "toml 0.5.8", + "tracing", + "tracing-opentelemetry", + "tracing-subscriber", ] [[package]] name = "tari_common_types" -version = "0.9.0" +version = "0.9.5" dependencies = [ - "futures 0.3.15", + "futures 0.3.16", "rand 0.8.4", - "serde 1.0.126", + "serde 1.0.129", "tari_crypto", - "tokio", + "tokio 0.2.25", ] [[package]] name = "tari_comms" -version = "0.9.0" +version = "0.9.5" dependencies = [ "anyhow", "async-trait", - "bitflags 1.2.1", + "bitflags 1.3.2", "blake2", "bytes 0.5.6", "chrono", @@ -4331,41 +4574,46 @@ dependencies = [ "data-encoding", "digest", "env_logger 0.7.1", - "futures 0.3.15", + "futures 0.3.16", "lazy_static 1.4.0", "lmdb-zero", "log 0.4.14", "nom 5.1.2", "openssl", + "opentelemetry", + "opentelemetry-jaeger", "parity-multiaddr", "pin-project 0.4.28", "prost", "rand 0.8.4", - "serde 1.0.126", + "serde 1.0.129", "serde_derive", "serde_json", "snow", "tari_common", + "tari_comms_rpc_macros", "tari_crypto", "tari_shutdown", "tari_storage", "tari_test_utils", "tempfile", "thiserror", - "tokio", + "tokio 0.2.25", "tokio-macros", - "tokio-util 0.2.0", + "tokio-util 0.3.1", "tower", "tower-make", + "tracing", + "tracing-futures", "yamux", ] [[package]] name = "tari_comms_dht" -version = "0.9.0" +version = "0.9.5" dependencies = [ "anyhow", - 
"bitflags 1.2.1", + "bitflags 1.3.2", "bytes 0.4.12", "chacha20", "chrono", @@ -4374,7 +4622,7 @@ dependencies = [ "diesel_migrations", "digest", "env_logger 0.7.1", - "futures 0.3.15", + "futures 0.3.16", "futures-test-preview", "futures-util", "lazy_static 1.4.0", @@ -4386,7 +4634,7 @@ dependencies = [ "prost", "prost-types", "rand 0.8.4", - "serde 1.0.126", + "serde 1.0.129", "serde_derive", "serde_repr", "tari_common", @@ -4399,7 +4647,7 @@ dependencies = [ "tari_utilities", "tempfile", "thiserror", - "tokio", + "tokio 0.2.25", "tokio-macros", "tokio-test", "tower", @@ -4409,32 +4657,35 @@ dependencies = [ [[package]] name = "tari_comms_rpc_macros" -version = "0.9.0" +version = "0.9.5" dependencies = [ - "futures 0.3.15", - "proc-macro2 1.0.27", + "futures 0.3.16", + "proc-macro2 1.0.28", "prost", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", "tari_comms", "tari_test_utils", - "tokio", + "tokio 0.2.25", "tokio-macros", "tower-service", ] [[package]] name = "tari_console_wallet" -version = "0.9.0" +version = "0.9.5" dependencies = [ - "bitflags 1.2.1", + "bitflags 1.3.2", "chrono", "chrono-english", "crossterm", - "futures 0.3.15", + "futures 0.3.16", "log 0.4.14", + "opentelemetry", + "opentelemetry-jaeger", "qrcode", "rand 0.8.4", + "regex", "rpassword", "rustyline", "strum", @@ -4451,8 +4702,11 @@ dependencies = [ "tari_shutdown", "tari_wallet", "thiserror", - "tokio", + "tokio 0.2.25", "tonic", + "tracing", + "tracing-opentelemetry", + "tracing-subscriber", "tui", "unicode-segmentation", "unicode-width", @@ -4460,10 +4714,10 @@ dependencies = [ [[package]] name = "tari_core" -version = "0.9.0" +version = "0.9.5" dependencies = [ "bincode", - "bitflags 1.2.1", + "bitflags 1.3.2", "blake2", "bytes 0.4.12", "chrono", @@ -4472,7 +4726,7 @@ dependencies = [ "digest", "env_logger 0.7.1", "fs2", - "futures 0.3.15", + "futures 0.3.16", "hex", "lmdb-zero", "log 0.4.14", @@ -4484,7 +4738,7 @@ dependencies = [ "prost-types", "rand 0.8.4", "randomx-rs", - "serde 
1.0.126", + "serde 1.0.129", "serde_json", "sha3", "strum_macros 0.17.1", @@ -4502,8 +4756,11 @@ dependencies = [ "tari_test_utils", "tempfile", "thiserror", - "tokio", + "tokio 0.2.25", "tokio-macros", + "tracing", + "tracing-attributes", + "tracing-futures", "ttl_cache", "uint", ] @@ -4524,7 +4781,7 @@ dependencies = [ "merlin", "rand 0.8.4", "rmp-serde", - "serde 1.0.126", + "serde 1.0.129", "serde_json", "sha2", "sha3", @@ -4535,7 +4792,7 @@ dependencies = [ [[package]] name = "tari_infra_derive" -version = "0.9.0" +version = "0.9.5" dependencies = [ "blake2", "proc-macro2 0.4.30", @@ -4545,11 +4802,11 @@ dependencies = [ [[package]] name = "tari_key_manager" -version = "0.9.0" +version = "0.9.5" dependencies = [ "digest", "rand 0.8.4", - "serde 1.0.126", + "serde 1.0.129", "serde_derive", "serde_json", "sha2", @@ -4559,7 +4816,7 @@ dependencies = [ [[package]] name = "tari_merge_mining_proxy" -version = "0.9.0" +version = "0.9.5" dependencies = [ "anyhow", "bincode", @@ -4568,15 +4825,15 @@ dependencies = [ "config", "derive-error", "env_logger 0.7.1", - "futures 0.3.15", + "futures 0.3.16", "futures-test", "hex", "hyper 0.13.10", "jsonrpc", "log 0.4.14", "rand 0.8.4", - "reqwest", - "serde 1.0.126", + "reqwest 0.10.10", + "serde 1.0.129", "serde_json", "structopt", "tari_app_grpc", @@ -4586,7 +4843,7 @@ dependencies = [ "tari_crypto", "tari_utilities", "thiserror", - "tokio", + "tokio 0.2.25", "tokio-macros", "tonic", "tracing", @@ -4597,16 +4854,22 @@ dependencies = [ [[package]] name = "tari_mining_node" -version = "0.9.0" +version = "0.9.5" dependencies = [ + "bufstream", "chrono", "crossbeam", - "futures 0.3.15", + "futures 0.3.16", + "hex", + "jsonrpc", "log 0.4.14", + "native-tls", "num_cpus", "prost-types", "rand 0.8.4", - "serde 1.0.126", + "reqwest 0.11.4", + "serde 1.0.129", + "serde_json", "sha3", "tari_app_grpc", "tari_app_utilities", @@ -4614,13 +4877,14 @@ dependencies = [ "tari_core", "tari_crypto", "thiserror", - "tokio", + "time", + "tokio 
0.2.25", "tonic", ] [[package]] name = "tari_mmr" -version = "0.9.0" +version = "0.9.5" dependencies = [ "bincode", "blake2", @@ -4629,7 +4893,7 @@ dependencies = [ "digest", "log 0.4.14", "rand 0.8.4", - "serde 1.0.126", + "serde 1.0.129", "serde_json", "tari_crypto", "tari_infra_derive", @@ -4639,7 +4903,7 @@ dependencies = [ [[package]] name = "tari_p2p" -version = "0.9.0" +version = "0.9.5" dependencies = [ "anyhow", "bytes 0.5.6", @@ -4647,18 +4911,18 @@ dependencies = [ "clap", "env_logger 0.6.2", "fs2", - "futures 0.3.15", + "futures 0.3.16", "futures-timer", "lazy_static 1.4.0", "lmdb-zero", "log 0.4.14", - "log4rs", + "log4rs 0.8.3", "pgp", "prost", "rand 0.8.4", - "reqwest", - "semver 1.0.3", - "serde 1.0.126", + "reqwest 0.10.10", + "semver 1.0.4", + "serde 1.0.129", "serde_derive", "stream-cancel", "tari_common", @@ -4672,7 +4936,7 @@ dependencies = [ "tari_utilities", "tempfile", "thiserror", - "tokio", + "tokio 0.2.25", "tokio-macros", "tower", "tower-service", @@ -4681,17 +4945,17 @@ dependencies = [ [[package]] name = "tari_service_framework" -version = "0.9.0" +version = "0.9.5" dependencies = [ "anyhow", "async-trait", - "futures 0.3.15", + "futures 0.3.16", "futures-test", "log 0.4.14", "tari_shutdown", "tari_test_utils", "thiserror", - "tokio", + "tokio 0.2.25", "tokio-macros", "tower", "tower-service", @@ -4699,15 +4963,15 @@ dependencies = [ [[package]] name = "tari_shutdown" -version = "0.9.0" +version = "0.9.5" dependencies = [ - "futures 0.3.15", - "tokio", + "futures 0.3.16", + "tokio 0.2.25", ] [[package]] name = "tari_storage" -version = "0.9.0" +version = "0.9.5" dependencies = [ "bincode", "bytes 0.4.12", @@ -4717,22 +4981,77 @@ dependencies = [ "rand 0.8.4", "rmp", "rmp-serde", - "serde 1.0.126", + "serde 1.0.129", "serde_derive", "tari_utilities", "thiserror", ] [[package]] -name = "tari_test_utils" +name = "tari_stratum_ffi" +version = "0.0.1" +dependencies = [ + "hex", + "libc", + "serde 1.0.129", + "serde_json", + "tari_app_grpc", 
+ "tari_common", + "tari_comms", + "tari_core", + "tari_crypto", + "tari_utilities", + "thiserror", +] + +[[package]] +name = "tari_stratum_transcoder" version = "0.9.0" dependencies = [ - "futures 0.3.15", + "bincode", + "bytes 0.5.6", + "chrono", + "config", + "derive-error", + "env_logger 0.7.1", + "futures 0.3.16", + "futures-test", + "hex", + "hyper 0.13.10", + "jsonrpc", + "log 0.4.14", + "rand 0.7.3", + "reqwest 0.10.10", + "serde 1.0.129", + "serde_json", + "structopt", + "tari_app_grpc", + "tari_common", + "tari_core", + "tari_crypto", + "tari_utilities", + "thiserror", + "tokio 0.2.25", + "tokio-macros", + "tonic", + "tonic-build", + "tracing", + "tracing-futures", + "tracing-subscriber", + "url 2.2.2", +] + +[[package]] +name = "tari_test_utils" +version = "0.9.5" +dependencies = [ + "futures 0.3.16", "futures-test", "lazy_static 1.4.0", "rand 0.8.4", + "tari_shutdown", "tempfile", - "tokio", + "tokio 0.2.25", ] [[package]] @@ -4743,19 +5062,19 @@ checksum = "22966aea452f806a83b75d59d54d34f638e48b94a1ea2b2e0efce9aacf532635" dependencies = [ "base64 0.10.1", "bincode", - "bitflags 1.2.1", + "bitflags 1.3.2", "chrono", "clear_on_drop", "newtype-ops", "rand 0.7.3", - "serde 1.0.126", + "serde 1.0.129", "serde_json", "thiserror", ] [[package]] name = "tari_wallet" -version = "0.9.0" +version = "0.9.5" dependencies = [ "aes-gcm 0.8.0", "bincode", @@ -4767,15 +5086,15 @@ dependencies = [ "digest", "env_logger 0.7.1", "fs2", - "futures 0.3.15", + "futures 0.3.16", "lazy_static 1.4.0", "libsqlite3-sys", "lmdb-zero", "log 0.4.14", - "log4rs", + "log4rs 1.0.0", "prost", "rand 0.8.4", - "serde 1.0.126", + "serde 1.0.129", "serde_json", "tari_common_types", "tari_comms", @@ -4791,22 +5110,22 @@ dependencies = [ "tempfile", "thiserror", "time", - "tokio", + "tokio 0.2.25", "tokio-macros", "tower", ] [[package]] name = "tari_wallet_ffi" -version = "0.17.0" +version = "0.17.4" dependencies = [ "chrono", "env_logger 0.7.1", - "futures 0.3.15", + "futures 0.3.16", 
"lazy_static 1.4.0", "libc", "log 0.4.14", - "log4rs", + "log4rs 1.0.0", "rand 0.8.4", "tari_common_types", "tari_comms", @@ -4821,7 +5140,7 @@ dependencies = [ "tari_wallet", "tempfile", "thiserror", - "tokio", + "tokio 0.2.25", ] [[package]] @@ -4833,7 +5152,7 @@ dependencies = [ "cfg-if 1.0.0", "libc", "rand 0.8.4", - "redox_syscall 0.2.9", + "redox_syscall 0.2.10", "remove_dir_all", "winapi 0.3.9", ] @@ -4849,15 +5168,15 @@ dependencies = [ [[package]] name = "test_faucet" -version = "0.9.0" +version = "0.9.5" dependencies = [ "rand 0.8.4", - "serde 1.0.126", + "serde 1.0.129", "serde_json", "tari_core", "tari_crypto", "tari_utilities", - "tokio", + "tokio 0.2.25", ] [[package]] @@ -4884,9 +5203,9 @@ version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "060d69a0afe7796bf42e9e2ff91f5ee691fb15c53d38b4b62a9a53eb23164745" dependencies = [ - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", ] [[package]] @@ -4909,6 +5228,28 @@ dependencies = [ "once_cell", ] +[[package]] +name = "threadpool" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" +dependencies = [ + "num_cpus", +] + +[[package]] +name = "thrift" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c6d965454947cc7266d22716ebfd07b18d84ebaf35eec558586bbb2a8cb6b5b" +dependencies = [ + "byteorder", + "integer-encoding", + "log 0.4.14", + "ordered-float 1.1.1", + "threadpool", +] + [[package]] name = "time" version = "0.1.44" @@ -4935,15 +5276,15 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" dependencies = [ - "serde 1.0.126", + "serde 1.0.129", "serde_json", ] [[package]] name = "tinyvec" -version = "1.2.0" +version = "1.3.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b5220f05bb7de7f3f53c7c065e1199b3172696fe2db9f9c4d8ad9b4ee74c342" +checksum = "848a1e1181b9f6753b5e96a092749e29b11d19ede67dfbbd6c7dc7e0f49b5338" dependencies = [ "tinyvec_macros", ] @@ -4977,15 +5318,52 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "tokio" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92036be488bb6594459f2e03b60e42df6f937fe6ca5c5ffdcb539c6b84dc40f5" +dependencies = [ + "autocfg 1.0.1", + "bytes 1.0.1", + "libc", + "memchr", + "mio 0.7.13", + "num_cpus", + "pin-project-lite 0.2.7", + "winapi 0.3.9", +] + [[package]] name = "tokio-macros" version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e44da00bfc73a25f814cd8d7e57a68a5c31b74b3152a0a1d1f590c97ed06265a" dependencies = [ - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", +] + +[[package]] +name = "tokio-native-tls" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" +dependencies = [ + "native-tls", + "tokio 1.10.1", +] + +[[package]] +name = "tokio-stream" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2f3f698253f03119ac0102beaa64f67a67e08074d03a22d18784104543727f" +dependencies = [ + "futures-core", + "pin-project-lite 0.2.7", + "tokio 1.10.1", ] [[package]] @@ -4996,7 +5374,7 @@ checksum = "ed0049c119b6d505c4447f5c64873636c7af6c75ab0d45fd9f618d82acb8016d" dependencies = [ "bytes 0.5.6", "futures-core", - "tokio", + "tokio 0.2.25", ] [[package]] @@ -5006,35 +5384,35 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" dependencies = [ "native-tls", - "tokio", + "tokio 0.2.25", ] [[package]] name = "tokio-util" -version = 
"0.2.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "571da51182ec208780505a32528fc5512a8fe1443ab960b3f2f3ef093cd16930" +checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" dependencies = [ "bytes 0.5.6", "futures-core", "futures-sink", "log 0.4.14", "pin-project-lite 0.1.12", - "tokio", + "tokio 0.2.25", ] [[package]] name = "tokio-util" -version = "0.3.1" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" +checksum = "1caa0b0c8d94a049db56b5acf8cba99dc0623aab1b26d5b5f5e2d945846b3592" dependencies = [ - "bytes 0.5.6", + "bytes 1.0.1", "futures-core", "futures-sink", "log 0.4.14", - "pin-project-lite 0.1.12", - "tokio", + "pin-project-lite 0.2.7", + "tokio 1.10.1", ] [[package]] @@ -5043,7 +5421,7 @@ version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "758664fc71a3a69038656bee8b6be6477d2a6c315a6b81f7081f591bffa4111f" dependencies = [ - "serde 1.0.126", + "serde 1.0.129", ] [[package]] @@ -5052,7 +5430,7 @@ version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" dependencies = [ - "serde 1.0.126", + "serde 1.0.129", ] [[package]] @@ -5068,13 +5446,13 @@ dependencies = [ "futures-core", "futures-util", "http", - "http-body", + "http-body 0.3.1", "hyper 0.13.10", "percent-encoding 2.1.0", "pin-project 0.4.28", "prost", "prost-derive", - "tokio", + "tokio 0.2.25", "tokio-util 0.3.1", "tower", "tower-balance", @@ -5091,10 +5469,10 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71d8d21cb568e802d77055ab7fcd43f0992206de5028de95c8d3a41118d32e8e" dependencies = [ - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "prost-build", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", ] [[package]] @@ -5127,7 
+5505,7 @@ dependencies = [ "pin-project 0.4.28", "rand 0.7.3", "slab", - "tokio", + "tokio 0.2.25", "tower-discover", "tower-layer", "tower-load", @@ -5145,7 +5523,7 @@ checksum = "c4887dc2a65d464c8b9b66e0e4d51c2fd6cf5b3373afc72805b0a60bce00446a" dependencies = [ "futures-core", "pin-project 0.4.28", - "tokio", + "tokio 0.2.25", "tower-layer", "tower-service", "tracing", @@ -5176,7 +5554,7 @@ checksum = "92c3040c5dbed68abffaa0d4517ac1a454cd741044f33ab0eefab6b8d1361404" dependencies = [ "futures-core", "pin-project 0.4.28", - "tokio", + "tokio 0.2.25", "tower-layer", "tower-load", "tower-service", @@ -5191,7 +5569,7 @@ dependencies = [ "futures-core", "log 0.4.14", "pin-project 0.4.28", - "tokio", + "tokio 0.2.25", "tower-discover", "tower-service", ] @@ -5214,7 +5592,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce50370d644a0364bf4877ffd4f76404156a248d104e2cc234cd391ea5cdc965" dependencies = [ - "tokio", + "tokio 0.2.25", "tower-service", ] @@ -5228,7 +5606,7 @@ dependencies = [ "futures-util", "indexmap", "log 0.4.14", - "tokio", + "tokio 0.2.25", "tower-service", ] @@ -5240,7 +5618,7 @@ checksum = "e6727956aaa2f8957d4d9232b308fe8e4e65d99db30f42b225646e86c9b6a952" dependencies = [ "futures-core", "pin-project 0.4.28", - "tokio", + "tokio 0.2.25", "tower-layer", "tower-service", ] @@ -5259,7 +5637,7 @@ checksum = "9ba4bbc2c1e4a8543c30d4c13a4c8314ed72d6e07581910f665aa13fde0153c8" dependencies = [ "futures-util", "pin-project 0.4.28", - "tokio", + "tokio 0.2.25", "tokio-test", "tower-layer", "tower-service", @@ -5272,7 +5650,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "127b8924b357be938823eaaec0608c482d40add25609481027b96198b2e4b31e" dependencies = [ "pin-project 0.4.28", - "tokio", + "tokio 0.2.25", "tower-layer", "tower-service", ] @@ -5308,16 +5686,16 @@ version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"c42e6fa53307c8a17e4ccd4dc81cf5ec38db9209f59b222210375b54ee40d1e2" dependencies = [ - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", ] [[package]] name = "tracing-core" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9ff14f98b1a4b289c6248a023c1c2fa1491062964e9fed67ab29c4e4da4a052" +checksum = "2ca517f43f0fb96e0c3072ed5c275fe5eece87e8cb52f4a77b69226d3b1c9df8" dependencies = [ "lazy_static 1.4.0", ] @@ -5328,7 +5706,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ - "pin-project 1.0.7", + "pin-project 1.0.8", "tracing", ] @@ -5343,28 +5721,41 @@ dependencies = [ "tracing-core", ] +[[package]] +name = "tracing-opentelemetry" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "599f388ecb26b28d9c1b2e4437ae019a7b336018b45ed911458cd9ebf91129f6" +dependencies = [ + "opentelemetry", + "tracing", + "tracing-core", + "tracing-log", + "tracing-subscriber", +] + [[package]] name = "tracing-serde" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb65ea441fbb84f9f6748fd496cf7f63ec9af5bca94dd86456978d055e8eb28b" dependencies = [ - "serde 1.0.126", + "serde 1.0.129", "tracing-core", ] [[package]] name = "tracing-subscriber" -version = "0.2.19" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab69019741fca4d98be3c62d2b75254528b5432233fd8a4d2739fec20278de48" +checksum = "b9cbe87a2fa7e35900ce5de20220a582a9483a7063811defce79d7cbd59d4cfe" dependencies = [ "ansi_term 0.12.1", "chrono", "lazy_static 1.4.0", "matchers", "regex", - "serde 1.0.126", + "serde 1.0.129", "serde_json", "sharded-slab", "smallvec", @@ -5390,7 +5781,7 @@ dependencies = [ "cfg-if 1.0.0", "chrono", "data-encoding", - "futures 
0.3.15", + "futures 0.3.16", "lazy_static 1.4.0", "log 0.4.14", "radix_trie", @@ -5398,7 +5789,7 @@ dependencies = [ "ring", "rustls", "thiserror", - "tokio", + "tokio 0.2.25", "trust-dns-proto", "webpki", ] @@ -5414,7 +5805,7 @@ dependencies = [ "cfg-if 1.0.0", "data-encoding", "enum-as-inner", - "futures 0.3.15", + "futures 0.3.16", "idna 0.2.3", "lazy_static 1.4.0", "log 0.4.14", @@ -5422,7 +5813,7 @@ dependencies = [ "ring", "smallvec", "thiserror", - "tokio", + "tokio 0.2.25", "url 2.2.2", ] @@ -5456,7 +5847,7 @@ version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2eaeee894a1e9b90f80aa466fe59154fdb471980b5e104d8836fcea309ae17e" dependencies = [ - "bitflags 1.2.1", + "bitflags 1.3.2", "cassowary", "crossterm", "unicode-segmentation", @@ -5533,12 +5924,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eeb8be209bb1c96b7c177c7420d26e04eccacb0eeae6b980e35fcb74678107e0" -dependencies = [ - "matches", -] +checksum = "246f4c42e67e7a4e3c6106ff716a5d067d4132a642840b242e357e468a2a0085" [[package]] name = "unicode-normalization" @@ -5581,9 +5969,9 @@ checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" [[package]] name = "universal-hash" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" +checksum = "9f214e8f697e925001e66ec2c6e37a4ef93f0f78c2eed7814394e10c62025b05" dependencies = [ "generic-array", "subtle", @@ -5704,36 +6092,36 @@ checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" [[package]] name = "wasm-bindgen" -version = "0.2.74" +version = "0.2.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54ee1d4ed486f78874278e63e4069fc1ab9f6a18ca492076ffb90c5eb2997fd" +checksum = 
"8ce9b1b516211d33767048e5d47fa2a381ed8b76fc48d2ce4aa39877f9f183e0" dependencies = [ "cfg-if 1.0.0", - "serde 1.0.126", + "serde 1.0.129", "serde_json", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.74" +version = "0.2.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b33f6a0694ccfea53d94db8b2ed1c3a8a4c86dd936b13b9f0a15ec4a451b900" +checksum = "cfe8dc78e2326ba5f845f4b5bf548401604fa20b1dd1d365fb73b6c1d6364041" dependencies = [ "bumpalo", "lazy_static 1.4.0", "log 0.4.14", - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.24" +version = "0.4.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fba7978c679d53ce2d0ac80c8c175840feb849a161664365d1287b41f2e67f1" +checksum = "95fded345a6559c2cfee778d562300c581f7d4ff3edb9b0d230d69800d213972" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -5743,9 +6131,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.74" +version = "0.2.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "088169ca61430fe1e58b8096c24975251700e7b1f6fd91cc9d59b04fb9b18bd4" +checksum = "44468aa53335841d9d6b6c023eaab07c0cd4bddbcfdee3e2bb1e8d2cb8069fef" dependencies = [ "quote 1.0.9", "wasm-bindgen-macro-support", @@ -5753,28 +6141,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.74" +version = "0.2.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be2241542ff3d9f241f5e2cb6dd09b37efe786df8851c54957683a49f0987a97" +checksum = "0195807922713af1e67dc66132c7328206ed9766af3858164fb583eedc25fbad" dependencies = [ - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.74" +version 
= "0.2.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7cff876b8f18eed75a66cf49b65e7f967cb354a7aa16003fb55dbfd25b44b4f" +checksum = "acdb075a845574a1fa5f09fd77e43f7747599301ea3417a9fbffdeedfc1f4a29" [[package]] name = "web-sys" -version = "0.3.51" +version = "0.3.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e828417b379f3df7111d3a2a9e5753706cae29c41f7c4029ee9fd77f3e09e582" +checksum = "224b2f6b67919060055ef1a67807367c2066ed520c3862cc013d26cf893a783c" dependencies = [ "js-sys", "wasm-bindgen", @@ -5883,15 +6271,15 @@ dependencies = [ [[package]] name = "yamux" -version = "0.4.7" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd37e58a1256a0b328ce9c67d8b62ecdd02f4803ba443df478835cb1a41a637c" +checksum = "e7d9028f208dd5e63c614be69f115c1b53cacc1111437d4c765185856666c107" dependencies = [ - "futures 0.3.15", + "futures 0.3.16", "log 0.4.14", "nohash-hasher", - "parking_lot", - "rand 0.7.3", + "parking_lot 0.11.1", + "rand 0.8.4", "static_assertions", ] @@ -5910,8 +6298,8 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2c1e130bebaeab2f23886bf9acbaca14b092408c452543c857f66399cd6dab1" dependencies = [ - "proc-macro2 1.0.27", + "proc-macro2 1.0.28", "quote 1.0.9", - "syn 1.0.73", + "syn 1.0.75", "synstructure", ] diff --git a/Cargo.toml b/Cargo.toml index 492ae70810..2394f2eb9c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,17 +9,19 @@ members = [ "base_layer/service_framework", "base_layer/wallet", "base_layer/wallet_ffi", + "base_layer/tari_stratum_ffi", "comms", "comms/dht", "comms/rpc_macros", "infrastructure/shutdown", "infrastructure/storage", "infrastructure/test_utils", - "applications/installer", + # "applications/installer", "applications/tari_base_node", "applications/tari_console_wallet", "applications/test_faucet", "applications/tari_app_utilities", "applications/tari_merge_mining_proxy", + 
"applications/tari_stratum_transcoder", "applications/tari_mining_node", ] diff --git a/README.md b/README.md index f93e3825ce..20678f7f7e 100644 --- a/README.md +++ b/README.md @@ -8,9 +8,10 @@ A number of applications have been developed by the Tari community to implement - Tari Console Wallet - Tari Mining Node - Tari Merge Mining Proxy +- Tari Stratum Transcoder - Tari Aurora wallets for Android and iOS -Only the first four applications will be discussed in this README. +Only the first four applications will be discussed in this README (see [wallet-android](https://github.com/tari-project/wallet-android) and [wallet-ios](https://github.com/tari-project/wallet-ios) for mobile wallets' repos). ## Installing using binaries @@ -41,7 +42,7 @@ Assuming you want to install the Tari applications into your home folder, then, cd ~ tar -xf -After this, the Tari applications will be located in `~/tari_stibbons_testnet` with a selection of +After this, the Tari applications will be located in `~/tari_weatherwax_testnet` with a selection of soft links to run them. #### On Windows @@ -406,15 +407,15 @@ Default docker builds for base x86-64 CPU. Better performing builds can be creat ## Mining -The Tari protocol supports hybrid mining; stand-alone SHA3 mining using the Tari Mining Node or merged mining with -Monero using the Tari Merge Mining Proxy in conjunction with XMRig (RandomX based mining). Blocks to be won by -stand-alone SHA3 mining has been apportioned to approximately 40% and with Monero merged mining to approximately 60%. +The Tari protocol supports hybrid mining; stand-alone or pooled SHA3 mining using the Tari Mining Node or merged mining with +Monero using the Tari Merge Mining Proxy in conjunction with XMRig (RandomX-based mining). Blocks to be won by +stand-alone and pooled SHA3 mining has been apportioned to approximately 40% and with Monero merged mining to approximately 60%. 
This apportionment is deeply baked into the Tari protocol and part of the consensus rules. The 40/60 split is determined by slightly different block target times for each algorithm, that when combined will give an average block time of approximately 120s. Each mining algorithms make use of Linear Weighted Moving Average (LWMA) maths to gracefully adjust the target difficulties to adhere to the respective target block times. Any block won by either mining algorithm will be accepted, and when there is a tie a geometric mean calculation will be used to decide the winner. This system is -completely fair without any additional empirical meddling to try and force a certain outcome. +completely fair without any additional empirical meddling to try force a certain outcome. ### Tari SHA3 mining @@ -424,24 +425,38 @@ In order to perform SHA3 mining with Tari, the following applications are needed - A Tari Console Wallet [_to collect the Tari block rewards (coinbase transactions)_]; - A Tari Mining Node [_to perform the mining_]; +In order to perform pooled SHA3 mining with Tari, the following applications are needed: + +* For a pool operator: + - A Tari Base Node [_to supply blockchain metadata information_]; + - A Tari Console Wallet [_to collect the Tari block rewards (coinbase transactions)_]; + - A Tari Stratum Transocder [_to translate and route requests by the pool to the Tari Base Node and the Tari Console Wallet_]; + - Miningcore [_pool software supporting various cryptocurrencies, configured for Tari_] + +* For a miner: + - A Tari Console Wallet [_to collect the share rewards (pool payouts)_]; + - A Tari Mining Node [_to perform the mining_]; + #### Runtime prerequisites -The Tari Base Node, Tari Console Wallet and Tari Mining Node can all run in the same directory. By performing the +The Tari Base Node, Tari Console Wallet, Tari Stratum Transcoder and Tari Mining Node can all run in the same directory. 
By performing the default installation as described in [Installing using binaries](#installing-using-binaries), all these applications will be available. +For MiningCore see [here](https://github.com/StriderDM/miningcore/tree/tari#runtime-requirements-on-linux) and [here](https://github.com/StriderDM/miningcore/tree/tari#runtime-requirements-on-windows). + #### Configuration prerequisites -The configuration prerequisites are the same for all three Tari applications. After performing a +The configuration prerequisites are the same for all four Tari applications. After performing a [default installation](#installing-using-binaries), locate the main configuration file (`config.toml`), which -will be created in the `~/tari_stibbons_testnet/config` (on Linux) or `%USERPROFILE%\.tari-testnet\config` (on Windows) +will be created in the `~/tari_weatherwax_testnet/config` (on Linux) or `%USERPROFILE%\.tari-testnet\config` (on Windows) directory. With the main configuration file, in addition to the settings already present, the following must also be enabled for -the Tari Base Node and the Tari Console Wallet, if they are not enabled already. Under section **`base_node.stibbons`**: +the Tari Base Node and the Tari Console Wallet, if they are not enabled already. Under section **`base_node.weatherwax`**: ``` -[base_node.stibbons] +[base_node.weatherwax] transport = "tor" allow_test_addresses = false grpc_enabled = true @@ -449,8 +464,20 @@ grpc_base_node_address = "127.0.0.1:18142" grpc_console_wallet_address = "127.0.0.1:18143" ``` -For the Tari Mining Node there are some additional settings under section **`mining_node`** that can be changed: +For Tari Stratum Transcoder: +``` +[stratum_transcoder] + +# Address of the tari_stratum_transcoder application +transcoder_host_address = "127.0.0.1:7879" +``` + +For MiningCore: +See example configuration [here](https://github.com/StriderDM/miningcore/blob/tari/examples/tari_pool.json). 
+ +For the Tari Mining Node there are some additional settings under section **`mining_node`** that can be changed: +* For SHA3 Mining: ``` [mining_node] # Number of mining threads @@ -477,32 +504,48 @@ For the Tari Mining Node there are some additional settings under section **`min #validate_tip_timeout_sec=30 ``` -#### Perform SHA3 mining +For pooled SHA3 mining: +``` +[mining_node] +# Number of mining threads +# Default: number of logical CPU cores +#num_mining_threads=8 -Tor and the required Tari applications must be started and preferably in this order: +# Stratum Mode configuration +# mining_pool_address = "miningcore.tarilabs.com:3052" +# mining_wallet_address = "YOUR_WALLET_PUBLIC_KEY" +# mining_worker_name = "worker1" +``` +Uncomment `mining_pool_address` and `mining_wallet_address`. Adjust the values to your intended configuration. +`mining_worker_name` is an optional configuration field allowing you to name your worker. -- Tor: +#### Perform SHA3 mining +* For SHA3 mining: + Tor and the required Tari applications must be started and preferably in this order: - - Linux/OSX: Execute `start_tor.sh`. - - Windows: `Start Tor Serviecs` menu item or `start_tor` shortcut in the Tari installation folder. + - Tor: -- Tari Base Node: + - Linux/OSX: Execute `start_tor.sh`. + - Windows: `Start Tor Serviecs` menu item or `start_tor` shortcut in the Tari installation folder. - - Linux/OSX: As per [Runtime links](#runtime-links). - - Windows: As per [Runtime links](#runtime-links) or `Start Base Node` menu item or + - Tari Base Node: + + - Linux/OSX: As per [Runtime links](#runtime-links). + - Windows: As per [Runtime links](#runtime-links) or `Start Base Node` menu item or `start_tari_base_node` shortcut in the Tari installation folder. -- Tari Console Wallet: + - Tari Console Wallet: - - Linux/OSX: As per [Runtime links](#runtime-links). 
- - Windows: As per [Runtime links](#runtime-links) or `Start Console Wallet` menu item or + - Linux/OSX: As per [Runtime links](#runtime-links). + - Windows: As per [Runtime links](#runtime-links) or `Start Console Wallet` menu item or `start_tari_console_wallet` shortcut in the Tari installation folder. -- Tari Mining Node: - - Linux/OSX: As per [Runtime links](#runtime-links). - - Windows: As per [Runtime links](#runtime-links) or `Start Mining Node` menu item + - Tari Mining Node: + - Linux/OSX: As per [Runtime links](#runtime-links). + - Windows: As per [Runtime links](#runtime-links) or `Start Mining Node` menu item or `start_tari_mining_node` shortcut in the Tari installation folder. + Look out for the following types of messages on the Tari Mining Node console to confirm that it is connected properly and performing mining: @@ -519,6 +562,33 @@ and performing mining: pow_data: [] }), kernel_mmr_size: 24983, output_mmr_size: 125474 } with difficulty 7316856839 ``` +* For pooled SHA3 Mining: + * Pool Operators: + Tor and the required Tari applications must be started in this order: + - Tor: + + - Linux/OSX: Execute `start_tor.sh`. + - Windows: `Start Tor Serviecs` menu item or `start_tor` shortcut in the Tari installation folder. + + - Tari Base Node: + + - Linux/OSX: As per [Runtime links](#runtime-links). + - Windows: As per [Runtime links](#runtime-links) or `Start Base Node` menu item or + `start_tari_base_node` shortcut in the Tari installation folder. + + - Tari Console Wallet: + + - Linux/OSX: As per [Runtime links](#runtime-links). + - Windows: As per [Runtime links](#runtime-links) or `Start Console Wallet` menu item or + `start_tari_console_wallet` shortcut in the Tari installation folder. + - Tari Stratum Transcoder + - MiningCore + * Miners: + - Tari Mining Node: + - Linux/OSX: As per [Runtime links](#runtime-links). 
+ - Windows: As per [Runtime links](#runtime-links) or `Start Mining Node` menu item + or `start_tari_mining_node` shortcut in the Tari installation folder. + ### Tari merge mining In order to perform merge mining with Tari, the following applications are needed: @@ -548,15 +618,15 @@ XMRig can also be build from sources. If that is your preference, follow these i The configuration prerequisites are the same for all three Tari applications. After performing a [default installation](#installing-using-binaries), locate the main configuration file (`config.toml`), which -will be created in the `~/tari_stibbons_testnet/config` (on Linux) or `%USERPROFILE%\.tari-testnet\config` (on Windows) +will be created in the `~/tari_weatherwax_testnet/config` (on Linux) or `%USERPROFILE%\.tari-testnet\config` (on Windows) directory. With the main configuration file, in addition to the settings already present, the following must also be enabled if they are not enabled already: -- For the Tari Base Node and the Tari Console Wallet, under section **`base_node.stibbons`** +- For the Tari Base Node and the Tari Console Wallet, under section **`base_node.weatherwax`** ``` - [base_node.stibbons] + [base_node.weatherwax] transport = "tor" allow_test_addresses = false grpc_enabled = true @@ -568,9 +638,9 @@ And then depending on if you are using solo mining or self-select mining you wil ###### Solo mining -- For the Tari Merge Mining Proxy, under section **`merge_mining_proxy.stibbons`** +- For the Tari Merge Mining Proxy, under section **`merge_mining_proxy.weatherwax`** ``` - [merge_mining_proxy.stibbons] + [merge_mining_proxy.weatherwax] monerod_url = "http://monero-stagenet.exan.tech:38081" proxy_host_address = "127.0.0.1:7878" proxy_submit_to_origin = true @@ -581,9 +651,9 @@ And then depending on if you are using solo mining or self-select mining you wil ###### Self-Select mining -- For the Tari Merge Mining Proxy, under section **`merge_mining_proxy.stibbons`** +- For the Tari 
Merge Mining Proxy, under section **`merge_mining_proxy.weatherwax`** ``` - [merge_mining_proxy.stibbons] + [merge_mining_proxy.weatherwax] monerod_url = "http://18.132.124.81:18081" proxy_host_address = "127.0.0.1:7878" proxy_submit_to_origin = false diff --git a/RFC/src/RFC-0110_BaseNodes.md b/RFC/src/RFC-0110_BaseNodes.md index 8ab859810a..cf2d6cc25f 100644 --- a/RFC/src/RFC-0110_BaseNodes.md +++ b/RFC/src/RFC-0110_BaseNodes.md @@ -110,18 +110,19 @@ validated and either stored or rejected. The transaction is validated as follows: -* All inputs to the transaction are valid [UTXO]s. +* All inputs to the transaction are valid [UTXO]s in the [UTXO] set or are outputs in the current block. * No inputs are duplicated. * All inputs are able to be spent (they are not time-locked). * All inputs are signed by their owners. * All outputs have valid [range proof]s. -* No outputs currently exist in the [UTXO] set. +* No outputs currently exist in the current [UTXO] set. * The transaction does not have [timelocks] applied, limiting it from being mined and added to the blockchain before a specified block height or timestamp has been reached. * The transaction excess has a valid signature. * The [transaction weight] does not exceed the maximum permitted in a single block as defined by consensus. * The transaction excess is a valid public key. This proves that: $$ \Sigma \left( \mathrm{inputs} - \mathrm{outputs} - \mathrm{fees} \right) = 0 $$. +* The transaction excess has a unique value across the whole chain. * The [Tari script] of each input must execute successfully and return the public key that signs the script signature. * The script offset \\( \so\\) is calculated and verified as per [RFC-0201_TariScript]. @@ -154,6 +155,7 @@ When a new block is received, it is passed to the block validation service. The greater than the current chain tip for some preconfigured period. * The sum of all excesses is a valid public key. 
This proves that: $$ \Sigma \left( \mathrm{inputs} - \mathrm{outputs} - \mathrm{fees} \right) = 0$$. +* That all kernel excess values are unique for that block and the entire chain. * Check if a block contains already spent outputs, reject that block. * The [Tari script] of every input must execute successfully and return the public key that signs the script signature. * The script offset \\( \so\\) is calculated and verified as per [RFC-0201_TariScript]. This prevents [cut-through] from diff --git a/RFC/src/RFC-0120_Consensus.md b/RFC/src/RFC-0120_Consensus.md index a9919efc94..299eab7360 100644 --- a/RFC/src/RFC-0120_Consensus.md +++ b/RFC/src/RFC-0120_Consensus.md @@ -77,11 +77,14 @@ Every [block] MUST: * be in a canonical order (see [Transaction ordering]) * each [transaction output] MUST: * have a unique hash (`features || commitment || script`) + * have a unique commitment in the current [UTXO] set * be in a canonical order (see [Transaction ordering]) * have a valid [range proof] * have a valid [metadata signature] * have a valid script offset (\\gamma), as per [RFC-0201_TariScript](./RFC-0201_TariScript.md). -* each [transaction kernel] MUST have a valid kernel signature +* each [transaction kernel] MUST + * have a valid kernel signature + * have a unique excess * the transaction commitments and kernels MUST balance, as follows: $$ diff --git a/RFC/src/RFC-0151_StagedWalletSecurity.md b/RFC/src/RFC-0151_StagedWalletSecurity.md new file mode 100644 index 0000000000..f2909a1f0b --- /dev/null +++ b/RFC/src/RFC-0151_StagedWalletSecurity.md @@ -0,0 +1,188 @@ +# RFC-0151/StagedWalletSecurity + +## Staged Wallet Security + +![status: draft](https://github.com/tari-project/tari/raw/master/RFC/src/theme/images/status-draft.svg) + +**Maintainer(s)**: [Yuko Roodt](https://github.com/neonknight64), [Cayle Sharrock](https://github.com/CjS77) + +# Licence + +[The 3-Clause BSD Licence](https://opensource.org/licenses/BSD-3-Clause). 
+ +Copyright 2021 The Tari Development Community + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +following conditions are met: + +1. Redistributions of this document must retain the above copyright notice, this list of conditions and the following + disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided with the distribution. +3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS DOCUMENT IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +## Language + +The keywords "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", +"NOT RECOMMENDED", "MAY" and "OPTIONAL" in this document are to be interpreted as described in +[BCP 14](https://tools.ietf.org/html/bcp14) (covering RFC2119 and RFC8174) when, and only when, they appear in all capitals, as +shown here. 
+ +## Disclaimer + +This document and its content are intended for information purposes only and may be subject to change or update +without notice. + +This document may include preliminary concepts that may or may not be in the process of being developed by the Tari +community. The release of this document is intended solely for review and discussion by the community of the +technological merits of the potential system outlined herein. + +## Goals + +This Request for Comment (RFC) aims to describe Tari's ergonomic approach to securing funds in a hot wallet. +The focus is on mobile wallets, but the strategy described here is equally applicable to console or desktop wallets. + +## Related Requests for Comment + +* [RFC-0150: Wallets](RFC-0150_Wallets.md) + +## Description + +### Rationale + +A major UX hurdle when users first interact with a crypto wallet is the friction they experience with the first user +experience. + +A common theme: I want to play with some new wallet X that I saw advertised somewhere, so I download it and run it. +But first I get several screens that + +* ask me to review my seed phrase, +* ask me to write down my seed phrase, +* prevent typical "skip this" tricks like taking a screenshot, +* ask to confirm if I've written down my seed phrase, +* force me to write a test, either by supplying a random sample of my seed phrase, or by getting me to type in the +whole thing. + +After all this, I play with the wallet a bit, and then typically, I uninstall it. + +The goal of this RFC is to **get the user playing with the wallet as quickly as possible**. _Without_ sacrificing +security whatsoever. + +### A staged approach + +This RFC proposes a smart, staged approach to wallet security. One that maximises user experience without compromising +safety. + +Each step enforces more stringent security protocols on the user than the previous step. + +The user moves from one step to another based on criteria that + +1. 
the user configures based on her preferences, or +2. uses sane predefined defaults. + +The criteria are generally based on the value of the wallet balance. + +Once a user moves to a stage, the wallet does _not_ move to a lower stage if the requirements for the stage are no longer +met. + +Users may also jump to any more advanced stage from their wallet settings / configuration at any time. + +#### Stage zero - zero balance + +When the user has a _zero balance_, there's no risk in letting them skip securing their wallet. + +Therefore, Tari wallets SHOULD just skip the whole seed phrase ritual and let the user jump right into the action. + +#### Stage 1a - a reminder to write down your seed phrase + +Once the user's balance exceeds the `MINIMUM_STAGE_ONE_BALANCE`, they will be prompted to review and write down their +seed phrase. The `MINIMUM_STAGE_ONE_BALANCE` is any non-zero balance by default. + +After the transaction that causes the balance to exceed `MINIMUM_STAGE_ONE_BALANCE` is confirmed, the user is presented +with a friendly message: "You now have _real_ money in your wallet. If you accidentally delete your wallet app or lose +your device, your funds are lost, and there is no way to recover them unless you have safely kept a copy of your +`seed phrase` safe somewhere. Click 'Ok' to review and save the phrase now, or 'Do it later' to do it at a more +convenient time". + +If the user elects _not_ to save the phrase, the message pops up again periodically. Once per day, or when the balance +increases -- whichever is less frequent -- is sufficient without being too intrusive. + + +#### Stage 1b - simple wallet backups + +Users are used to storing their data in the cloud. Although this practice is frowned upon by crypto purists, for small +balances (the type you often keep in a hot wallet), using secure cloud storage for wallet backups is a fair +compromise between keeping the keys safe from attackers and protecting users from themselves. 
+ +The simple wallet backup saves the spending keys and values of the user's wallet to a personal cloud space (e.g. Google Drive, +Apple iCloud, Dropbox). + +This solution does not require any additional input from the user besides providing authorisation to store in the cloud. +This can be done using the standard APIs and Authentication flows that each cloud provider publishes for their platform. + +In particular, we do not ask for a password to encrypt the commitment data. The consequence is that anyone who gains +access to this data -- by stealing the user's cloud credentials -- _could_ steal the user's funds. + +Therefore, the threshold for moving from this stage to Stage 3, `STAGE_TWO_THRESHOLD_BALANCE` is relatively low; +somewhere in the region of $10 to $50. + +The seed phrase MUST NOT be stored on the cloud in Stage 1b. Doing so would result in all _future_ funds of the user being +lost if the backup were ever compromised. Since the backup is unencrypted in Stage 1b, we store the minimum amount of data +needed to recover the funds and limit the potential loss of funds in case of a breach to just that found in the commitments in the +backup, which should not be more than $50. + +Backups MUST be authorised by the user when the first cloud backup is made and SHOULD be automatically updated after each +transaction is confirmed. + +Restoring a wallet from Stage 1b entails _importing_ the UTXO commitments into the user's current wallet. + +Wallet authors MAY choose to exclude Stage 1b from the staged security protocol. + +As usual, the user MUST be able to configure `STAGE_TWO_THRESHOLD_BALANCE` to suit their particular needs. + + +#### Stage 2 - full wallet backups + +Once a user has a significant balance (over `STAGE_TWO_THRESHOLD_BALANCE`), Stage 2 is active. Stage 2 entails a full, +encrypted backup of the user's wallet to the cloud. The user needs to provide a **password** to perform and secure the encryption. 
+ +This makes the user's fund safer while at rest in the cloud. It also introduces an additional point of failure: the user +can forget their wallet's encryption password. + +Stage 1b and 2 are similar in functionality but different in scope (Stage 2 allows us to store all the wallet +metadata, rather than just the commitments). For this reason, Stage 1b is optional. + +Backups MUST be authorised by the user when the first cloud backup is made and SHOULD be automatically updated after each +transaction. + +When migrating from Stage 1 to Stage 2, the Stage 1b backups SHOULD be deleted. + + +#### Stage 3 - Sweep to cold wallet + +Above a given limit -- user-defined, or the default `MAX_HOT_WALLET_BALANCE`, the user should be prompted to transfer +funds into a cold wallet. The amount to sweep can be calculated as `MAX_HOT_WALLET_BALANCE` - `SAFE_HOT_WALLET_BALANCE`. + +If the user ignores the prompt, they SHOULD be reminded one week later. From the second prompt onward, users SHOULD be given +an option to re-configure the values for `MAX_HOT_WALLET_BALANCE` and `SAFE_HOT_WALLET_BALANCE`. + +Assuming one-sided payments are live, the user SHOULD be able to configure a `COLD_WALLET_ADDRESS` in the wallet. + +For security reasons, a user SHOULD be asked for their 2FA confirmation, if it is configured, before broadcasting the +sweep transaction to the blockchain. + +#### Security hygiene + +* From stage 1 onwards Users should be asked periodically whether they still have their seed phrase written down. + Once every two months is sufficient. 
+ + diff --git a/applications/daily_tests/.gitignore b/applications/daily_tests/.gitignore new file mode 100644 index 0000000000..c8b8c7fbd2 --- /dev/null +++ b/applications/daily_tests/.gitignore @@ -0,0 +1,3 @@ +./node_modules +temp/ +logs/ diff --git a/applications/daily_tests/.prettierrc b/applications/daily_tests/.prettierrc new file mode 100644 index 0000000000..168d9d2a0c --- /dev/null +++ b/applications/daily_tests/.prettierrc @@ -0,0 +1,3 @@ +{ + "endOfLine": "auto" +} diff --git a/applications/daily_tests/automatic_recovery_test.js b/applications/daily_tests/automatic_recovery_test.js new file mode 100644 index 0000000000..a87fa7709b --- /dev/null +++ b/applications/daily_tests/automatic_recovery_test.js @@ -0,0 +1,140 @@ +const fs = require("fs/promises"); +const yargs = require("yargs"); +const path = require("path"); +const helpers = require("./helpers"); +const WalletProcess = require("integration_tests/helpers/walletProcess"); + +const RECOVERY_COMPLETE_REGEXP = /Recovery complete! 
Scanned = (\d+) in/; +const RECOVERY_WORTH_REGEXP = /worth ([0-9\.]+) (µ?T)/; +const FAILURE_REGEXP = + /Attempt (\d+)\/(\d+): Failed to complete wallet recovery/; + +async function main() { + const argv = yargs + .option("seed-words", { + alias: "sw", + description: "Seed words to use during recovery", + type: "string", + default: + "pigeon marble letter canal hard close kit cash coin still melt random require long shaft antenna tent turkey neck divert enrich iron analyst abandon", + }) + .option("log", { + alias: "l", + description: "output logs to this file", + type: "string", + default: "logs/wallet.log", + }) + .option("num-wallets", { + alias: "n", + description: "The number of times a wallet instance is recovered", + type: "integer", + default: 1, + }) + .help() + .alias("help", "h").argv; + + for (let i = 0; i < argv.numWallets; i++) { + let { identity, timeDiffMinutes, height, blockRate, recoveredAmount } = + await run(argv); + + console.log( + "Wallet (Pubkey:", + identity.public_key, + ") recovered to a block height of", + height, + "completed in", + timeDiffMinutes, + "minutes (", + blockRate, + "blocks/min).", + recoveredAmount, + "µT recovered for instance ", + i, + "." 
+ ); + } +} + +async function run(options = {}) { + const wallet = new WalletProcess( + "compile", + true, + { + transport: "tor", + network: "weatherwax", + grpc_console_wallet_address: "127.0.0.1:18111", + baseDir: options.baseDir || "./temp/base-nodes/", + }, + false, + options.seedWords + ); + + await wallet.startNew(); + + let startTime = new Date(); + + await fs.mkdir(path.dirname(options.log), { recursive: true }); + let logfile = await fs.open(options.log, "w+"); + + try { + let recoveryResult = await helpers.monitorProcessOutput({ + process: wallet.ps, + outputStream: logfile, + onData: (data) => { + let successLog = data.match(RECOVERY_COMPLETE_REGEXP); + let worthMatch = data.match(RECOVERY_WORTH_REGEXP); + if (successLog && worthMatch) { + let recoveredAmount = parseInt(worthMatch[1]); + if (worthMatch[2] === "T") { + // convert to micro tari + recoveredAmount *= 1000000; + } + return { + height: parseInt(successLog[1]), + recoveredAmount, + }; + } + + let errMatch = data.match(FAILURE_REGEXP); + // One extra attempt + if (errMatch && parseInt(errMatch[1]) > 1) { + throw new Error(data); + } + + return null; + }, + }); + + let endTime = new Date(); + const timeDiffMs = endTime - startTime; + const timeDiffMinutes = timeDiffMs / 60000; + + let client = await wallet.connectClient(); + let id = await client.identify(); + + await wallet.stop(); + + const block_rate = recoveryResult.height / timeDiffMinutes; + + return { + identity: id, + height: recoveryResult.height, + timeDiffMinutes, + blockRate: block_rate.toFixed(2), + recoveredAmount: recoveryResult.recoveredAmount, + }; + } catch (err) { + await wallet.stop(); + throw err; + } +} + +if (require.main === module) { + Promise.all([main()]); +} else { + module.exports = Object.assign(run, { + RECOVERY_COMPLETE_REGEXP, + RECOVERY_WORTH_REGEXP, + FAILURE_REGEXP, + }); +} diff --git a/applications/daily_tests/automatic_sync_test.js
b/applications/daily_tests/automatic_sync_test.js new file mode 100644 index 0000000000..72752cadd8 --- /dev/null +++ b/applications/daily_tests/automatic_sync_test.js @@ -0,0 +1,123 @@ +const fs = require("fs/promises"); +const yargs = require("yargs"); +const path = require("path"); +const helpers = require("./helpers"); +const BaseNodeProcess = require("integration_tests/helpers/baseNodeProcess"); + +const SyncType = { + Archival: "Archival", + Pruned: "Pruned", +}; + +async function main() { + const argv = yargs + .option("sync-type", { + type: "string", + description: "Specify the sync type.", + default: SyncType.Archival, + choices: [SyncType.Archival, SyncType.Pruned], + }) + .option("force-sync-peer", { + type: "number", + description: "Enable force sync to peer_seeds i-th node.", + }) + .option("log", { + alias: "l", + description: "output logs to this file", + type: "string", + default: "./logs/base-node.log", + }) + .help() + .alias("help", "h").argv; + try { + const { blockHeight, timeDiffMinutes, blockRate, forcedSyncPeer } = + await run(argv); + + console.log( + `${argv.syncType} sync ${ + forcedSyncPeer ? "forced to " + forcedSyncPeer : "" + } to block height ${blockHeight} took ${timeDiffMinutes} minutes for a rate of ${blockRate} blocks/min` + ); + } catch (err) { + console.log(err); + } +} + +async function run(options) { + let forcedSyncPeer; + const baseNode = new BaseNodeProcess("compile", true); + await baseNode.init(); + + // Set pruning horizon in config file if `pruned` command line arg is present + if (options.syncType === SyncType.Pruned) { + process.env.TARI_BASE_NODE__WEATHERWAX__PRUNING_HORIZON = 1000; + } + + // Check if we have a forced peer index. 
+ if (options.forceSyncPeer !== undefined) { + const config = ( + await fs.readFile(baseNode.baseDir + "/config/config.toml") + ).toString(); + const peer = Array.from( + config.match(/\npeer_seeds = \[(.*?)\]/s)[1].matchAll(/\n[^#]*"(.*?)"/g), + (m) => m[1] + )[options.forceSyncPeer]; + if (peer === undefined) { + // Check if index is within bounds of peer_seeds from config. + throw "Forced index out of bounds"; + } + process.env.TARI_BASE_NODE__WEATHERWAX__FORCE_SYNC_PEERS = [peer]; + forcedSyncPeer = peer; + } + + await baseNode.start(); + + await fs.mkdir(path.dirname(options.log), { recursive: true }); + let logfile = await fs.open(options.log, "w+"); + let startTime = new Date(); + + try { + let syncResult = await helpers.monitorProcessOutput({ + process: baseNode.ps, + outputStream: logfile, + onData: (data) => { + // 13:50 v0.9.3, State: Listening, Tip: 20515 (Wed, 18 Aug 2021 08:17:25 +0000), Mempool: 2tx (60g, +/- 1blks), Connections: 17, Banned: 0, Messages (last 60s): 36, Rpc: 3/1000 sessions + let match = data.match(/Tip: (\d+) \(/); + if (!match) { + return null; + } + let height = parseInt(match[1]); + + if (height > 0 && data.toUpperCase().match(/STATE: LISTENING/)) { + return { height }; + } + }, + }); + + let endTime = new Date(); + const timeDiffMs = endTime - startTime; + const timeDiffMinutes = timeDiffMs / 60000; + const blockRate = syncResult.height / timeDiffMinutes; + + await baseNode.stop(); + + return { + blockRate: blockRate.toFixed(2), + timeDiffMinutes: timeDiffMinutes.toFixed(2), + blockHeight: syncResult.height, + forcedSyncPeer, + }; + } catch (err) { + await baseNode.stop(); + throw err; + } +} + +if (require.main === module) { + Promise.all([main()]); +} else { + module.exports = run; + Object.assign(module.exports, { + SyncType, + }); +} diff --git a/applications/daily_tests/cron_jobs.js b/applications/daily_tests/cron_jobs.js new file mode 100644 index 0000000000..f97f1db5e9 --- /dev/null +++ 
b/applications/daily_tests/cron_jobs.js @@ -0,0 +1,129 @@ +const { CronJob } = require("cron"); +const fs = require("fs/promises"); +const { + sendWebhookNotification, + getWebhookUrl, + readLastNLines, + emptyFile, +} = require("./helpers"); +const walletRecoveryTest = require("./automatic_recovery_test"); +const baseNodeSyncTest = require("./automatic_sync_test"); +const { SyncType } = require("./automatic_sync_test"); + +const WEBHOOK_CHANNEL = "protocol-bot-stuff"; + +function failed(message) { + console.error(message); + if (!!getWebhookUrl()) { + sendWebhookNotification(WEBHOOK_CHANNEL, `🚨 ${message}`); + } + process.exit(1); +} + +function notify(message) { + console.log(message); + if (!!getWebhookUrl()) { + sendWebhookNotification(WEBHOOK_CHANNEL, message); + } +} + +async function runWalletRecoveryTest(instances) { + notify("🚀 Wallet recovery check has begun 🚀"); + + const baseDir = __dirname + "/temp/wallet-recovery"; + // Remove the last run data + try { + await fs.rmdir(baseDir, { + recursive: true, + }); + } catch (err) { + console.error(err); + } + + const LOG_FILE = "./logs/wallet-recovery-test.log"; + await emptyFile(LOG_FILE); + + try { + const { identity, timeDiffMinutes, height, blockRate, recoveredAmount } = + await walletRecoveryTest({ + seedWords: + "spare man patrol essay divide hollow trip visual actress sadness country hungry toy blouse body club depend capital sleep aim high recycle crystal abandon", + log: LOG_FILE, + numWallets: instances, + }); + + notify( + `🙌 Wallet (Pubkey: ${identity.public_key}) recovered to a block height of ${height} completed in ${timeDiffMinutes} minutes (${blockRate} blocks/min). ${recoveredAmount} µT recovered for ${instances} instance(s).` + ); + } catch (err) { + console.error(err); + let logLines = await readLastNLines(LOG_FILE, 15); + failed(`Wallet recovery test failed.
+ ${err} +Last log lines: +``` +${logLines.join("\n")} +``` + `); + } +} + +async function runBaseNodeSyncTest(syncType) { + notify(`🚀 ${syncType} basenode sync check has begun 🚀`); + + const baseDir = __dirname + "/temp/base-node-sync"; + + // Remove the last run data + try { + await fs.rmdir(baseDir, { + recursive: true, + }); + } catch (err) { + console.error(err); + } + + const LOG_FILE = `./logs/${syncType.toLowerCase()}-sync-test.log`; + await emptyFile(LOG_FILE); + + try { + const { blockHeight, timeDiffMinutes, blockRate } = await baseNodeSyncTest({ + log: LOG_FILE, + syncType, + baseDir, + }); + + notify( + `⛓ Syncing ${syncType} to block height ${blockHeight} took ${timeDiffMinutes} minutes for a rate of ${blockRate} blocks/min` + ); + } catch (err) { + console.error(err); + let logLines = await readLastNLines(LOG_FILE, 15); + failed(`Base node ${syncType} sync test failed. +${err} +Last log lines: +``` +${logLines.join("\n")} +``` + `); + } +} + +// ------------------------- CRON ------------------------- // +new CronJob("0 7 * * *", () => runWalletRecoveryTest(1)).start(); +new CronJob("0 7 * * *", () => runWalletRecoveryTest(5)).start(); +new CronJob("0 6 * * *", () => runBaseNodeSyncTest(SyncType.Archival)).start(); +new CronJob("30 6 * * *", () => runBaseNodeSyncTest(SyncType.Pruned)).start(); + +console.log("Cron jobs started."); diff --git a/applications/daily_tests/helpers.js b/applications/daily_tests/helpers.js new file mode 100644 index 0000000000..8a97482599 --- /dev/null +++ b/applications/daily_tests/helpers.js @@ -0,0 +1,163 @@ +// Copyright 2021, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +const { exec } = require("child_process"); +const { hideBin } = require("yargs/helpers"); +const readline = require("readline"); +const path = require("path"); + +const WEBHOOK_URL_ENV_NAME = "WEBHOOK_URL"; + +const yargs = () => require("yargs")(hideBin(process.argv)); + +/** + * Send a webhook notification to `webhookUrl` or else the WEBHOOK_URL environment var + * @param channel - the channel to send + * @param message - the message to send + * @param webhookUrlOverride - the optional webhook URL to send, if not supplied WEBHOOK_URL is used + * @returns {Promise>} + */ +function sendWebhookNotification(channel, message, webhookUrlOverride = null) { + const hook = webhookUrlOverride || getWebhookUrlFromEnv(); + if (!hook) { + return; + } + const data = JSON.stringify({ channel, text: message }); + const args = ` -i -X POST -H 'Content-Type: application/json' -d '${data}' ${hook}`; + return new Promise((resolve, reject) => { + exec("curl " + args, function (error, stdout, stderr) { + if (error) { + return reject(error); + } + if (stdout) console.log(stdout); + if (stderr) console.error(stderr); + resolve(null); + }); + }); +} + +function getWebhookUrlFromEnv() { + return process.env[WEBHOOK_URL_ENV_NAME]; +} + +function readLastNLines(file, n) { + const fs = require("fs"); + return new Promise((resolve, reject) => { + try { + const stream = fs.createReadStream(file, {}); + let lineBuf = new Array(n); + let s = readline.createInterface({ input: stream, crlfDelay: Infinity }); + s.on("line", (line) => { + if (lineBuf.length + 1 > n) { + lineBuf.shift(); + } + lineBuf.push(line); + }); + s.on("close", () => { + resolve(lineBuf.filter((l) => l.trim().length > 0)); + }); + s.on("error", reject); + } catch (err) { + console.error(err); + reject(err); + } + }); +} + +async function emptyFile(file) { + const fs = require("fs/promises"); + await fs.mkdir(path.dirname(file), { recursive: true }); + try { + await fs.truncate(file, 0); + } catch (_e) { + await 
fs.writeFile(file, ""); + } +} + +async function monitorProcessOutput({ + process, + onData, + outputStream, + noStdout, +}) { + return new Promise((resolve, reject) => { + let isResolved = false; + process.stderr.on("data", (buf) => { + let data = buf.toString().trim(); + if (!noStdout) { + console.error(data); + } + if (outputStream) { + outputStream.write(data); + } + }); + + process.stdout.on("data", (buf) => { + let data = buf.toString(); + if (!noStdout) { + console.log(data); + } + if (outputStream) { + outputStream.write(data); + } + if (isResolved) { + return; + } + try { + let ret = onData(data); + if (ret === null || ret === undefined) { + // No result yet + return; + } + + isResolved = true; + resolve(ret); + } catch (err) { + isResolved = true; + reject(err); + } + }); + + process.once("error", (err) => { + isResolved = true; + reject(err); + }); + process.once("exit", (code) => { + isResolved = true; + if (code === 0) { + // If already resolved, this is a noop + resolve(null); + } else { + reject(new Error(`Exited with error code ${code}`)); + } + }); + }); +} + +module.exports = { + sendWebhookNotification, + getWebhookUrl: getWebhookUrlFromEnv, + readLastNLines, + emptyFile, + yargs, + monitorProcessOutput, +}; diff --git a/applications/daily_tests/package-lock.json b/applications/daily_tests/package-lock.json new file mode 100644 index 0000000000..82eef8c37f --- /dev/null +++ b/applications/daily_tests/package-lock.json @@ -0,0 +1,1881 @@ +{ + "name": "daily_tests", + "version": "1.0.0", + "lockfileVersion": 2, + "requires": true, + "packages": { + "": { + "version": "1.0.0", + "license": "ISC", + "dependencies": { + "cron": "^1.8.2", + "debug": "^4.3.2", + "integration_tests": "file:../../integration_tests", + "luxon": "^2.0.2", + "yargs": "^17.0.1" + }, + "devDependencies": { + "mocha": "^9.1.0" + } + }, + "../../integration_tests": { + "version": "1.0.0", + "license": "ISC", + "dependencies": { + "archiver": "^5.3.0", + "axios": "^0.21.1", + 
"clone-deep": "^4.0.1", + "csv-parser": "^3.0.0", + "dateformat": "^3.0.3", + "glob": "^7.1.7", + "hex64": "^0.4.0", + "jszip": "^3.7.0", + "sha3": "^2.1.3", + "synchronized-promise": "^0.3.1", + "tari_crypto": "^0.9.1", + "utf8": "^3.0.0", + "wallet-grpc-client": "file:../clients/wallet_grpc_client" + }, + "devDependencies": { + "@babel/core": "^7.15.0", + "@babel/eslint-parser": "^7.15.0", + "@babel/eslint-plugin": "^7.14.5", + "@grpc/grpc-js": "^1.3.6", + "@grpc/proto-loader": "^0.5.5", + "blakejs": "^1.1.0", + "chai": "^4.2.0", + "cucumber": "^6.0.5", + "cucumber-html-reporter": "^5.3.0", + "cucumber-pretty": "^6.0.0", + "eslint": "^7.25.0", + "eslint-config-prettier": "^8.3.0", + "eslint-config-standard": "^16.0.2", + "eslint-plugin-import": "^2.2.0", + "eslint-plugin-node": "^11.1.0", + "eslint-plugin-prettier": "^3.4.0", + "eslint-plugin-promise": "^4.3.1", + "ffi-napi": "^4.0.3", + "grpc-promise": "^1.4.0", + "husky": "^6.0.0", + "prettier": "^2.2.1", + "ref-napi": "^3.0.3" + } + }, + "node_modules/@ungap/promise-all-settled": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@ungap/promise-all-settled/-/promise-all-settled-1.1.2.tgz", + "integrity": "sha512-sL/cEvJWAnClXw0wHk85/2L0G6Sj8UB0Ctc1TEMbKSsmpRosqhwj9gWgFRZSrBr2f9tiXISwNhCPmlfqUqyb9Q==", + "dev": true + }, + "node_modules/ansi-colors": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.1.tgz", + "integrity": "sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", + "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": 
"https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.2.tgz", + "integrity": "sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==", + "dev": true, + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true + }, + "node_modules/binary-extensions": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", + "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.2", + 
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dev": true, + "dependencies": { + "fill-range": "^7.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browser-stdout": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz", + "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==", + "dev": true + }, + "node_modules/camelcase": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.2.0.tgz", + "integrity": "sha512-c7wVvbw3f37nuobQNtgsgG9POC9qMbNuMQmTCqZv23b6MIz0fcYpBiOlv9gEN/hdLdnZTDQhg6e9Dq5M1vKvfg==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/chalk/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/chokidar": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.2.tgz", + "integrity": "sha512-ekGhOnNVPgT77r4K/U3GDhu+FQ2S8TnK/s2KbIGXi0SZWuwkZ2QNyfWdZW+TVfn84DpEP7rLeCt2UI6bJ8GwbQ==", + "dev": 
true, + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/cliui": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", + "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^7.0.0" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", + "dev": true + }, + "node_modules/cron": { + "version": "1.8.2", + "resolved": "https://registry.npmjs.org/cron/-/cron-1.8.2.tgz", + "integrity": "sha512-Gk2c4y6xKEO8FSAUTklqtfSr7oTq0CiPQeLBG5Fl0qoXpZyMcj1SG59YL+hqq04bu6/IuEA7lMkYDAplQNKkyg==", + "dependencies": { + "moment-timezone": "^0.5.x" + } + }, + "node_modules/debug": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.2.tgz", + "integrity": "sha512-mOp8wKcvj7XxC78zLgw/ZA+6TSgkoE2C/ienthhRD298T7UNwAg9diBpLRxC0mOezLl4B0xV7M0cCO6P/O0Xhw==", + "dependencies": { + "ms": "2.1.2" 
+ }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decamelize": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-4.0.0.tgz", + "integrity": "sha512-9iE1PgSik9HeIIw2JO94IidnE3eBoQrFJ3w7sFuzSX4DpmZ3v5sZpUiV5Swcf6mQEF+Y0ru8Neo+p+nyh2J+hQ==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/diff": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/diff/-/diff-5.0.0.tgz", + "integrity": "sha512-/VTCrvm5Z0JGty/BWHljh+BAiw3IK+2j87NGMu8Nwc/f48WoDAC395uomO9ZD117ZOBaHmkX1oyLvkVM/aIT3w==", + "dev": true, + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/escalade": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", + "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dev": true, + "dependencies": { + 
"to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", + "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", + "dev": true, + "bin": { + "flat": "cli.js" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", + "dev": true + }, + "node_modules/fsevents": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", + "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", + "dev": true, + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/glob": { + "version": "7.1.7", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz", + "integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==", + "dev": true, + "dependencies": { + "fs.realpath": 
"^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/growl": { + "version": "1.10.5", + "resolved": "https://registry.npmjs.org/growl/-/growl-1.10.5.tgz", + "integrity": "sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA==", + "dev": true, + "engines": { + "node": ">=4.x" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "dev": true, + "bin": { + "he": "bin/he" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", + "dev": true, + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true + }, + "node_modules/integration_tests": { + 
"resolved": "../../integration_tests", + "link": true + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz", + "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==", + "dev": true, + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-plain-obj": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", + "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-unicode-supported": { + 
"version": "0.1.0", + "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", + "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=", + "dev": true + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dev": true, + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/log-symbols": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", + "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", + "dev": true, + "dependencies": { + "chalk": "^4.1.0", + "is-unicode-supported": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/luxon": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/luxon/-/luxon-2.0.2.tgz", + "integrity": 
"sha512-ZRioYLCgRHrtTORaZX1mx+jtxKtKuI5ZDvHNAmqpUzGqSrR+tL4FVLn/CUGMA3h0+AKD1MAxGI5GnCqR5txNqg==", + "engines": { + "node": "*" + } + }, + "node_modules/minimatch": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", + "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/mocha": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/mocha/-/mocha-9.1.0.tgz", + "integrity": "sha512-Kjg/XxYOFFUi0h/FwMOeb6RoroiZ+P1yOfya6NK7h3dNhahrJx1r2XIT3ge4ZQvJM86mdjNA+W5phqRQh7DwCg==", + "dev": true, + "dependencies": { + "@ungap/promise-all-settled": "1.1.2", + "ansi-colors": "4.1.1", + "browser-stdout": "1.3.1", + "chokidar": "3.5.2", + "debug": "4.3.1", + "diff": "5.0.0", + "escape-string-regexp": "4.0.0", + "find-up": "5.0.0", + "glob": "7.1.7", + "growl": "1.10.5", + "he": "1.2.0", + "js-yaml": "4.1.0", + "log-symbols": "4.1.0", + "minimatch": "3.0.4", + "ms": "2.1.3", + "nanoid": "3.1.23", + "serialize-javascript": "6.0.0", + "strip-json-comments": "3.1.1", + "supports-color": "8.1.1", + "which": "2.0.2", + "wide-align": "1.1.3", + "workerpool": "6.1.5", + "yargs": "16.2.0", + "yargs-parser": "20.2.4", + "yargs-unparser": "2.0.0" + }, + "bin": { + "_mocha": "bin/_mocha", + "mocha": "bin/mocha" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/mochajs" + } + }, + "node_modules/mocha/node_modules/debug": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", + "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "dev": true, + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": 
true + } + } + }, + "node_modules/mocha/node_modules/debug/node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "node_modules/mocha/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true + }, + "node_modules/mocha/node_modules/yargs": { + "version": "16.2.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", + "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", + "dev": true, + "dependencies": { + "cliui": "^7.0.2", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.0", + "y18n": "^5.0.5", + "yargs-parser": "^20.2.2" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/mocha/node_modules/yargs-parser": { + "version": "20.2.4", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.4.tgz", + "integrity": "sha512-WOkpgNhPTlE73h4VFAFsOnomJVaovO8VqLDzy5saChRBFQFBoMYirowyW+Q9HB4HFF4Z7VZTiG3iSzJJA29yRA==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/moment": { + "version": "2.29.1", + "resolved": "https://registry.npmjs.org/moment/-/moment-2.29.1.tgz", + "integrity": "sha512-kHmoybcPV8Sqy59DwNDY3Jefr64lK/by/da0ViFcuA4DH0vQg5Q6Ze5VimxkfQNSC+Mls/Kx53s7TjP1RhFEDQ==", + "engines": { + "node": "*" + } + }, + "node_modules/moment-timezone": { + "version": "0.5.33", + "resolved": "https://registry.npmjs.org/moment-timezone/-/moment-timezone-0.5.33.tgz", + "integrity": "sha512-PTc2vcT8K9J5/9rDEPe5czSIKgLoGsH8UNpA4qZTVw0Vd/Uz19geE9abbIOQKaAQFcnQ3v5YEXrbSc5BpshH+w==", + "dependencies": { + "moment": ">= 2.9.0" + }, + "engines": { + 
"node": "*" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "node_modules/nanoid": { + "version": "3.1.23", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.1.23.tgz", + "integrity": "sha512-FiB0kzdP0FFVGDKlRLEQ1BgDzU87dy5NnzjeW9YZNt+/c3+q82EQDUwniSAUxp/F0gFNI1ZhKU1FqYsMuqZVnw==", + "dev": true, + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", + "dev": true, + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + 
"node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/picomatch": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.0.tgz", + "integrity": "sha512-lY1Q/PiJGC2zOv/z391WOTD+Z02bCgsFfvxoXXf6h7kv9o+WmsmzYqrAwY63sNgOxE4xEdq0WyUnXfKeBrSvYw==", + "dev": true, + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dev": true, + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": 
"sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/serialize-javascript": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.0.tgz", + "integrity": "sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag==", + "dev": true, + "dependencies": { + "randombytes": "^2.1.0" + } + }, + "node_modules/string-width": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz", + "integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", + "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", + "dependencies": { + "ansi-regex": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": 
"sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/wide-align": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.3.tgz", + "integrity": "sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA==", + "dev": true, + "dependencies": { + "string-width": "^1.0.2 || 2" + } + }, + "node_modules/wide-align/node_modules/ansi-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", + "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/wide-align/node_modules/is-fullwidth-code-point": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", + "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/wide-align/node_modules/string-width": { + "version": "2.1.1", + 
"resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", + "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", + "dev": true, + "dependencies": { + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^4.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/wide-align/node_modules/strip-ansi": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", + "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", + "dev": true, + "dependencies": { + "ansi-regex": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/workerpool": { + "version": "6.1.5", + "resolved": "https://registry.npmjs.org/workerpool/-/workerpool-6.1.5.tgz", + "integrity": "sha512-XdKkCK0Zqc6w3iTxLckiuJ81tiD/o5rBE/m+nXpRCB+/Sq4DqkfXZ/x0jW02DG1tGsfUGXbTJyZDP+eu67haSw==", + "dev": true + }, + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", + "dev": true + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs": { + "version": "17.1.1", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.1.1.tgz", + "integrity": 
"sha512-c2k48R0PwKIqKhPMWjeiF6y2xY/gPMUlro0sgxqXpbOIohWiLNXWslsootttv7E1e73QPAMQSg5FeySbVcpsPQ==", + "dependencies": { + "cliui": "^7.0.2", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.0", + "y18n": "^5.0.5", + "yargs-parser": "^20.2.2" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "20.2.9", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", + "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==", + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs-unparser": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/yargs-unparser/-/yargs-unparser-2.0.0.tgz", + "integrity": "sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA==", + "dev": true, + "dependencies": { + "camelcase": "^6.0.0", + "decamelize": "^4.0.0", + "flat": "^5.0.2", + "is-plain-obj": "^2.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + }, + "dependencies": { + "@ungap/promise-all-settled": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@ungap/promise-all-settled/-/promise-all-settled-1.1.2.tgz", + "integrity": "sha512-sL/cEvJWAnClXw0wHk85/2L0G6Sj8UB0Ctc1TEMbKSsmpRosqhwj9gWgFRZSrBr2f9tiXISwNhCPmlfqUqyb9Q==", + "dev": true + }, + "ansi-colors": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.1.tgz", + "integrity": 
"sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA==", + "dev": true + }, + "ansi-regex": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", + "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==" + }, + "ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "requires": { + "color-convert": "^2.0.1" + } + }, + "anymatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.2.tgz", + "integrity": "sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==", + "dev": true, + "requires": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + } + }, + "argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true + }, + "balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true + }, + "binary-extensions": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", + "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", + "dev": true + }, + "brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": 
true, + "requires": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dev": true, + "requires": { + "fill-range": "^7.0.1" + } + }, + "browser-stdout": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz", + "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==", + "dev": true + }, + "camelcase": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.2.0.tgz", + "integrity": "sha512-c7wVvbw3f37nuobQNtgsgG9POC9qMbNuMQmTCqZv23b6MIz0fcYpBiOlv9gEN/hdLdnZTDQhg6e9Dq5M1vKvfg==", + "dev": true + }, + "chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "requires": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "dependencies": { + "supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "requires": { + "has-flag": "^4.0.0" + } + } + } + }, + "chokidar": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.2.tgz", + "integrity": "sha512-ekGhOnNVPgT77r4K/U3GDhu+FQ2S8TnK/s2KbIGXi0SZWuwkZ2QNyfWdZW+TVfn84DpEP7rLeCt2UI6bJ8GwbQ==", + "dev": true, + "requires": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "fsevents": "~2.3.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + } + }, + 
"cliui": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", + "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", + "requires": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^7.0.0" + } + }, + "color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "requires": { + "color-name": "~1.1.4" + } + }, + "color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", + "dev": true + }, + "cron": { + "version": "1.8.2", + "resolved": "https://registry.npmjs.org/cron/-/cron-1.8.2.tgz", + "integrity": "sha512-Gk2c4y6xKEO8FSAUTklqtfSr7oTq0CiPQeLBG5Fl0qoXpZyMcj1SG59YL+hqq04bu6/IuEA7lMkYDAplQNKkyg==", + "requires": { + "moment-timezone": "^0.5.x" + } + }, + "debug": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.2.tgz", + "integrity": "sha512-mOp8wKcvj7XxC78zLgw/ZA+6TSgkoE2C/ienthhRD298T7UNwAg9diBpLRxC0mOezLl4B0xV7M0cCO6P/O0Xhw==", + "requires": { + "ms": "2.1.2" + } + }, + "decamelize": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-4.0.0.tgz", + "integrity": "sha512-9iE1PgSik9HeIIw2JO94IidnE3eBoQrFJ3w7sFuzSX4DpmZ3v5sZpUiV5Swcf6mQEF+Y0ru8Neo+p+nyh2J+hQ==", + "dev": true + }, + "diff": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/diff/-/diff-5.0.0.tgz", + "integrity": 
"sha512-/VTCrvm5Z0JGty/BWHljh+BAiw3IK+2j87NGMu8Nwc/f48WoDAC395uomO9ZD117ZOBaHmkX1oyLvkVM/aIT3w==", + "dev": true + }, + "emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "escalade": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", + "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==" + }, + "escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true + }, + "fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dev": true, + "requires": { + "to-regex-range": "^5.0.1" + } + }, + "find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "requires": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + } + }, + "flat": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", + "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", + "dev": true + }, + "fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", + "dev": true + }, + "fsevents": { + "version": "2.3.2", + "resolved": 
"https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", + "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", + "dev": true, + "optional": true + }, + "get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==" + }, + "glob": { + "version": "7.1.7", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz", + "integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==", + "dev": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "requires": { + "is-glob": "^4.0.1" + } + }, + "growl": { + "version": "1.10.5", + "resolved": "https://registry.npmjs.org/growl/-/growl-1.10.5.tgz", + "integrity": "sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA==", + "dev": true + }, + "has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true + }, + "he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "dev": true + }, + "inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": 
"sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", + "dev": true, + "requires": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true + }, + "integration_tests": { + "version": "file:../../integration_tests", + "requires": { + "@babel/core": "^7.15.0", + "@babel/eslint-parser": "^7.15.0", + "@babel/eslint-plugin": "^7.14.5", + "@grpc/grpc-js": "^1.3.6", + "@grpc/proto-loader": "^0.5.5", + "archiver": "^5.3.0", + "axios": "^0.21.1", + "blakejs": "^1.1.0", + "chai": "^4.2.0", + "clone-deep": "^4.0.1", + "csv-parser": "^3.0.0", + "cucumber": "^6.0.5", + "cucumber-html-reporter": "^5.3.0", + "cucumber-pretty": "^6.0.0", + "dateformat": "^3.0.3", + "eslint": "^7.25.0", + "eslint-config-prettier": "^8.3.0", + "eslint-config-standard": "^16.0.2", + "eslint-plugin-import": "^2.2.0", + "eslint-plugin-node": "^11.1.0", + "eslint-plugin-prettier": "^3.4.0", + "eslint-plugin-promise": "^4.3.1", + "ffi-napi": "^4.0.3", + "glob": "^7.1.7", + "grpc-promise": "^1.4.0", + "hex64": "^0.4.0", + "husky": "^6.0.0", + "jszip": "^3.7.0", + "prettier": "^2.2.1", + "ref-napi": "^3.0.3", + "sha3": "^2.1.3", + "synchronized-promise": "^0.3.1", + "tari_crypto": "^0.9.1", + "utf8": "^3.0.0", + "wallet-grpc-client": "file:../clients/wallet_grpc_client" + } + }, + "is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "requires": { + "binary-extensions": "^2.0.0" + } + }, + "is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=", + "dev": true + }, + 
"is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==" + }, + "is-glob": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz", + "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==", + "dev": true, + "requires": { + "is-extglob": "^2.1.1" + } + }, + "is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true + }, + "is-plain-obj": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", + "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", + "dev": true + }, + "is-unicode-supported": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", + "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", + "dev": true + }, + "isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=", + "dev": true + }, + "js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dev": true, + "requires": { + "argparse": "^2.0.1" + } + }, + "locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": 
"sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "requires": { + "p-locate": "^5.0.0" + } + }, + "log-symbols": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", + "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", + "dev": true, + "requires": { + "chalk": "^4.1.0", + "is-unicode-supported": "^0.1.0" + } + }, + "luxon": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/luxon/-/luxon-2.0.2.tgz", + "integrity": "sha512-ZRioYLCgRHrtTORaZX1mx+jtxKtKuI5ZDvHNAmqpUzGqSrR+tL4FVLn/CUGMA3h0+AKD1MAxGI5GnCqR5txNqg==" + }, + "minimatch": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", + "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "dev": true, + "requires": { + "brace-expansion": "^1.1.7" + } + }, + "mocha": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/mocha/-/mocha-9.1.0.tgz", + "integrity": "sha512-Kjg/XxYOFFUi0h/FwMOeb6RoroiZ+P1yOfya6NK7h3dNhahrJx1r2XIT3ge4ZQvJM86mdjNA+W5phqRQh7DwCg==", + "dev": true, + "requires": { + "@ungap/promise-all-settled": "1.1.2", + "ansi-colors": "4.1.1", + "browser-stdout": "1.3.1", + "chokidar": "3.5.2", + "debug": "4.3.1", + "diff": "5.0.0", + "escape-string-regexp": "4.0.0", + "find-up": "5.0.0", + "glob": "7.1.7", + "growl": "1.10.5", + "he": "1.2.0", + "js-yaml": "4.1.0", + "log-symbols": "4.1.0", + "minimatch": "3.0.4", + "ms": "2.1.3", + "nanoid": "3.1.23", + "serialize-javascript": "6.0.0", + "strip-json-comments": "3.1.1", + "supports-color": "8.1.1", + "which": "2.0.2", + "wide-align": "1.1.3", + "workerpool": "6.1.5", + "yargs": "16.2.0", + "yargs-parser": "20.2.4", + "yargs-unparser": "2.0.0" + }, + "dependencies": { + "debug": { + "version": "4.3.1", + "resolved": 
"https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", + "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "dev": true, + "requires": { + "ms": "2.1.2" + }, + "dependencies": { + "ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + } + } + }, + "ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true + }, + "yargs": { + "version": "16.2.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", + "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", + "dev": true, + "requires": { + "cliui": "^7.0.2", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.0", + "y18n": "^5.0.5", + "yargs-parser": "^20.2.2" + } + }, + "yargs-parser": { + "version": "20.2.4", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.4.tgz", + "integrity": "sha512-WOkpgNhPTlE73h4VFAFsOnomJVaovO8VqLDzy5saChRBFQFBoMYirowyW+Q9HB4HFF4Z7VZTiG3iSzJJA29yRA==", + "dev": true + } + } + }, + "moment": { + "version": "2.29.1", + "resolved": "https://registry.npmjs.org/moment/-/moment-2.29.1.tgz", + "integrity": "sha512-kHmoybcPV8Sqy59DwNDY3Jefr64lK/by/da0ViFcuA4DH0vQg5Q6Ze5VimxkfQNSC+Mls/Kx53s7TjP1RhFEDQ==" + }, + "moment-timezone": { + "version": "0.5.33", + "resolved": "https://registry.npmjs.org/moment-timezone/-/moment-timezone-0.5.33.tgz", + "integrity": "sha512-PTc2vcT8K9J5/9rDEPe5czSIKgLoGsH8UNpA4qZTVw0Vd/Uz19geE9abbIOQKaAQFcnQ3v5YEXrbSc5BpshH+w==", + "requires": { + "moment": ">= 2.9.0" + } + }, + "ms": { + "version": "2.1.2", + "resolved": 
"https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "nanoid": { + "version": "3.1.23", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.1.23.tgz", + "integrity": "sha512-FiB0kzdP0FFVGDKlRLEQ1BgDzU87dy5NnzjeW9YZNt+/c3+q82EQDUwniSAUxp/F0gFNI1ZhKU1FqYsMuqZVnw==", + "dev": true + }, + "normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true + }, + "once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", + "dev": true, + "requires": { + "wrappy": "1" + } + }, + "p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "requires": { + "yocto-queue": "^0.1.0" + } + }, + "p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "requires": { + "p-limit": "^3.0.2" + } + }, + "path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true + }, + "path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", + "dev": true + }, + "picomatch": { + "version": "2.3.0", + "resolved": 
"https://registry.npmjs.org/picomatch/-/picomatch-2.3.0.tgz", + "integrity": "sha512-lY1Q/PiJGC2zOv/z391WOTD+Z02bCgsFfvxoXXf6h7kv9o+WmsmzYqrAwY63sNgOxE4xEdq0WyUnXfKeBrSvYw==", + "dev": true + }, + "randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dev": true, + "requires": { + "safe-buffer": "^5.1.0" + } + }, + "readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "requires": { + "picomatch": "^2.2.1" + } + }, + "require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=" + }, + "safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "dev": true + }, + "serialize-javascript": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.0.tgz", + "integrity": "sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag==", + "dev": true, + "requires": { + "randombytes": "^2.1.0" + } + }, + "string-width": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz", + "integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==", + "requires": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.0" + } + }, + "strip-ansi": { + "version": "6.0.0", + "resolved": 
"https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", + "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", + "requires": { + "ansi-regex": "^5.0.0" + } + }, + "strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true + }, + "supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "requires": { + "has-flag": "^4.0.0" + } + }, + "to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "requires": { + "is-number": "^7.0.0" + } + }, + "which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "requires": { + "isexe": "^2.0.0" + } + }, + "wide-align": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.3.tgz", + "integrity": "sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA==", + "dev": true, + "requires": { + "string-width": "^1.0.2 || 2" + }, + "dependencies": { + "ansi-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", + "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=", + "dev": true + }, + "is-fullwidth-code-point": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", + "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", + "dev": true + }, + "string-width": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", + "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", + "dev": true, + "requires": { + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^4.0.0" + } + }, + "strip-ansi": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", + "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", + "dev": true, + "requires": { + "ansi-regex": "^3.0.0" + } + } + } + }, + "workerpool": { + "version": "6.1.5", + "resolved": "https://registry.npmjs.org/workerpool/-/workerpool-6.1.5.tgz", + "integrity": "sha512-XdKkCK0Zqc6w3iTxLckiuJ81tiD/o5rBE/m+nXpRCB+/Sq4DqkfXZ/x0jW02DG1tGsfUGXbTJyZDP+eu67haSw==", + "dev": true + }, + "wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "requires": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + } + }, + "wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", + "dev": true + }, + "y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==" + }, + "yargs": { + "version": "17.1.1", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.1.1.tgz", + "integrity": "sha512-c2k48R0PwKIqKhPMWjeiF6y2xY/gPMUlro0sgxqXpbOIohWiLNXWslsootttv7E1e73QPAMQSg5FeySbVcpsPQ==", + "requires": { + "cliui": "^7.0.2", + "escalade": 
"^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.0", + "y18n": "^5.0.5", + "yargs-parser": "^20.2.2" + } + }, + "yargs-parser": { + "version": "20.2.9", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", + "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==" + }, + "yargs-unparser": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/yargs-unparser/-/yargs-unparser-2.0.0.tgz", + "integrity": "sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA==", + "dev": true, + "requires": { + "camelcase": "^6.0.0", + "decamelize": "^4.0.0", + "flat": "^5.0.2", + "is-plain-obj": "^2.1.0" + } + }, + "yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true + } + } +} diff --git a/applications/daily_tests/package.json b/applications/daily_tests/package.json new file mode 100644 index 0000000000..d7e8038021 --- /dev/null +++ b/applications/daily_tests/package.json @@ -0,0 +1,21 @@ +{ + "name": "daily_tests", + "version": "1.0.0", + "description": "Protocol Dailies", + "main": "cron_jobs.js", + "dependencies": { + "cron": "^1.8.2", + "debug": "^4.3.2", + "integration_tests": "file:../../integration_tests", + "luxon": "^2.0.2", + "yargs": "^17.0.1" + }, + "scripts": { + "test": "mocha" + }, + "author": "", + "license": "ISC", + "devDependencies": { + "mocha": "^9.1.0" + } +} diff --git a/applications/daily_tests/test.js b/applications/daily_tests/test.js new file mode 100644 index 0000000000..71499a6762 --- /dev/null +++ b/applications/daily_tests/test.js @@ -0,0 +1,73 @@ +// Copyright 2021, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are 
permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +const assert = require("assert"); + +describe("Automatic recovery", () => { + let { + RECOVERY_COMPLETE_REGEXP, + RECOVERY_WORTH_REGEXP, + FAILURE_REGEXP, + } = require("./automatic_recovery_test"); + + describe("Log monitoring", () => { + const matchingExample = + "Recovery complete! 
Scanned = 156637 in 846.87s (184.96 utxos/s), Recovered 261584 worth 65313128.904449 T"; + + const nonMatchingExmaple = + "Recovery process 100% complete (626188 of 626188 utxos)."; + + const failureExample = "Attempt 1/10: Failed to complete wallet recovery"; + + it("correctly recognises the success log entry", () => { + let matching = RECOVERY_COMPLETE_REGEXP.exec(matchingExample); + assert(matching, "expected match"); + assert.equal( + +matching[1], + 156637, + "expected num scanned to be extracted" + ); + }); + + it("ignores a non-matching entry", () => { + let matching = RECOVERY_COMPLETE_REGEXP.exec(nonMatchingExmaple); + assert(!matching, "expected no match"); + }); + + it("extracts the value", () => { + let matching = RECOVERY_WORTH_REGEXP.exec(matchingExample); + assert(matching, "expected match"); + assert.equal( + +matching[1], + 65313128.904449, + "expected value to be extracted" + ); + }); + + it("matches a failure", () => { + let matching = FAILURE_REGEXP.exec(failureExample); + assert(matching, "expected match"); + assert.equal(+matching[1], 1, "expected value to be extracted"); + assert.equal(+matching[2], 10, "expected value to be extracted"); + }); + }); +}); diff --git a/applications/daily_tests/washing_machine.js b/applications/daily_tests/washing_machine.js new file mode 100644 index 0000000000..eaf028ed69 --- /dev/null +++ b/applications/daily_tests/washing_machine.js @@ -0,0 +1,413 @@ +const WalletProcess = require("integration_tests/helpers/walletProcess"); +const WalletClient = require("integration_tests/helpers/walletClient"); +const { getFreePort } = require("integration_tests/helpers/util"); +const { sleep } = require("integration_tests/helpers/util"); +const { PaymentType } = require("integration_tests/helpers/types"); +const debug = require("debug")("washing-machine"); +const { DateTime, Interval } = require("luxon"); +const { sendWebhookNotification, getWebhookUrl, yargs } = require("./helpers"); + +// FPG to use for transactions 
+const FEE_PER_GRAM = 5; +const WEBHOOK_CHANNEL = "protocol-bot-stuff"; + +/// To start, create a normal console wallet, and send it funds. Then run it with GRPC set to 18143. For quickest results, +/// set the confirmation time to 0 (must be mined) +/// This test will send txtr between two wallets and then back again. It waits for a block to be mined, so could take a while +async function main() { + const argObj = yargs() + .option("base-node", { + alias: "b", + description: + "Base node for wallet2. This is ignored if wallet2-grpc is set", + type: "string", + default: + // ncal + "e2cef0473117da34108dd85d4425536b8a1f317478686a6d7a0bbb5c800a747d::/onion3/3eiacmnozk7rcvrx7brhlssnpueqsjdsfbfmq63d2bk7h3vtah35tcyd:18141", + }) + .option("wallet1-grpc", { + alias: "w1", + description: "Wallet 1 GRPC address", + type: "string", + default: "127.0.0.1:18143", + }) + .option("wallet2-grpc", { + alias: "w2", + description: + "Wallet 2 GRPC address (If not supplied, a new wallet will be started)", + type: "string", + default: null, + }) + .option("num-txns", { + alias: "t", + type: "number", + default: 10, + description: + "The number of transactions that are sent each way within a washing round.", + }) + .option("num-rounds", { + alias: "n", + type: "number", + default: Infinity, + description: "The number of send back and forth washing rounds.", + }) + .option("amount-range", { + type: "string", + description: "The start/end range for per txn amounts in millitari (mT)", + default: "50-200", + }) + .option("sleep-after-round", { + alias: "s", + type: "number", + description: "Interval in seconds between rounds", + default: null, + }) + .option("one-sided", { type: "boolean", default: false }) + .option("routing-mechanism", { + alias: "r", + type: "string", + default: "DirectAndStoreAndForward", + description: + "Possible values: DirectOnly, StoreAndForwardOnly, DirectAndStoreAndForward.", + }) + .alias("help", "h"); + + argObj.help(); + + const webhookNotificationsEnabled = 
!!getWebhookUrl(); + if (!webhookNotificationsEnabled) { + console.warn( + "Matter most notifications are disabled because WEBHOOK_URL environment variable is not set" + ); + } + + let washingMachine = WashingMachine.new({ + webhookNotificationsEnabled, + ...argObj.argv, + }); + try { + await washingMachine.run(); + } catch (err) { + console.error(err); + logNotify(`🚨 Washing machine failed: ${err.toString()}`); + } +} + +function WashingMachine(options) { + debug(`Washing machine initialized - ${JSON.stringify(options, null, 2)}`); + + this.wallet1 = new WalletClient(); + this.wallet2 = null; + + this.showWalletDetails = async function () { + for (let [name, wallet] of [ + ["Wallet 1", this.wallet1], + ["Wallet 2", this.wallet2], + ]) { + const walletIdentity = await wallet.identify(); + const { status, num_node_connections } = await wallet.getNetworkStatus(); + console.log( + `${name}: ${walletIdentity.public_key} status = ${status}, num_node_connections = ${num_node_connections}` + ); + } + }; + + this.run = async () => { + const { + baseNode: baseNodeSeed, + numTxns: numTransactions, + oneSided, + numRounds, + sleepAfterRound, + wallet1Grpc, + wallet2Grpc, + amountRange, + routingMechanism, + } = options; + + logNotify( + `🚀 Launching washing machine (numTransactions = ${numTransactions}, numRounds = ${numRounds}, sleep = ${sleepAfterRound}s)` + ); + + await this.wallet1.connect(wallet1Grpc); + + debug("Compiling and starting applications..."); + // Start wallet2 + if (wallet2Grpc) { + this.wallet2 = new WalletClient(); + await this.wallet2.connect(wallet2Grpc); + } else { + const port = await getFreePort(20000, 25000); + const wallet2Process = createGrpcWallet( + baseNodeSeed, + { + routingMechanism, + grpc_console_wallet_address: `127.0.0.1:${port}`, + }, + true + ); + wallet2Process.baseDir = "./wallet"; + await wallet2Process.startNew(); + this.wallet2 = await wallet2Process.connectClient(); + } + + await this.showWalletDetails(); + + let [minAmount, 
maxAmount] = amountRange + .split("-", 2) + .map((n) => parseInt(n) * 1000); + + const minRequiredBalance = + ((maxAmount - minAmount) / 2) * numTransactions + + calcPossibleFee(FEE_PER_GRAM, numTransactions); + + let currentBalance = await waitForBalance(this.wallet1, minRequiredBalance); + + debug( + `Required balance (${minRequiredBalance}uT) reached: ${currentBalance.available_balance}uT. Sending transactions between ${minAmount}uT and ${maxAmount}uT` + ); + + let roundCount = 0; + let counters = { numSuccess: 0, numFailed: 0 }; + const startTime = DateTime.now(); + let lastNotifiedAt = DateTime.now(); + while (true) { + debug(`Wallet 1 -> Wallet 2`); + { + let { + totalSent: wallet1AmountSent, + numSuccess, + numFailed, + } = await sendFunds(this.wallet1, this.wallet2, { + minAmount: minAmount, + maxAmount: maxAmount, + oneSided, + numTransactions, + feePerGram: FEE_PER_GRAM, + }); + + counters.numSuccess += numSuccess; + counters.numFailed += numFailed; + debug( + `Waiting for wallet2 to have a balance of ${wallet1AmountSent}uT` + ); + + await waitForBalance(this.wallet2, wallet1AmountSent); + } + + debug(`Wallet 2 -> Wallet 1`); + const { + totalSent: wallet2AmountSent, + numSuccess, + numFailed, + } = await sendFunds(this.wallet2, this.wallet1, { + minAmount: minAmount, + maxAmount: maxAmount, + oneSided, + numTransactions, + feePerGram: FEE_PER_GRAM, + }); + + counters.numSuccess += numSuccess; + counters.numFailed += numFailed; + + roundCount++; + if (isFinite(numRounds) && roundCount >= numRounds) { + break; + } + + // Status update every couple of hours + if (DateTime.now().diff(lastNotifiedAt).hours >= 4) { + lastNotifiedAt = DateTime.now(); + const uptime = Interval.fromDateTimes(startTime, DateTime.now()); + const rate = + (counters.numSuccess / (uptime.toDuration("seconds") * 1.0 || 1.0)) * + 60.0; + const upstimeStr = uptime + .toDuration(["days", "hours", "minutes", "seconds"]) + .toFormat("d'd' h'h' m'm' s's'"); + logNotify( + `Washing machine 
status. ${counters.numSuccess} sent,` + + `${counters.numFailed} failed in ${roundCount} rounds. ` + + `Uptime: ${upstimeStr}, Avg. rate: ${rate.toFixed(2)}txs/m` + ); + } + + if (sleepAfterRound) { + debug(`Taking a break for ${sleepAfterRound}s`); + await sleep(sleepAfterRound * 1000); + } + + await waitForBalance(this.wallet1, wallet2AmountSent); + } + + const uptime = Interval.fromDateTimes(startTime, DateTime.now()); + logNotify( + `Washing machine completed. ${counters.numSuccess} sent, ${counters.numFailed} failed in ${numRounds} rounds. Uptime is ${uptime}` + ); + if (wallet2Process) { + await wallet2Process.stop(); + } + }; +} + +WashingMachine.new = (...args) => new WashingMachine(...args); + +async function sendFunds(senderWallet, receiverWallet, options) { + const { available_balance: senderBalance } = await senderWallet.getBalance(); + const receiverInfo = await receiverWallet.identify(); + const paymentType = options.oneSided + ? PaymentType.ONE_SIDED + : PaymentType.STANDARD_MIMBLEWIMBLE; + + const transactionIter = transactionGenerator({ + address: receiverInfo.public_key, + feePerGram: options.feePerGram, + minAmount: options.minAmount, + maxAmount: options.maxAmount, + numTransactions: options.numTransactions, + balance: senderBalance, + paymentType, + }); + + let transactions = collect(transactionIter); + let totalToSend = transactions.reduce( + (total, { amount }) => total + amount, + 0 + ); + // For interactive transactions, a coin split is needed first + if (!options.oneSided) { + let avgAmountPerTransaction = totalToSend / transactions.length; + debug(`COINSPLIT: amount = ${avgAmountPerTransaction}uT`); + if (transactions.length > 1) { + let leftToSplit = transactions.length; + while (leftToSplit > 499) { + let split_result = await senderWallet.coin_split({ + amount_per_split: avgAmountPerTransaction, + split_count: 499, + fee_per_gram: options.feePerGram, + }); + debug(`Split: ${JSON.stringify(split_result)}`); + leftToSplit -= 499; + } + if 
(leftToSplit > 0) { + let split_result = await senderWallet.coin_split({ + amount_per_split: avgAmountPerTransaction, + split_count: leftToSplit, + fee_per_gram: options.feePerGram, + }); + debug(`Last Split: ${JSON.stringify(split_result)}`); + } + } + await waitForBalance(senderWallet, totalToSend); + } + + let { results } = await senderWallet.transfer({ + recipients: transactions, + }); + + let numSuccess = 0; + let numFailed = 0; + for (const result of results) { + if (result.is_success) { + numSuccess += 1; + debug(`✅ transaction #${result.transaction_id}`); + } else { + numFailed += 1; + debug( + `❌ ${result.failure_message} transaction #${result.transaction_id}` + ); + } + } + + // TODO: total sent should take failures into account + return { totalSent: totalToSend, numSuccess, numFailed }; +} + +function* transactionGenerator(options) { + // Loosely account for fees + const avgSpendPerTransaction = + options.minAmount + + (options.maxAmount - options.minAmount) / 2 + + calcPossibleFee(options.feePerGram, 1); + debug( + `Generating ${options.numTransactions} transactions averaging ${avgSpendPerTransaction}uT (incl fees)` + ); + + let amountToSend = options.minAmount; + let i = 0; + let total = 0; + while (i < options.numTransactions && total < options.balance) { + total += amountToSend + calcPossibleFee(options.feePerGram, 1); + yield { + address: options.address, + amount: amountToSend, + fee_per_gram: options.feePerGram, + message: `Washing machine funds ${i + 1} of ${options.numTransactions}`, + payment_type: options.paymentType, + }; + + amountToSend = Math.max( + options.minAmount, + (amountToSend + 1) % options.maxAmount + ); + i++; + } +} + +function createGrpcWallet(baseNode, opts = {}, excludeTestEnvars = true) { + let process = new WalletProcess("sender", excludeTestEnvars, { + transport: "tor", + network: "weatherwax", + num_confirmations: 0, + ...opts, + }); + process.setPeerSeeds([baseNode]); + return process; +} + +async function 
waitForBalance(client, balance) { + if (isNaN(balance)) { + throw new Error("balance is not a number"); + } + let i = 0; + let r = 1; + let newBalance = await client.getBalance(); + debug( + `Waiting for available wallet balance (${newBalance.available_balance}uT, pending=${newBalance.pending_incoming_balance}uT) to reach at least ${balance}uT...` + ); + while (true) { + newBalance = await client.getBalance(); + if (newBalance.available_balance >= balance) { + return newBalance; + } + await sleep(1000); + if (i >= 60) { + debug(`Still waiting... [t=${r * i}s]`); + i = 0; + r++; + } else { + i++; + } + } +} + +function collect(iter) { + let arr = []; + for (i of iter) { + arr.push(i); + } + return arr; +} + +function calcPossibleFee(feePerGram, numTransactions) { + const TRANSACTION_WEIGHT = (1 + 3 + 13) * 2; + return feePerGram * TRANSACTION_WEIGHT * numTransactions; +} + +function logNotify(message) { + console.log(message); + sendWebhookNotification(WEBHOOK_CHANNEL, message); +} + +Promise.all([main()]); diff --git a/applications/installer/src/config/base_node.rs b/applications/installer/src/config/base_node.rs index 5c7077a0ab..d3ab644bcc 100644 --- a/applications/installer/src/config/base_node.rs +++ b/applications/installer/src/config/base_node.rs @@ -20,7 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use crate::config::common::{SourceLocation, InstallLocation}; +use crate::config::common::{InstallLocation, SourceLocation}; pub struct BaseNodeOptions { // Where do we get the base node code from? @@ -28,7 +28,7 @@ pub struct BaseNodeOptions { // Where do the base node and related files live? 
install_location: InstallLocation, // Tor setup - tor_options: TorOptions + tor_options: TorOptions, } pub enum TorLocation { @@ -41,5 +41,5 @@ pub enum TorLocation { } pub struct TorOptions { - location: TorLocation + location: TorLocation, } diff --git a/applications/installer/src/config/combined.rs b/applications/installer/src/config/combined.rs index 581bd2bb5a..c0bfb17542 100644 --- a/applications/installer/src/config/combined.rs +++ b/applications/installer/src/config/combined.rs @@ -20,8 +20,12 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use crate::config::{BaseNodeOptions, MiningOptions, WalletOptions}; -use crate::config::common::{TariConfig, Network}; +use crate::config::{ + common::{Network, TariConfig}, + BaseNodeOptions, + MiningOptions, + WalletOptions, +}; pub struct InstallerOptions { // Selects the netowrk to run @@ -34,4 +38,4 @@ pub struct InstallerOptions { wallet_options: Option, // Installer options for mining mining_options: Option, -} \ No newline at end of file +} diff --git a/applications/installer/src/config/common.rs b/applications/installer/src/config/common.rs index 291018428e..8c26809355 100644 --- a/applications/installer/src/config/common.rs +++ b/applications/installer/src/config/common.rs @@ -21,10 +21,11 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
use std::path::PathBuf; -use std::env::JoinPathsError; +#[cfg(target_os = "windows")] const TARI_FOLDER: &str = "tari"; -const TARI_HIDDEN_FOLDER: &str = ".tari"; +#[cfg(any(target_os = "macos", target_os = "unix"))] +const TARI_FOLDER: &str = ".tari"; pub enum SourceLocation { SourceCode(SourceCodeOptions), @@ -46,14 +47,16 @@ impl Default for InstallLocation { let mut home = dirs::home_dir().expect("No default home folder"); let mut bin: PathBuf; let data = dirs::data_dir().expect("No default data folder"); - #[cfg(target_os = "windows")] { + #[cfg(target_os = "windows")] + { home.push(TARI_FOLDER); bin = PathBuf::from("C:\\Program Files"); } - #[cfg(any(target_os = "macos", target_os = "unix"))] { + #[cfg(any(target_os = "macos", target_os = "unix"))] + { bin = dirs::home_dir().expect("No default home folder"); bin.push("bin"); - home.push(TARI_HIDDEN_FOLDER); + home.push(TARI_FOLDER); } Self { diff --git a/applications/installer/src/config/mining.rs b/applications/installer/src/config/mining.rs index b9f0a6d775..f344d57c5a 100644 --- a/applications/installer/src/config/mining.rs +++ b/applications/installer/src/config/mining.rs @@ -20,7 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use crate::config::{SourceLocation, InstallLocation}; +use crate::config::{InstallLocation, SourceLocation}; pub enum PoolStrategy { Solo, @@ -31,7 +31,10 @@ pub enum Miners { None, Sha3Only(PoolStrategy), MergeMineOnly(PoolStrategy), - Both{ sha3: PoolStrategy, merge_mine: PoolStrategy }, + Both { + sha3: PoolStrategy, + merge_mine: PoolStrategy, + }, } pub struct MiningOptions { @@ -44,4 +47,3 @@ pub struct XmRigOptions { source: SourceLocation, install_location: InstallLocation, } - diff --git a/applications/installer/src/config/mod.rs b/applications/installer/src/config/mod.rs index 9eb4966e21..284fb5100d 100644 --- a/applications/installer/src/config/mod.rs +++ b/applications/installer/src/config/mod.rs @@ -20,14 +20,14 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -mod common; mod base_node; -mod wallet; -mod mining; mod combined; +mod common; +mod mining; +mod wallet; -pub use common::{SourceLocation, InstallLocation}; -pub use base_node::{BaseNodeOptions}; -pub use mining::{Miners, MiningOptions}; -pub use wallet::{WalletOptions}; +pub use base_node::BaseNodeOptions; pub use combined::InstallerOptions; +pub use common::{InstallLocation, SourceLocation}; +pub use mining::{Miners, MiningOptions}; +pub use wallet::WalletOptions; diff --git a/applications/installer/src/config/wallet.rs b/applications/installer/src/config/wallet.rs index c550a88904..9f2d6fe6fa 100644 --- a/applications/installer/src/config/wallet.rs +++ b/applications/installer/src/config/wallet.rs @@ -20,11 +20,11 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use crate::config::{SourceLocation, InstallLocation}; +use crate::config::{InstallLocation, SourceLocation}; pub struct WalletOptions { // Where to pull the wallet source or binary from source: SourceLocation, // Where the wallet and its data will live. SQLite installation will follow the settings chosen here. install_location: InstallLocation, -} \ No newline at end of file +} diff --git a/applications/installer/src/main.rs b/applications/installer/src/main.rs index 527d29c896..e0265f2609 100644 --- a/applications/installer/src/main.rs +++ b/applications/installer/src/main.rs @@ -1,8 +1,3 @@ mod config; -fn main() { - -} - - - +fn main() {} diff --git a/applications/tari_app_grpc/Cargo.toml b/applications/tari_app_grpc/Cargo.toml index 03b631833a..bec7e05720 100644 --- a/applications/tari_app_grpc/Cargo.toml +++ b/applications/tari_app_grpc/Cargo.toml @@ -4,7 +4,7 @@ authors = ["The Tari Development Community"] description = "This crate is to provide a single source for all cross application grpc files and conversions to and from tari::core" repository = "https://github.com/tari-project/tari" license = "BSD-3-Clause" -version = "0.9.1" +version = "0.9.5" edition = "2018" [dependencies] diff --git a/applications/tari_app_grpc/proto/base_node.proto b/applications/tari_app_grpc/proto/base_node.proto index 47c1bd7dd5..8d6f282ced 100644 --- a/applications/tari_app_grpc/proto/base_node.proto +++ b/applications/tari_app_grpc/proto/base_node.proto @@ -80,6 +80,8 @@ service BaseNode { rpc GetNetworkStatus(Empty) returns (NetworkStatusResponse); // List currently connected peers rpc ListConnectedPeers(Empty) returns (ListConnectedPeersResponse); + // Get mempool stats + rpc GetMempoolStats(Empty) returns (MempoolStatsResponse); } message SubmitBlockResponse { @@ -337,3 +339,10 @@ message SoftwareUpdate { string sha = 3; string download_url = 4; } + +message MempoolStatsResponse { + uint64 total_txs = 1; + uint64 unconfirmed_txs = 2; + uint64 reorg_txs = 3; + uint64 
total_weight = 4; +} \ No newline at end of file diff --git a/applications/tari_app_grpc/proto/wallet.proto b/applications/tari_app_grpc/proto/wallet.proto index 74747787be..bc561f0e02 100644 --- a/applications/tari_app_grpc/proto/wallet.proto +++ b/applications/tari_app_grpc/proto/wallet.proto @@ -50,6 +50,8 @@ service Wallet { rpc GetNetworkStatus(Empty) returns (NetworkStatusResponse); // List currently connected peers rpc ListConnectedPeers(Empty) returns (ListConnectedPeersResponse); + // Cancel pending transaction + rpc CancelTransaction (CancelTransactionRequest) returns (CancelTransactionResponse); } message GetVersionRequest { } @@ -114,7 +116,6 @@ message TransactionInfo { google.protobuf.Timestamp timestamp = 10; string message = 11; bool valid = 12; - bool is_found = 13; } enum TransactionDirection { @@ -138,6 +139,8 @@ enum TransactionStatus { TRANSACTION_STATUS_COINBASE = 5; // This transaction is mined and confirmed at the current base node's height TRANSACTION_STATUS_MINED_CONFIRMED = 6; + // The transaction was not found by the wallet its in transaction database + TRANSACTION_STATUS_NOT_FOUND = 7; } message GetCompletedTransactionsRequest { } @@ -184,3 +187,11 @@ message ImportUtxosResponse { repeated uint64 tx_ids = 1; } +message CancelTransactionRequest { + uint64 tx_id = 1; +} + +message CancelTransactionResponse { + bool is_success = 1; + string failure_message = 2; +} \ No newline at end of file diff --git a/applications/tari_app_grpc/src/conversions/transaction.rs b/applications/tari_app_grpc/src/conversions/transaction.rs index 323149fa3a..97dcaf8795 100644 --- a/applications/tari_app_grpc/src/conversions/transaction.rs +++ b/applications/tari_app_grpc/src/conversions/transaction.rs @@ -83,7 +83,7 @@ impl grpc::TransactionInfo { pub fn not_found(tx_id: TxId) -> Self { Self { tx_id, - is_found: false, + status: grpc::TransactionStatus::NotFound as i32, ..Default::default() } } diff --git a/applications/tari_app_utilities/Cargo.toml 
b/applications/tari_app_utilities/Cargo.toml index b9f6f20e2a..333af5f959 100644 --- a/applications/tari_app_utilities/Cargo.toml +++ b/applications/tari_app_utilities/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tari_app_utilities" -version = "0.9.1" +version = "0.9.5" authors = ["The Tari Development Community"] edition = "2018" diff --git a/applications/tari_app_utilities/src/identity_management.rs b/applications/tari_app_utilities/src/identity_management.rs index 59db6b745e..40bfcf5cee 100644 --- a/applications/tari_app_utilities/src/identity_management.rs +++ b/applications/tari_app_utilities/src/identity_management.rs @@ -24,12 +24,14 @@ use crate::utilities::ExitCodes; use log::*; use rand::rngs::OsRng; use std::{clone::Clone, fs, path::Path, string::ToString, sync::Arc}; +use tari_common::configuration::bootstrap::prompt; use tari_comms::{multiaddr::Multiaddr, peer_manager::PeerFeatures, NodeIdentity}; use tari_core::transactions::types::PrivateKey; use tari_crypto::{ keys::SecretKey, tari_utilities::{hex::Hex, message_format::MessageFormat}, }; + pub const LOG_TARGET: &str = "tari_application"; /// Loads the node identity, or creates a new one if the --create-id flag was specified @@ -51,13 +53,17 @@ pub fn setup_node_identity>( Ok(id) => Ok(Arc::new(id)), Err(e) => { if !create_id { - let msg =format!( - "Node identity information not found. {}. You can update the configuration file to point to a \ - valid node identity file, or re-run the node with the --create-id flag to create a new identity.", - e - ); - error!(target: LOG_TARGET, "{}", msg); - return Err(ExitCodes::ConfigError(msg)); + let prompt = prompt("Node identity does not exist.\nWould you like to to create one (Y/n)?"); + if !prompt { + let msg = format!( + "Node identity information not found. {}. 
You can update the configuration file to point to a \ + valid node identity file, or re-run the node with the --create-id flag to create a new \ + identity.", + e + ); + error!(target: LOG_TARGET, "{}", msg); + return Err(ExitCodes::ConfigError(msg)); + }; } debug!(target: LOG_TARGET, "Node id not found. {}. Creating new ID", e); diff --git a/applications/tari_app_utilities/src/utilities.rs b/applications/tari_app_utilities/src/utilities.rs index ddc640fd87..d963f212a7 100644 --- a/applications/tari_app_utilities/src/utilities.rs +++ b/applications/tari_app_utilities/src/utilities.rs @@ -26,7 +26,7 @@ use log::*; use tari_common::{CommsTransport, GlobalConfig, SocksAuthentication, TorControlAuthentication}; use tari_comms::{ connectivity::ConnectivityError, - peer_manager::NodeId, + peer_manager::{NodeId, PeerManagerError}, protocol::rpc::RpcError, socks, tor, @@ -146,6 +146,12 @@ impl From for ExitCodes { } } +impl From for ExitCodes { + fn from(err: PeerManagerError) -> Self { + ExitCodes::NetworkError(err.to_string()) + } +} + impl ExitCodes { pub fn grpc(err: M) -> Self { ExitCodes::GrpcError(format!("GRPC connection error: {}", err)) diff --git a/applications/tari_base_node/Cargo.toml b/applications/tari_base_node/Cargo.toml index 3cadda50b8..db832f8a9a 100644 --- a/applications/tari_base_node/Cargo.toml +++ b/applications/tari_base_node/Cargo.toml @@ -4,7 +4,7 @@ authors = ["The Tari Development Community"] description = "The tari full base node implementation" repository = "https://github.com/tari-project/tari" license = "BSD-3-Clause" -version = "0.9.1" +version = "0.9.5" edition = "2018" [dependencies] @@ -27,7 +27,6 @@ chrono = "0.4" config = { version = "0.9.3" } futures = { version = "^0.3.1", default-features = false, features = ["alloc"]} log = { version = "0.4.8", features = ["std"] } -log4rs = { version = "0.8.3", features = ["toml_format", "rolling_file_appender", "compound_policy", "size_trigger", "fixed_window_roller"] } regex = "1" rustyline = 
"6.0" rustyline-derive = "0.3" @@ -36,7 +35,16 @@ strum = "^0.19" strum_macros = "0.18.0" thiserror = "^1.0.20" tonic = "0.2" +tracing = "0.1.26" +tracing-opentelemetry = "0.15.0" +tracing-subscriber = "0.2.20" + +# network tracing, rt-tokio for async batch export +opentelemetry = { version = "0.16", default-features = false, features = ["trace","rt-tokio"] } +opentelemetry-jaeger = { version="0.15", features=["rt-tokio"]} [features] avx2 = ["tari_core/avx2", "tari_crypto/avx2", "tari_p2p/avx2", "tari_wallet/avx2", "tari_comms/avx2", "tari_comms_dht/avx2"] safe = [] + + diff --git a/applications/tari_base_node/src/bootstrap.rs b/applications/tari_base_node/src/bootstrap.rs index 9953483b4b..cd10870e6d 100644 --- a/applications/tari_base_node/src/bootstrap.rs +++ b/applications/tari_base_node/src/bootstrap.rs @@ -88,7 +88,12 @@ where B: BlockchainBackend + 'static pubsub_connector(runtime::Handle::current(), buf_size, config.buffer_rate_limit_base_node); let peer_message_subscriptions = Arc::new(peer_message_subscriptions); - let node_config = BaseNodeServiceConfig::default(); // TODO - make this configurable + let node_config = BaseNodeServiceConfig { + service_request_timeout: config.service_request_timeout, + fetch_blocks_timeout: config.fetch_blocks_timeout, + fetch_utxos_timeout: config.fetch_utxos_timeout, + ..Default::default() + }; let mempool_config = MempoolServiceConfig::default(); // TODO - make this configurable let comms_config = self.create_comms_config(); diff --git a/applications/tari_base_node/src/builder.rs b/applications/tari_base_node/src/builder.rs index 87efdaf9bf..dc36f64f59 100644 --- a/applications/tari_base_node/src/builder.rs +++ b/applications/tari_base_node/src/builder.rs @@ -67,6 +67,7 @@ pub struct BaseNodeContext { impl BaseNodeContext { /// Starts the node container. This entails the base node state machine. /// This call consumes the NodeContainer instance. 
+ #[tracing::instrument(name = "base_node::run", skip(self))] pub async fn run(self) { info!(target: LOG_TARGET, "Tari base node has STARTED"); diff --git a/applications/tari_base_node/src/command_handler.rs b/applications/tari_base_node/src/command_handler.rs index 472c147b68..114cee7ad2 100644 --- a/applications/tari_base_node/src/command_handler.rs +++ b/applications/tari_base_node/src/command_handler.rs @@ -60,6 +60,11 @@ use tari_p2p::auto_update::SoftwareUpdaterHandle; use tari_wallet::util::emoji::EmojiId; use tokio::{runtime, sync::watch}; +pub enum StatusOutput { + Log, + Full, +} + pub struct CommandHandler { executor: runtime::Handle, config: Arc, @@ -95,7 +100,7 @@ impl CommandHandler { } } - pub fn status(&self) { + pub fn status(&self, output: StatusOutput) { let mut state_info = self.state_machine_info.clone(); let mut node = self.node_service.clone(); let mut mempool = self.mempool_service.clone(); @@ -171,8 +176,14 @@ impl CommandHandler { ), ); - info!(target: "base_node::app::status", "{}", status_line); - println!("{}", status_line); + let target = "base_node::app::status"; + match output { + StatusOutput::Full => { + println!("{}", status_line); + info!(target: target, "{}", status_line); + }, + StatusOutput::Log => info!(target: target, "{}", status_line), + }; }); } @@ -325,11 +336,8 @@ impl CommandHandler { ); }, Ok(mut data) => match data.pop() { - Some(v) => println!("{}", v.block()), - _ => println!( - "Pruned node: kernel found, but block not found for kernel signature {}", - hex_sig - ), + Some(v) => println!("{}", v), + _ => println!("No kernel with signature {} found", hex_sig), }, }; }); diff --git a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs index 86ffce82bc..00474a5cb4 100644 --- a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs +++ b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs @@ -1120,6 +1120,27 @@ impl 
tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { Ok(Response::new(resp)) } + + async fn get_mempool_stats( + &self, + _: Request, + ) -> Result, Status> { + let mut mempool_handle = self.mempool_service.clone(); + + let mempool_stats = mempool_handle.get_mempool_stats().await.map_err(|e| { + error!(target: LOG_TARGET, "Error submitting query:{}", e); + Status::internal(e.to_string()) + })?; + + let response = tari_rpc::MempoolStatsResponse { + total_txs: mempool_stats.total_txs as u64, + unconfirmed_txs: mempool_stats.unconfirmed_txs as u64, + reorg_txs: mempool_stats.reorg_txs as u64, + total_weight: mempool_stats.total_weight, + }; + + Ok(Response::new(response)) + } } enum BlockGroupType { diff --git a/applications/tari_base_node/src/main.rs b/applications/tari_base_node/src/main.rs index 5b51179c4a..8ef2c9b68a 100644 --- a/applications/tari_base_node/src/main.rs +++ b/applications/tari_base_node/src/main.rs @@ -95,12 +95,14 @@ mod recovery; mod status_line; mod utils; -use crate::command_handler::CommandHandler; -use futures::{pin_mut, FutureExt}; +use crate::command_handler::{CommandHandler, StatusOutput}; +use futures::{future::Fuse, pin_mut, FutureExt}; use log::*; +use opentelemetry::{self, global, KeyValue}; use parser::Parser; use rustyline::{config::OutputStreamType, error::ReadlineError, CompletionType, Config, EditMode, Editor}; use std::{ + env, net::SocketAddr, process, sync::Arc, @@ -114,8 +116,13 @@ use tari_app_utilities::{ use tari_common::{configuration::bootstrap::ApplicationType, ConfigBootstrap, GlobalConfig}; use tari_comms::{peer_manager::PeerFeatures, tor::HiddenServiceControllerError}; use tari_shutdown::{Shutdown, ShutdownSignal}; -use tokio::{runtime, task, time}; +use tokio::{ + runtime, + task, + time::{self, Delay}, +}; use tonic::transport::Server; +use tracing_subscriber::{layer::SubscriberExt, Registry}; const LOG_TARGET: &str = "base_node::app"; /// Application entry point @@ -144,12 +151,14 @@ fn main_inner() -> 
Result<(), ExitCodes> { })?; rt.block_on(run_node(node_config.into(), bootstrap))?; - + // Shutdown and send any traces + global::shutdown_tracer_provider(); Ok(()) } /// Sets up the base node and runs the cli_loop async fn run_node(node_config: Arc, bootstrap: ConfigBootstrap) -> Result<(), ExitCodes> { + enable_tracing_if_specified(&bootstrap); // Load or create the Node identity let node_identity = setup_node_identity( &node_config.base_node_identity_file, @@ -221,10 +230,11 @@ async fn run_node(node_config: Arc, bootstrap: ConfigBootstrap) -> } // Run, node, run! + let command_handler = Arc::new(CommandHandler::new(runtime::Handle::current(), &ctx)); if bootstrap.non_interactive_mode { + task::spawn(status_loop(command_handler, shutdown)); println!("Node started in non-interactive mode (pid = {})", process::id()); } else { - let command_handler = Arc::new(CommandHandler::new(runtime::Handle::current(), &ctx)); let parser = Parser::new(command_handler); cli::print_banner(parser.get_commands(), 3); @@ -242,6 +252,25 @@ async fn run_node(node_config: Arc, bootstrap: ConfigBootstrap) -> Ok(()) } +fn enable_tracing_if_specified(bootstrap: &ConfigBootstrap) { + if bootstrap.tracing_enabled { + // To run: docker run -d -p6831:6831/udp -p6832:6832/udp -p16686:16686 -p14268:14268 \ + // jaegertracing/all-in-one:latest + global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new()); + let tracer = opentelemetry_jaeger::new_pipeline() + .with_service_name("tari::base_node") + .with_tags(vec![KeyValue::new("pid", process::id().to_string()), KeyValue::new("current_exe", env::current_exe().unwrap().to_str().unwrap_or_default().to_owned())]) + // TODO: uncomment when using tokio 1 + // .install_batch(opentelemetry::runtime::Tokio) + .install_simple() + .unwrap(); + let telemetry = tracing_opentelemetry::layer().with_tracer(tracer); + let subscriber = Registry::default().with(telemetry); + tracing::subscriber::set_global_default(subscriber) + .expect("Tracing could 
not be set. Try running without `--tracing-enabled`"); + } +} + /// Runs the gRPC server async fn run_grpc( grpc: crate::grpc::base_node_grpc_server::BaseNodeGrpcServer, @@ -264,7 +293,7 @@ async fn run_grpc( } async fn read_command(mut rustyline: Editor) -> Result<(String, Editor), String> { - task::spawn(async { + task::spawn_blocking(|| { let readline = rustyline.readline(">> "); match readline { @@ -291,7 +320,31 @@ async fn read_command(mut rustyline: Editor) -> Result<(String, Editor

Fuse { + let duration = match start_time.elapsed().as_secs() { + 0..=120 => Duration::from_secs(5), + _ => Duration::from_secs(30), + }; + time::delay_for(duration).fuse() +} + +async fn status_loop(command_handler: Arc, shutdown: Shutdown) { + let start_time = Instant::now(); + let mut shutdown_signal = shutdown.to_signal(); + loop { + let mut interval = status_interval(start_time); + futures::select! { + _ = interval => { + command_handler.status(StatusOutput::Log); + }, + _ = shutdown_signal => { + break; + } + } + } +} + +/// Runs the Base Node CLI loop /// ## Parameters /// `parser` - The parser to process input commands /// `shutdown` - The trigger for shutting down @@ -315,24 +368,17 @@ async fn cli_loop(parser: Parser, mut shutdown: Shutdown) { let start_time = Instant::now(); let mut software_update_notif = command_handler.get_software_updater().new_update_notifier().clone(); loop { - let delay_time = if start_time.elapsed() < Duration::from_secs(120) { - Duration::from_secs(2) - } else if start_time.elapsed() < Duration::from_secs(300) { - Duration::from_secs(10) - } else { - Duration::from_secs(30) - }; - - let mut interval = time::delay_for(delay_time).fuse(); - + let mut interval = status_interval(start_time); futures::select! { res = read_command_fut => { match res { Ok((line, mut rustyline)) => { if let Some(p) = rustyline.helper_mut().as_deref_mut() { - p.handle_command(line.as_str(), &mut shutdown) + p.handle_command(line.as_str(), &mut shutdown); + } + if !shutdown.is_triggered() { + read_command_fut.set(read_command(rustyline).fuse()); } - read_command_fut.set(read_command(rustyline).fuse()); }, Err(err) => { // This happens when the node is shutting down. 
@@ -353,7 +399,7 @@ async fn cli_loop(parser: Parser, mut shutdown: Shutdown) { } } _ = interval => { - command_handler.status(); + command_handler.status(StatusOutput::Full); }, _ = shutdown_signal => { break; diff --git a/applications/tari_base_node/src/parser.rs b/applications/tari_base_node/src/parser.rs index 3fb265cb22..f280019ee0 100644 --- a/applications/tari_base_node/src/parser.rs +++ b/applications/tari_base_node/src/parser.rs @@ -21,7 +21,7 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. use super::LOG_TARGET; -use crate::command_handler::{CommandHandler, Format}; +use crate::command_handler::{CommandHandler, Format, StatusOutput}; use futures::future::Either; use log::*; use rustyline::{ @@ -174,7 +174,7 @@ impl Parser { ); }, Status => { - self.command_handler.status(); + self.command_handler.status(StatusOutput::Full); }, GetStateInfo => { self.command_handler.state_info(); diff --git a/applications/tari_base_node/windows/README.md b/applications/tari_base_node/windows/README.md index dddf5172b7..defa2e1279 100644 --- a/applications/tari_base_node/windows/README.md +++ b/applications/tari_base_node/windows/README.md @@ -10,7 +10,6 @@ The Tari applications have the following dependencies, which can be installed automatically if selected: - SQLite -- OpenSSL - Tor Services - Redistributable for Microsoft Visual Studio 2019 - XMRig @@ -45,9 +44,7 @@ Notes: |---config | config.toml |---runtime - get_openssl_win.ps1 get_xmrig_win.ps1 - install_openssl.bat install_sqlite.bat install_tor_services.bat install_vs2019_redist.bat @@ -152,17 +149,6 @@ depending on the choices you make when prompted: - Ensure folder containing `sqlite3.dll`, is in the user or system path environment variable (hint: type `path` in a command console to verify). 
-- OpenSSL: - - Download full version of the 64bit Precompiled Binaries for Windows for - [OpenSSL](https://slproweb.com/products/Win32OpenSSL.html) - - Install using all the default prompts - - **Note**: It is important that the dlls are available in the path. To test: - ``` - where libcrypto-1_1-x64.dll - where libssl-1_1-x64.dll - ``` - - Tor Services - Donwload [Tor Windows Expert Bundle](https://www.torproject.org/download/tor/). diff --git a/applications/tari_base_node/windows/runtime/source_base_node_env.bat b/applications/tari_base_node/windows/runtime/source_base_node_env.bat index 4152a03999..c0ad98aac5 100644 --- a/applications/tari_base_node/windows/runtime/source_base_node_env.bat +++ b/applications/tari_base_node/windows/runtime/source_base_node_env.bat @@ -32,16 +32,6 @@ if ["%sqlite_runtime%"]==[""] ( pause exit /b 10101 ) -if ["%openssl_runtime_1%"]==[""] ( - echo Problem with "sopenssl_runtime" environment variable: '%openssl_runtime_1%' - pause - exit /b 10101 -) -if ["%openssl_runtime_2%"]==[""] ( - echo Problem with "sopenssl_runtime" environment variable: '%openssl_runtime_2%' - pause - exit /b 10101 -) rem Verify SQLite's location and prepend the default location to the system path if it exist if exist "%TARI_SQLITE_DIR%\%sqlite_runtime%" ( @@ -72,38 +62,6 @@ if exist "%TARI_SQLITE_DIR%\%sqlite_runtime%" ( ) ) -rem Verify OpenSSL's location -set FOUND_OPENSSL= -if exist "%my_exe_path%\%openssl_runtime_1%" ( - if exist "%my_exe_path%\%openssl_runtime_2%" ( - echo. - echo Using OpenSSL dlls found in "%my_exe_path%" - echo. - set FOUND_OPENSSL=true - ) -) -if not defined FOUND_OPENSSL ( - set FOUND_1= - set FOUND_2= - for %%X in (%openssl_runtime_1%) do (set FOUND_1=%%~$PATH:X) - for %%X in (%openssl_runtime_2%) do (set FOUND_2=%%~$PATH:X) - if defined FOUND_1 ( - if defined FOUND_2 ( - set FOUND_OPENSSL=true - echo. 
- echo OpenSSL dlls found in system path: - where "%openssl_runtime_1%" - where "%openssl_runtime_2%" - ) - ) -) -if not defined FOUND_OPENSSL ( - echo. - echo Note: OpenSSL dlls not found in "%my_exe_path%" or in the system path; this may be a problem - echo. - pause -) - rem Find the base node executable if exist "%my_exe_path%\%my_exe%" ( set base_node=%my_exe_path%\%my_exe% diff --git a/applications/tari_base_node/windows/runtime/start_tari_base_node.bat b/applications/tari_base_node/windows/runtime/start_tari_base_node.bat index a11b7a6049..e9e2412cf2 100644 --- a/applications/tari_base_node/windows/runtime/start_tari_base_node.bat +++ b/applications/tari_base_node/windows/runtime/start_tari_base_node.bat @@ -6,8 +6,6 @@ echo ---------------------------- rem These are the base node executable and SQLite dynamic link library names set my_exe=tari_base_node.exe set sqlite_runtime=sqlite3.dll -set openssl_runtime_1=libcrypto-1_1-x64.dll -set openssl_runtime_2=libssl-1_1-x64.dll rem This is the location of the configuration and identity files set config_path=%~dp0..\config diff --git a/applications/tari_console_wallet/Cargo.toml b/applications/tari_console_wallet/Cargo.toml index 1ed493e360..5163910d37 100644 --- a/applications/tari_console_wallet/Cargo.toml +++ b/applications/tari_console_wallet/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tari_console_wallet" -version = "0.9.1" +version = "0.9.5" authors = ["The Tari Development Community"] edition = "2018" @@ -26,6 +26,7 @@ unicode-width = "0.1" unicode-segmentation = "1.6.0" log = { version = "0.4.8", features = ["std"] } qrcode = { version = "0.12" } +regex = "1.5.4" rpassword = "5.0" rustyline = "6.0" strum = "^0.19" @@ -34,6 +35,15 @@ tokio = { version="0.2.10", features = ["signal"] } thiserror = "1.0.20" tonic = "0.2" +tracing = "0.1.26" +tracing-opentelemetry = "0.15.0" +tracing-subscriber = "0.2.20" + +# network tracing, rt-tokio for async batch export +opentelemetry = { version = "0.16", 
default-features = false, features = ["trace","rt-tokio"] } +opentelemetry-jaeger = { version="0.15", features=["rt-tokio"]} + + [dependencies.tari_core] path = "../../base_layer/core" version = "^0.9" diff --git a/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs b/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs index e0edaf86a1..b7e53ae6a2 100644 --- a/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs +++ b/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs @@ -266,7 +266,6 @@ impl wallet_server::Wallet for WalletGrpcServer { .to_vec(), message: txn.message, valid: txn.valid, - is_found: true, }), }; match sender.send(Ok(response)).await { @@ -394,6 +393,33 @@ impl wallet_server::Wallet for WalletGrpcServer { Ok(Response::new(resp)) } + + async fn cancel_transaction( + &self, + request: Request, + ) -> Result, Status> { + let message = request.into_inner(); + debug!( + target: LOG_TARGET, + "Incoming gRPC request to Cancel Transaction (TxId: {})", message.tx_id, + ); + let mut transaction_service = self.get_transaction_service(); + + match transaction_service.cancel_transaction(message.tx_id).await { + Ok(_) => { + return Ok(Response::new(tari_rpc::CancelTransactionResponse { + is_success: true, + failure_message: "".to_string(), + })) + }, + Err(e) => { + return Ok(Response::new(tari_rpc::CancelTransactionResponse { + is_success: false, + failure_message: e.to_string(), + })) + }, + } + } } fn convert_wallet_transaction_into_transaction_info( @@ -415,7 +441,6 @@ fn convert_wallet_transaction_into_transaction_info( timestamp: Some(naive_datetime_to_timestamp(tx.timestamp)), message: tx.message, valid: true, - is_found: true, }, PendingOutbound(tx) => TransactionInfo { tx_id: tx.tx_id, @@ -430,7 +455,6 @@ fn convert_wallet_transaction_into_transaction_info( timestamp: Some(naive_datetime_to_timestamp(tx.timestamp)), message: tx.message, valid: true, - is_found: true, }, Completed(tx) => TransactionInfo { 
tx_id: tx.tx_id, @@ -449,7 +473,6 @@ fn convert_wallet_transaction_into_transaction_info( .unwrap_or_default(), message: tx.message, valid: tx.valid, - is_found: true, }, } } diff --git a/applications/tari_console_wallet/src/init/mod.rs b/applications/tari_console_wallet/src/init/mod.rs index 0348ebc319..b5c0c9d805 100644 --- a/applications/tari_console_wallet/src/init/mod.rs +++ b/applications/tari_console_wallet/src/init/mod.rs @@ -325,7 +325,6 @@ pub async fn init_wallet( peer_database_name: "peers".to_string(), max_concurrent_inbound_tasks: 100, outbound_buffer_size: 100, - // TODO - make this configurable dht: DhtConfig { database_url: DbConnectionUrl::File(config.data_dir.join("dht-console-wallet.db")), auto_join: true, @@ -335,7 +334,7 @@ pub async fn init_wallet( dedup_cache_capacity: config.dedup_cache_capacity, ..Default::default() }, - // TODO: This should be false unless testing locally - make this configurable + // This should be false unless testing locally allow_test_addresses: config.allow_test_addresses, listener_liveness_allowlist_cidrs: Vec::new(), listener_liveness_max_sessions: 0, @@ -348,10 +347,11 @@ pub async fn init_wallet( let base_node_service_config = BaseNodeServiceConfig::new( config.wallet_base_node_service_refresh_interval, config.wallet_base_node_service_request_max_age, + config.base_node_event_channel_size, ); let factories = CryptoFactories::default(); - let mut wallet_config = WalletConfig::new( + let wallet_config = WalletConfig::new( comms_config.clone(), factories, Some(TransactionServiceConfig { @@ -363,20 +363,25 @@ pub async fn init_wallet( config.transaction_routing_mechanism.clone(), ), num_confirmations_required: config.transaction_num_confirmations_required, + transaction_event_channel_size: config.transaction_event_channel_size, ..Default::default() }), Some(OutputManagerServiceConfig { base_node_query_timeout: config.base_node_query_timeout, prevent_fee_gt_amount: config.prevent_fee_gt_amount, + event_channel_size: 
config.output_manager_event_channel_size, + base_node_update_publisher_channel_size: config.base_node_update_publisher_channel_size, ..Default::default() }), config.network.into(), Some(base_node_service_config), - Some(config.buffer_size_base_node_wallet), - Some(config.buffer_rate_limit_base_node_wallet), + Some(std::cmp::max( + BASE_NODE_BUFFER_MIN_SIZE, + config.buffer_size_console_wallet, + )), + Some(config.buffer_rate_limit_console_wallet), Some(config.scan_for_utxo_interval), ); - wallet_config.buffer_size = std::cmp::max(BASE_NODE_BUFFER_MIN_SIZE, config.buffer_size_base_node); let mut wallet = Wallet::start( wallet_config, diff --git a/applications/tari_console_wallet/src/main.rs b/applications/tari_console_wallet/src/main.rs index 25f749034e..4042eb27eb 100644 --- a/applications/tari_console_wallet/src/main.rs +++ b/applications/tari_console_wallet/src/main.rs @@ -19,12 +19,14 @@ use init::{ WalletBoot, }; use log::*; +use opentelemetry::{self, global, KeyValue}; use recovery::prompt_private_key_from_seed_words; -use std::process; +use std::{env, process}; use tari_app_utilities::{consts, initialization::init_configuration, utilities::ExitCodes}; use tari_common::{configuration::bootstrap::ApplicationType, ConfigBootstrap}; use tari_core::transactions::types::PrivateKey; use tari_shutdown::Shutdown; +use tracing_subscriber::{layer::SubscriberExt, Registry}; use wallet_modes::{command_mode, grpc_mode, recovery_mode, script_mode, tui_mode, WalletMode}; pub const LOG_TARGET: &str = "wallet::console_wallet::main"; @@ -90,6 +92,7 @@ fn main_inner() -> Result<(), ExitCodes> { info!(target: LOG_TARGET, "Default configuration created. 
Done."); } + enable_tracing_if_specified(&bootstrap); // get command line password if provided let arg_password = bootstrap.password.clone(); let seed_words_file_name = bootstrap.seed_words_file_name.clone(); @@ -185,3 +188,22 @@ fn get_recovery_master_key( Ok(None) } } + +fn enable_tracing_if_specified(bootstrap: &ConfigBootstrap) { + if bootstrap.tracing_enabled { + // To run: docker run -d -p6831:6831/udp -p6832:6832/udp -p16686:16686 -p14268:14268 \ + // jaegertracing/all-in-one:latest + global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new()); + let tracer = opentelemetry_jaeger::new_pipeline() + .with_service_name("tari::console_wallet") + .with_tags(vec![KeyValue::new("pid", process::id().to_string()), KeyValue::new("current_exe", env::current_exe().unwrap().to_str().unwrap_or_default().to_owned())]) + // TODO: uncomment when using tokio 1 + // .install_batch(opentelemetry::runtime::Tokio) + .install_simple() + .unwrap(); + let telemetry = tracing_opentelemetry::layer().with_tracer(tracer); + let subscriber = Registry::default().with(telemetry); + tracing::subscriber::set_global_default(subscriber) + .expect("Tracing could not be set. 
Try running without `--tracing-enabled`"); + } +} diff --git a/applications/tari_console_wallet/src/recovery.rs b/applications/tari_console_wallet/src/recovery.rs index 547cdd673c..887995f0a5 100644 --- a/applications/tari_console_wallet/src/recovery.rs +++ b/applications/tari_console_wallet/src/recovery.rs @@ -83,11 +83,14 @@ pub async fn wallet_recovery(wallet: &WalletSqlite, base_node_config: &PeerConfi let shutdown = Shutdown::new(); let shutdown_signal = shutdown.to_signal(); - let peer_public_keys = base_node_config - .get_all_peers() - .iter() - .map(|peer| peer.public_key.clone()) - .collect(); + let peers = base_node_config.get_all_peers(); + + let peer_manager = wallet.comms.peer_manager(); + let mut peer_public_keys = Vec::with_capacity(peers.len()); + for peer in peers { + peer_public_keys.push(peer.public_key.clone()); + peer_manager.add_peer(peer).await?; + } let mut recovery_task = UtxoScannerService::::builder() .with_peers(peer_public_keys) @@ -131,8 +134,12 @@ pub async fn wallet_recovery(wallet: &WalletSqlite, base_node_config: &PeerConfi Ok(UtxoScannerEvent::ScanningRoundFailed { num_retries, retry_limit, + error, }) => { - let s = format!("Failed to sync. 
Attempt {} of {}", num_retries, retry_limit); + let s = format!( + "Attempt {}/{}: Failed to complete wallet recovery {}.", + num_retries, retry_limit, error + ); println!("{}", s); warn!(target: LOG_TARGET, "{}", s); }, diff --git a/applications/tari_console_wallet/src/ui/app.rs b/applications/tari_console_wallet/src/ui/app.rs index b9df53d852..028a37de81 100644 --- a/applications/tari_console_wallet/src/ui/app.rs +++ b/applications/tari_console_wallet/src/ui/app.rs @@ -25,6 +25,7 @@ use crate::{ ui::{ components::{ base_node::BaseNode, + log_tab::LogTab, menu::Menu, network_tab::NetworkTab, receive_tab::ReceiveTab, @@ -85,7 +86,8 @@ impl App { .add("Transactions".into(), Box::new(TransactionsTab::new())) .add("Send".into(), Box::new(SendTab::new())) .add("Receive".into(), Box::new(ReceiveTab::new())) - .add("Network".into(), Box::new(NetworkTab::new(base_node_selected))); + .add("Network".into(), Box::new(NetworkTab::new(base_node_selected))) + .add("Log".into(), Box::new(LogTab::new())); let base_node_status = BaseNode::new(); let menu = Menu::new(); diff --git a/applications/tari_console_wallet/src/ui/components/base_node.rs b/applications/tari_console_wallet/src/ui/components/base_node.rs index 4b2094024f..d9a271e291 100644 --- a/applications/tari_console_wallet/src/ui/components/base_node.rs +++ b/applications/tari_console_wallet/src/ui/components/base_node.rs @@ -21,7 +21,7 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
use crate::ui::{components::Component, state::AppState}; -use tari_wallet::base_node_service::service::OnlineState; +use tari_wallet::connectivity_service::OnlineStatus; use tui::{ backend::Backend, layout::Rect, @@ -45,17 +45,17 @@ impl Component for BaseNode { let base_node_state = app_state.get_base_node_state(); let chain_info = match base_node_state.online { - OnlineState::Connecting => Spans::from(vec![ + OnlineStatus::Connecting => Spans::from(vec![ Span::styled("Chain Tip:", Style::default().fg(Color::Magenta)), Span::raw(" "), Span::styled("Connecting...", Style::default().fg(Color::Reset)), ]), - OnlineState::Offline => Spans::from(vec![ + OnlineStatus::Offline => Spans::from(vec![ Span::styled("Chain Tip:", Style::default().fg(Color::Magenta)), Span::raw(" "), Span::styled("Offline", Style::default().fg(Color::Red)), ]), - OnlineState::Online => { + OnlineStatus::Online => { if let Some(metadata) = base_node_state.clone().chain_metadata { let tip = metadata.height_of_longest_chain(); diff --git a/applications/tari_console_wallet/src/ui/components/log_tab.rs b/applications/tari_console_wallet/src/ui/components/log_tab.rs new file mode 100644 index 0000000000..9efaf0faf8 --- /dev/null +++ b/applications/tari_console_wallet/src/ui/components/log_tab.rs @@ -0,0 +1,107 @@ +use crate::ui::{components::Component, state::AppState}; +use regex::Regex; +use std::fs; +use tui::{ + backend::Backend, + layout::{Constraint, Layout, Rect}, + style::{Color, Modifier, Style}, + text::{Span, Spans}, + widgets::{Block, Borders, Paragraph, Wrap}, + Frame, +}; + +pub struct LogTab { + scroll: u16, + re: Regex, +} + +impl LogTab { + pub fn new() -> Self { + Self { scroll: 1, + re : Regex::new( + r"(?P\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d*) \[(?P[^\]]*)\] (?PINFO|WARN|DEBUG|ERROR|TRACE)\s*(?P .*)", + ) + .unwrap() + } + } + + // Format the log line nicely. 
If it cannot be parsed then return raw line + fn format_line(&self, line: String) -> Spans { + match self.re.captures(line.as_str()) { + Some(caps) => Spans::from(vec![ + Span::styled(caps["timestamp"].to_string(), Style::default().fg(Color::LightGreen)), + Span::raw(" ["), + Span::styled(caps["target"].to_string(), Style::default().fg(Color::LightMagenta)), + Span::raw("] "), + Span::styled( + caps["level"].to_string(), + Style::default().fg(match &caps["level"] { + "ERROR" => Color::LightRed, + "WARN" => Color::LightYellow, + _ => Color::LightMagenta, + }), + ), + Span::raw(caps["message"].to_string()), + ]), + // In case the line is not well formatted, just print as it is + None => Spans::from(vec![Span::raw(line)]), + } + } + + fn draw_logs(&mut self, f: &mut Frame, area: Rect, _app_state: &AppState) + where B: Backend { + // First render the border and calculate the inner area + let block = Block::default().borders(Borders::ALL).title(Span::styled( + "StdOut log", + Style::default().fg(Color::White).add_modifier(Modifier::BOLD), + )); + f.render_widget(block, area); + let log_area = Layout::default() + .constraints([Constraint::Min(42)].as_ref()) + .margin(1) + .split(area); + // Read the log file + let content = match fs::read_to_string("log/wallet/stdout.log") { + Ok(content) => content, + Err(err) => format!("Error reading log : {}", err), + }; + // Convert the content into Spans + let mut text: Vec = content + .split('\n') + .map(|line| self.format_line(line.to_string())) + .collect(); + // We want newest at the top + text.reverse(); + // Render the Paragraph + let paragraph = Paragraph::new(text.clone()) + .wrap(Wrap { trim: true }) + .scroll((self.scroll, 0)); + f.render_widget(paragraph, log_area[0]); + } +} + +impl Component for LogTab { + fn draw(&mut self, f: &mut Frame, area: Rect, app_state: &AppState) { + let areas = Layout::default() + .constraints([Constraint::Min(42)].as_ref()) + .split(area); + + self.draw_logs(f, areas[0], app_state); + } + + 
fn on_key(&mut self, _app_state: &mut AppState, _c: char) {} + + fn on_up(&mut self, _app_state: &mut AppState) { + if self.scroll > 1 { + self.scroll -= 1; + } + } + + fn on_down(&mut self, _app_state: &mut AppState) { + self.scroll += 1; + } + + fn on_esc(&mut self, _: &mut AppState) {} + + fn on_backspace(&mut self, _app_state: &mut AppState) {} +} diff --git a/applications/tari_console_wallet/src/ui/components/mod.rs b/applications/tari_console_wallet/src/ui/components/mod.rs index 562c3d5763..acfd103c32 100644 --- a/applications/tari_console_wallet/src/ui/components/mod.rs +++ b/applications/tari_console_wallet/src/ui/components/mod.rs @@ -23,6 +23,7 @@ pub mod balance; pub mod base_node; mod component; +pub mod log_tab; pub(crate) mod menu; pub mod network_tab; pub mod receive_tab; diff --git a/applications/tari_console_wallet/src/ui/components/receive_tab.rs b/applications/tari_console_wallet/src/ui/components/receive_tab.rs index 7058bbe2b2..3bb265ae58 100644 --- a/applications/tari_console_wallet/src/ui/components/receive_tab.rs +++ b/applications/tari_console_wallet/src/ui/components/receive_tab.rs @@ -45,6 +45,7 @@ impl ReceiveTab { Constraint::Length(3), Constraint::Length(3), Constraint::Length(3), + Constraint::Length(3), Constraint::Min(1), ] .as_ref(), @@ -64,15 +65,27 @@ impl ReceiveTab { let public_key = Paragraph::new(app_state.get_identity().public_key.as_str()); f.render_widget(public_key, label_layout[0]); - // Public Address + // NodeId let block = Block::default() .borders(Borders::ALL) - .title(Span::styled("Public Address", Style::default().fg(Color::White))); + .title(Span::styled("Node ID", Style::default().fg(Color::White))); f.render_widget(block, info_chunks[2]); let label_layout = Layout::default() .constraints([Constraint::Length(1)].as_ref()) .margin(1) .split(info_chunks[2]); + let node_id = Paragraph::new(app_state.get_identity().node_id.as_str()); + f.render_widget(node_id, label_layout[0]); + + // Public Address + let block = 
Block::default() + .borders(Borders::ALL) + .title(Span::styled("Public Address", Style::default().fg(Color::White))); + f.render_widget(block, info_chunks[3]); + let label_layout = Layout::default() + .constraints([Constraint::Length(1)].as_ref()) + .margin(1) + .split(info_chunks[3]); let public_address = Paragraph::new(app_state.get_identity().public_address.as_str()); f.render_widget(public_address, label_layout[0]); @@ -80,11 +93,11 @@ impl ReceiveTab { let block = Block::default() .borders(Borders::ALL) .title(Span::styled("Emoji ID", Style::default().fg(Color::White))); - f.render_widget(block, info_chunks[3]); + f.render_widget(block, info_chunks[4]); let label_layout = Layout::default() .constraints([Constraint::Length(1)].as_ref()) .margin(1) - .split(info_chunks[3]); + .split(info_chunks[4]); let emoji_id = Paragraph::new(app_state.get_identity().emoji_id.as_str()); f.render_widget(emoji_id, label_layout[0]); } diff --git a/applications/tari_console_wallet/src/ui/components/send_tab.rs b/applications/tari_console_wallet/src/ui/components/send_tab.rs index 845abbfff4..2849ae00bb 100644 --- a/applications/tari_console_wallet/src/ui/components/send_tab.rs +++ b/applications/tari_console_wallet/src/ui/components/send_tab.rs @@ -626,13 +626,17 @@ impl Component for SendTab { } fn on_key(&mut self, app_state: &mut AppState, c: char) { - if self.error_message.is_some() && '\n' == c { - self.error_message = None; + if self.error_message.is_some() { + if '\n' == c { + self.error_message = None; + } return; } - if self.success_message.is_some() && '\n' == c { - self.success_message = None; + if self.success_message.is_some() { + if '\n' == c { + self.success_message = None; + } return; } diff --git a/applications/tari_console_wallet/src/ui/components/transactions_tab.rs b/applications/tari_console_wallet/src/ui/components/transactions_tab.rs index d91f2c1bf6..b3ca589452 100644 --- a/applications/tari_console_wallet/src/ui/components/transactions_tab.rs +++ 
b/applications/tari_console_wallet/src/ui/components/transactions_tab.rs @@ -552,6 +552,9 @@ impl Component for TransactionsTab { } fn on_up(&mut self, app_state: &mut AppState) { + if self.confirmation_dialog { + return; + } match self.selected_tx_list { SelectedTransactionList::None => {}, SelectedTransactionList::PendingTxs => { @@ -575,6 +578,9 @@ impl Component for TransactionsTab { } fn on_down(&mut self, app_state: &mut AppState) { + if self.confirmation_dialog { + return; + } match self.selected_tx_list { SelectedTransactionList::None => {}, SelectedTransactionList::PendingTxs => { @@ -602,6 +608,7 @@ impl Component for TransactionsTab { self.pending_list_state.select(None); self.completed_list_state.select(None); self.detailed_transaction = None; + self.confirmation_dialog = false; } } diff --git a/applications/tari_console_wallet/src/ui/state/app_state.rs b/applications/tari_console_wallet/src/ui/state/app_state.rs index 5aced2b402..6d9293ef47 100644 --- a/applications/tari_console_wallet/src/ui/state/app_state.rs +++ b/applications/tari_console_wallet/src/ui/state/app_state.rs @@ -37,7 +37,11 @@ use bitflags::bitflags; use futures::{stream::Fuse, StreamExt}; use log::*; use qrcode::{render::unicode, QrCode}; -use std::{collections::HashMap, sync::Arc}; +use std::{ + collections::HashMap, + sync::Arc, + time::{Duration, Instant}, +}; use tari_common::{configuration::Network, GlobalConfig}; use tari_comms::{ connectivity::ConnectivityEventRx, @@ -54,6 +58,7 @@ use tari_crypto::{ristretto::RistrettoPublicKey, tari_utilities::hex::Hex}; use tari_shutdown::ShutdownSignal; use tari_wallet::{ base_node_service::{handle::BaseNodeEventReceiver, service::BaseNodeState}, + connectivity_service::WalletConnectivityHandle, contacts_service::storage::database::Contact, output_manager_service::{handle::OutputManagerEventReceiver, service::Balance, TxId, TxoValidationType}, transaction_service::{ @@ -64,7 +69,10 @@ use tari_wallet::{ util::emoji::EmojiId, WalletSqlite, 
}; -use tokio::sync::{watch, RwLock}; +use tokio::{ + sync::{watch, RwLock}, + task, +}; const LOG_TARGET: &str = "wallet::console_wallet::app_state"; @@ -72,8 +80,10 @@ const LOG_TARGET: &str = "wallet::console_wallet::app_state"; pub struct AppState { inner: Arc>, cached_data: AppStateData, + cache_update_cooldown: Option, completed_tx_filter: TransactionFilter, node_config: GlobalConfig, + config: AppStateConfig, } impl AppState { @@ -91,8 +101,10 @@ impl AppState { Self { inner: Arc::new(RwLock::new(inner)), cached_data, + cache_update_cooldown: None, completed_tx_filter: TransactionFilter::ABANDONED_COINBASES, node_config, + config: AppStateConfig::default(), } } @@ -126,10 +138,18 @@ impl AppState { } pub async fn update_cache(&mut self) { - let mut inner = self.inner.write().await; - let updated_state = inner.get_updated_app_state(); - if let Some(data) = updated_state { - self.cached_data = data; + let update = match self.cache_update_cooldown { + Some(last_update) => last_update.elapsed() > self.config.cache_update_cooldown, + None => true, + }; + + if update { + let mut inner = self.inner.write().await; + let updated_state = inner.get_updated_app_state(); + if let Some(data) = updated_state { + self.cached_data = data; + self.cache_update_cooldown = Some(Instant::now()); + } } } @@ -633,6 +653,10 @@ impl AppStateInner { self.wallet.comms.connectivity().get_event_subscription().fuse() } + pub fn get_wallet_connectivity(&self) -> WalletConnectivityHandle { + self.wallet.wallet_connectivity.clone() + } + pub fn get_base_node_event_stream(&self) -> Fuse { self.wallet.base_node_service.clone().get_event_stream_fused() } @@ -649,15 +673,7 @@ impl AppStateInner { ) .await?; - if let Err(e) = self - .wallet - .transaction_service - .validate_transactions(ValidationRetryStrategy::UntilSuccess) - .await - { - error!(target: LOG_TARGET, "Problem validating transactions: {}", e); - } - self.validate_outputs().await; + self.spawn_transaction_revalidation_task(); 
self.data.base_node_previous = self.data.base_node_selected.clone(); self.data.base_node_selected = peer.clone(); @@ -685,15 +701,7 @@ impl AppStateInner { ) .await?; - if let Err(e) = self - .wallet - .transaction_service - .validate_transactions(ValidationRetryStrategy::UntilSuccess) - .await - { - error!(target: LOG_TARGET, "Problem validating transactions: {}", e); - } - self.validate_outputs().await; + self.spawn_transaction_revalidation_task(); self.data.base_node_previous = self.data.base_node_selected.clone(); self.data.base_node_selected = peer.clone(); @@ -735,15 +743,7 @@ impl AppStateInner { ) .await?; - if let Err(e) = self - .wallet - .transaction_service - .validate_transactions(ValidationRetryStrategy::UntilSuccess) - .await - { - error!(target: LOG_TARGET, "Problem validating transactions: {}", e); - } - self.validate_outputs().await; + self.spawn_transaction_revalidation_task(); self.data.base_node_peer_custom = None; self.data.base_node_selected = previous; @@ -762,33 +762,39 @@ impl AppStateInner { Ok(()) } - pub async fn validate_outputs(&mut self) { - if let Err(e) = self - .wallet - .output_manager_service - .validate_txos(TxoValidationType::Unspent, ValidationRetryStrategy::UntilSuccess) - .await - { - error!(target: LOG_TARGET, "Problem validating UTXOs: {}", e); - } + pub fn spawn_transaction_revalidation_task(&mut self) { + let mut txn_service = self.wallet.transaction_service.clone(); + let mut output_manager_service = self.wallet.output_manager_service.clone(); - if let Err(e) = self - .wallet - .output_manager_service - .validate_txos(TxoValidationType::Spent, ValidationRetryStrategy::UntilSuccess) - .await - { - error!(target: LOG_TARGET, "Problem validating STXOs: {}", e); - } + task::spawn(async move { + if let Err(e) = txn_service + .validate_transactions(ValidationRetryStrategy::UntilSuccess) + .await + { + error!(target: LOG_TARGET, "Problem validating transactions: {}", e); + } - if let Err(e) = self - .wallet - 
.output_manager_service - .validate_txos(TxoValidationType::Invalid, ValidationRetryStrategy::UntilSuccess) - .await - { - error!(target: LOG_TARGET, "Problem validating Invalid TXOs: {}", e); - } + if let Err(e) = output_manager_service + .validate_txos(TxoValidationType::Unspent, ValidationRetryStrategy::UntilSuccess) + .await + { + error!(target: LOG_TARGET, "Problem validating UTXOs: {}", e); + } + + if let Err(e) = output_manager_service + .validate_txos(TxoValidationType::Spent, ValidationRetryStrategy::UntilSuccess) + .await + { + error!(target: LOG_TARGET, "Problem validating STXOs: {}", e); + } + + if let Err(e) = output_manager_service + .validate_txos(TxoValidationType::Invalid, ValidationRetryStrategy::UntilSuccess) + .await + { + error!(target: LOG_TARGET, "Problem validating Invalid TXOs: {}", e); + } + }); } } @@ -832,6 +838,7 @@ impl AppStateData { public_address: node_identity.public_address().to_string(), emoji_id: eid, qr_code: image, + node_id: node_identity.node_id().to_string(), }; let base_node_previous = base_node_selected.clone(); @@ -879,6 +886,7 @@ pub struct MyIdentity { pub public_address: String, pub emoji_id: String, pub qr_code: String, + pub node_id: String, } #[derive(Clone)] @@ -897,3 +905,16 @@ bitflags! 
{ const ABANDONED_COINBASES = 0b0000_0001; } } + +#[derive(Clone)] +struct AppStateConfig { + pub cache_update_cooldown: Duration, +} + +impl Default for AppStateConfig { + fn default() -> Self { + Self { + cache_update_cooldown: Duration::from_secs(2), + } + } +} diff --git a/applications/tari_console_wallet/src/ui/state/wallet_event_monitor.rs b/applications/tari_console_wallet/src/ui/state/wallet_event_monitor.rs index fb0a66b466..2e20999667 100644 --- a/applications/tari_console_wallet/src/ui/state/wallet_event_monitor.rs +++ b/applications/tari_console_wallet/src/ui/state/wallet_event_monitor.rs @@ -54,6 +54,8 @@ impl WalletEventMonitor { .get_output_manager_service_event_stream(); let mut connectivity_events = self.app_state_inner.read().await.get_connectivity_event_stream(); + let wallet_connectivity = self.app_state_inner.read().await.get_wallet_connectivity(); + let mut connectivity_status = wallet_connectivity.get_connectivity_status_watch().fuse(); let mut base_node_events = self.app_state_inner.read().await.get_base_node_event_stream(); @@ -63,7 +65,7 @@ impl WalletEventMonitor { result = transaction_service_events.select_next_some() => { match result { Ok(msg) => { - trace!(target: LOG_TARGET, "Wallet Event Monitor received wallet event {:?}", msg); + trace!(target: LOG_TARGET, "Wallet Event Monitor received wallet transaction service event {:?}", msg); match (*msg).clone() { TransactionEvent::ReceivedFinalizedTransaction(tx_id) => { self.trigger_tx_state_refresh(tx_id).await; @@ -105,17 +107,18 @@ impl WalletEventMonitor { Err(_) => debug!(target: LOG_TARGET, "Lagging read on Transaction Service event broadcast channel"), } }, + status = connectivity_status.select_next_some() => { + trace!(target: LOG_TARGET, "Wallet Event Monitor received wallet connectivity status {:?}", status); + self.trigger_peer_state_refresh().await; + }, result = connectivity_events.select_next_some() => { match result { Ok(msg) => { - trace!(target: LOG_TARGET, "Wallet Event 
Monitor received wallet event {:?}", msg); + trace!(target: LOG_TARGET, "Wallet Event Monitor received wallet connectivity event {:?}", msg); match &*msg { ConnectivityEvent::PeerDisconnected(_) | ConnectivityEvent::ManagedPeerDisconnected(_) | - ConnectivityEvent::PeerConnected(_) | - ConnectivityEvent::PeerBanned(_) | - ConnectivityEvent::PeerOffline(_) | - ConnectivityEvent::PeerConnectionWillClose(_, _) => { + ConnectivityEvent::PeerConnected(_) => { self.trigger_peer_state_refresh().await; }, // Only the above variants trigger state refresh diff --git a/applications/tari_console_wallet/src/ui/widgets/list_state.rs b/applications/tari_console_wallet/src/ui/widgets/list_state.rs index 11d72e8827..c95f259d80 100644 --- a/applications/tari_console_wallet/src/ui/widgets/list_state.rs +++ b/applications/tari_console_wallet/src/ui/widgets/list_state.rs @@ -61,7 +61,16 @@ impl WindowedListState { self.offset = self.start; list_state.select(Some(selected - self.start)); } - + // If the window was resized make sure we are within bounds of the list. + if self.end > self.num_items { + let diff = self.end - self.num_items; + self.start -= diff; + self.end -= diff; + if let Some(selected) = self.selected { + list_state.select(Some(selected - self.start)); + } + self.offset = self.start; + } list_state } @@ -193,4 +202,22 @@ mod test { let window = list_state.get_start_end(); assert_eq!(window, (5, 9)); } + + #[test] + fn test_console_resize() { + let mut list_state = WindowedListState::new(); + // Start with 20 items and console size of 5 + list_state.set_num_items(20); + // Go to the last item (2 times previous). + list_state.previous(); + list_state.previous(); + list_state.get_list_state(5); + assert_eq!(list_state.get_start_end(), (15, 20)); + // Resize to 10. + list_state.get_list_state(10); + assert_eq!(list_state.get_start_end(), (10, 20)); + // Resize to 50. 
+ list_state.get_list_state(50); + assert_eq!(list_state.get_start_end(), (0, 20)); + } } diff --git a/applications/tari_console_wallet/src/utils/crossterm_events.rs b/applications/tari_console_wallet/src/utils/crossterm_events.rs index 09c3dabe49..cbf2927a23 100644 --- a/applications/tari_console_wallet/src/utils/crossterm_events.rs +++ b/applications/tari_console_wallet/src/utils/crossterm_events.rs @@ -70,7 +70,9 @@ impl CrosstermEvents { ) { Ok(true) => { if let Ok(CEvent::Key(key)) = event::read() { - tx.send(Event::Input(key)).unwrap(); + if let Err(e) = tx.send(Event::Input(key)) { + warn!(target: LOG_TARGET, "Error sending Tick event on MPSC channel: {}", e); + } } }, Ok(false) => {}, diff --git a/applications/tari_console_wallet/windows/runtime/source_console_wallet_env.bat b/applications/tari_console_wallet/windows/runtime/source_console_wallet_env.bat index c5daae53d1..9ffdd9471e 100644 --- a/applications/tari_console_wallet/windows/runtime/source_console_wallet_env.bat +++ b/applications/tari_console_wallet/windows/runtime/source_console_wallet_env.bat @@ -32,16 +32,6 @@ if ["%sqlite_runtime%"]==[""] ( pause exit /b 10101 ) -if ["%openssl_runtime_1%"]==[""] ( - echo Problem with "sopenssl_runtime" environment variable: '%openssl_runtime_1%' - pause - exit /b 10101 -) -if ["%openssl_runtime_2%"]==[""] ( - echo Problem with "sopenssl_runtime" environment variable: '%openssl_runtime_2%' - pause - exit /b 10101 -) rem Verify SQLite's location and prepend the default location to the system path if it exist if exist "%TARI_SQLITE_DIR%\%sqlite_runtime%" ( @@ -72,38 +62,6 @@ if exist "%TARI_SQLITE_DIR%\%sqlite_runtime%" ( ) ) -rem Verify OpenSSL's location -set FOUND_OPENSSL= -if exist "%my_exe_path%\%openssl_runtime_1%" ( - if exist "%my_exe_path%\%openssl_runtime_2%" ( - echo. - echo Using OpenSSL dlls found in "%my_exe_path%" - echo. 
- set FOUND_OPENSSL=true - ) -) -if not defined FOUND_OPENSSL ( - set FOUND_1= - set FOUND_2= - for %%X in (%openssl_runtime_1%) do (set FOUND_1=%%~$PATH:X) - for %%X in (%openssl_runtime_2%) do (set FOUND_2=%%~$PATH:X) - if defined FOUND_1 ( - if defined FOUND_2 ( - set FOUND_OPENSSL=true - echo. - echo OpenSSL dlls found in system path: - where "%openssl_runtime_1%" - where "%openssl_runtime_2%" - ) - ) -) -if not defined FOUND_OPENSSL ( - echo. - echo Note: OpenSSL dlls not found in "%my_exe_path%" or in the system path; this may be a problem - echo. - pause -) - rem Find the console wallet executable if exist "%my_exe_path%\%my_exe%" ( set console_wallet=%my_exe_path%\%my_exe% @@ -150,7 +108,8 @@ cd "%base_path%" rem check if Windows Terminal is in path, if so, run it there, to see emojis properly. where /q wt if errorlevel 1 ( -"%console_wallet%" %INIT_FLAG% --config "%config_path%\config.toml" --log_config "%config_path%\log4rs_console_wallet.yml" --base-path "%base_path%" + "%console_wallet%" %INIT_FLAG% --config "%config_path%\config.toml" --log_config "%config_path%\log4rs_console_wallet.yml" --base-path "%base_path%" ) else ( -wt "%console_wallet%" %INIT_FLAG% --config "%config_path%\config.toml" --log_config "%config_path%\log4rs_console_wallet.yml" --base-path "%base_path%" + wt "%console_wallet%" %INIT_FLAG% --config "%config_path%\config.toml" --log_config "%config_path%\log4rs_console_wallet.yml" --base-path "%base_path%" + exit ) diff --git a/applications/tari_console_wallet/windows/runtime/start_tari_console_wallet.bat b/applications/tari_console_wallet/windows/runtime/start_tari_console_wallet.bat index 451f08142c..b41cb22540 100644 --- a/applications/tari_console_wallet/windows/runtime/start_tari_console_wallet.bat +++ b/applications/tari_console_wallet/windows/runtime/start_tari_console_wallet.bat @@ -6,8 +6,6 @@ echo ---------------------------- rem These are the console wallet executable and SQLite dynamic link library names set 
my_exe=tari_console_wallet.exe set sqlite_runtime=sqlite3.dll -set openssl_runtime_1=libcrypto-1_1-x64.dll -set openssl_runtime_2=libssl-1_1-x64.dll rem This is the location of the configuration and identity files set config_path=%~dp0..\config diff --git a/applications/tari_merge_mining_proxy/Cargo.toml b/applications/tari_merge_mining_proxy/Cargo.toml index ca4ba42977..626d0502b6 100644 --- a/applications/tari_merge_mining_proxy/Cargo.toml +++ b/applications/tari_merge_mining_proxy/Cargo.toml @@ -4,7 +4,7 @@ authors = ["The Tari Development Community"] description = "The tari merge miner proxy for xmrig" repository = "https://github.com/tari-project/tari" license = "BSD-3-Clause" -version = "0.9.1" +version = "0.9.5" edition = "2018" [features] diff --git a/applications/tari_merge_mining_proxy/windows/runtime/source_merge_mining_proxy_env.bat b/applications/tari_merge_mining_proxy/windows/runtime/source_merge_mining_proxy_env.bat index 07d401561d..fc8d1e14e8 100644 --- a/applications/tari_merge_mining_proxy/windows/runtime/source_merge_mining_proxy_env.bat +++ b/applications/tari_merge_mining_proxy/windows/runtime/source_merge_mining_proxy_env.bat @@ -28,38 +28,6 @@ if ["%my_exe%"]==[""] ( exit /b 10101 ) -rem Verify OpenSSL's location -set FOUND_OPENSSL= -if exist "%my_exe_path%\%openssl_runtime_1%" ( - if exist "%my_exe_path%\%openssl_runtime_2%" ( - echo. - echo Using OpenSSL dlls found in "%my_exe_path%" - echo. - set FOUND_OPENSSL=true - ) -) -if not defined FOUND_OPENSSL ( - set FOUND_1= - set FOUND_2= - for %%X in (%openssl_runtime_1%) do (set FOUND_1=%%~$PATH:X) - for %%X in (%openssl_runtime_2%) do (set FOUND_2=%%~$PATH:X) - if defined FOUND_1 ( - if defined FOUND_2 ( - set FOUND_OPENSSL=true - echo. - echo OpenSSL dlls found in system path: - where "%openssl_runtime_1%" - where "%openssl_runtime_2%" - ) - ) -) -if not defined FOUND_OPENSSL ( - echo. 
- echo Note: OpenSSL dlls not found in "%my_exe_path%" or in the system path; this may be a problem - echo. - pause -) - rem Find the merge mining proxy executable if exist "%my_exe_path%\%my_exe%" ( set merge_mining_proxy=%my_exe_path%\%my_exe% diff --git a/applications/tari_merge_mining_proxy/windows/runtime/start_tari_merge_mining_proxy.bat b/applications/tari_merge_mining_proxy/windows/runtime/start_tari_merge_mining_proxy.bat index dfb769d2d8..66aa48b50e 100644 --- a/applications/tari_merge_mining_proxy/windows/runtime/start_tari_merge_mining_proxy.bat +++ b/applications/tari_merge_mining_proxy/windows/runtime/start_tari_merge_mining_proxy.bat @@ -5,8 +5,6 @@ echo Set up environment variables echo ---------------------------- rem These is the merge mining proxy executable name set my_exe=tari_merge_mining_proxy.exe -set openssl_runtime_1=libcrypto-1_1-x64.dll -set openssl_runtime_2=libssl-1_1-x64.dll rem This is the location of the configuration and identity files set config_path=%~dp0..\config diff --git a/applications/tari_mining_node/Cargo.toml b/applications/tari_mining_node/Cargo.toml index 48892a1d06..a04938c048 100644 --- a/applications/tari_mining_node/Cargo.toml +++ b/applications/tari_mining_node/Cargo.toml @@ -4,7 +4,7 @@ authors = ["The Tari Development Community"] description = "The tari mining node implementation" repository = "https://github.com/tari-project/tari" license = "BSD-3-Clause" -version = "0.9.1" +version = "0.9.5" edition = "2018" [dependencies] @@ -24,7 +24,14 @@ serde = { version = "1.0", default_features = false, features = ["derive"] } tonic = { version = "0.2", features = ["transport"] } tokio = { version = "0.2", default_features = false, features = ["rt-core"] } thiserror = "1.0" - +jsonrpc = "0.11.0" +reqwest = { version = "0.11", features = ["blocking", "json"] } +serde_json = "1.0.57" +native-tls = "0.2" +bufstream = "0.1" +time = "0.1" +chrono = "0.4" +hex = "0.4.2" [dev-dependencies] tari_crypto = "0.11.1" diff --git 
a/applications/tari_mining_node/src/config.rs b/applications/tari_mining_node/src/config.rs index a4e4693c48..c65446d051 100644 --- a/applications/tari_mining_node/src/config.rs +++ b/applications/tari_mining_node/src/config.rs @@ -49,6 +49,9 @@ pub struct MinerConfig { pub mine_on_tip_only: bool, pub proof_of_work_algo: ProofOfWork, pub validate_tip_timeout_sec: u64, + pub mining_pool_address: String, + pub mining_wallet_address: String, + pub mining_worker_name: String, } #[derive(Serialize, Deserialize, Debug)] @@ -71,6 +74,9 @@ impl Default for MinerConfig { mine_on_tip_only: true, proof_of_work_algo: ProofOfWork::Sha3, validate_tip_timeout_sec: 30, + mining_pool_address: "".to_string(), + mining_wallet_address: "".to_string(), + mining_worker_name: "".to_string(), } } } diff --git a/applications/tari_mining_node/src/main.rs b/applications/tari_mining_node/src/main.rs index bf20227311..b1f538c1e4 100644 --- a/applications/tari_mining_node/src/main.rs +++ b/applications/tari_mining_node/src/main.rs @@ -35,12 +35,24 @@ mod config; mod difficulty; mod errors; mod miner; +mod stratum; mod utils; -use crate::miner::MiningReport; +use crate::{ + miner::MiningReport, + stratum::{stratum_controller::controller::Controller, stratum_miner::miner::StratumMiner}, +}; use errors::{err_empty, MinerError}; use miner::Miner; -use std::{convert::TryFrom, time::Instant}; +use std::{ + convert::TryFrom, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + thread, + time::Instant, +}; /// Application entry point fn main() { @@ -48,8 +60,8 @@ fn main() { match rt.block_on(main_inner()) { Ok(_) => std::process::exit(0), Err(exit_code) => { - eprintln!("Fatal error: {}", exit_code); - error!("Exiting with code: {}", exit_code); + eprintln!("Fatal error: {:?}", exit_code); + error!("Exiting with code: {:?}", exit_code); std::process::exit(exit_code.as_i32()) }, } @@ -61,56 +73,114 @@ async fn main_inner() -> Result<(), ExitCodes> { config.mine_on_tip_only = 
global.mine_on_tip_only; config.num_mining_threads = global.num_mining_threads; config.validate_tip_timeout_sec = global.validate_tip_timeout_sec; + config.mining_worker_name = global.mining_worker_name.clone(); debug!("{:?}", bootstrap); debug!("{:?}", config); - let (mut node_conn, mut wallet_conn) = connect(&config, &global).await.map_err(ExitCodes::grpc)?; + if !config.mining_wallet_address.is_empty() && !config.mining_pool_address.is_empty() { + let url = config.mining_pool_address.clone(); + let mut miner_address = config.mining_wallet_address.clone(); + if !config.mining_worker_name.is_empty() { + miner_address += &format!("{}{}", ".", &config.mining_worker_name); + } + let mut mc = Controller::new().unwrap_or_else(|e| { + panic!("Error loading mining controller: {}", e); + }); + let cc = stratum::controller::Controller::new(&url, Some(miner_address), None, None, mc.tx.clone()) + .unwrap_or_else(|e| { + panic!("Error loading stratum client controller: {:?}", e); + }); + let miner_stopped = Arc::new(AtomicBool::new(false)); + let client_stopped = Arc::new(AtomicBool::new(false)); + + mc.set_client_tx(cc.tx.clone()); + let mut miner = StratumMiner::new(config); + if let Err(e) = miner.start_solvers() { + println!("Error. Please check logs for further info."); + println!("Error details:"); + println!("{:?}", e); + println!("Exiting"); + } + + let miner_stopped_internal = miner_stopped.clone(); + let _ = thread::Builder::new() + .name("mining_controller".to_string()) + .spawn(move || { + if let Err(e) = mc.run(miner) { + error!("Error. 
Please check logs for further info: {:?}", e); + return; + } + miner_stopped_internal.store(true, Ordering::Relaxed); + }); + + let client_stopped_internal = client_stopped.clone(); + let _ = thread::Builder::new() + .name("client_controller".to_string()) + .spawn(move || { + cc.run(); + client_stopped_internal.store(true, Ordering::Relaxed); + }); + + loop { + if miner_stopped.load(Ordering::Relaxed) && client_stopped.load(Ordering::Relaxed) { + thread::sleep(std::time::Duration::from_millis(100)); + break; + } + thread::sleep(std::time::Duration::from_millis(100)); + } + Ok(()) + } else { + config.mine_on_tip_only = global.mine_on_tip_only; + debug!("mine_on_tip_only is {}", config.mine_on_tip_only); - let mut blocks_found: u64 = 0; - loop { - debug!("Starting new mining cycle"); - match mining_cycle(&mut node_conn, &mut wallet_conn, &config, &bootstrap).await { - err @ Err(MinerError::GrpcConnection(_)) | err @ Err(MinerError::GrpcStatus(_)) => { - // Any GRPC error we will try to reconnect with a standard delay - error!("Connection error: {:?}", err); - loop { + let (mut node_conn, mut wallet_conn) = connect(&config, &global).await.map_err(ExitCodes::grpc)?; + + let mut blocks_found: u64 = 0; + loop { + debug!("Starting new mining cycle"); + match mining_cycle(&mut node_conn, &mut wallet_conn, &config, &bootstrap).await { + err @ Err(MinerError::GrpcConnection(_)) | err @ Err(MinerError::GrpcStatus(_)) => { + // Any GRPC error we will try to reconnect with a standard delay + error!("Connection error: {:?}", err); + loop { + debug!("Holding for {:?}", config.wait_timeout()); + delay_for(config.wait_timeout()).await; + match connect(&config, &global).await { + Ok((nc, wc)) => { + node_conn = nc; + wallet_conn = wc; + break; + }, + Err(err) => { + error!("Connection error: {:?}", err); + continue; + }, + } + } + }, + Err(MinerError::MineUntilHeightReached(h)) => { + info!("Prescribed blockchain height {} reached. 
Aborting ...", h); + return Ok(()); + }, + Err(MinerError::MinerLostBlock(h)) => { + info!("Height {} already mined by other node. Restarting ...", h); + }, + Err(err) => { + error!("Error: {:?}", err); debug!("Holding for {:?}", config.wait_timeout()); delay_for(config.wait_timeout()).await; - match connect(&config, &global).await { - Ok((nc, wc)) => { - node_conn = nc; - wallet_conn = wc; - break; - }, - Err(err) => { - error!("Connection error: {:?}", err); - continue; - }, + }, + Ok(submitted) => { + if submitted { + blocks_found += 1; } - } - }, - Err(MinerError::MineUntilHeightReached(h)) => { - info!("Prescribed blockchain height {} reached. Aborting ...", h); - return Ok(()); - }, - Err(MinerError::MinerLostBlock(h)) => { - info!("Height {} already mined by other node. Restarting ...", h); - }, - Err(err) => { - error!("Error: {:?}", err); - debug!("Holding for {:?}", config.wait_timeout()); - delay_for(config.wait_timeout()).await; - }, - Ok(submitted) => { - if submitted { - blocks_found += 1; - } - if let Some(max_blocks) = bootstrap.miner_max_blocks { - if blocks_found >= max_blocks { - return Ok(()); + if let Some(max_blocks) = bootstrap.miner_max_blocks { + if blocks_found >= max_blocks { + return Ok(()); + } } - } - }, + }, + } } } } diff --git a/applications/tari_mining_node/src/stratum/controller.rs b/applications/tari_mining_node/src/stratum/controller.rs new file mode 100644 index 0000000000..e5b13cd6d1 --- /dev/null +++ b/applications/tari_mining_node/src/stratum/controller.rs @@ -0,0 +1,385 @@ +// Copyright 2021. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// +use crate::stratum::{error::Error, stratum_types as types, stream::Stream}; + +use log::*; +use std::{ + self, + io::{BufRead, ErrorKind, Write}, + sync::mpsc, + thread, +}; + +pub struct Controller { + server_url: String, + server_login: Option, + server_password: Option, + server_tls_enabled: Option, + stream: Option, + rx: mpsc::Receiver, + pub tx: mpsc::Sender, + miner_tx: mpsc::Sender, + last_request_id: String, +} + +// fn invalid_error_response() -> types::RpcError { +// types::RpcError { +// code: 0, +// message: "Invalid error response received".to_owned(), +// } +// } + +impl Controller { + pub fn new( + server_url: &str, + server_login: Option, + server_password: Option, + server_tls_enabled: Option, + miner_tx: mpsc::Sender, + ) -> Result { + let (tx, rx) = mpsc::channel::(); + Ok(Controller { + server_url: server_url.to_string(), + server_login, + server_password, + server_tls_enabled, + stream: None, + tx, + rx, + miner_tx, + last_request_id: "".to_string(), + }) + } + + pub fn try_connect(&mut self) -> Result<(), Error> { + self.stream = Some(Stream::new()); + self.stream + .as_mut() + .unwrap() + .try_connect(&self.server_url, self.server_tls_enabled)?; + Ok(()) + } + + fn read_message(&mut self) -> Result, Error> { + if self.stream.is_none() { + return Err(Error::ConnectionError("broken pipe".to_string())); + } + let mut line = String::new(); + match self.stream.as_mut().unwrap().read_line(&mut line) { + Ok(_) => { + // stream is not returning a proper error on disconnect + if line.is_empty() { + return Err(Error::ConnectionError("broken pipe".to_string())); + } + Ok(Some(line)) + }, + Err(ref e) if e.kind() == ErrorKind::BrokenPipe => Err(Error::ConnectionError("broken pipe".to_string())), + Err(ref e) if e.kind() == ErrorKind::WouldBlock => Ok(None), + Err(e) => { + error!("Communication error with stratum server: {}", e); + Err(Error::ConnectionError("broken pipe".to_string())) + }, + } + } + + fn send_message(&mut self, message: &str) -> 
Result<(), Error> { + if self.stream.is_none() { + return Err(Error::ConnectionError(String::from("No server connection"))); + } + debug!("sending request: {}", message); + let _ = self.stream.as_mut().unwrap().write(message.as_bytes()); + let _ = self.stream.as_mut().unwrap().write(b"\n"); + let _ = self.stream.as_mut().unwrap().flush(); + Ok(()) + } + + fn send_message_get_job_template(&mut self) -> Result<(), Error> { + let params = types::worker_identifier::WorkerIdentifier { + id: self.last_request_id.clone(), + }; + let req = types::rpc_request::RpcRequest { + id: Some(self.last_request_id.clone()), + jsonrpc: "2.0".to_string(), + method: "getjob".to_string(), + params: Some(serde_json::to_value(params)?), + }; + let req_str = serde_json::to_string(&req)?; + self.send_message(&req_str) + } + + fn send_login(&mut self) -> Result<(), Error> { + // only send the login request if a login string is configured + let login_str = match self.server_login.clone() { + None => "".to_string(), + Some(server_login) => server_login, + }; + if login_str.is_empty() { + return Ok(()); + } + let password_str = match self.server_password.clone() { + None => "".to_string(), + Some(server_password) => server_password, + }; + let params = types::login_params::LoginParams { + login: login_str, + pass: password_str, + agent: "tari-miner".to_string(), + }; + let req_id = self.last_request_id.to_string(); + let req = types::rpc_request::RpcRequest { + id: if req_id.is_empty() { + Some("0".to_string()) + } else { + Some(req_id) + }, + jsonrpc: "2.0".to_string(), + method: "login".to_string(), + params: Some(serde_json::to_value(params)?), + }; + let req_str = serde_json::to_string(&req)?; + self.send_message(&req_str) + } + + fn send_keepalive(&mut self) -> Result<(), Error> { + let req = types::rpc_request::RpcRequest { + id: Some(self.last_request_id.to_string()), + jsonrpc: "2.0".to_string(), + method: "keepalive".to_string(), + params: None, + }; + let req_str = 
serde_json::to_string(&req)?; + self.send_message(&req_str) + } + + fn send_message_submit(&mut self, job_id: u64, hash: String, nonce: u64) -> Result<(), Error> { + info!("Submitting Solution with hash {} and nonce {}", hash, nonce); + let params_in = types::submit_params::SubmitParams { + id: self.last_request_id.to_string(), + job_id, + hash, + nonce, + }; + let params = serde_json::to_string(¶ms_in)?; + let req = types::rpc_request::RpcRequest { + id: Some(self.last_request_id.to_string()), + jsonrpc: "2.0".to_string(), + method: "submit".to_string(), + params: Some(serde_json::from_str(¶ms)?), + }; + let req_str = serde_json::to_string(&req)?; + self.send_message(&req_str) + } + + fn send_miner_job(&mut self, job: types::job_params::JobParams) -> Result<(), Error> { + let miner_message = types::miner_message::MinerMessage::ReceivedJob( + job.height, + job.job_id.parse::().unwrap(), + job.target.parse::().unwrap(), + job.blob, + ); + self.miner_tx.send(miner_message).map_err(|e| e.into()) + } + + fn send_miner_stop(&mut self) -> Result<(), Error> { + let miner_message = types::miner_message::MinerMessage::StopJob; + self.miner_tx.send(miner_message).map_err(|e| e.into()) + } + + fn send_miner_resume(&mut self) -> Result<(), Error> { + let miner_message = types::miner_message::MinerMessage::ResumeJob; + self.miner_tx.send(miner_message).map_err(|e| e.into()) + } + + pub fn handle_request(&mut self, req: types::rpc_request::RpcRequest) -> Result<(), Error> { + debug!("Received request type: {}", req.method); + match req.method.as_str() { + "job" => match req.params { + None => Err(Error::RequestError("No params in job request".to_owned())), + Some(params) => { + let job = serde_json::from_value::(params)?; + info!( + "Got a new job for height {} with target difficulty {}", + job.height, job.target + ); + self.send_miner_job(job) + }, + }, + _ => Err(Error::RequestError("Unknown method".to_owned())), + } + } + + pub fn handle_response(&mut self, res: 
types::rpc_response::RpcResponse) -> Result<(), Error> { + debug!("Received response with id: {}", res.id); + match res.result { + Some(result) => { + let login_response = serde_json::from_value::(result.clone()); + if let Ok(st) = login_response { + info!("Successful login to server, worker identifier is {}", st.id); + self.last_request_id = st.id; + let _ = self.send_miner_job(st.job); + return Ok(()); + }; + let job_response = serde_json::from_value::(result.clone()); + if let Ok(st) = job_response { + info!( + "Got a new job for height {} with target difficulty {}", + st.height, st.target + ); + let _ = self.send_miner_job(st); + return Ok(()); + }; + let rpc_response = serde_json::from_value::(result); + if let Ok(st) = rpc_response { + let error = st.error; + if let Some(error) = error { + if vec![-1, 24].contains(&error.code) { + // unauthorized + let _ = self.send_login(); + } else if vec![21, 20, 22, 23, 25].contains(&error.code) { + // problem with template + let _ = self.send_message_get_job_template(); + } + } else { + info!("{:?}", st.result); + } + return Ok(()); + }; + }, + None => { + error!("{:?}", res); + }, + } + Ok(()) + } + + #[allow(clippy::cognitive_complexity)] + pub fn run(mut self) { + let server_read_interval = 1; + let server_retry_interval = 5; + let mut next_server_read = time::get_time().sec + server_read_interval; + let mut next_server_retry = time::get_time().sec; + // Request the first job template + thread::sleep(std::time::Duration::from_secs(1)); + let mut was_disconnected = true; + loop { + // Check our connection status, and try to correct if possible + if self.stream.is_none() { + if !was_disconnected { + let _ = self.send_miner_stop(); + } + was_disconnected = true; + if time::get_time().sec > next_server_retry { + if self.try_connect().is_err() { + let status = format!( + "Connection Status: Can't establish server connection to {}. 
Will retry every {} seconds", + self.server_url, server_retry_interval + ); + warn!("{}", status); + self.stream = None; + } else { + let status = format!("Connection Status: Connected to server at {}.", self.server_url); + info!("{}", status); + } + next_server_retry = time::get_time().sec + server_retry_interval; + if self.stream.is_none() { + thread::sleep(std::time::Duration::from_secs(1)); + continue; + } + } + } else { + // get new job template + if was_disconnected { + was_disconnected = false; + let _ = self.send_login(); + let _ = self.send_miner_resume(); + } + // read messages from server + if time::get_time().sec > next_server_read { + match self.read_message() { + Ok(message) => { + if let Some(m) = message { + // figure out what kind of message, + // and dispatch appropriately + debug!("Received message: {}", m); + // Deserialize to see what type of object it is + if let Ok(v) = serde_json::from_str::(&m) { + // Is this a response or request? + if v["method"] == "job" { + // this is a request + match serde_json::from_str::(&m) { + Err(e) => error!("Error parsing request {} : {:?}", m, e), + Ok(request) => { + if let Err(err) = self.handle_request(request) { + error!("Error handling request {} : :{:?}", m, err) + } + }, + } + } else { + // this is a response + match serde_json::from_str::(&m) { + Err(e) => error!("Error parsing response {} : {:?}", m, e), + Ok(response) => { + if let Err(err) = self.handle_response(response) { + error!("Error handling response {} : :{:?}", m, err) + } + }, + } + } + continue; + } else { + error!("Error parsing message: {}", m) + } + } + }, + Err(e) => { + error!("Error reading message: {:?}", e); + self.stream = None; + continue; + }, + } + next_server_read = time::get_time().sec + server_read_interval; + } + } + + // Talk to the miner algorithm + while let Some(message) = self.rx.try_iter().next() { + debug!("Client received message: {:?}", message); + let result = match message { + 
types::client_message::ClientMessage::FoundSolution(job_id, hash, nonce) => { + self.send_message_submit(job_id, hash, nonce) + }, + types::client_message::ClientMessage::KeepAlive => self.send_keepalive(), + types::client_message::ClientMessage::Shutdown => { + debug!("Shutting down client controller"); + return; + }, + }; + if let Err(e) = result { + error!("Mining Controller Error {:?}", e); + self.stream = None; + } + } + thread::sleep(std::time::Duration::from_millis(10)); + } // loop + } +} diff --git a/applications/tari_mining_node/src/stratum/error.rs b/applications/tari_mining_node/src/stratum/error.rs new file mode 100644 index 0000000000..ff2bba63c2 --- /dev/null +++ b/applications/tari_mining_node/src/stratum/error.rs @@ -0,0 +1,48 @@ +// Copyright 2021. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +#[derive(Debug)] +pub enum Error { + ConnectionError(String), + RequestError(String), + // ResponseError(String), + JsonError(String), + GeneralError(String), +} + +impl From for Error { + fn from(error: serde_json::error::Error) -> Self { + Error::JsonError(format!("Failed to parse JSON: {:?}", error)) + } +} + +impl From> for Error { + fn from(error: std::sync::PoisonError) -> Self { + Error::GeneralError(format!("Failed to get lock: {:?}", error)) + } +} + +impl From> for Error { + fn from(error: std::sync::mpsc::SendError) -> Self { + Error::GeneralError(format!("Failed to send to a channel: {:?}", error)) + } +} diff --git a/applications/tari_mining_node/src/stratum/mod.rs b/applications/tari_mining_node/src/stratum/mod.rs new file mode 100644 index 0000000000..b426a7c4c5 --- /dev/null +++ b/applications/tari_mining_node/src/stratum/mod.rs @@ -0,0 +1,28 @@ +// Copyright 2021. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. 
Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +pub mod controller; +pub mod error; +pub mod stratum_controller; +pub mod stratum_miner; +pub mod stratum_types; +pub mod stream; diff --git a/applications/tari_mining_node/src/stratum/stratum_controller/controller.rs b/applications/tari_mining_node/src/stratum/stratum_controller/controller.rs new file mode 100644 index 0000000000..f40e432e6f --- /dev/null +++ b/applications/tari_mining_node/src/stratum/stratum_controller/controller.rs @@ -0,0 +1,117 @@ +// Copyright 2021. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. 
+// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +use crate::{ + stratum, + stratum::{stratum_miner::miner::StratumMiner, stratum_types as types}, +}; +use log::*; +use std::{self, sync::mpsc, thread, time::SystemTime}; + +pub struct Controller { + rx: mpsc::Receiver, + pub tx: mpsc::Sender, + client_tx: Option>, + current_height: u64, + current_job_id: u64, + current_blob: String, + keep_alive_time: SystemTime, +} + +impl Controller { + pub fn new() -> Result { + let (tx, rx) = mpsc::channel::(); + Ok(Controller { + rx, + tx, + client_tx: None, + current_height: 0, + current_job_id: 0, + current_blob: "".to_string(), + keep_alive_time: SystemTime::now(), + }) + } + + pub fn set_client_tx(&mut self, client_tx: mpsc::Sender) { + self.client_tx = Some(client_tx); + } + + pub fn run(&mut self, mut miner: StratumMiner) -> Result<(), stratum::error::Error> { + loop { + while let Some(message) = self.rx.try_iter().next() { + debug!("Miner received message: {:?}", message); + let result: Result<(), stratum::error::Error> = match message { + 
types::miner_message::MinerMessage::ReceivedJob(height, job_id, diff, blob) => { + self.current_height = height; + self.current_job_id = job_id; + self.current_blob = blob; + miner.notify( + self.current_job_id, + self.current_height, + self.current_blob.clone(), + diff, + ) + }, + types::miner_message::MinerMessage::StopJob => { + debug!("Stopping jobs"); + miner.pause_solvers(); + Ok(()) + }, + types::miner_message::MinerMessage::ResumeJob => { + debug!("Resuming jobs"); + miner.resume_solvers(); + Ok(()) + }, + types::miner_message::MinerMessage::Shutdown => { + debug!("Stopping jobs and Shutting down mining controller"); + miner.stop_solvers(); + miner.wait_for_solver_shutdown(); + Ok(()) + }, + }; + if let Err(e) = result { + error!("Mining Controller Error {:?}", e); + } + } + + let solutions = miner.get_solutions(); + if let Some(ss) = solutions { + let _ = self + .client_tx + .as_mut() + .unwrap() + .send(types::client_message::ClientMessage::FoundSolution( + ss.job_id, ss.hash, ss.nonce, + )); + self.keep_alive_time = SystemTime::now(); + } else if self.keep_alive_time.elapsed().unwrap().as_secs() >= 30 { + self.keep_alive_time = SystemTime::now(); + let _ = self + .client_tx + .as_mut() + .unwrap() + .send(types::client_message::ClientMessage::KeepAlive); + } + thread::sleep(std::time::Duration::from_millis(100)); + } + } +} diff --git a/applications/tari_mining_node/src/stratum/stratum_controller/mod.rs b/applications/tari_mining_node/src/stratum/stratum_controller/mod.rs new file mode 100644 index 0000000000..ac9690447b --- /dev/null +++ b/applications/tari_mining_node/src/stratum/stratum_controller/mod.rs @@ -0,0 +1,23 @@ +// Copyright 2021. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. 
+// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +pub(crate) mod controller; diff --git a/applications/tari_mining_node/src/stratum/stratum_miner/control_message.rs b/applications/tari_mining_node/src/stratum/stratum_miner/control_message.rs new file mode 100644 index 0000000000..bd5256c489 --- /dev/null +++ b/applications/tari_mining_node/src/stratum/stratum_miner/control_message.rs @@ -0,0 +1,29 @@ +// Copyright 2021. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +#[derive(Debug)] +pub(crate) enum ControlMessage { + Stop, + Pause, + Resume, + SolverStopped(usize), +} diff --git a/applications/tari_mining_node/src/stratum/stratum_miner/job_shared_data.rs b/applications/tari_mining_node/src/stratum/stratum_miner/job_shared_data.rs new file mode 100644 index 0000000000..64987389d3 --- /dev/null +++ b/applications/tari_mining_node/src/stratum/stratum_miner/job_shared_data.rs @@ -0,0 +1,59 @@ +// Copyright 2021. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// +use crate::stratum::stratum_miner::solution::Solution; +use std::sync::{Arc, RwLock}; +use tari_core::blocks::BlockHeader; + +pub type JobSharedDataType = Arc>; + +pub struct JobSharedData { + pub job_id: u64, + pub height: u64, + pub header: Option, + pub difficulty: u64, + pub solutions: Vec, +} + +impl Default for JobSharedData { + fn default() -> JobSharedData { + JobSharedData { + job_id: 0, + height: 0, + header: None, + difficulty: 0, + solutions: Vec::new(), + } + } +} + +impl JobSharedData { + pub fn new(_num_solvers: usize) -> JobSharedData { + JobSharedData { + job_id: 0, + height: 0, + header: None, + difficulty: 1, + solutions: Vec::new(), + } + } +} diff --git a/applications/tari_mining_node/src/stratum/stratum_miner/miner.rs b/applications/tari_mining_node/src/stratum/stratum_miner/miner.rs new file mode 100644 index 0000000000..0f9de8a239 --- /dev/null +++ b/applications/tari_mining_node/src/stratum/stratum_miner/miner.rs @@ -0,0 +1,275 @@ +// Copyright 2021. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +use crate::{ + config::MinerConfig, + difficulty::BlockHeaderSha3, + stratum, + stratum::stratum_miner::{ + control_message::ControlMessage, + job_shared_data::{JobSharedData, JobSharedDataType}, + solution::Solution, + solver_instance::SolverInstance, + }, +}; +use log::*; +use std::{ + convert::TryFrom, + sync::{mpsc, Arc, RwLock}, + thread, + time, +}; +use tari_core::{ + blocks::BlockHeader, + crypto::tari_utilities::{hex::Hex, Hashable}, +}; + +pub struct StratumMiner { + config: MinerConfig, + pub shared_data: Arc>, + control_txs: Vec>, + solver_loop_txs: Vec>, + solver_stopped_rxs: Vec>, +} + +impl StratumMiner { + pub fn new(config: MinerConfig) -> StratumMiner { + let threads = config.num_mining_threads; + StratumMiner { + config, + shared_data: Arc::new(RwLock::new(JobSharedData::new(threads))), + control_txs: vec![], + solver_loop_txs: vec![], + solver_stopped_rxs: vec![], + } + } + + fn solver_thread( + mut solver: SolverInstance, + instance: usize, + shared_data: JobSharedDataType, + control_rx: mpsc::Receiver, + solver_loop_rx: mpsc::Receiver, + solver_stopped_tx: mpsc::Sender, + ) { + let stop_handle = thread::spawn(move || loop { + while let Some(message) = control_rx.iter().next() { + match message { + ControlMessage::Stop => { + info!("Stopping Solvers"); + return; + }, + ControlMessage::Pause => { + info!("Pausing Solvers"); + }, + ControlMessage::Resume => { + info!("Resuming Solvers"); + }, + _ => {}, + }; + } + }); + 
+ let mut paused = true; + loop { + if let Some(message) = solver_loop_rx.try_iter().next() { + debug!("solver_thread - solver_loop_rx got msg: {:?}", message); + match message { + ControlMessage::Stop => break, + ControlMessage::Pause => { + paused = true; + solver.solver_reset = true; + }, + ControlMessage::Resume => paused = false, + _ => {}, + } + } + + if paused { + thread::sleep(time::Duration::from_micros(100)); + continue; + } + + let header = { shared_data.read().unwrap().header.clone() }; + match header { + Some(header) => { + let height = { shared_data.read().unwrap().height }; + let job_id = { shared_data.read().unwrap().job_id }; + let target_difficulty = { shared_data.read().unwrap().difficulty }; + + let mut hasher = BlockHeaderSha3::new(tari_app_grpc::tari_rpc::BlockHeader::from(header)).unwrap(); + + if solver.solver_reset { + hasher.random_nonce(); + solver.current_nonce = hasher.nonce; + solver.solver_reset = false; + } else { + hasher.nonce = solver.current_nonce; + hasher.inc_nonce(); + solver.current_nonce = hasher.nonce; + } + + let difficulty = hasher.difficulty(); + if difficulty >= target_difficulty { + let block_header: BlockHeader = BlockHeader::try_from(hasher.into_header()).unwrap(); + info!( + "Miner found block header with hash {}, nonce {} and difficulty {:?}", + block_header.hash().to_hex(), + solver.current_nonce, + difficulty + ); + debug!( + "Miner found block header with hash {}, difficulty {:?} and data {:?}", + block_header.hash().to_hex(), + difficulty, + block_header + ); + + let still_valid = { height == shared_data.read().unwrap().height }; + if still_valid { + let mut s = shared_data.write().unwrap(); + s.solutions.push(Solution { + height, + job_id, + difficulty: target_difficulty, + hash: block_header.hash().to_hex(), + nonce: block_header.nonce, + }); + } + } + solver.solutions = Solution::default(); + }, + None => { + continue; + }, + } + } + + let _ = stop_handle.join(); + let _ = 
solver_stopped_tx.send(ControlMessage::SolverStopped(instance)); + } + + pub fn start_solvers(&mut self) -> Result<(), stratum::error::Error> { + let num_solvers = self.config.num_mining_threads; + info!("Spawning {} solvers", num_solvers); + let mut solvers = Vec::with_capacity(num_solvers); + while solvers.len() < solvers.capacity() { + solvers.push(SolverInstance::new()?); + } + for (i, s) in solvers.into_iter().enumerate() { + let sd = self.shared_data.clone(); + let (control_tx, control_rx) = mpsc::channel::(); + let (solver_tx, solver_rx) = mpsc::channel::(); + let (solver_stopped_tx, solver_stopped_rx) = mpsc::channel::(); + self.control_txs.push(control_tx); + self.solver_loop_txs.push(solver_tx); + self.solver_stopped_rxs.push(solver_stopped_rx); + thread::spawn(move || { + StratumMiner::solver_thread(s, i, sd, control_rx, solver_rx, solver_stopped_tx); + }); + } + Ok(()) + } + + pub fn notify( + &mut self, + job_id: u64, + height: u64, + blob: String, + difficulty: u64, + ) -> Result<(), stratum::error::Error> { + let header_hex = hex::decode(blob) + .map_err(|_| stratum::error::Error::JsonError("Blob is not a valid hex value".to_string()))?; + let header: BlockHeader = serde_json::from_str(&String::from_utf8_lossy(&header_hex).to_string())?; + + let mut sd = self.shared_data.write().unwrap(); + let paused = if height != sd.height { + // stop/pause any existing jobs if job is for a new + // height + self.pause_solvers(); + true + } else { + false + }; + + sd.job_id = job_id; + sd.height = height; + sd.difficulty = difficulty; + sd.header = Some(header); + if paused { + self.resume_solvers(); + } + Ok(()) + } + + pub fn get_solutions(&self) -> Option { + { + let mut s = self.shared_data.write().unwrap(); + if !s.solutions.is_empty() { + let sol = s.solutions.pop().unwrap(); + return Some(sol); + } + } + None + } + + pub fn stop_solvers(&self) { + for t in self.control_txs.iter() { + let _ = t.send(ControlMessage::Stop); + } + for t in 
self.solver_loop_txs.iter() { + let _ = t.send(ControlMessage::Stop); + } + debug!("Stop message sent"); + } + + pub fn pause_solvers(&self) { + for t in self.control_txs.iter() { + let _ = t.send(ControlMessage::Pause); + } + for t in self.solver_loop_txs.iter() { + let _ = t.send(ControlMessage::Pause); + } + debug!("Pause message sent"); + } + + pub fn resume_solvers(&self) { + for t in self.control_txs.iter() { + let _ = t.send(ControlMessage::Resume); + } + for t in self.solver_loop_txs.iter() { + let _ = t.send(ControlMessage::Resume); + } + debug!("Resume message sent"); + } + + pub fn wait_for_solver_shutdown(&self) { + for r in self.solver_stopped_rxs.iter() { + while let Some(message) = r.iter().next() { + if let ControlMessage::SolverStopped(i) = message { + debug!("Solver stopped: {}", i); + break; + } + } + } + } +} diff --git a/applications/tari_mining_node/src/stratum/stratum_miner/mod.rs b/applications/tari_mining_node/src/stratum/stratum_miner/mod.rs new file mode 100644 index 0000000000..6bc9a9b8bc --- /dev/null +++ b/applications/tari_mining_node/src/stratum/stratum_miner/mod.rs @@ -0,0 +1,27 @@ +// Copyright 2021. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +mod control_message; +mod job_shared_data; +pub(crate) mod miner; +mod solution; +mod solver_instance; diff --git a/applications/tari_mining_node/src/stratum/stratum_miner/solution.rs b/applications/tari_mining_node/src/stratum/stratum_miner/solution.rs new file mode 100644 index 0000000000..74b13431f9 --- /dev/null +++ b/applications/tari_mining_node/src/stratum/stratum_miner/solution.rs @@ -0,0 +1,42 @@ +// Copyright 2021. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +#[derive(Clone)] +pub struct Solution { + pub height: u64, + pub job_id: u64, + pub difficulty: u64, + pub hash: String, + pub nonce: u64, +} + +impl Default for Solution { + fn default() -> Solution { + Solution { + height: 0, + job_id: 0, + difficulty: 0, + hash: "".to_string(), + nonce: 0, + } + } +} diff --git a/applications/tari_mining_node/src/stratum/stratum_miner/solver_instance.rs b/applications/tari_mining_node/src/stratum/stratum_miner/solver_instance.rs new file mode 100644 index 0000000000..926399e1d6 --- /dev/null +++ b/applications/tari_mining_node/src/stratum/stratum_miner/solver_instance.rs @@ -0,0 +1,39 @@ +// Copyright 2021. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. 
Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +use crate::{stratum, stratum::stratum_miner::solution::Solution}; + +pub struct SolverInstance { + pub solutions: Solution, + pub current_nonce: u64, + pub solver_reset: bool, +} + +impl SolverInstance { + pub fn new() -> Result { + Ok(SolverInstance { + solutions: Solution::default(), + current_nonce: u64::default(), + solver_reset: true, + }) + } +} diff --git a/applications/tari_mining_node/src/stratum/stratum_types/client_message.rs b/applications/tari_mining_node/src/stratum/stratum_types/client_message.rs new file mode 100644 index 0000000000..417156e900 --- /dev/null +++ b/applications/tari_mining_node/src/stratum/stratum_types/client_message.rs @@ -0,0 +1,31 @@ +// Copyright 2021. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. 
+// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug)] +pub enum ClientMessage { + // job_id, hash, nonce + FoundSolution(u64, String, u64), + KeepAlive, + Shutdown, +} diff --git a/applications/tari_mining_node/src/stratum/stratum_types/job.rs b/applications/tari_mining_node/src/stratum/stratum_types/job.rs new file mode 100644 index 0000000000..17582c3891 --- /dev/null +++ b/applications/tari_mining_node/src/stratum/stratum_types/job.rs @@ -0,0 +1,32 @@ +// Copyright 2021. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +use serde::{Deserialize, Serialize}; +use tari_core::blocks::Block; + +#[derive(Serialize, Deserialize, Debug)] +pub struct Job { + pub job_id: u64, + pub block: Option, + pub target: u64, + pub height: u64, +} diff --git a/applications/tari_mining_node/src/stratum/stratum_types/job_params.rs b/applications/tari_mining_node/src/stratum/stratum_types/job_params.rs new file mode 100644 index 0000000000..3074086252 --- /dev/null +++ b/applications/tari_mining_node/src/stratum/stratum_types/job_params.rs @@ -0,0 +1,31 @@ +// Copyright 2021. 
The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug)] +pub struct JobParams { + pub job_id: String, + pub blob: String, + pub target: String, + pub height: u64, +} diff --git a/applications/tari_mining_node/src/stratum/stratum_types/login_params.rs b/applications/tari_mining_node/src/stratum/stratum_types/login_params.rs new file mode 100644 index 0000000000..0203f5dec2 --- /dev/null +++ b/applications/tari_mining_node/src/stratum/stratum_types/login_params.rs @@ -0,0 +1,30 @@ +// Copyright 2021. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug)] +pub struct LoginParams { + pub login: String, + pub pass: String, + pub agent: String, +} diff --git a/applications/tari_mining_node/src/stratum/stratum_types/login_response.rs b/applications/tari_mining_node/src/stratum/stratum_types/login_response.rs new file mode 100644 index 0000000000..c53b8f7337 --- /dev/null +++ b/applications/tari_mining_node/src/stratum/stratum_types/login_response.rs @@ -0,0 +1,30 @@ +// Copyright 2021. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +use crate::stratum::stratum_types::job_params::JobParams; +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug)] +pub struct LoginResponse { + pub id: String, + pub job: JobParams, +} diff --git a/applications/tari_mining_node/src/stratum/stratum_types/miner_message.rs b/applications/tari_mining_node/src/stratum/stratum_types/miner_message.rs new file mode 100644 index 0000000000..7ce55714bb --- /dev/null +++ b/applications/tari_mining_node/src/stratum/stratum_types/miner_message.rs @@ -0,0 +1,32 @@ +// Copyright 2021. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. 
Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug)] +pub enum MinerMessage { + // Height, Id, difficulty, HeaderBlob + ReceivedJob(u64, u64, u64, String), + ResumeJob, + StopJob, + Shutdown, +} diff --git a/applications/tari_mining_node/src/stratum/stratum_types/mod.rs b/applications/tari_mining_node/src/stratum/stratum_types/mod.rs new file mode 100644 index 0000000000..432ab296ce --- /dev/null +++ b/applications/tari_mining_node/src/stratum/stratum_types/mod.rs @@ -0,0 +1,34 @@ +// Copyright 2021. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +pub(crate) mod client_message; +pub(crate) mod job; +pub(crate) mod job_params; +pub(crate) mod login_params; +pub(crate) mod login_response; +pub(crate) mod miner_message; +pub(crate) mod rpc_error; +pub(crate) mod rpc_request; +pub(crate) mod rpc_response; +pub(crate) mod submit_params; +pub(crate) mod worker_identifier; +pub(crate) mod worker_status; diff --git a/applications/tari_mining_node/src/stratum/stratum_types/rpc_error.rs b/applications/tari_mining_node/src/stratum/stratum_types/rpc_error.rs new file mode 100644 index 0000000000..118a4977ed --- /dev/null +++ b/applications/tari_mining_node/src/stratum/stratum_types/rpc_error.rs @@ -0,0 +1,29 @@ +// Copyright 2021. 
The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug)] +pub struct RpcError { + pub code: i32, + pub message: String, +} diff --git a/applications/tari_mining_node/src/stratum/stratum_types/rpc_request.rs b/applications/tari_mining_node/src/stratum/stratum_types/rpc_request.rs new file mode 100644 index 0000000000..c5e12eb59f --- /dev/null +++ b/applications/tari_mining_node/src/stratum/stratum_types/rpc_request.rs @@ -0,0 +1,32 @@ +// Copyright 2021. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +use serde::{Deserialize, Serialize}; +use serde_json::Value; + +#[derive(Serialize, Deserialize, Debug)] +pub struct RpcRequest { + pub id: Option, + pub jsonrpc: String, + pub method: String, + pub params: Option, +} diff --git a/applications/tari_mining_node/src/stratum/stratum_types/rpc_response.rs b/applications/tari_mining_node/src/stratum/stratum_types/rpc_response.rs new file mode 100644 index 0000000000..bb1220a1b6 --- /dev/null +++ b/applications/tari_mining_node/src/stratum/stratum_types/rpc_response.rs @@ -0,0 +1,32 @@ +// Copyright 2021. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +use crate::stratum::stratum_types::rpc_error::RpcError; +use serde::{Deserialize, Serialize}; +use serde_json::Value; + +#[derive(Serialize, Deserialize, Debug)] +pub struct RpcResponse { + pub id: String, + pub result: Option, + pub error: Option, +} diff --git a/applications/tari_mining_node/src/stratum/stratum_types/submit_params.rs b/applications/tari_mining_node/src/stratum/stratum_types/submit_params.rs new file mode 100644 index 0000000000..526bc119b4 --- /dev/null +++ b/applications/tari_mining_node/src/stratum/stratum_types/submit_params.rs @@ -0,0 +1,31 @@ +// Copyright 2021. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. 
Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug)] +pub struct SubmitParams { + pub id: String, + pub job_id: u64, + pub nonce: u64, + pub hash: String, +} diff --git a/applications/tari_mining_node/src/stratum/stratum_types/worker_identifier.rs b/applications/tari_mining_node/src/stratum/stratum_types/worker_identifier.rs new file mode 100644 index 0000000000..9fcbbdf95a --- /dev/null +++ b/applications/tari_mining_node/src/stratum/stratum_types/worker_identifier.rs @@ -0,0 +1,28 @@ +// Copyright 2021. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug)] +pub struct WorkerIdentifier { + pub id: String, +} diff --git a/applications/tari_mining_node/src/stratum/stratum_types/worker_status.rs b/applications/tari_mining_node/src/stratum/stratum_types/worker_status.rs new file mode 100644 index 0000000000..142e3fcb67 --- /dev/null +++ b/applications/tari_mining_node/src/stratum/stratum_types/worker_status.rs @@ -0,0 +1,33 @@ +// Copyright 2021. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug)] +pub struct WorkerStatus { + pub id: String, + pub height: u64, + pub difficulty: u64, + pub accepted: u64, + pub rejected: u64, + pub stale: u64, +} diff --git a/applications/tari_mining_node/src/stratum/stream.rs b/applications/tari_mining_node/src/stratum/stream.rs new file mode 100644 index 0000000000..91902aa040 --- /dev/null +++ b/applications/tari_mining_node/src/stratum/stream.rs @@ -0,0 +1,133 @@ +// Copyright 2021. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// +use crate::stratum::error::Error; +use bufstream::BufStream; +use native_tls::{TlsConnector, TlsStream}; +use std::{ + self, + io::{self, BufRead, Read, Write}, + net::TcpStream, +}; + +pub(crate) struct Stream { + stream: Option>, + tls_stream: Option>>, +} + +impl Stream { + pub fn new() -> Stream { + Stream { + stream: None, + tls_stream: None, + } + } + + pub fn try_connect(&mut self, server_url: &str, tls: Option) -> Result<(), Error> { + match TcpStream::connect(server_url) { + Ok(conn) => { + if tls.is_some() && tls.unwrap() { + let connector = TlsConnector::new() + .map_err(|e| Error::ConnectionError(format!("Can't create TLS connector: {:?}", e)))?; + let url_port: Vec<&str> = server_url.split(':').collect(); + let split_url: Vec<&str> = url_port[0].split('.').collect(); + let base_host = format!("{}.{}", split_url[split_url.len() - 2], split_url[split_url.len() - 1]); + let mut stream = connector + .connect(&base_host, conn) + .map_err(|e| Error::ConnectionError(format!("Can't establish TLS connection: {:?}", e)))?; + stream + .get_mut() + .set_nonblocking(true) + .map_err(|e| Error::ConnectionError(format!("Can't switch to nonblocking mode: {:?}", e)))?; + self.tls_stream = Some(BufStream::new(stream)); + } else { + conn.set_nonblocking(true) + .map_err(|e| Error::ConnectionError(format!("Can't switch to nonblocking mode: {:?}", e)))?; + self.stream = Some(BufStream::new(conn)); + } + Ok(()) + }, + Err(e) => Err(Error::ConnectionError(format!("{}", e))), + } + } +} + +impl Write for Stream { + fn write(&mut self, b: &[u8]) -> Result { + if self.tls_stream.is_some() { + self.tls_stream.as_mut().unwrap().write(b) + } else { + self.stream.as_mut().unwrap().write(b) + } + } + + fn flush(&mut self) -> Result<(), std::io::Error> { + if self.tls_stream.is_some() { + self.tls_stream.as_mut().unwrap().flush() + } else { + self.stream.as_mut().unwrap().flush() + } + } +} +impl Read for Stream { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + if 
self.tls_stream.is_some() { + self.tls_stream.as_mut().unwrap().read(buf) + } else { + self.stream.as_mut().unwrap().read(buf) + } + } +} + +impl BufRead for Stream { + fn fill_buf(&mut self) -> io::Result<&[u8]> { + if self.tls_stream.is_some() { + self.tls_stream.as_mut().unwrap().fill_buf() + } else { + self.stream.as_mut().unwrap().fill_buf() + } + } + + fn consume(&mut self, amt: usize) { + if self.tls_stream.is_some() { + self.tls_stream.as_mut().unwrap().consume(amt) + } else { + self.stream.as_mut().unwrap().consume(amt) + } + } + + fn read_until(&mut self, byte: u8, buf: &mut Vec) -> io::Result { + if self.tls_stream.is_some() { + self.tls_stream.as_mut().unwrap().read_until(byte, buf) + } else { + self.stream.as_mut().unwrap().read_until(byte, buf) + } + } + + fn read_line(&mut self, string: &mut String) -> io::Result { + if self.tls_stream.is_some() { + self.tls_stream.as_mut().unwrap().read_line(string) + } else { + self.stream.as_mut().unwrap().read_line(string) + } + } +} diff --git a/applications/tari_stratum_transcoder/Cargo.toml b/applications/tari_stratum_transcoder/Cargo.toml new file mode 100644 index 0000000000..29f95c82da --- /dev/null +++ b/applications/tari_stratum_transcoder/Cargo.toml @@ -0,0 +1,49 @@ +[package] +name = "tari_stratum_transcoder" +authors = ["The Tari Development Community"] +description = "The tari stratum transcoder for miningcore" +repository = "https://github.com/tari-project/tari" +license = "BSD-3-Clause" +version = "0.9.0" +edition = "2018" + +[features] +default = [] +envlog = ["env_logger"] + +[dependencies] +tari_app_grpc = { path = "../tari_app_grpc" } +tari_common = { path = "../../common" } +tari_core = { path = "../../base_layer/core", default-features = false, features = ["transactions"]} +tari_crypto = "0.11.1" +tari_utilities = "^0.3" +bincode = "1.3.1" +bytes = "0.5.6" +chrono = "0.4.19" +config = { version = "0.9.3" } +derive-error = "0.0.4" +env_logger = { version = "0.7.1", optional = true } +futures = 
"0.3.5" +hex = "0.4.2" +hyper = "0.13.7" +jsonrpc = "0.11.0" +log = { version = "0.4.8", features = ["std"] } +rand = "0.7.2" +reqwest = {version = "0.10.8", features=["json"]} +serde = { version="1.0.106", features = ["derive"] } +serde_json = "1.0.57" +structopt = { version = "0.3.13", default_features = false } +thiserror = "1.0.15" +tokio = "0.2.10" +tokio-macros = "0.2.5" +tonic = "0.2" +tracing = "0.1" +tracing-futures = "0.2" +tracing-subscriber = "0.2" +url = "2.1.1" + +[build-dependencies] +tonic-build = "0.2" + +[dev-dependencies] +futures-test = "0.3.5" diff --git a/applications/tari_stratum_transcoder/src/common/json_rpc.rs b/applications/tari_stratum_transcoder/src/common/json_rpc.rs new file mode 100644 index 0000000000..976b421b61 --- /dev/null +++ b/applications/tari_stratum_transcoder/src/common/json_rpc.rs @@ -0,0 +1,121 @@ +// Copyright 2021, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use crate::error::StratumTranscoderProxyError; +use json::json; +use serde_json as json; +use tari_app_grpc::tari_rpc as grpc; +use tari_utilities::hex::Hex; + +/// Create a standard JSON RPC error response +/// More info: https://www.jsonrpc.org/specification#error_object +pub fn standard_error_response( + req_id: Option, + err: jsonrpc::error::StandardError, + data: Option, +) -> json::Value { + let err = jsonrpc::error::standard_error(err, data); + json!({ + "id": req_id.unwrap_or(-1), + "jsonrpc": "2.0", + "error": err, + }) +} + +/// Create a JSON RPC success response +/// More info: https://www.jsonrpc.org/specification#response_object +pub fn success_response(req_id: Option, result: json::Value) -> json::Value { + json!({ + "id": req_id.unwrap_or(-1), + "jsonrpc": "2.0", + "result": result, + }) +} + +/// Create a JSON RPC error response +/// More info: https://www.jsonrpc.org/specification#error_object +pub fn error_response( + req_id: Option, + err_code: i32, + err_message: &str, + err_data: Option, +) -> json::Value { + let mut err = json!({ + "code": err_code, + "message": err_message, + }); + + if let Some(d) = err_data { + err["data"] = d; + } + + json!({ + "id": req_id.unwrap_or(-1), + "jsonrpc": "2.0", + "error": err + }) +} + +/// Convert a BlockHeaderResponse into a JSON response +pub(crate) fn try_into_json_block_header_response( + header: grpc::BlockHeaderResponse, + request_id: Option, +) -> Result { + let 
grpc::BlockHeaderResponse { + header, + reward, + confirmations, + difficulty, + num_transactions, + } = header; + let header = header.ok_or_else(|| { + StratumTranscoderProxyError::UnexpectedTariBaseNodeResponse( + "Base node GRPC returned an empty header field when calling get_header_by_hash".into(), + ) + })?; + + let blockheader = json!({ + "block_size": 0, // TODO + "depth": confirmations, + "difficulty": difficulty, + "hash": header.hash.to_hex(), + "height": header.height, + "major_version": header.version, + "minor_version": 0, + "nonce": header.nonce, + "num_txes": num_transactions, + // Cannot be an orphan + "orphan_status": false, + "prev_hash": header.prev_hash.to_hex(), + "reward": reward, + "timestamp": header.timestamp.map(|ts| ts.seconds.into()).unwrap_or_else(|| json!(null)), + }); + + Ok(json!({ + "id": request_id.unwrap_or(-1), + "jsonrpc": "2.0", + "result": { + "blockheader": blockheader.as_object().unwrap(), + }, + "status": "OK", + })) +} diff --git a/applications/tari_stratum_transcoder/src/common/mining.rs b/applications/tari_stratum_transcoder/src/common/mining.rs new file mode 100644 index 0000000000..bf4a714b2b --- /dev/null +++ b/applications/tari_stratum_transcoder/src/common/mining.rs @@ -0,0 +1,54 @@ +// Copyright 2020, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. 
Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use crate::error::StratumTranscoderProxyError; +use std::convert::TryFrom; +use tari_app_grpc::tari_rpc as grpc; +use tari_core::{ + blocks::NewBlockTemplate, + transactions::transaction::{TransactionKernel, TransactionOutput}, +}; + +pub fn add_coinbase( + coinbase: Option, + mut block: NewBlockTemplate, +) -> Result { + if let Some(tx) = coinbase { + let output = TransactionOutput::try_from(tx.clone().body.unwrap().outputs[0].clone()) + .map_err(StratumTranscoderProxyError::MissingDataError)?; + let kernel = TransactionKernel::try_from(tx.body.unwrap().kernels[0].clone()) + .map_err(StratumTranscoderProxyError::MissingDataError)?; + block.body.add_output(output); + block.body.add_kernel(kernel); + let template = grpc::NewBlockTemplate::try_from(block); + match template { + Ok(template) => Ok(template), + Err(_e) => Err(StratumTranscoderProxyError::MissingDataError( + "Template Invalid".to_string(), + )), + } + } else { + Err(StratumTranscoderProxyError::MissingDataError( + "Coinbase 
Invalid".to_string(), + )) + } +} diff --git a/applications/tari_stratum_transcoder/src/common/mod.rs b/applications/tari_stratum_transcoder/src/common/mod.rs new file mode 100644 index 0000000000..061292e794 --- /dev/null +++ b/applications/tari_stratum_transcoder/src/common/mod.rs @@ -0,0 +1,25 @@ +// Copyright 2021, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +pub mod json_rpc; +pub mod mining; +pub mod proxy; diff --git a/applications/tari_stratum_transcoder/src/common/proxy.rs b/applications/tari_stratum_transcoder/src/common/proxy.rs new file mode 100644 index 0000000000..70276e635d --- /dev/null +++ b/applications/tari_stratum_transcoder/src/common/proxy.rs @@ -0,0 +1,64 @@ +// Copyright 2021, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +use crate::error::StratumTranscoderProxyError; +use bytes::BytesMut; +use futures::StreamExt; +use hyper::{header, http::response, Body, Response, StatusCode}; +use serde_json as json; +use std::convert::TryInto; + +pub fn json_response(status: StatusCode, body: &json::Value) -> Result, StratumTranscoderProxyError> { + let body_str = json::to_string(body)?; + Response::builder() + .header(header::CONTENT_TYPE, "application/json".to_string()) + .header(header::CONTENT_LENGTH, body_str.len()) + .status(status) + .body(body_str.into()) + .map_err(Into::into) +} + +pub fn into_response(mut parts: response::Parts, content: &json::Value) -> Response { + let resp = json::to_string(content).expect("json::to_string cannot fail when stringifying a json::Value"); + // Ensure that the content length header is correct + parts.headers.insert(header::CONTENT_LENGTH, resp.len().into()); + parts + .headers + .insert(header::CONTENT_TYPE, "application/json".try_into().unwrap()); + Response::from_parts(parts, resp.into()) +} + +pub fn into_body_from_response(resp: Response) -> Response { + let (parts, body) = resp.into_parts(); + into_response(parts, &body) +} + +/// Reads the `Body` until there is no more to read +pub async fn read_body_until_end(body: &mut Body) -> Result { + // TODO: Perhaps there is a more efficient way to do this + let mut bytes = BytesMut::new(); + while let Some(data) = body.next().await { + let data = data?; + bytes.extend(data); + } + Ok(bytes) +} diff --git a/applications/tari_stratum_transcoder/src/error.rs b/applications/tari_stratum_transcoder/src/error.rs new file mode 100644 index 0000000000..67094cdc92 --- /dev/null +++ b/applications/tari_stratum_transcoder/src/error.rs @@ -0,0 +1,77 @@ +// Copyright 2020. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +use hex::FromHexError; +use std::io; +use tari_common::{ConfigError, ConfigurationError}; +use tari_core::transactions::CoinbaseBuildError; +use thiserror::Error; +use tonic::transport; + +#[derive(Debug, Error)] +pub enum StratumTranscoderProxyError { + #[error("Configuration error: {0}")] + ConfigurationError(#[from] ConfigurationError), + #[error("Configuration error: {0}")] + ConfigError(#[from] ConfigError), + #[error("Reqwest error: {0}")] + ReqwestError(#[from] reqwest::Error), + #[error("Missing data:{0}")] + MissingDataError(String), + #[error("An IO error occurred: {0}")] + IoError(#[from] io::Error), + #[error("Tonic transport error: {0}")] + TonicTransportError(#[from] transport::Error), + #[error("GRPC response did not contain the expected field: `{0}`")] + GrpcResponseMissingField(&'static str), + #[error("Hyper error: {0}")] + HyperError(#[from] hyper::Error), + #[error("GRPC request failed with `{status}` {details}")] + GrpcRequestError { + #[source] + status: tonic::Status, + details: String, + }, + #[error("HTTP error: {0}")] + HttpError(#[from] hyper::http::Error), + #[error("Could not parse URL: {0}")] + UrlParseError(#[from] url::ParseError), + #[error("Bincode error: {0}")] + BincodeError(#[from] bincode::Error), + #[error("JSON error: {0}")] + JsonError(#[from] serde_json::Error), + #[error("Hex error: {0}")] + HexError(#[from] FromHexError), + #[error("Coinbase builder error: {0}")] + CoinbaseBuilderError(#[from] CoinbaseBuildError), + #[error("Unexpected Tari base node response: {0}")] + UnexpectedTariBaseNodeResponse(String), +} + +impl From for StratumTranscoderProxyError { + fn from(status: tonic::Status) -> Self { + Self::GrpcRequestError { + details: String::from_utf8_lossy(status.details()).to_string(), + status, + } + } +} diff --git a/applications/tari_stratum_transcoder/src/main.rs b/applications/tari_stratum_transcoder/src/main.rs new file mode 100644 index 0000000000..d55d551b5b --- /dev/null +++ 
b/applications/tari_stratum_transcoder/src/main.rs @@ -0,0 +1,100 @@ +// Copyright 2020. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+#![cfg_attr(not(debug_assertions), deny(unused_variables))] +#![cfg_attr(not(debug_assertions), deny(unused_imports))] +#![cfg_attr(not(debug_assertions), deny(dead_code))] +#![cfg_attr(not(debug_assertions), deny(unused_extern_crates))] +#![deny(unused_must_use)] +#![deny(unreachable_patterns)] +#![deny(unknown_lints)] + +mod common; +mod error; +mod proxy; + +use crate::error::StratumTranscoderProxyError; +use futures::future; +use hyper::{service::make_service_fn, Server}; +use proxy::{StratumTranscoderProxyConfig, StratumTranscoderProxyService}; +use std::convert::Infallible; +use structopt::StructOpt; +use tari_app_grpc::tari_rpc as grpc; +use tari_common::{configuration::bootstrap::ApplicationType, ConfigBootstrap, GlobalConfig}; +use tokio::time::Duration; + +#[tokio_macros::main] +async fn main() -> Result<(), StratumTranscoderProxyError> { + let config = initialize()?; + + let config = StratumTranscoderProxyConfig::from(config); + let addr = config.transcoder_host_address; + let client = reqwest::Client::builder() + .connect_timeout(Duration::from_secs(5)) + .timeout(Duration::from_secs(10)) + .pool_max_idle_per_host(25) + .build() + .map_err(StratumTranscoderProxyError::ReqwestError)?; + let base_node_client = + grpc::base_node_client::BaseNodeClient::connect(format!("http://{}", config.grpc_base_node_address)).await?; + let wallet_client = + grpc::wallet_client::WalletClient::connect(format!("http://{}", config.grpc_console_wallet_address)).await?; + let miningcore_service = StratumTranscoderProxyService::new(config, client, base_node_client, wallet_client); + let service = make_service_fn(|_conn| future::ready(Result::<_, Infallible>::Ok(miningcore_service.clone()))); + + match Server::try_bind(&addr) { + Ok(builder) => { + println!("Listening on {}...", addr); + builder.serve(service).await?; + Ok(()) + }, + Err(err) => { + println!("Fatal: Cannot bind to '{}'.", addr); + println!("It may be part of a Port Exclusion Range. 
Please try to use another port for the"); + println!("'proxy_host_address' in 'config/config.toml' and for the applicable XMRig '[pools][url]' or"); + println!("[pools][self-select]' config setting that can be found in 'config/xmrig_config_***.json' or"); + println!("'/config.json'."); + println!(); + Err(err.into()) + }, + } +} + +/// Loads the configuration and sets up logging +fn initialize() -> Result { + // Parse and validate command-line arguments + let mut bootstrap = ConfigBootstrap::from_args(); + // Check and initialize configuration files + let application_type = ApplicationType::StratumTranscoder; + bootstrap.init_dirs(application_type)?; + + // Load and apply configuration file + let cfg = bootstrap.load_configuration()?; + + #[cfg(feature = "envlog")] + let _ = env_logger::try_init(); + // Initialise the logger + #[cfg(not(feature = "envlog"))] + bootstrap.initialize_logging()?; + + let cfg = GlobalConfig::convert_from(application_type, cfg)?; + Ok(cfg) +} diff --git a/applications/tari_stratum_transcoder/src/proxy.rs b/applications/tari_stratum_transcoder/src/proxy.rs new file mode 100644 index 0000000000..6e5d508a53 --- /dev/null +++ b/applications/tari_stratum_transcoder/src/proxy.rs @@ -0,0 +1,616 @@ +// Copyright 2020, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use crate::{ + common::{ + json_rpc, + json_rpc::{standard_error_response, try_into_json_block_header_response}, + mining, + proxy, + }, + error::StratumTranscoderProxyError, +}; +use bytes::Bytes; +use hyper::{service::Service, Body, Method, Request, Response, StatusCode}; +use json::json; +use jsonrpc::error::StandardError; +use serde_json as json; +use std::{ + convert::TryFrom, + future::Future, + net::SocketAddr, + pin::Pin, + task::{Context, Poll}, + time::Instant, +}; +use tari_app_grpc::{tari_rpc as grpc, tari_rpc::GetCoinbaseRequest}; +use tari_common::{configuration::Network, GlobalConfig}; +use tari_core::blocks::{Block, NewBlockTemplate}; +use tari_utilities::{hex::Hex, message_format::MessageFormat}; +use tracing::{debug, error}; + +const LOG_TARGET: &str = "tari_stratum_transcoder::transcoder"; + +#[derive(Debug, Clone)] +pub struct StratumTranscoderProxyConfig { + pub network: Network, + pub grpc_base_node_address: SocketAddr, + pub grpc_console_wallet_address: SocketAddr, + pub transcoder_host_address: SocketAddr, +} + +impl From for StratumTranscoderProxyConfig { + fn from(config: GlobalConfig) -> Self { + Self { + network: config.network, + grpc_base_node_address: 
config.grpc_base_node_address, + grpc_console_wallet_address: config.grpc_console_wallet_address, + transcoder_host_address: config.transcoder_host_address, + } + } +} + +#[derive(Debug, Clone)] +pub struct StratumTranscoderProxyService { + inner: InnerService, +} + +impl StratumTranscoderProxyService { + pub fn new( + config: StratumTranscoderProxyConfig, + http_client: reqwest::Client, + base_node_client: grpc::base_node_client::BaseNodeClient, + wallet_client: grpc::wallet_client::WalletClient, + ) -> Self { + Self { + inner: InnerService { + config, + http_client, + base_node_client, + wallet_client, + }, + } + } +} + +#[allow(clippy::type_complexity)] +impl Service> for StratumTranscoderProxyService { + type Error = hyper::Error; + type Future = Pin> + Send>>; + type Response = Response; + + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: Request) -> Self::Future { + let inner = self.inner.clone(); + let future = async move { + match inner.handle(req).await { + Ok(resp) => Ok(resp), + Err(err) => { + error!(target: LOG_TARGET, "Error handling request: {}", err); + + Ok(proxy::json_response( + StatusCode::INTERNAL_SERVER_ERROR, + &json_rpc::standard_error_response( + None, + StandardError::InternalError, + Some(json!({"details": err.to_string()})), + ), + ) + .expect("unexpected failure")) + }, + } + }; + + Box::pin(future) + } +} + +#[derive(Debug, Clone)] +struct InnerService { + config: StratumTranscoderProxyConfig, + http_client: reqwest::Client, + base_node_client: grpc::base_node_client::BaseNodeClient, + wallet_client: grpc::wallet_client::WalletClient, +} + +impl InnerService { + async fn handle_get_info(&self) -> Result, StratumTranscoderProxyError> { + let mut client = self.base_node_client.clone(); + let tip_info = client.get_tip_info(grpc::Empty {}).await?.into_inner(); + let consensus_constants = client.get_constants(grpc::Empty {}).await?.into_inner(); + let sync_info = 
client.get_sync_info(grpc::Empty {}).await?.into_inner(); + let info_json; + match tip_info.metadata { + Some(metadata) => { + info_json = json!({ + "jsonrpc": "2.0", + "result": { + "blockchain_version": consensus_constants.blockchain_version, + "min_diff": consensus_constants.min_blake_pow_difficulty, + "lock_height": consensus_constants.coinbase_lock_height, + "max_block_interval": consensus_constants.difficulty_max_block_interval, + "max_weight": consensus_constants.max_block_transaction_weight, + "height_of_longest_chain": metadata.height_of_longest_chain, + "best_block": metadata.best_block.to_hex(), + "local_height": sync_info.local_height, + "tip_height": sync_info.tip_height, + "initial_sync_achieved": tip_info.initial_sync_achieved, + } + }) + }, + None => { + return Err(StratumTranscoderProxyError::UnexpectedTariBaseNodeResponse( + "Base node GRPC returned empty metadata when calling tip_info".into(), + )) + }, + } + proxy::json_response(StatusCode::OK, &info_json) + } + + async fn handle_get_block_template( + &self, + request: Request, + ) -> Result, StratumTranscoderProxyError> { + let request = request.body(); + let request_id = request["id"].as_i64(); + let mut grpc_client = self.base_node_client.clone(); + + let grpc::NewBlockTemplateResponse { + miner_data, + new_block_template, + initial_sync_achieved: _, + } = grpc_client + .get_new_block_template(grpc::NewBlockTemplateRequest { + algo: Some(grpc::PowAlgo { + pow_algo: grpc::pow_algo::PowAlgos::Sha3.into(), + }), + max_weight: 0, + }) + .await + .map_err(|status| StratumTranscoderProxyError::GrpcRequestError { + status, + details: "failed to get new block template".to_string(), + })? 
+ .into_inner(); + + let miner_data = miner_data.ok_or(StratumTranscoderProxyError::GrpcResponseMissingField("miner_data"))?; + let new_block_template = new_block_template.ok_or(StratumTranscoderProxyError::GrpcResponseMissingField( + "new_block_template", + ))?; + + let block_reward = miner_data.reward; + let total_fees = miner_data.total_fees; + let tari_difficulty = miner_data.target_difficulty; + + let template_block = NewBlockTemplate::try_from(new_block_template) + .map_err(|e| StratumTranscoderProxyError::MissingDataError(format!("GRPC Conversion Error: {}", e)))?; + let tari_height = template_block.header.height; + + let mut grpc_wallet_client = self.wallet_client.clone(); + let coinbase_response = grpc_wallet_client + .get_coinbase(GetCoinbaseRequest { + reward: block_reward, + fee: total_fees, + height: tari_height, + }) + .await + .map_err(|status| StratumTranscoderProxyError::GrpcRequestError { + status, + details: "failed to get new block template".to_string(), + })?; + let coinbase_transaction = coinbase_response.into_inner().transaction; + + let coinbased_block = mining::add_coinbase(coinbase_transaction, template_block)?; + + let block = grpc_client + .get_new_block(coinbased_block) + .await + .map_err(|status| StratumTranscoderProxyError::GrpcRequestError { + status, + details: "failed to get new block".to_string(), + })? 
+ .into_inner(); + + let tari_block = Block::try_from( + block + .block + .ok_or_else(|| StratumTranscoderProxyError::MissingDataError("Tari block".to_string()))?, + ) + .map_err(StratumTranscoderProxyError::MissingDataError)?; + + let tari_header = tari_block.header.clone(); + let tari_prev_hash = tari_header.prev_hash.to_hex(); + + // todo remove unwraps + let header_hex = hex::encode(tari_header.to_json().unwrap()); + let block_hex = hex::encode(tari_block.to_json().unwrap()); + + let template_json = json!({ + "id": request_id.unwrap_or(-1), + "jsonrpc": "2.0", + "result": { + "blockheader_blob": header_hex, + "blocktemplate_blob": block_hex, + "difficulty" : tari_difficulty, + "height" : tari_height, + "expected_reward": block_reward+total_fees, + "prev_hash": tari_prev_hash, + } + }); + + proxy::json_response(StatusCode::OK, &template_json) + } + + async fn handle_submit_block( + &self, + request: Request, + ) -> Result, StratumTranscoderProxyError> { + let request = request.body(); + let params = match request["params"].as_array() { + Some(v) => v, + None => { + return proxy::json_response( + StatusCode::OK, + &json_rpc::error_response( + request["id"].as_i64(), + 1, + "`params` field is empty or an invalid type for submit block request. 
Expected an array.", + None, + ), + ) + }, + }; + let mut json_response: Result, StratumTranscoderProxyError> = proxy::json_response( + StatusCode::OK, + &json_rpc::error_response(request["id"].as_i64(), 2, "No block", None), + ); + for param in params.iter().filter_map(|p| p.as_str()) { + let block_hex = hex::decode(param); + match block_hex { + Ok(block_hex) => { + let block: Result = + serde_json::from_str(&String::from_utf8_lossy(&block_hex).to_string()); + match block { + Ok(block) => { + let mut client = self.base_node_client.clone(); + let grpc_block: tari_app_grpc::tari_rpc::Block = block.into(); + match client.submit_block(grpc_block).await { + Ok(_) => { + json_response = proxy::json_response( + StatusCode::OK, + &json_rpc::success_response( + request["id"].as_i64(), + json!({ "status": "OK", "untrusted": false }), + ), + ) + }, + Err(_) => { + json_response = proxy::json_response( + StatusCode::OK, + &json_rpc::error_response( + request["id"].as_i64(), + 3, + "Block not accepted", + None, + ), + ) + }, + } + }, + Err(_) => { + json_response = proxy::json_response( + StatusCode::OK, + &json_rpc::error_response(request["id"].as_i64(), 4, "Invalid Block", None), + ) + }, + } + }, + Err(_) => { + json_response = proxy::json_response( + StatusCode::OK, + &json_rpc::error_response(request["id"].as_i64(), 5, "Invalid Hex", None), + ) + }, + } + } + json_response + } + + async fn handle_get_block_header_by_height( + &self, + request: Request, + ) -> Result, StratumTranscoderProxyError> { + let request = request.into_body(); + let mut height = request["params"]["height"].as_u64().unwrap_or(0); + // bug for height = 0 (genesis block), streams indefinitely + if height == 0 { + height = 1; + } + let mut client = self.base_node_client.clone(); + let mut resp = client + .get_blocks(grpc::GetBlocksRequest { heights: vec![height] }) + .await? 
+ .into_inner(); + let message = resp.message().await?; + resp.trailers().await?; // drain stream + // todo: remove unwraps + let resp = client + .get_header_by_hash(grpc::GetHeaderByHashRequest { + hash: message.unwrap().block.unwrap().header.unwrap().hash, + }) + .await; + match resp { + Ok(resp) => { + let json_response = try_into_json_block_header_response(resp.into_inner(), request["id"].as_i64())?; + proxy::json_response(StatusCode::OK, &json_response) + }, + Err(err) if err.code() == tonic::Code::NotFound => proxy::json_response( + StatusCode::OK, + &json_rpc::error_response(request["id"].as_i64(), 5, "Not found", None), + ), + Err(err) => Err(StratumTranscoderProxyError::GrpcRequestError { + status: err, + details: "failed to get header by height".to_string(), + }), + } + } + + async fn handle_get_block_header_by_hash( + &self, + request: Request, + ) -> Result, StratumTranscoderProxyError> { + let request = request.into_body(); + let hash = request["hash"] + .as_str() + .ok_or("hash parameter is not a string") + .and_then(|hash| hex::decode(hash).map_err(|_| "hash parameter is not a valid hex value")); + let hash = match hash { + Ok(hash) => hash, + Err(err) => { + return proxy::json_response( + StatusCode::OK, + &json_rpc::error_response(request["id"].as_i64(), -1, err, None), + ) + }, + }; + + let mut client = self.base_node_client.clone(); + let resp = client + .get_header_by_hash(grpc::GetHeaderByHashRequest { hash: hash.clone() }) + .await; + match resp { + Ok(resp) => { + let json_response = try_into_json_block_header_response(resp.into_inner(), request["id"].as_i64())?; + + debug!( + target: LOG_TARGET, + "[get_header_by_hash] Found tari block header with hash `{:?}`", + hash.clone() + ); + + proxy::json_response(StatusCode::OK, &json_response) + }, + Err(err) if err.code() == tonic::Code::NotFound => { + debug!( + target: LOG_TARGET, + "[get_header_by_hash] No tari block header found with hash `{:?}`", hash + ); + proxy::json_response( + 
StatusCode::OK, + &json_rpc::error_response(request["id"].as_i64(), 5, "Not found", None), + ) + }, + Err(err) => Err(StratumTranscoderProxyError::GrpcRequestError { + status: err, + details: "failed to get header by hash".to_string(), + }), + } + } + + async fn handle_get_last_block_header( + &self, + request: Request, + ) -> Result, StratumTranscoderProxyError> { + let request = request.into_body(); + let mut client = self.base_node_client.clone(); + let tip_info = client.get_tip_info(grpc::Empty {}).await?; + let tip_info = tip_info.into_inner(); + let chain_metadata = tip_info.metadata.ok_or_else(|| { + StratumTranscoderProxyError::UnexpectedTariBaseNodeResponse( + "get_tip_info returned no chain metadata".into(), + ) + })?; + + let tip_header = client + .get_header_by_hash(grpc::GetHeaderByHashRequest { + hash: chain_metadata.best_block, + }) + .await?; + + let tip_header = tip_header.into_inner(); + let json_response = try_into_json_block_header_response(tip_header, request["id"].as_i64())?; + proxy::json_response(StatusCode::OK, &json_response) + } + + async fn handle_get_balance( + &self, + request: Request, + ) -> Result, StratumTranscoderProxyError> { + let request = request.body(); + let request_id = request["id"].as_i64(); + let mut client = self.wallet_client.clone(); + let balances = client.get_balance(grpc::GetBalanceRequest {}).await?.into_inner(); + + let json_response = json!({ + "id": request_id.unwrap_or(-1), + "jsonrpc": "2.0", + "result": { + "available_balance": balances.available_balance, + "pending_incoming_balance": balances.pending_incoming_balance, + "pending_outgoing_balance": balances.pending_outgoing_balance, + } + }); + proxy::json_response(StatusCode::OK, &json_response) + } + + async fn handle_transfer( + &self, + request: Request, + ) -> Result, StratumTranscoderProxyError> { + let request = request.body(); + let recipients = match request["params"]["recipients"].as_array() { + Some(v) => v, + None => { + return 
proxy::json_response( + StatusCode::OK, + &json_rpc::error_response( + request["id"].as_i64(), + 1, + "`recipients` field is empty or an invalid type for transfer request. Expected an array.", + None, + ), + ) + }, + }; + + let mut grpc_payments = Vec::new(); + + for recipient in recipients.iter() { + grpc_payments.push(grpc::PaymentRecipient { + address: recipient["address"].as_str().unwrap().to_string(), + amount: recipient["amount"].as_u64().unwrap(), + fee_per_gram: recipient["fee_per_gram"].as_u64().unwrap(), + message: recipient["message"].as_str().unwrap().to_string(), + payment_type: 1, + }); + } + + let mut client = self.wallet_client.clone(); + let transfer_results = client + .transfer(grpc::TransferRequest { + recipients: grpc_payments, + }) + .await? + .into_inner(); + let transaction_results = &transfer_results.results; + + let mut results = Vec::new(); + for transaction_result in transaction_results.iter() { + let result = json!({ + "address": transaction_result.address, + "transaction_id": transaction_result.transaction_id, + "is_success": transaction_result.is_success, + "failure_message": transaction_result.failure_message, + }); + results.push(result.as_object().unwrap().clone()); + } + let json_response = json!({ + "jsonrpc": "2.0", + "result": {"transaction_results" : results}, + }); + proxy::json_response(StatusCode::OK, &json_response) + } + + async fn get_proxy_response(&self, request: Request) -> Result, StratumTranscoderProxyError> { + let mut proxy_resp = Response::new(standard_error_response(Some(-1), StandardError::MethodNotFound, None)); + match request.method().clone() { + Method::GET => match request.uri().path() { + "/get_info" | "/getinfo" => self.handle_get_info().await, + _ => Ok(proxy::into_body_from_response(proxy_resp)), + }, + Method::POST => { + let json = json::from_slice::(request.body())?; + let request = request.map(move |_| json); + match request.body()["method"].as_str().unwrap_or_default() { + "get_info" | "getinfo" => 
self.handle_get_info().await, + "submitblock" | "submit_block" => self.handle_submit_block(request).await, + "getblocktemplate" | "get_block_template" => self.handle_get_block_template(request).await, + "getblockheaderbyhash" | "get_block_header_by_hash" => { + self.handle_get_block_header_by_hash(request).await + }, + "getblockheaderbyheight" | "get_block_header_by_height" => { + self.handle_get_block_header_by_height(request).await + }, + "getlastblockheader" | "get_last_block_header" => self.handle_get_last_block_header(request).await, + "transfer" => self.handle_transfer(request).await, + "getbalance" | "get_balance" => self.handle_get_balance(request).await, + _ => { + let request = request.body(); + proxy_resp = Response::new(standard_error_response( + request["id"].as_i64(), + StandardError::MethodNotFound, + None, + )); + Ok(proxy::into_body_from_response(proxy_resp)) + }, + } + }, + // Simply return the response "as is" + _ => Ok(proxy::into_body_from_response(proxy_resp)), + } + } + + async fn handle(self, mut request: Request) -> Result, StratumTranscoderProxyError> { + let start = Instant::now(); + let bytes = proxy::read_body_until_end(request.body_mut()).await?; + let request = request.map(|_| bytes.freeze()); + let method_name; + match *request.method() { + Method::GET => { + let mut chars = request.uri().path().chars(); + chars.next(); + method_name = chars.as_str().to_string(); + }, + Method::POST => { + let json = json::from_slice::(request.body()).unwrap_or_default(); + method_name = str::replace(json["method"].as_str().unwrap_or_default(), "\"", ""); + }, + _ => { + method_name = "unsupported".to_string(); + }, + } + + debug!( + target: LOG_TARGET, + "request: {} ({})", + String::from_utf8_lossy(&request.body().clone()[..]), + request + .headers() + .iter() + .map(|(k, v)| format!("{}={}", k, String::from_utf8_lossy(v.as_ref()))) + .collect::>() + .join(","), + ); + + let response = self.get_proxy_response(request).await?; + println!( + "Method: 
{}, Proxy Status: {}, Response Time: {}ms", + method_name, + response.status(), + start.elapsed().as_millis() + ); + Ok(response) + } +} diff --git a/applications/test_faucet/Cargo.toml b/applications/test_faucet/Cargo.toml index e0ca29ae7d..3ef3c8a4c1 100644 --- a/applications/test_faucet/Cargo.toml +++ b/applications/test_faucet/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "test_faucet" -version = "0.9.1" +version = "0.9.5" authors = ["The Tari Development Community"] edition = "2018" diff --git a/applications/test_faucet/src/main.rs b/applications/test_faucet/src/main.rs index 3563a3a874..0aadee4d5e 100644 --- a/applications/test_faucet/src/main.rs +++ b/applications/test_faucet/src/main.rs @@ -63,7 +63,7 @@ async fn main() -> Result<(), Box> { task::spawn(async move { let result = task::spawn_blocking(move || { let script = script!(Nop); - let (utxo, key, _) = helpers::create_utxo(value, &fc, Some(feature), &script); + let (utxo, key, _) = helpers::create_utxo(value, &fc, feature, &script); print!("."); (utxo, key, value) }) diff --git a/applications/utils/automatic_recovery_test.js b/applications/utils/automatic_recovery_test.js deleted file mode 100644 index 76d257bfaa..0000000000 --- a/applications/utils/automatic_recovery_test.js +++ /dev/null @@ -1,85 +0,0 @@ -const WalletProcess = require("../../integration_tests/helpers/walletProcess"); -const WalletClient = require("../../integration_tests/helpers/walletClient"); - -const fs = require("fs"); -const yargs = require("yargs"); - -async function main() { - const argv = yargs - .option("sw", { - alias: "seed-words", - description: "Seed words to use during recovery", - type: "string", - default: - "pigeon marble letter canal hard close kit cash coin still melt random require long shaft antenna tent turkey neck divert enrich iron analyst abandon", - }) - .help() - .alias("help", "h").argv; - - const wallet = new WalletProcess( - "compile", - true, - { - transport: "tor", - network: "weatherwax", - 
grpc_console_wallet_address: "127.0.0.1:18111", - }, - false, - argv.seedWords - ); - - await wallet.startNew(); - - let startTime = new Date(); - - let recoveryPromise = new Promise((resolve) => { - wallet.ps.stdout.on("data", (data) => { - let height = data - .toString() - .match("Recovery\\ complete!\\ Scanned\\ =\\ (\\d+)\\ in"); - let recovered_ut = data.toString().match("worth\\ (\\d+)\\ µT"); - if (height && recovered_ut) { - resolve({ - height: parseInt(height[1]), - recoveredAmount: parseInt(recovered_ut[1]), - }); - } else if (data.toString().match("Failed to sync. Attempt 10 of 10")) { - resolve(false); - } - }); - }); - - let height_amount = await recoveryPromise; - - let endTime = new Date(); - const timeDiffMs = endTime - startTime; - const timeDiffMinutes = timeDiffMs / 60000; - - let walletClient = new WalletClient("127.0.0.1:18111"); - let id = await walletClient.identify(); - - wallet.stop(); - - if (height_amount) { - const block_rate = height_amount.height / timeDiffMinutes; - console.log( - "Wallet (Pubkey:", - id.public_key, - ") recovered to a block height of", - height_amount.height, - "completed in", - timeDiffMinutes.toFixed(2), - "minutes (", - block_rate.toFixed(2), - "blocks/min).", - height_amount.recoveredAmount, - "µT recovered." 
- ); - } else { - console.log("Wallet (Pubkey:", id.public_key, ") recovery failed"); - } - - fs.rmdirSync(__dirname + "/temp/base_nodes", { recursive: true }); -} - -Promise.all([main()]); diff --git a/applications/utils/automatic_sync_test.js b/applications/utils/automatic_sync_test.js deleted file mode 100644 index 6df0f571c7..0000000000 --- a/applications/utils/automatic_sync_test.js +++ /dev/null @@ -1,69 +0,0 @@ -const BaseNodeProcess = require("../../integration_tests/helpers/baseNodeProcess"); - -const fs = require("fs"); -const os = require("os"); -const path = require("path"); - -async function main() { - const baseNode = new BaseNodeProcess("compile", true); - await baseNode.init(); - - // Set pruning horizon in config file if `pruned` command line arg is present - if (process.argv.includes("pruned")) { - let config = fs.readFileSync(baseNode.baseDir + "/config/config.toml"); - let updated_config = config - .toString() - .replace("#pruning_horizon = 0", "pruning_horizon = 1000"); - fs.writeFileSync(baseNode.baseDir + "/config/config.toml", updated_config); - } - - await baseNode.start(); - - let startTime = new Date(); - - let syncPromise = new Promise((resolve) => { - baseNode.ps.stdout.on("data", (data) => { - let height = parseInt(data.toString().match("Tip:\\ (\\d+)\\ \\(")[1]); - - if ( - parseInt(height) > 0 && - data - .toString() - .toUpperCase() - .match(/STATE: LISTENING/) - ) { - resolve(height); - } - }); - }); - - let block_height = await syncPromise; - - let endTime = new Date(); - const timeDiffMs = endTime - startTime; - const timeDiffMinutes = timeDiffMs / 60000; - const block_rate = block_height / timeDiffMinutes; - - let message = "Syncing "; - if (process.argv.includes("pruned")) { - message = message + "Pruned Node "; - } else { - message = message + "Archival Node "; - } - - console.log( - message + "to block height", - block_height, - "took", - timeDiffMinutes.toFixed(2), - "minutes for a rate of", - block_rate.toFixed(2), - 
"blocks/min" - ); - - baseNode.stop(); - - fs.rmdirSync(__dirname + "/temp/base_nodes", { recursive: true }); -} - -Promise.all([main()]); diff --git a/applications/utils/washing_machine.js b/applications/utils/washing_machine.js deleted file mode 100644 index dcc220bd66..0000000000 --- a/applications/utils/washing_machine.js +++ /dev/null @@ -1,294 +0,0 @@ -const WalletProcess = require("../../integration_tests/helpers/walletProcess"); -const WalletClient = require("../../integration_tests/helpers/walletClient"); -const { sleep, yargs } = require("../../integration_tests/helpers/util"); -const { PaymentType } = require("../../integration_tests/helpers/types"); - -// FPG to use for transactions -const FEE_PER_GRAM = 5; - -/// To start, create a normal console wallet, and send it funds. Then run it with GRPC set to 18143. For quickest results, -/// set the confirmation time to 0 (must be mined) -/// This test will send txtr between two wallets and then back again. It waits for a block to be mined, so could take a while -async function main() { - const argObj = yargs() - .option("base-node", { - alias: "b", - description: - "Base node for wallet2. 
This is ignored if wallet2-grpc is set", - type: "string", - default: - // ncal - "e2cef0473117da34108dd85d4425536b8a1f317478686a6d7a0bbb5c800a747d::/onion3/3eiacmnozk7rcvrx7brhlssnpueqsjdsfbfmq63d2bk7h3vtah35tcyd:18141", - }) - .option("wallet1-grpc", { - alias: "w1", - description: "Wallet 1 GRPC address", - type: "string", - default: "127.0.0.1:18143", - }) - .option("wallet2-grpc", { - alias: "w2", - description: - "Wallet 2 GRPC address (If not supplied, a new wallet will be started)", - type: "string", - default: null, - }) - .option("num-txns", { - alias: "t", - type: "number", - default: 10, - description: - "The number of transactions that are sent each way within a washing round.", - }) - .option("num-rounds", { - alias: "n", - type: "number", - default: null, - description: "The number of send back and forth washing rounds.", - }) - .option("amount-range", { - type: "string", - description: "The start/end range for per txn amounts in millitari (mT)", - default: "50-200", - }) - .option("sleep-after-round", { - alias: "s", - type: "number", - description: "Interval in seconds between rounds", - default: null, - }) - .option("one-sided", { type: "boolean", default: false }) - .alias("help", "h"); - - argObj.help(); - - const { argv: args } = argObj; - - debug(JSON.stringify(args, null, 2)); - console.log("Hello, starting the washing machine"); - - let wallet1 = new WalletClient(args.wallet1Grpc); - - // Start wallet2 - let wallet2; - let wallet2Process = null; - if (!args.wallet2Grpc) { - wallet2Process = createGrpcWallet(args.baseNode); - wallet2Process.baseDir = "./wallet"; - await wallet2Process.startNew(); - wallet2 = wallet2Process.getClient(); - } else { - wallet2 = new WalletClient(args.wallet2Grpc); - } - - await showWalletDetails("Wallet 1", wallet1); - await showWalletDetails("Wallet 2 ", wallet2); - - let [minAmount, maxAmount] = args.amountRange - .split("-", 2) - .map((n) => parseInt(n) * 1000); - - const { numTxns: numTransactions, numRounds, 
sleepAfterRound } = args; - - const minRequiredBalance = - ((maxAmount - minAmount) / 2) * numTransactions + - calcPossibleFee(FEE_PER_GRAM, numTransactions); - let currentBalance = await waitForBalance(wallet1, minRequiredBalance); - - console.log( - `Required balance (${minRequiredBalance}uT) reached: ${currentBalance.available_balance}uT. Sending transactions between ${minAmount}uT and ${maxAmount}uT` - ); - - let roundCount = 0; - while (true) { - console.log(`Wallet 1 -> Wallet 2`); - let wallet1AmountSent = await sendFunds(wallet1, wallet2, { - minAmount: minAmount, - maxAmount: maxAmount, - oneSided: args.oneSided, - numTransactions, - feePerGram: FEE_PER_GRAM, - }); - - console.log( - `Waiting for wallet2 to have a balance of ${wallet1AmountSent}uT` - ); - await waitForBalance(wallet2, wallet1AmountSent); - - console.log(`Wallet 2 -> Wallet 1`); - await sendFunds(wallet2, wallet1, { - minAmount: minAmount, - maxAmount: maxAmount, - oneSided: args.oneSided, - numTransactions, - feePerGram: FEE_PER_GRAM, - }); - - roundCount++; - if (numRounds && roundCount >= numRounds) { - break; - } - if (sleepAfterRound) { - console.log(`Taking a break for ${sleepAfterRound}s`); - await sleep(sleepAfterRound * 1000); - } - } - - if (wallet2Process) { - await wallet2Process.stop(); - } -} - -async function sendFunds(senderWallet, receiverWallet, options) { - const { available_balance: senderBalance } = await senderWallet.getBalance(); - const receiverInfo = await receiverWallet.identify(); - const paymentType = options.oneSided - ? 
PaymentType.ONE_SIDED - : PaymentType.STANDARD_MIMBLEWIMBLE; - - const transactionIter = transactionGenerator({ - address: receiverInfo.public_key, - feePerGram: options.feePerGram, - minAmount: options.minAmount, - maxAmount: options.maxAmount, - numTransactions: options.numTransactions, - balance: senderBalance, - paymentType, - }); - - let transactions = collect(transactionIter); - let totalToSend = transactions.reduce( - (total, { amount }) => total + amount, - 0 - ); - // For interactive transactions, a coin split is needed first - if (!options.oneSided) { - let avgAmountPerTransaction = totalToSend / transactions.length; - console.log(`COINSPLIT: amount = ${avgAmountPerTransaction}uT`); - if (transactions.length > 1) { - let leftToSplit = transactions.length; - while (leftToSplit > 499) { - let split_result = await senderWallet.coin_split({ - amount_per_split: avgAmountPerTransaction, - split_count: 499, - fee_per_gram: options.feePerGram, - }); - console.log("Split:", split_result); - leftToSplit -= 499; - } - if (leftToSplit > 0) { - let split_result = await senderWallet.coin_split({ - amount_per_split: avgAmountPerTransaction, - split_count: leftToSplit, - fee_per_gram: options.feePerGram, - }); - console.log("Last split:", split_result); - } - } - await waitForBalance(senderWallet, totalToSend); - } - - let { results } = await senderWallet.transfer({ - recipients: transactions, - }); - // debug(results); - for (let result of results) { - console.log( - `${ - result.is_success ? 
"✅ " : `❌ ${result.failure_message}` - } transaction #${result.transaction_id} ` - ); - } - - return totalToSend; -} - -function* transactionGenerator(options) { - // Loosely account for fees - const avgSpendPerTransaction = - options.minAmount + - (options.maxAmount - options.minAmount) / 2 + - calcPossibleFee(options.feePerGram, 1); - console.log( - `Generating ${options.numTransactions} transactions averaging ${avgSpendPerTransaction}uT (incl fees)` - ); - - let amountToSend = options.minAmount; - let i = 0; - let total = 0; - while (i < options.numTransactions && total < options.balance) { - total += amountToSend + calcPossibleFee(options.feePerGram, 1); - yield { - address: options.address, - amount: amountToSend, - fee_per_gram: options.feePerGram, - message: `Washing machine funds ${i + 1} of ${options.numTransactions}`, - payment_type: options.paymentType, - }; - - amountToSend = Math.max( - options.minAmount, - (amountToSend + 1) % options.maxAmount - ); - i++; - } -} - -function createGrpcWallet(baseNode, opts = {}) { - let process = new WalletProcess("sender", false, { - transport: "tor", - network: "stibbons", - num_confirmations: 0, - ...opts, - }); - process.setPeerSeeds([baseNode]); - return process; -} - -async function waitForBalance(client, balance) { - if (isNaN(balance)) { - throw new Error("balance is not a number"); - } - let i = 0; - while (true) { - let newBalance = await client.getBalance(); - console.log( - `[t=${i}s] Waiting for available wallet balance (${newBalance.available_balance}uT, pending=${newBalance.pending_incoming_balance}uT) to reach at least ${balance}uT...` - ); - if (newBalance.available_balance >= balance) { - return newBalance; - } - await sleep(1000); - i++; - } -} - -function collect(iter) { - let arr = []; - for (i of iter) { - arr.push(i); - } - return arr; -} - -function calcPossibleFee(feePerGram, numTransactions) { - const TRANSACTION_WEIGHT = (1 + 3 + 13) * 2; - return feePerGram * TRANSACTION_WEIGHT * 
numTransactions; -} - -function debug(...args) { - // Poor man's debug - if (process.env.DEBUG) { - console.log(...args); - } -} - -async function showWalletDetails(name, wallet) { - const walletIdentity = await wallet.identify(); - const { status, num_node_connections } = await wallet.getNetworkStatus(); - console.log( - `${name}: ${walletIdentity.public_key} status = ${status}, num_node_connections = ${num_node_connections}` - ); -} - -Promise.all([main()]); diff --git a/base_layer/common_types/Cargo.toml b/base_layer/common_types/Cargo.toml index 7e4e518e69..2b85d4e9a9 100644 --- a/base_layer/common_types/Cargo.toml +++ b/base_layer/common_types/Cargo.toml @@ -3,7 +3,7 @@ name = "tari_common_types" authors = ["The Tari Development Community"] description = "Tari cryptocurrency common types" license = "BSD-3-Clause" -version = "0.9.1" +version = "0.9.5" edition = "2018" [dependencies] diff --git a/base_layer/core/Cargo.toml b/base_layer/core/Cargo.toml index fc3b04a5a8..1212b2a40c 100644 --- a/base_layer/core/Cargo.toml +++ b/base_layer/core/Cargo.toml @@ -6,7 +6,7 @@ repository = "https://github.com/tari-project/tari" homepage = "https://tari.com" readme = "README.md" license = "BSD-3-Clause" -version = "0.9.1" +version = "0.9.5" edition = "2018" [features] @@ -59,6 +59,9 @@ tokio = { version="^0.2", features = ["blocking", "time", "sync"] } ttl_cache = "0.5.1" uint = { version = "0.9", default-features = false } num-format = "0.4.0" +tracing = "0.1.26" +tracing-futures="*" +tracing-attributes="*" [dev-dependencies] tari_p2p = { version = "^0.9", path = "../../base_layer/p2p", features=["test-mocks"]} diff --git a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs index 2d79d90b4c..21ebc49201 100644 --- a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs +++ b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs @@ -30,7 +30,7 @@ use crate::{ 
OutboundNodeCommsInterface, }, blocks::{block_header::BlockHeader, Block, NewBlock, NewBlockTemplate}, - chain_storage::{async_db::AsyncBlockchainDb, BlockAddResult, BlockchainBackend, ChainBlock}, + chain_storage::{async_db::AsyncBlockchainDb, BlockAddResult, BlockchainBackend, ChainBlock, PrunedOutput}, consensus::{ConsensusConstants, ConsensusManager}, mempool::{async_mempool, Mempool}, proof_of_work::{Difficulty, PowAlgorithm}, @@ -224,12 +224,14 @@ where T: BlockchainBackend + 'static }, NodeCommsRequest::FetchMatchingUtxos(utxo_hashes) => { let mut res = Vec::with_capacity(utxo_hashes.len()); - for (output, spent) in (self.blockchain_db.fetch_utxos(utxo_hashes).await?) + for (pruned_output, spent) in (self.blockchain_db.fetch_utxos(utxo_hashes).await?) .into_iter() .flatten() { - if !spent { - res.push(output); + if let PrunedOutput::NotPruned { output } = pruned_output { + if !spent { + res.push(output); + } } } Ok(NodeCommsResponse::TransactionOutputs(res)) @@ -240,7 +242,10 @@ where T: BlockchainBackend + 'static .fetch_utxos(hashes) .await? 
.into_iter() - .filter_map(|opt| opt.map(|(output, _)| output)) + .filter_map(|opt| match opt { + Some((PrunedOutput::NotPruned { output }, _)) => Some(output), + _ => None, + }) .collect(); Ok(NodeCommsResponse::TransactionOutputs(res)) }, diff --git a/base_layer/core/src/base_node/comms_interface/local_interface.rs b/base_layer/core/src/base_node/comms_interface/local_interface.rs index de068e95f1..a0f5bcf2c3 100644 --- a/base_layer/core/src/base_node/comms_interface/local_interface.rs +++ b/base_layer/core/src/base_node/comms_interface/local_interface.rs @@ -32,7 +32,7 @@ use crate::{ chain_storage::HistoricalBlock, proof_of_work::PowAlgorithm, transactions::{ - transaction::{TransactionKernel, TransactionOutput}, + transaction::TransactionKernel, types::{Commitment, HashOutput, Signature}, }, }; @@ -43,7 +43,10 @@ use tokio::sync::broadcast; pub type BlockEventSender = broadcast::Sender>; pub type BlockEventReceiver = broadcast::Receiver>; -use crate::base_node::comms_interface::comms_request::GetNewBlockTemplateRequest; +use crate::{ + base_node::comms_interface::comms_request::GetNewBlockTemplateRequest, + transactions::transaction::TransactionOutput, +}; /// The InboundNodeCommsInterface provides an interface to request information from the current local node by other /// internal services. 
diff --git a/base_layer/core/src/base_node/rpc/mod.rs b/base_layer/core/src/base_node/rpc/mod.rs index cf41468610..44cdefdbc3 100644 --- a/base_layer/core/src/base_node/rpc/mod.rs +++ b/base_layer/core/src/base_node/rpc/mod.rs @@ -24,18 +24,6 @@ mod service; #[cfg(feature = "base_node")] use crate::base_node::StateMachineHandle; -use crate::proto::{ - base_node::{ - FetchMatchingUtxos, - FetchUtxosResponse, - Signatures, - TipInfoResponse, - TxQueryBatchResponses, - TxQueryResponse, - TxSubmissionResponse, - }, - types::{Signature, Transaction}, -}; #[cfg(feature = "base_node")] use crate::{ chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend}, @@ -44,6 +32,22 @@ use crate::{ #[cfg(feature = "base_node")] pub use service::BaseNodeWalletRpcService; +use crate::{ + proto, + proto::{ + base_node::{ + FetchMatchingUtxos, + FetchUtxosResponse, + Signatures, + TipInfoResponse, + TxQueryBatchResponses, + TxQueryResponse, + TxSubmissionResponse, + }, + types::{Signature, Transaction}, + }, +}; + use tari_comms::protocol::rpc::{Request, Response, RpcStatus}; use tari_comms_rpc_macros::tari_rpc; @@ -72,6 +76,9 @@ pub trait BaseNodeWalletService: Send + Sync + 'static { #[rpc(method = 5)] async fn get_tip_info(&self, request: Request<()>) -> Result, RpcStatus>; + + #[rpc(method = 6)] + async fn get_header(&self, request: Request) -> Result, RpcStatus>; } #[cfg(feature = "base_node")] diff --git a/base_layer/core/src/base_node/rpc/service.rs b/base_layer/core/src/base_node/rpc/service.rs index fb6f247199..c50600ea9c 100644 --- a/base_layer/core/src/base_node/rpc/service.rs +++ b/base_layer/core/src/base_node/rpc/service.rs @@ -22,8 +22,9 @@ use crate::{ base_node::{rpc::BaseNodeWalletService, state_machine_service::states::StateInfo, StateMachineHandle}, - chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend}, + chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend, PrunedOutput}, mempool::{service::MempoolHandle, TxStorageResponse}, + proto, 
proto::{ base_node::{ FetchMatchingUtxos, @@ -288,15 +289,17 @@ impl BaseNodeWalletService for BaseNodeWalletRpc let db = self.db(); let mut res = Vec::with_capacity(message.output_hashes.len()); - for (output, spent) in (db + for (pruned_output, spent) in (db .fetch_utxos(message.output_hashes) .await .map_err(RpcStatus::log_internal_error(LOG_TARGET))?) .into_iter() .flatten() { - if !spent { - res.push(output); + if let PrunedOutput::NotPruned { output } = pruned_output { + if !spent { + res.push(output); + } } } @@ -309,7 +312,7 @@ impl BaseNodeWalletService for BaseNodeWalletRpc async fn get_tip_info(&self, _request: Request<()>) -> Result, RpcStatus> { let state_machine = self.state_machine(); let status_watch = state_machine.get_status_info_watch(); - let is_synced = match (*status_watch.borrow()).state_info { + let is_synced = match status_watch.borrow().state_info { StateInfo::Listening(li) => li.is_synced(), _ => false, }; @@ -325,4 +328,16 @@ impl BaseNodeWalletService for BaseNodeWalletRpc is_synced, })) } + + async fn get_header(&self, request: Request) -> Result, RpcStatus> { + let height = request.into_message(); + let header = self + .db() + .fetch_header(height) + .await + .map_err(RpcStatus::log_internal_error(LOG_TARGET))? + .ok_or_else(|| RpcStatus::not_found(format!("Header not found at height {}", height)))?; + + Ok(Response::new(header.into())) + } } diff --git a/base_layer/core/src/base_node/service/service.rs b/base_layer/core/src/base_node/service/service.rs index cfa56ec9af..db96b6ed9e 100644 --- a/base_layer/core/src/base_node/service/service.rs +++ b/base_layer/core/src/base_node/service/service.rs @@ -544,7 +544,7 @@ async fn handle_outbound_request( Request::FetchBlocksWithUtxos(_) => { trace!( target: LOG_TARGET, - "Timeout for service request ({}) at {:?}", + "Timeout for service request FetchBlocks... 
({}) set at {:?}", request_key, config.fetch_blocks_timeout ); @@ -553,7 +553,7 @@ async fn handle_outbound_request( Request::FetchMatchingUtxos(_) => { trace!( target: LOG_TARGET, - "Timeout for service request ({}) at {:?}", + "Timeout for service request FetchMatchingUtxos ({}) set at {:?}", request_key, config.fetch_utxos_timeout ); @@ -562,7 +562,7 @@ async fn handle_outbound_request( _ => { trace!( target: LOG_TARGET, - "Timeout for service request ({}) at {:?}", + "Timeout for service request ... ({}) set at {:?}", request_key, config.service_request_timeout ); @@ -638,7 +638,7 @@ async fn handle_request_timeout( let _ = reply_tx.send(reply_msg.map_err(|e| { error!( target: LOG_TARGET, - "Failed to send outbound request (request key: {}): {:?}", &request_key, e + "Failed to process outbound request (request key: {}): {:?}", &request_key, e ); e })); diff --git a/base_layer/core/src/base_node/state_machine_service/states/block_sync.rs b/base_layer/core/src/base_node/state_machine_service/states/block_sync.rs index 89960740c5..689c9a8316 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/block_sync.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/block_sync.rs @@ -58,6 +58,7 @@ impl BlockSync { shared: &mut BaseNodeStateMachine, ) -> StateEvent { let mut synchronizer = BlockSynchronizer::new( + shared.config.block_sync_config.clone(), shared.db.clone(), shared.connectivity.clone(), self.sync_peer.take(), @@ -65,8 +66,12 @@ impl BlockSync { ); let status_event_sender = shared.status_event_sender.clone(); - let local_nci = shared.local_node_interface.clone(); let bootstrapped = shared.is_bootstrapped(); + let _ = status_event_sender.broadcast(StatusInfo { + bootstrapped, + state_info: StateInfo::BlockSyncStarting, + }); + let local_nci = shared.local_node_interface.clone(); synchronizer.on_progress(move |block, remote_tip_height, sync_peers| { let local_height = block.height(); 
local_nci.publish_block_event(BlockEvent::ValidBlockAdded( @@ -98,7 +103,7 @@ impl BlockSync { StateEvent::BlocksSynchronized }, Err(err) => { - debug!(target: LOG_TARGET, "Block sync failed: {}", err); + warn!(target: LOG_TARGET, "Block sync failed: {}", err); StateEvent::BlockSyncFailed }, } diff --git a/base_layer/core/src/base_node/state_machine_service/states/events_and_states.rs b/base_layer/core/src/base_node/state_machine_service/states/events_and_states.rs index 8c65886792..4e864624bc 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/events_and_states.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/events_and_states.rs @@ -162,6 +162,7 @@ pub enum StateInfo { StartUp, HeaderSync(BlockSyncInfo), HorizonSync(HorizonSyncInfo), + BlockSyncStarting, BlockSync(BlockSyncInfo), Listening(ListeningInfo), } @@ -193,12 +194,17 @@ impl StateInfo { HorizonSyncStatus::Finalizing => "Finalizing horizon sync".to_string(), }, Self::BlockSync(info) => format!( - "Syncing blocks: {}/{} ({:.0}%)", + "Syncing blocks with {}: {}/{} ({:.0}%) ", + info.sync_peers + .first() + .map(|s| s.short_str()) + .unwrap_or_else(|| "".to_string()), info.local_height, info.tip_height, info.local_height as f64 / info.tip_height as f64 * 100.0 ), Self::Listening(_) => "Listening".to_string(), + Self::BlockSyncStarting => "Starting block sync".to_string(), } } @@ -212,7 +218,7 @@ impl StateInfo { pub fn is_synced(&self) -> bool { use StateInfo::*; match self { - StartUp | HeaderSync(_) | HorizonSync(_) | BlockSync(_) => false, + StartUp | HeaderSync(_) | HorizonSync(_) | BlockSync(_) | BlockSyncStarting => false, Listening(info) => info.is_synced(), } } @@ -226,6 +232,7 @@ impl Display for StateInfo { Self::HorizonSync(info) => write!(f, "Synchronizing horizon state: {}", info), Self::BlockSync(info) => write!(f, "Synchronizing blocks: {}", info), Self::Listening(info) => write!(f, "Listening: {}", info), + Self::BlockSyncStarting => write!(f, 
"Synchronizing blocks: Starting"), } } } diff --git a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs index c83f02d65f..8347bcf521 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/horizon_state_sync/horizon_state_synchronization.rs @@ -93,8 +93,8 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { ); let header = self.db().fetch_header(self.horizon_sync_height).await?.ok_or_else(|| { ChainStorageError::ValueNotFound { - entity: "Header".to_string(), - field: "height".to_string(), + entity: "Header", + field: "height", value: self.horizon_sync_height.to_string(), } })?; @@ -538,6 +538,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { .await? 
.into_bitmap(), ); + let expected_prev_best_block = self.shared.db.get_chain_metadata().await?.best_block().clone(); for h in 0..=header.height() { let curr_header = self.db().fetch_chain_header(h).await?; @@ -622,6 +623,7 @@ impl<'a, B: BlockchainBackend + 'static> HorizonStateSynchronization<'a, B> { header.height(), header.hash().clone(), header.accumulated_data().total_accumulated_difficulty, + expected_prev_best_block, ) .set_pruned_height(header.height(), pruned_kernel_sum, pruned_utxo_sum) .commit() diff --git a/base_layer/core/src/base_node/state_machine_service/states/listening.rs b/base_layer/core/src/base_node/state_machine_service/states/listening.rs index e484dd5f3c..0ea8157568 100644 --- a/base_layer/core/src/base_node/state_machine_service/states/listening.rs +++ b/base_layer/core/src/base_node/state_machine_service/states/listening.rs @@ -152,8 +152,9 @@ impl Listening { ); if !self.is_synced { + debug!(target: LOG_TARGET, "Initial sync achieved"); self.is_synced = true; - shared.set_state_info(StateInfo::Listening(ListeningInfo::new(self.is_synced))); + shared.set_state_info(StateInfo::Listening(ListeningInfo::new(true))); } continue; } @@ -222,7 +223,7 @@ impl Listening { impl From for Listening { fn from(_: Waiting) -> Self { - Default::default() + Self { is_synced: false } } } diff --git a/base_layer/core/src/base_node/sync/block_sync/error.rs b/base_layer/core/src/base_node/sync/block_sync/error.rs index 297d700cf4..544a6644c3 100644 --- a/base_layer/core/src/base_node/sync/block_sync/error.rs +++ b/base_layer/core/src/base_node/sync/block_sync/error.rs @@ -20,7 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use crate::{chain_storage::ChainStorageError, proof_of_work::PowError, validation::ValidationError}; +use crate::{chain_storage::ChainStorageError, validation::ValidationError}; use tari_comms::{ connectivity::ConnectivityError, protocol::rpc::{RpcError, RpcStatus}, @@ -42,10 +42,8 @@ pub enum BlockSyncError { ConnectivityError(#[from] ConnectivityError), #[error("No sync peers available")] NoSyncPeers, - #[error("Error fetching PoW: {0}")] - PowError(#[from] PowError), - //#[error("Expected to find header at height {0} however the header did not exist")] - // ExpectedHeaderNotFound(u64), #[error("Block validation failed: {0}")] ValidationError(#[from] ValidationError), + #[error("Failed to ban peer: {0}")] + FailedToBan(ConnectivityError), } diff --git a/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs b/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs index 9becd727fa..b5af652d87 100644 --- a/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs +++ b/base_layer/core/src/base_node/sync/block_sync/synchronizer.rs @@ -22,7 +22,10 @@ use super::error::BlockSyncError; use crate::{ - base_node::sync::{hooks::Hooks, rpc}, + base_node::{ + sync::{hooks::Hooks, rpc}, + BlockSyncConfig, + }, chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend, ChainBlock}, proto::base_node::SyncBlocksRequest, tari_utilities::{hex::Hex, Hashable}, @@ -43,10 +46,12 @@ use tari_comms::{ PeerConnection, }; use tokio::task; +use tracing; const LOG_TARGET: &str = "c::bn::block_sync"; pub struct BlockSynchronizer { + config: BlockSyncConfig, db: AsyncBlockchainDb, connectivity: ConnectivityRequester, sync_peer: Option, @@ -56,12 +61,14 @@ pub struct BlockSynchronizer { impl BlockSynchronizer { pub fn new( + config: BlockSyncConfig, db: AsyncBlockchainDb, connectivity: ConnectivityRequester, sync_peer: Option, block_validator: Arc>, ) -> Self { Self { + config, db, connectivity, sync_peer, @@ -80,6 +87,7 @@ impl BlockSynchronizer { 
self.hooks.add_on_complete_hook(hook); } + #[tracing::instrument(skip(self), err)] pub async fn synchronize(&mut self) -> Result<(), BlockSyncError> { let peer_conn = self.get_next_sync_peer().await?; let node_id = peer_conn.peer_node_id().clone(); @@ -87,10 +95,17 @@ impl BlockSynchronizer { target: LOG_TARGET, "Attempting to synchronize blocks with `{}`", node_id ); - self.attempt_block_sync(peer_conn).await?; - - self.db.cleanup_orphans().await?; - Ok(()) + match self.attempt_block_sync(peer_conn).await { + Ok(_) => { + self.db.cleanup_orphans().await?; + Ok(()) + }, + Err(err @ BlockSyncError::ValidationError(_)) | Err(err @ BlockSyncError::ReceivedInvalidBlockBody(_)) => { + self.ban_peer(node_id, &err).await?; + Err(err) + }, + Err(err) => Err(err), + } } async fn get_next_sync_peer(&mut self) -> Result { @@ -213,6 +228,7 @@ impl BlockSynchronizer { block.height(), header_hash, block.accumulated_data().total_accumulated_difficulty, + block.header().prev_hash.clone(), ) .commit() .await?; @@ -236,17 +252,6 @@ impl BlockSynchronizer { } if let Some(block) = current_block { - // Update metadata to last tip header - let header = &block.header(); - let height = header.height; - let best_block = header.hash(); - let accumulated_difficulty = block.accumulated_data().total_accumulated_difficulty; - self.db - .write_transaction() - .set_best_block(height, best_block.to_vec(), accumulated_difficulty) - .commit() - .await?; - self.hooks.call_on_complete_hooks(block); } @@ -267,4 +272,21 @@ impl BlockSynchronizer { .await .expect("block validator panicked") } + + async fn ban_peer(&mut self, node_id: NodeId, reason: T) -> Result<(), BlockSyncError> { + let reason = reason.to_string(); + if self.config.sync_peers.contains(&node_id) { + debug!( + target: LOG_TARGET, + "Not banning peer that is allowlisted for sync. 
Ban reason = {}", reason + ); + return Ok(()); + } + warn!(target: LOG_TARGET, "Banned sync peer because {}", reason); + self.connectivity + .ban_peer_until(node_id, self.config.ban_period, reason) + .await + .map_err(BlockSyncError::FailedToBan)?; + Ok(()) + } } diff --git a/base_layer/core/src/base_node/sync/config.rs b/base_layer/core/src/base_node/sync/config.rs index 2a4a379bc2..89965bb806 100644 --- a/base_layer/core/src/base_node/sync/config.rs +++ b/base_layer/core/src/base_node/sync/config.rs @@ -25,9 +25,6 @@ use tari_comms::peer_manager::NodeId; #[derive(Debug, Clone)] pub struct BlockSyncConfig { - pub max_sync_peers: usize, - pub num_tip_hashes: usize, - pub num_proof_headers: usize, pub ban_period: Duration, pub short_ban_period: Duration, pub sync_peers: Vec, @@ -36,9 +33,6 @@ pub struct BlockSyncConfig { impl Default for BlockSyncConfig { fn default() -> Self { Self { - max_sync_peers: 10, - num_tip_hashes: 500, - num_proof_headers: 100, ban_period: Duration::from_secs(30 * 60), short_ban_period: Duration::from_secs(60), sync_peers: Default::default(), diff --git a/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs b/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs index b5c9291f18..b7483ff70e 100644 --- a/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs +++ b/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs @@ -48,6 +48,7 @@ use tari_comms::{ protocol::rpc::{RpcError, RpcHandshakeError}, PeerConnection, }; +use tracing; const LOG_TARGET: &str = "c::bn::header_sync"; @@ -110,28 +111,28 @@ impl<'a, B: BlockchainBackend + 'static> HeaderSynchronizer<'a, B> { Ok(()) => return Ok(peer_conn), // Try another peer Err(err @ BlockHeaderSyncError::NotInSync) => { - debug!(target: LOG_TARGET, "{}", err); + warn!(target: LOG_TARGET, "{}", err); }, Err(err @ BlockHeaderSyncError::RpcError(RpcError::HandshakeError(RpcHandshakeError::TimedOut))) => { - debug!(target: LOG_TARGET, "{}", err); + warn!(target: 
LOG_TARGET, "{}", err); self.ban_peer_short(node_id, BanReason::RpcNegotiationTimedOut).await?; }, Err(BlockHeaderSyncError::ValidationFailed(err)) => { - debug!(target: LOG_TARGET, "Block header validation failed: {}", err); + warn!(target: LOG_TARGET, "Block header validation failed: {}", err); self.ban_peer_long(node_id, err.into()).await?; }, Err(BlockHeaderSyncError::ChainSplitNotFound(peer)) => { - debug!(target: LOG_TARGET, "Chain split not found for peer {}.", peer); + warn!(target: LOG_TARGET, "Chain split not found for peer {}.", peer); self.ban_peer_long(peer, BanReason::ChainSplitNotFound).await?; }, Err(err @ BlockHeaderSyncError::InvalidBlockHeight { .. }) => { - debug!(target: LOG_TARGET, "{}", err); + warn!(target: LOG_TARGET, "{}", err); self.ban_peer_long(node_id, BanReason::GeneralHeaderSyncFailure(err)) .await?; }, Err(err) => { - debug!( + error!( target: LOG_TARGET, "Failed to synchronize headers from peer `{}`: {}", node_id, err ); @@ -253,6 +254,7 @@ impl<'a, B: BlockchainBackend + 'static> HeaderSynchronizer<'a, B> { Ok(()) } + #[tracing::instrument(skip(self, conn), err)] async fn attempt_sync(&mut self, mut conn: PeerConnection) -> Result<(), BlockHeaderSyncError> { let peer = conn.peer_node_id().clone(); let mut client = conn.connect_rpc::().await?; @@ -480,8 +482,8 @@ impl<'a, B: BlockchainBackend + 'static> HeaderSynchronizer<'a, B> { ) -> Result<(), BlockHeaderSyncError> { const COMMIT_EVERY_N_HEADERS: usize = 1000; - // Peer returned less than the max headers. This indicates that there are no further headers to request. - if self.header_validator.valid_headers().len() < NUM_INITIAL_HEADERS_TO_REQUEST as usize { + // Peer returned no more than the max headers. This indicates that there are no further headers to request. 
+ if self.header_validator.valid_headers().len() <= NUM_INITIAL_HEADERS_TO_REQUEST as usize { debug!(target: LOG_TARGET, "No further headers to download"); if !self.pending_chain_has_higher_pow(&split_info.local_tip_header)? { return Err(BlockHeaderSyncError::WeakerChain); diff --git a/base_layer/core/src/base_node/sync/header_sync/validator.rs b/base_layer/core/src/base_node/sync/header_sync/validator.rs index 55c6dd4d9e..aff52b80fd 100644 --- a/base_layer/core/src/base_node/sync/header_sync/validator.rs +++ b/base_layer/core/src/base_node/sync/header_sync/validator.rs @@ -92,8 +92,8 @@ impl BlockHeaderSyncValidator { .fetch_header_accumulated_data(start_hash.clone()) .await? .ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "BlockHeaderAccumulatedData".to_string(), - field: "hash".to_string(), + entity: "BlockHeaderAccumulatedData", + field: "hash", value: start_hash.to_hex(), })?; debug!( diff --git a/base_layer/core/src/base_node/sync/rpc/mod.rs b/base_layer/core/src/base_node/sync/rpc/mod.rs index 4762fc8afc..b25d7eb459 100644 --- a/base_layer/core/src/base_node/sync/rpc/mod.rs +++ b/base_layer/core/src/base_node/sync/rpc/mod.rs @@ -22,6 +22,9 @@ #[cfg(feature = "base_node")] mod service; +#[cfg(feature = "base_node")] +mod sync_utxos_task; + #[cfg(feature = "base_node")] pub use service::BaseNodeSyncRpcService; diff --git a/base_layer/core/src/base_node/sync/rpc/service.rs b/base_layer/core/src/base_node/sync/rpc/service.rs index 73a7d38ec9..776c2b7e01 100644 --- a/base_layer/core/src/base_node/sync/rpc/service.rs +++ b/base_layer/core/src/base_node/sync/rpc/service.rs @@ -21,9 +21,8 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
use crate::{ - base_node::sync::rpc::BaseNodeSyncService, + base_node::sync::rpc::{sync_utxos_task::SyncUtxosTask, BaseNodeSyncService}, chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend, OrNotFound}, - crypto::tari_utilities::Hashable, iterators::NonOverlappingIntegerPairIter, proto, proto::base_node::{ @@ -32,17 +31,17 @@ use crate::{ SyncBlocksRequest, SyncHeadersRequest, SyncKernelsRequest, - SyncUtxo, SyncUtxosRequest, SyncUtxosResponse, }, }; use futures::{channel::mpsc, stream, SinkExt}; use log::*; -use std::{cmp, sync::Arc, time::Instant}; +use std::cmp; use tari_comms::protocol::rpc::{Request, Response, RpcStatus, Streaming}; use tari_crypto::tari_utilities::hex::Hex; use tokio::task; +use tracing::{instrument, span, Instrument, Level}; const LOG_TARGET: &str = "c::base_node::sync_rpc"; @@ -63,6 +62,7 @@ impl BaseNodeSyncRpcService { #[tari_comms::async_trait] impl BaseNodeSyncService for BaseNodeSyncRpcService { + #[instrument(name = "sync_rpc::sync_blocks", skip(self), err)] async fn sync_blocks( &self, request: Request, @@ -118,56 +118,61 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ const BATCH_SIZE: usize = 4; let (mut tx, rx) = mpsc::channel(BATCH_SIZE); - task::spawn(async move { - let iter = NonOverlappingIntegerPairIter::new(start, end + 1, BATCH_SIZE); - for (start, end) in iter { - if tx.is_closed() { - break; - } - - debug!(target: LOG_TARGET, "Sending blocks #{} - #{}", start, end); - let blocks = db - .fetch_blocks(start..=end) - .await - .map_err(RpcStatus::log_internal_error(LOG_TARGET)); - - match blocks { - Ok(blocks) if blocks.is_empty() => { + let span = span!(Level::TRACE, "sync_rpc::block_sync::inner_worker"); + task::spawn( + async move { + let iter = NonOverlappingIntegerPairIter::new(start, end + 1, BATCH_SIZE); + for (start, end) in iter { + if tx.is_closed() { break; - }, - Ok(blocks) => { - let mut blocks = stream::iter( - blocks - .into_iter() - .map(|hb| 
hb.try_into_block().map_err(RpcStatus::log_internal_error(LOG_TARGET))) - .map(|block| match block { - Ok(b) => Ok(proto::base_node::BlockBodyResponse::from(b)), - Err(err) => Err(err), - }) - .map(Ok), - ); + } - // Ensure task stops if the peer prematurely stops their RPC session - if tx.send_all(&mut blocks).await.is_err() { + debug!(target: LOG_TARGET, "Sending blocks #{} - #{}", start, end); + let blocks = db + .fetch_blocks(start..=end) + .await + .map_err(RpcStatus::log_internal_error(LOG_TARGET)); + + match blocks { + Ok(blocks) if blocks.is_empty() => { break; - } - }, - Err(err) => { - let _ = tx.send(Err(err)).await; - break; - }, + }, + Ok(blocks) => { + let mut blocks = stream::iter( + blocks + .into_iter() + .map(|hb| hb.try_into_block().map_err(RpcStatus::log_internal_error(LOG_TARGET))) + .map(|block| match block { + Ok(b) => Ok(proto::base_node::BlockBodyResponse::from(b)), + Err(err) => Err(err), + }) + .map(Ok), + ); + + // Ensure task stops if the peer prematurely stops their RPC session + if tx.send_all(&mut blocks).await.is_err() { + break; + } + }, + Err(err) => { + let _ = tx.send(Err(err)).await; + break; + }, + } } - } - debug!( - target: LOG_TARGET, - "Block sync round complete for peer `{}`.", peer_node_id, - ); - }); + debug!( + target: LOG_TARGET, + "Block sync round complete for peer `{}`.", peer_node_id, + ); + } + .instrument(span), + ); Ok(Streaming::new(rx)) } + #[instrument(name = "sync_rpc::sync_headers", skip(self), err)] async fn sync_headers( &self, request: Request, @@ -205,50 +210,55 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ ); let (mut tx, rx) = mpsc::channel(chunk_size); - task::spawn(async move { - let iter = NonOverlappingIntegerPairIter::new( - start_header.height + 1, - start_header.height.saturating_add(count).saturating_add(1), - chunk_size, - ); - for (start, end) in iter { - if tx.is_closed() { - break; - } - debug!(target: LOG_TARGET, "Sending headers #{} - #{}", start, end); - let headers = db - 
.fetch_headers(start..=end) - .await - .map_err(RpcStatus::log_internal_error(LOG_TARGET)); - - match headers { - Ok(headers) if headers.is_empty() => { + let span = span!(Level::TRACE, "sync_rpc::sync_headers::inner_worker"); + task::spawn( + async move { + let iter = NonOverlappingIntegerPairIter::new( + start_header.height + 1, + start_header.height.saturating_add(count).saturating_add(1), + chunk_size, + ); + for (start, end) in iter { + if tx.is_closed() { break; - }, - Ok(headers) => { - let mut headers = - stream::iter(headers.into_iter().map(proto::core::BlockHeader::from).map(Ok).map(Ok)); - // Ensure task stops if the peer prematurely stops their RPC session - if tx.send_all(&mut headers).await.is_err() { + } + debug!(target: LOG_TARGET, "Sending headers #{} - #{}", start, end); + let headers = db + .fetch_headers(start..=end) + .await + .map_err(RpcStatus::log_internal_error(LOG_TARGET)); + + match headers { + Ok(headers) if headers.is_empty() => { break; - } - }, - Err(err) => { - let _ = tx.send(Err(err)).await; - break; - }, + }, + Ok(headers) => { + let mut headers = + stream::iter(headers.into_iter().map(proto::core::BlockHeader::from).map(Ok).map(Ok)); + // Ensure task stops if the peer prematurely stops their RPC session + if tx.send_all(&mut headers).await.is_err() { + break; + } + }, + Err(err) => { + let _ = tx.send(Err(err)).await; + break; + }, + } } - } - debug!( - target: LOG_TARGET, - "Header sync round complete for peer `{}`.", peer_node_id, - ); - }); + debug!( + target: LOG_TARGET, + "Header sync round complete for peer `{}`.", peer_node_id, + ); + } + .instrument(span), + ); Ok(Streaming::new(rx)) } + #[instrument(skip(self), err)] async fn get_header_by_height( &self, request: Request, @@ -264,6 +274,7 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ Ok(Response::new(header.into())) } + #[instrument(skip(self), err)] async fn find_chain_split( &self, request: Request, @@ -326,6 +337,7 @@ impl BaseNodeSyncService for 
BaseNodeSyncRpcServ } } + #[instrument(skip(self), err)] async fn get_chain_metadata(&self, _: Request<()>) -> Result, RpcStatus> { let chain_metadata = self .db() @@ -335,6 +347,7 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ Ok(Response::new(chain_metadata.into())) } + #[instrument(skip(self), err)] async fn sync_kernels( &self, request: Request, @@ -403,6 +416,7 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ Ok(Streaming::new(rx)) } + #[instrument(skip(self), err)] async fn sync_utxos(&self, request: Request) -> Result, RpcStatus> { let req = request.message(); let peer = request.context().peer_node_id(); @@ -415,177 +429,6 @@ impl BaseNodeSyncService for BaseNodeSyncRpcServ req.include_deleted_bitmaps ); - struct SyncUtxosTask { - db: AsyncBlockchainDb, - request: SyncUtxosRequest, - } - - impl SyncUtxosTask - where B: BlockchainBackend + 'static - { - pub fn new(db: AsyncBlockchainDb, request: SyncUtxosRequest) -> Self { - Self { db, request } - } - - pub async fn run(self, mut tx: mpsc::Sender>) { - if let Err(err) = self.start_streaming(&mut tx).await { - let _ = tx.send(Err(err)).await; - } - } - - async fn start_streaming( - &self, - tx: &mut mpsc::Sender>, - ) -> Result<(), RpcStatus> { - let end_header = self - .db - .fetch_header_by_block_hash(self.request.end_header_hash.clone()) - .await - .map_err(RpcStatus::log_internal_error(LOG_TARGET))? 
- .ok_or_else(|| { - RpcStatus::not_found(format!( - "End header hash {} is was not found", - self.request.end_header_hash.to_hex() - )) - })?; - - if self.request.start > end_header.output_mmr_size - 1 { - return Err(RpcStatus::bad_request(format!( - "start index {} cannot be greater than the end header's output MMR size ({})", - self.request.start, end_header.output_mmr_size - ))); - } - - let prev_header = self - .db - .fetch_header_containing_utxo_mmr(self.request.start) - .await - .map_err(RpcStatus::log_internal_error(LOG_TARGET))?; - let (mut prev_header, _) = prev_header.into_parts(); - - if prev_header.height > end_header.height { - return Err(RpcStatus::bad_request("start index is greater than end index")); - } - // we need to construct a temp bitmap for the height the client requested - let bitmap = self - .db - .fetch_complete_deleted_bitmap_at(end_header.hash()) - .await - .map_err(|_| RpcStatus::not_found("Could not get tip deleted bitmap"))? - .into_bitmap(); - - let bitmap = Arc::new(bitmap); - loop { - let timer = Instant::now(); - if prev_header.height == end_header.height { - break; - } - - let current_header = self - .db - .fetch_header(prev_header.height + 1) - .await - .map_err(RpcStatus::log_internal_error(LOG_TARGET))? 
- .ok_or_else(|| { - RpcStatus::general(format!( - "Potential data consistency issue: header {} not found", - prev_header.height + 1 - )) - })?; - - debug!( - target: LOG_TARGET, - "previous header = {} ({}) current header = {} ({})", - prev_header.height, - prev_header.hash().to_hex(), - current_header.height, - current_header.hash().to_hex() - ); - - let start = cmp::max(self.request.start, prev_header.output_mmr_size); - let end = current_header.output_mmr_size - 1; - - if tx.is_closed() { - debug!(target: LOG_TARGET, "Exiting sync_utxos early because client has gone",); - break; - } - - debug!( - target: LOG_TARGET, - "Streaming UTXOs {}-{} ({}) for block #{}", - start, - end, - end.saturating_sub(start).saturating_add(1), - current_header.height - ); - let (utxos, deleted_diff) = self - .db - .fetch_utxos_by_mmr_position(start, end, bitmap.clone()) - .await - .map_err(RpcStatus::log_internal_error(LOG_TARGET))?; - trace!( - target: LOG_TARGET, - "Loaded {} UTXO(s) and |deleted_diff| = {}", - utxos.len(), - deleted_diff.cardinality(), - ); - let mut utxos = stream::iter( - utxos - .into_iter() - .enumerate() - // Only include pruned UTXOs if include_pruned_utxos is true - .filter(|(_, utxo)| self.request.include_pruned_utxos || !utxo.is_pruned()) - .map(|(i, utxo)| { - SyncUtxosResponse { - utxo_or_deleted: Some(proto::base_node::sync_utxos_response::UtxoOrDeleted::Utxo( - SyncUtxo::from(utxo) - )), - mmr_index: start + i as u64, - } - }) - .map(Ok) - .map(Ok), - ); - - // Ensure task stops if the peer prematurely stops their RPC session - if tx.send_all(&mut utxos).await.is_err() { - break; - } - - if self.request.include_deleted_bitmaps { - let bitmaps = SyncUtxosResponse { - utxo_or_deleted: Some(proto::base_node::sync_utxos_response::UtxoOrDeleted::DeletedDiff( - deleted_diff.serialize(), - )), - mmr_index: 0, - }; - - if tx.send(Ok(bitmaps)).await.is_err() { - break; - } - } - debug!( - target: LOG_TARGET, - "Streamed utxos {} to {} in {:.2?} (including 
stream backpressure)", - start, - end, - timer.elapsed() - ); - - prev_header = current_header; - } - - debug!( - target: LOG_TARGET, - "UTXO sync completed to UTXO {} (Header hash = {})", - prev_header.output_mmr_size, - prev_header.hash().to_hex() - ); - - Ok(()) - } - } - let (tx, rx) = mpsc::channel(200); task::spawn(SyncUtxosTask::new(self.db(), request.into_message()).run(tx)); diff --git a/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs b/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs new file mode 100644 index 0000000000..ef10b41c2f --- /dev/null +++ b/base_layer/core/src/base_node/sync/rpc/sync_utxos_task.rs @@ -0,0 +1,205 @@ +// Copyright 2021. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use crate::{ + chain_storage::{async_db::AsyncBlockchainDb, BlockchainBackend}, + proto, + proto::base_node::{SyncUtxo, SyncUtxosRequest, SyncUtxosResponse}, +}; +use futures::{channel::mpsc, stream, SinkExt}; +use log::*; +use std::{cmp, sync::Arc, time::Instant}; +use tari_comms::protocol::rpc::RpcStatus; +use tari_crypto::tari_utilities::{hex::Hex, Hashable}; + +const LOG_TARGET: &str = "c::base_node::sync_rpc::sync_utxo_task"; + +pub(crate) struct SyncUtxosTask { + db: AsyncBlockchainDb, + request: SyncUtxosRequest, +} + +impl SyncUtxosTask +where B: BlockchainBackend + 'static +{ + pub(crate) fn new(db: AsyncBlockchainDb, request: SyncUtxosRequest) -> Self { + Self { db, request } + } + + pub(crate) async fn run(self, mut tx: mpsc::Sender>) { + if let Err(err) = self.start_streaming(&mut tx).await { + let _ = tx.send(Err(err)).await; + } + } + + async fn start_streaming( + &self, + tx: &mut mpsc::Sender>, + ) -> Result<(), RpcStatus> { + let end_header = self + .db + .fetch_header_by_block_hash(self.request.end_header_hash.clone()) + .await + .map_err(RpcStatus::log_internal_error(LOG_TARGET))? 
+ .ok_or_else(|| + RpcStatus::not_found(format!( + "End header hash {} was not found", + self.request.end_header_hash.to_hex() + )) + })?; + + if self.request.start > end_header.output_mmr_size - 1 { + return Err(RpcStatus::bad_request(format!( + "start index {} cannot be greater than the end header's output MMR size ({})", + self.request.start, end_header.output_mmr_size + ))); + } + + let prev_header = self + .db + .fetch_header_containing_utxo_mmr(self.request.start) + .await + .map_err(RpcStatus::log_internal_error(LOG_TARGET))?; + let (mut prev_header, _) = prev_header.into_parts(); + + if prev_header.height > end_header.height { + return Err(RpcStatus::bad_request("start index is greater than end index")); + } + // we need to construct a temp bitmap for the height the client requested + let bitmap = self + .db + .fetch_complete_deleted_bitmap_at(end_header.hash()) + .await + .map_err(|_| RpcStatus::not_found("Could not get tip deleted bitmap"))? + .into_bitmap(); + + let bitmap = Arc::new(bitmap); + loop { + let timer = Instant::now(); + if prev_header.height == end_header.height { + break; + } + + let current_header = self + .db + .fetch_header(prev_header.height + 1) + .await + .map_err(RpcStatus::log_internal_error(LOG_TARGET))?
+ .ok_or_else(|| { + RpcStatus::general(format!( + "Potential data consistency issue: header {} not found", + prev_header.height + 1 + )) + })?; + + debug!( + target: LOG_TARGET, + "previous header = {} ({}) current header = {} ({})", + prev_header.height, + prev_header.hash().to_hex(), + current_header.height, + current_header.hash().to_hex() + ); + + let start = cmp::max(self.request.start, prev_header.output_mmr_size); + let end = current_header.output_mmr_size - 1; + + if tx.is_closed() { + debug!(target: LOG_TARGET, "Exiting sync_utxos early because client has gone",); + break; + } + + debug!( + target: LOG_TARGET, + "Streaming UTXOs {}-{} ({}) for block #{}", + start, + end, + end.saturating_sub(start).saturating_add(1), + current_header.height + ); + let (utxos, deleted_diff) = self + .db + .fetch_utxos_by_mmr_position(start, end, bitmap.clone()) + .await + .map_err(RpcStatus::log_internal_error(LOG_TARGET))?; + trace!( + target: LOG_TARGET, + "Loaded {} UTXO(s) and |deleted_diff| = {}", + utxos.len(), + deleted_diff.cardinality(), + ); + let mut utxos = stream::iter( + utxos + .into_iter() + .enumerate() + // Only include pruned UTXOs if include_pruned_utxos is true + .filter(|(_, utxo)| self.request.include_pruned_utxos || !utxo.is_pruned()) + .map(|(i, utxo)| { + SyncUtxosResponse { + utxo_or_deleted: Some(proto::base_node::sync_utxos_response::UtxoOrDeleted::Utxo( + SyncUtxo::from(utxo) + )), + mmr_index: start + i as u64, + } + }) + .map(Ok) + .map(Ok), + ); + + // Ensure task stops if the peer prematurely stops their RPC session + if tx.send_all(&mut utxos).await.is_err() { + break; + } + + if self.request.include_deleted_bitmaps { + let bitmaps = SyncUtxosResponse { + utxo_or_deleted: Some(proto::base_node::sync_utxos_response::UtxoOrDeleted::DeletedDiff( + deleted_diff.serialize(), + )), + mmr_index: 0, + }; + + if tx.send(Ok(bitmaps)).await.is_err() { + break; + } + } + debug!( + target: LOG_TARGET, + "Streamed utxos {} to {} in {:.2?} (including 
stream backpressure)", + start, + end, + timer.elapsed() + ); + + prev_header = current_header; + } + + debug!( + target: LOG_TARGET, + "UTXO sync completed to UTXO {} (Header hash = {})", + prev_header.output_mmr_size, + prev_header.hash().to_hex() + ); + + Ok(()) + } +} diff --git a/base_layer/core/src/chain_storage/async_db.rs b/base_layer/core/src/chain_storage/async_db.rs index ca258b2034..14e4f0e5cd 100644 --- a/base_layer/core/src/chain_storage/async_db.rs +++ b/base_layer/core/src/chain_storage/async_db.rs @@ -139,9 +139,9 @@ impl AsyncBlockchainDb { make_async_fn!(fetch_horizon_data() -> Option, "fetch_horizon_data"); //---------------------------------- TXO --------------------------------------------// - make_async_fn!(fetch_utxo(hash: HashOutput) -> Option, "fetch_utxo"); + make_async_fn!(fetch_utxo(hash: HashOutput) -> Option, "fetch_utxo"); - make_async_fn!(fetch_utxos(hashes: Vec) -> Vec>, "fetch_utxos"); + make_async_fn!(fetch_utxos(hashes: Vec) -> Vec>, "fetch_utxos"); make_async_fn!(fetch_utxos_by_mmr_position(start: u64, end: u64, deleted: Arc) -> (Vec, Bitmap), "fetch_utxos_by_mmr_position"); @@ -253,8 +253,15 @@ impl<'a, B: BlockchainBackend + 'static> AsyncDbTransaction<'a, B> { } } - pub fn set_best_block(&mut self, height: u64, hash: HashOutput, accumulated_data: u128) -> &mut Self { - self.transaction.set_best_block(height, hash, accumulated_data); + pub fn set_best_block( + &mut self, + height: u64, + hash: HashOutput, + accumulated_data: u128, + expected_prev_best_block: HashOutput, + ) -> &mut Self { + self.transaction + .set_best_block(height, hash, accumulated_data, expected_prev_best_block); self } diff --git a/base_layer/core/src/chain_storage/blockchain_backend.rs b/base_layer/core/src/chain_storage/blockchain_backend.rs index f4f842a718..f5d3b6ad36 100644 --- a/base_layer/core/src/chain_storage/blockchain_backend.rs +++ b/base_layer/core/src/chain_storage/blockchain_backend.rs @@ -15,8 +15,8 @@ use crate::{ MmrTree, }, 
transactions::{ - transaction::{TransactionInput, TransactionKernel, TransactionOutput}, - types::{HashOutput, Signature}, + transaction::{TransactionInput, TransactionKernel}, + types::{Commitment, HashOutput, Signature}, }, }; use croaring::Bitmap; @@ -103,11 +103,14 @@ pub trait BlockchainBackend: Send + Sync { ) -> Result<(Vec, Bitmap), ChainStorageError>; /// Fetch a specific output. Returns the output and the leaf index in the output MMR - fn fetch_output( - &self, - output_hash: &HashOutput, - ) -> Result, ChainStorageError>; + fn fetch_output(&self, output_hash: &HashOutput) -> Result, ChainStorageError>; + /// Returns the unspent TransactionOutput output that matches the given commitment if it exists in the current UTXO + /// set, otherwise None is returned. + fn fetch_unspent_output_hash_by_commitment( + &self, + commitment: &Commitment, + ) -> Result, ChainStorageError>; /// Fetch all outputs in a block fn fetch_outputs_in_block(&self, header_hash: &HashOutput) -> Result, ChainStorageError>; diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index 84812820d5..867e7877e1 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -47,7 +47,7 @@ use crate::{ proof_of_work::{monero_rx::MoneroPowData, PowAlgorithm, TargetDifficultyWindow}, tari_utilities::epoch_time::EpochTime, transactions::{ - transaction::{TransactionKernel, TransactionOutput}, + transaction::TransactionKernel, types::{Commitment, HashDigest, HashOutput, Signature}, }, validation::{DifficultyCalculator, HeaderValidation, OrphanValidation, PostOrphanBodyValidation, ValidationError}, @@ -284,18 +284,23 @@ where B: BlockchainBackend } // Fetch the utxo - pub fn fetch_utxo(&self, hash: HashOutput) -> Result, ChainStorageError> { + pub fn fetch_utxo(&self, hash: HashOutput) -> Result, ChainStorageError> { let db = self.db_read_access()?; 
Ok(db.fetch_output(&hash)?.map(|(out, _index, _)| out)) } + pub fn fetch_unspent_output_by_commitment( + &self, + commitment: &Commitment, + ) -> Result, ChainStorageError> { + let db = self.db_read_access()?; + db.fetch_unspent_output_hash_by_commitment(commitment) + } + /// Return a list of matching utxos, with each being `None` if not found. If found, the transaction /// output, and a boolean indicating if the UTXO was spent as of the block hash specified or the tip if not /// specified. - pub fn fetch_utxos( - &self, - hashes: Vec, - ) -> Result>, ChainStorageError> { + pub fn fetch_utxos(&self, hashes: Vec) -> Result>, ChainStorageError> { let db = self.db_read_access()?; let deleted = db.fetch_deleted_bitmap()?; @@ -418,8 +423,8 @@ where B: BlockchainBackend let start_header = self.fetch_header_by_block_hash(start_hash.clone())? .ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "BlockHeader".to_string(), - field: "start_hash".to_string(), + entity: "BlockHeader", + field: "start_hash", value: start_hash.to_hex(), })?; let constants = self.consensus_manager.consensus_constants(start_header.height); @@ -497,8 +502,8 @@ where B: BlockchainBackend let accumulated_data = db.fetch_header_accumulated_data(&hash)? .ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "BlockHeaderAccumulatedData".to_string(), - field: "hash".to_string(), + entity: "BlockHeaderAccumulatedData", + field: "hash", value: hash.to_hex(), })?; @@ -563,8 +568,8 @@ where B: BlockchainBackend let db = self.db_read_access()?; db.fetch_block_accumulated_data(&at_hash)? 
.ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "BlockAccumulatedData".to_string(), - field: "at_hash".to_string(), + entity: "BlockAccumulatedData", + field: "at_hash", value: at_hash.to_hex(), }) } @@ -802,8 +807,8 @@ where B: BlockchainBackend if end > metadata.height_of_longest_chain() { return Err(ChainStorageError::ValueNotFound { - entity: "Block".to_string(), - field: "end height".to_string(), + entity: "Block", + field: "end height", value: end.to_string(), }); } @@ -943,8 +948,8 @@ pub fn calculate_mmr_roots(db: &T, block: &Block) -> Resul } = db .fetch_block_accumulated_data(&header.prev_hash)? .ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "BlockAccumulatedData".to_string(), - field: "header_hash".to_string(), + entity: "BlockAccumulatedData", + field: "header_hash", value: header.prev_hash.to_hex(), })?; @@ -975,8 +980,8 @@ pub fn calculate_mmr_roots(db: &T, block: &Block) -> Resul output_mmr .find_leaf_index(&output_hash)? .ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "UTXO".to_string(), - field: "hash".to_string(), + entity: "UTXO", + field: "hash", value: output_hash.to_hex(), })?; debug!( @@ -1128,9 +1133,10 @@ fn insert_block(txn: &mut DbTransaction, block: Arc) -> Result<(), C let height = block.height(); let accumulated_difficulty = block.accumulated_data().total_accumulated_difficulty; + let expected_prev_best_block = block.block().header.prev_hash.clone(); txn.insert_chain_header(block.to_chain_header()) .insert_block_body(block) - .set_best_block(height, block_hash, accumulated_difficulty); + .set_best_block(height, block_hash, accumulated_difficulty, expected_prev_best_block); Ok(()) } @@ -1246,8 +1252,8 @@ fn fetch_block_with_kernel( None => Ok(None), }, Err(_) => Err(ChainStorageError::ValueNotFound { - entity: "Kernel".to_string(), - field: "Excess sig".to_string(), + entity: "Kernel", + field: "Excess sig", value: excess_sig.get_signature().to_hex(), }), } @@ -1266,8 +1272,8 @@ fn 
fetch_block_with_utxo( None => Ok(None), }, Err(_) => Err(ChainStorageError::ValueNotFound { - entity: "Output".to_string(), - field: "Commitment".to_string(), + entity: "Output", + field: "Commitment", value: commitment.to_hex(), }), } @@ -1309,6 +1315,7 @@ fn rewind_to_height( // Delete headers let last_header_height = last_header.height; let metadata = db.fetch_chain_metadata()?; + let expected_block_hash = metadata.best_block().clone(); let last_block_height = metadata.height_of_longest_chain(); let steps_back = last_header_height .checked_sub(cmp::max(last_block_height, height)) @@ -1409,6 +1416,7 @@ fn rewind_to_height( chain_header.height(), chain_header.accumulated_data().hash.clone(), chain_header.accumulated_data().total_accumulated_difficulty, + expected_block_hash, ); db.write(txn)?; @@ -1420,12 +1428,11 @@ fn rewind_to_hash( block_hash: BlockHash, ) -> Result>, ChainStorageError> { let block_hash_hex = block_hash.to_hex(); - let target_header = - fetch_header_by_block_hash(&*db, block_hash)?.ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "BlockHeader".to_string(), - field: "block_hash".to_string(), - value: block_hash_hex, - })?; + let target_header = fetch_header_by_block_hash(&*db, block_hash)?.ok_or(ChainStorageError::ValueNotFound { + entity: "BlockHeader", + field: "block_hash", + value: block_hash_hex, + })?; rewind_to_height(db, target_header.height) } @@ -1556,8 +1563,8 @@ fn handle_possible_reorg( }, // We want a warning if the number of removed blocks is at least 2. "Chain reorg required from {} to {} (accum_diff:{}, hash:{}) to (accum_diff:{}, hash:{}). 
Number of \ blocks to remove: {}, to add: {}.", - tip_header.header(), - fork_header.header(), + tip_header.header().height, + fork_header.header().height, tip_header.accumulated_data().total_accumulated_difficulty, tip_header.accumulated_data().hash.to_hex(), fork_header.accumulated_data().total_accumulated_difficulty, @@ -1607,10 +1614,7 @@ fn reorganize_chain( let block_hash_hex = block.accumulated_data().hash.to_hex(); txn.delete_orphan(block.accumulated_data().hash.clone()); let chain_metadata = backend.fetch_chain_metadata()?; - let deleted_bitmap = backend.fetch_deleted_bitmap()?; - if let Err(e) = - block_validator.validate_body_for_valid_orphan(&block, backend, &chain_metadata, &deleted_bitmap) - { + if let Err(e) = block_validator.validate_body_for_valid_orphan(&block, backend, &chain_metadata) { warn!( target: LOG_TARGET, "Orphan block {} ({}) failed validation during chain reorg: {:?}", @@ -2658,7 +2662,7 @@ mod test { let prev_block = block_hashes .get(&from) .unwrap_or_else(|| panic!("Could not find block {}", from)); - let mut block = create_block(1, prev_block.height() + 1, vec![]); + let (mut block, _) = create_block(1, prev_block.height() + 1, vec![]); block.header.prev_hash = prev_block.hash().clone(); // Keep times constant in case we need a particular target difficulty diff --git a/base_layer/core/src/chain_storage/db_transaction.rs b/base_layer/core/src/chain_storage/db_transaction.rs index 99b1e42a06..dfe5947543 100644 --- a/base_layer/core/src/chain_storage/db_transaction.rs +++ b/base_layer/core/src/chain_storage/db_transaction.rs @@ -23,7 +23,7 @@ use crate::{ blocks::{Block, BlockHeader}, chain_storage::{error::ChainStorageError, ChainBlock, ChainHeader, MmrTree}, transactions::{ - transaction::{TransactionInput, TransactionKernel, TransactionOutput}, + transaction::{TransactionKernel, TransactionOutput}, types::{Commitment, HashOutput}, }, }; @@ -145,15 +145,6 @@ impl DbTransaction { self } - pub fn insert_input(&mut self, input: 
TransactionInput, header_hash: HashOutput, mmr_leaf_index: u32) -> &mut Self { - self.operations.push(WriteOperation::InsertInput { - header_hash, - input: Box::new(input), - mmr_position: mmr_leaf_index, - }); - self - } - pub fn update_pruned_hash_set( &mut self, mmr_tree: MmrTree, @@ -232,11 +223,18 @@ impl DbTransaction { self } - pub fn set_best_block(&mut self, height: u64, hash: HashOutput, accumulated_difficulty: u128) -> &mut Self { + pub fn set_best_block( + &mut self, + height: u64, + hash: HashOutput, + accumulated_difficulty: u128, + expected_prev_best_block: HashOutput, + ) -> &mut Self { self.operations.push(WriteOperation::SetBestBlock { height, hash, accumulated_difficulty, + expected_prev_best_block, }); self } @@ -283,11 +281,6 @@ pub enum WriteOperation { InsertBlockBody { block: Arc, }, - InsertInput { - header_hash: HashOutput, - input: Box, - mmr_position: u32, - }, InsertKernel { header_hash: HashOutput, kernel: Box, @@ -337,6 +330,7 @@ pub enum WriteOperation { height: u64, hash: HashOutput, accumulated_difficulty: u128, + expected_prev_best_block: HashOutput, }, SetPruningHorizonConfig(u64), SetPrunedHeight { @@ -389,17 +383,6 @@ impl fmt::Display for WriteOperation { header_height, mmr_position ), - InsertInput { - header_hash, - input, - mmr_position, - } => write!( - f, - "Insert input {} in block: {} position: {}", - input.output_hash().to_hex(), - header_hash.to_hex(), - mmr_position - ), DeleteOrphanChainTip(hash) => write!(f, "DeleteOrphanChainTip({})", hash.to_hex()), InsertOrphanChainTip(hash) => write!(f, "InsertOrphanChainTip({})", hash.to_hex()), DeleteBlock(hash) => write!(f, "DeleteBlock({})", hash.to_hex()), @@ -446,6 +429,7 @@ impl fmt::Display for WriteOperation { height, hash, accumulated_difficulty, + expected_prev_best_block: _, } => write!( f, "Update best block to height:{} ({}) with difficulty: {}", @@ -471,9 +455,9 @@ pub enum DbKey { impl DbKey { pub fn to_value_not_found_error(&self) -> ChainStorageError { let 
(entity, field, value) = match self { - DbKey::BlockHeader(v) => ("BlockHeader".to_string(), "Height".to_string(), v.to_string()), - DbKey::BlockHash(v) => ("Block".to_string(), "Hash".to_string(), v.to_hex()), - DbKey::OrphanBlock(v) => ("Orphan".to_string(), "Hash".to_string(), v.to_hex()), + DbKey::BlockHeader(v) => ("BlockHeader", "Height", v.to_string()), + DbKey::BlockHash(v) => ("Block", "Hash", v.to_hex()), + DbKey::OrphanBlock(v) => ("Orphan", "Hash", v.to_hex()), }; ChainStorageError::ValueNotFound { entity, field, value } } diff --git a/base_layer/core/src/chain_storage/error.rs b/base_layer/core/src/chain_storage/error.rs index f8541071fc..33c9e88a1a 100644 --- a/base_layer/core/src/chain_storage/error.rs +++ b/base_layer/core/src/chain_storage/error.rs @@ -64,8 +64,8 @@ pub enum ChainStorageError { }, #[error("The requested {entity} was not found via {field}:{value} in the database")] ValueNotFound { - entity: String, - field: String, + entity: &'static str, + field: &'static str, value: String, }, #[error("MMR error: {source}")] @@ -107,12 +107,18 @@ pub enum ChainStorageError { IoError(#[from] std::io::Error), #[error("Cannot calculate MMR roots for block that does not form a chain with the current tip. {0}")] CannotCalculateNonTipMmr(String), + #[error("Key {key} in {table_name} already exists")] + KeyExists { table_name: &'static str, key: String }, } impl ChainStorageError { pub fn is_value_not_found(&self) -> bool { matches!(self, ChainStorageError::ValueNotFound { .. }) } + + pub fn is_key_exist_error(&self) -> bool { + matches!(self, ChainStorageError::KeyExists { .. 
}) + } } impl From for ChainStorageError { @@ -126,9 +132,9 @@ impl From for ChainStorageError { use lmdb_zero::Error::*; match err { Code(c) if c == lmdb_zero::error::NOTFOUND => ChainStorageError::ValueNotFound { - entity: "LMDB".to_string(), - field: "unknown".to_string(), - value: "unknown".to_string(), + entity: "", + field: "", + value: "".to_string(), }, _ => ChainStorageError::AccessError(err.to_string()), } @@ -150,21 +156,32 @@ impl Optional for Result { } pub trait OrNotFound { - fn or_not_found(self, entity: &str, field: &str, value: String) -> Result; + fn or_not_found(self, entity: &'static str, field: &'static str, value: String) -> Result; } impl OrNotFound for Result, ChainStorageError> { - fn or_not_found(self, entity: &str, field: &str, value: String) -> Result { + fn or_not_found(self, entity: &'static str, field: &'static str, value: String) -> Result { match self { Ok(inner) => match inner { - None => Err(ChainStorageError::ValueNotFound { - entity: entity.to_string(), - field: field.to_string(), - value, - }), + None => Err(ChainStorageError::ValueNotFound { entity, field, value }), Some(v) => Ok(v), }, Err(err) => Err(err), } } } + +impl OrNotFound for Result { + fn or_not_found(self, entity: &'static str, field: &'static str, value: String) -> Result { + use lmdb_zero::Error::*; + match self { + Ok(v) => Ok(v), + Err(err) => match err { + Code(c) if c == lmdb_zero::error::NOTFOUND => { + Err(ChainStorageError::ValueNotFound { entity, field, value }) + }, + err => Err(err.into()), + }, + } + } +} diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb.rs index b0656b3e16..dbaf0e6870 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb.rs @@ -20,7 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
POSSIBILITY OF SUCH DAMAGE. -use crate::chain_storage::error::ChainStorageError; +use crate::chain_storage::{error::ChainStorageError, OrNotFound}; use lmdb_zero::{ del, error::{self, LmdbResultExt}, @@ -86,6 +86,16 @@ where val, e, ); + + if let lmdb_zero::Error::Code(code) = &e { + if *code == lmdb_zero::error::KEYEXIST { + return ChainStorageError::KeyExists { + table_name, + key: to_hex(key.as_lmdb_bytes()), + }; + } + } + ChainStorageError::InsertError { table: table_name, error: e.to_string(), @@ -131,9 +141,18 @@ where } /// Deletes the given key. An error is returned if the key does not exist -pub fn lmdb_delete(txn: &WriteTransaction<'_>, db: &Database, key: &K) -> Result<(), ChainStorageError> -where K: AsLmdbBytes + ?Sized { - txn.access().del_key(&db, key)?; +pub fn lmdb_delete( + txn: &WriteTransaction<'_>, + db: &Database, + key: &K, + table_name: &'static str, +) -> Result<(), ChainStorageError> +where + K: AsLmdbBytes + ?Sized, +{ + txn.access() + .del_key(&db, key) + .or_not_found(table_name, "", to_hex(key.as_lmdb_bytes()))?; Ok(()) } diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs index c070c499fe..069765edbb 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs @@ -62,6 +62,7 @@ use crate::{ LMDB_DB_ORPHAN_PARENT_MAP_INDEX, LMDB_DB_TXOS_HASH_TO_INDEX, LMDB_DB_UTXOS, + LMDB_DB_UTXO_COMMITMENT_INDEX, LMDB_DB_UTXO_MMR_SIZE_INDEX, }, BlockchainBackend, @@ -96,13 +97,13 @@ type DatabaseRef = Arc>; pub const LOG_TARGET: &str = "c::cs::lmdb_db::lmdb_db"; -struct OutputKey { - header_hash: HashOutput, +struct OutputKey<'a> { + header_hash: &'a [u8], mmr_position: u32, } -impl OutputKey { - pub fn new(header_hash: HashOutput, mmr_position: u32) -> OutputKey { +impl<'a> OutputKey<'a> { + pub fn new(header_hash: &'a [u8], mmr_position: u32) -> OutputKey { OutputKey { header_hash, mmr_position, @@ -110,7 +111,7 @@ 
impl OutputKey { } pub fn get_key(&self) -> String { - format!("{}-{:010}", self.header_hash.to_hex(), self.mmr_position) + format!("{}-{:010}", to_hex(&self.header_hash), self.mmr_position) } } @@ -131,6 +132,7 @@ pub struct LMDBDatabase { kernel_excess_sig_index: DatabaseRef, kernel_mmr_size_index: DatabaseRef, output_mmr_size_index: DatabaseRef, + utxo_commitment_index: DatabaseRef, orphans_db: DatabaseRef, monero_seed_height_db: DatabaseRef, orphan_header_accumulated_data_db: DatabaseRef, @@ -157,6 +159,7 @@ impl LMDBDatabase { kernel_excess_sig_index: get_database(&store, LMDB_DB_KERNEL_EXCESS_SIG_INDEX)?, kernel_mmr_size_index: get_database(&store, LMDB_DB_KERNEL_MMR_SIZE_INDEX)?, output_mmr_size_index: get_database(&store, LMDB_DB_UTXO_MMR_SIZE_INDEX)?, + utxo_commitment_index: get_database(&store, LMDB_DB_UTXO_COMMITMENT_INDEX)?, orphans_db: get_database(&store, LMDB_DB_ORPHANS)?, orphan_header_accumulated_data_db: get_database(&store, LMDB_DB_ORPHAN_HEADER_ACCUMULATED_DATA)?, monero_seed_height_db: get_database(&store, LMDB_DB_MONERO_SEED_HEIGHT)?, @@ -226,13 +229,6 @@ impl LMDBDatabase { mmr_position, )?; }, - InsertInput { - header_hash, - input, - mmr_position, - } => { - self.insert_input(&write_txn, header_hash, *input, mmr_position)?; - }, DeleteHeader(height) => { self.delete_header(&write_txn, height)?; }, @@ -240,7 +236,7 @@ impl LMDBDatabase { self.delete_orphan(&write_txn, hash)?; }, DeleteOrphanChainTip(hash) => { - lmdb_delete(&write_txn, &self.orphan_chain_tips_db, &hash)?; + lmdb_delete(&write_txn, &self.orphan_chain_tips_db, &hash, "orphan_chain_tips_db")?; }, InsertOrphanChainTip(hash) => { lmdb_insert( @@ -248,7 +244,7 @@ impl LMDBDatabase { &self.orphan_chain_tips_db, &hash, &hash, - "orphan_chain_tips", + "orphan_chain_tips_db", )?; }, DeleteBlock(hash) => { @@ -303,7 +299,29 @@ impl LMDBDatabase { height, hash, accumulated_difficulty, + expected_prev_best_block, } => { + // for security we check that the best block does exist, and we 
check the previous value + // we dont want to check this if the prev block has never been set, this means a empty hash of 32 + // bytes. + if height > 0 { + let prev = fetch_best_block(&write_txn, &self.metadata_db)?; + if expected_prev_best_block != prev { + return Err(ChainStorageError::InvalidOperation(format!( + "There was a change in best_block, the best block is suppose to be: ({}), but it \ + currently is: ({})", + expected_prev_best_block.to_hex(), + prev.to_hex(), + ))); + }; + } + if !lmdb_exists(&write_txn, &self.block_hashes_db, hash.as_slice())? { + // we dont care about the header or the height, we just want to know its there. + return Err(ChainStorageError::InvalidOperation(format!( + "There is no Blockheader hash ({}) in db", + expected_prev_best_block.to_hex(), + ))); + }; self.set_metadata(&write_txn, MetadataKey::ChainHeight, MetadataValue::ChainHeight(height))?; self.set_metadata(&write_txn, MetadataKey::BestBlock, MetadataValue::BestBlock(hash))?; self.set_metadata( @@ -347,18 +365,20 @@ impl LMDBDatabase { &self, txn: &WriteTransaction<'_>, key: &OutputKey, - ) -> Result, ChainStorageError> { + ) -> Result { let key = key.get_key(); - let key_string = key.as_str(); - let mut output: TransactionOutputRowData = lmdb_get(txn, &self.utxos_db, key_string).or_not_found( - "TransactionOutput", - "key", - key_string.to_string(), - )?; - let result = output.output.take(); + let mut output: TransactionOutputRowData = + lmdb_get(txn, &self.utxos_db, key.as_str()).or_not_found("TransactionOutput", "key", key.clone())?; + let pruned_output = output + .output + .take() + .ok_or_else(|| ChainStorageError::DataInconsistencyDetected { + function: "prune_output", + details: format!("Attempt to prune output that has already been pruned for key {}", key), + })?; // output.output is None - lmdb_replace(txn, &self.utxos_db, key_string, &output)?; - Ok(result) + lmdb_replace(txn, &self.utxos_db, key.as_str(), &output)?; + Ok(pruned_output) } fn insert_output( @@ 
-372,14 +392,22 @@ impl LMDBDatabase { let output_hash = output.hash(); let witness_hash = output.witness_hash(); - let key = OutputKey::new(header_hash.clone(), mmr_position); + let key = OutputKey::new(&header_hash, mmr_position); let key_string = key.get_key(); + lmdb_insert( + txn, + &*self.utxo_commitment_index, + output.commitment.as_bytes(), + &output_hash, + "utxo_commitment_index", + )?; + lmdb_insert( txn, &*self.txos_hash_to_index_db, output_hash.as_slice(), - &(mmr_position, key_string.clone()), + &(mmr_position, &key_string), "txos_hash_to_index_db", )?; lmdb_insert( @@ -395,7 +423,9 @@ impl LMDBDatabase { mined_height: header_height, }, "utxos_db", - ) + )?; + + Ok(()) } fn insert_pruned_output( @@ -413,7 +443,7 @@ impl LMDBDatabase { header_hash.to_hex(), ))); } - let key = OutputKey::new(header_hash.clone(), mmr_position); + let key = OutputKey::new(&header_hash, mmr_position); let key_string = key.get_key(); lmdb_insert( txn, @@ -488,6 +518,13 @@ impl LMDBDatabase { input: TransactionInput, mmr_position: u32, ) -> Result<(), ChainStorageError> { + lmdb_delete( + txn, + &self.utxo_commitment_index, + input.commitment().as_bytes(), + "utxo_commitment_index", + )?; + let hash = input.hash(); let key = format!("{}-{:010}-{}", header_hash.to_hex(), mmr_position, hash.to_hex()); lmdb_insert( @@ -676,50 +713,129 @@ impl LMDBDatabase { ))); } - lmdb_delete(&txn, &self.block_hashes_db, &hash)?; - lmdb_delete(&txn, &self.headers_db, &height)?; - lmdb_delete(&txn, &self.header_accumulated_data_db, &height)?; - lmdb_delete(&txn, &self.kernel_mmr_size_index, &header.kernel_mmr_size.to_be_bytes())?; - lmdb_delete(&txn, &self.output_mmr_size_index, &header.output_mmr_size.to_be_bytes())?; + lmdb_delete(&txn, &self.block_hashes_db, &hash, "block_hashes_db")?; + lmdb_delete(&txn, &self.headers_db, &height, "headers_db")?; + lmdb_delete( + &txn, + &self.header_accumulated_data_db, + &height, + "header_accumulated_data_db", + )?; + lmdb_delete( + &txn, + 
&self.kernel_mmr_size_index, + &header.kernel_mmr_size.to_be_bytes(), + "kernel_mmr_size_index", + )?; + lmdb_delete( + &txn, + &self.output_mmr_size_index, + &header.output_mmr_size.to_be_bytes(), + "output_mmr_size_index", + )?; Ok(()) } - fn delete_block_body(&self, write_txn: &WriteTransaction<'_>, hash: HashOutput) -> Result<(), ChainStorageError> { - let hash_hex = hash.to_hex(); + fn delete_block_body( + &self, + write_txn: &WriteTransaction<'_>, + block_hash: HashOutput, + ) -> Result<(), ChainStorageError> { + let hash_hex = block_hash.to_hex(); debug!(target: LOG_TARGET, "Deleting block `{}`", hash_hex); debug!(target: LOG_TARGET, "Deleting UTXOs..."); - let height = self - .fetch_height_from_hash(&write_txn, &hash) - .or_not_found("Block", "hash", hash.to_hex())?; + let height = + self.fetch_height_from_hash(&write_txn, &block_hash) + .or_not_found("Block", "hash", hash_hex.clone())?; let block_accum_data = self.fetch_block_accumulated_data(write_txn, height)? .ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "BlockAccumulatedData".to_string(), - field: "height".to_string(), + entity: "BlockAccumulatedData", + field: "height", value: height.to_string(), })?; let mut bitmap = self.load_deleted_bitmap_model(write_txn)?; bitmap.remove(block_accum_data.deleted())?; bitmap.finish()?; - lmdb_delete(&write_txn, &self.block_accumulated_data_db, &height)?; - let rows = lmdb_delete_keys_starting_with::(&write_txn, &self.utxos_db, &hash_hex)?; + lmdb_delete( + &write_txn, + &self.block_accumulated_data_db, + &height, + "block_accumulated_data_db", + )?; + + self.delete_block_inputs_outputs(write_txn, &hash_hex)?; + self.delete_block_kernels(write_txn, &hash_hex)?; + + Ok(()) + } - for utxo in rows { + fn delete_block_inputs_outputs(&self, txn: &WriteTransaction<'_>, hash: &str) -> Result<(), ChainStorageError> { + let output_rows = lmdb_delete_keys_starting_with::(txn, &self.utxos_db, hash)?; + debug!(target: LOG_TARGET, "Deleted {} outputs...", 
output_rows.len()); + let inputs = lmdb_delete_keys_starting_with::(txn, &self.inputs_db, hash)?; + debug!(target: LOG_TARGET, "Deleted {} input(s)...", inputs.len()); + + for utxo in &output_rows { trace!(target: LOG_TARGET, "Deleting UTXO `{}`", to_hex(&utxo.hash)); - lmdb_delete(&write_txn, &self.txos_hash_to_index_db, utxo.hash.as_slice())?; + lmdb_delete( + txn, + &self.txos_hash_to_index_db, + utxo.hash.as_slice(), + "txos_hash_to_index_db", + )?; + if let Some(ref output) = utxo.output { + let output_hash = output.hash(); + // if an output was already spent in the block, it was never created as unspent, so dont delete it as it + // does not exist here + if inputs.iter().any(|r| r.input.output_hash() == output_hash) { + continue; + } + lmdb_delete( + txn, + &*self.utxo_commitment_index, + output.commitment.as_bytes(), + "utxo_commitment_index", + )?; + } + } + // Move inputs in this block back into the unspent set, any outputs spent within this block they will be removed + // by deleting all the block's outputs below + for row in inputs { + // If input spends an output in this block, don't add it to the utxo set + let output_hash = row.input.output_hash(); + if output_rows.iter().any(|r| r.hash == output_hash) { + continue; + } + trace!(target: LOG_TARGET, "Input moved to UTXO set: {}", row.input); + lmdb_insert( + txn, + &*self.utxo_commitment_index, + row.input.commitment.as_bytes(), + &row.input.output_hash(), + "utxo_commitment_index", + )?; } - debug!(target: LOG_TARGET, "Deleting kernels..."); - let kernels = - lmdb_delete_keys_starting_with::(&write_txn, &self.kernels_db, &hash_hex)?; + Ok(()) + } + + fn delete_block_kernels(&self, txn: &WriteTransaction<'_>, hash: &str) -> Result<(), ChainStorageError> { + let kernels = lmdb_delete_keys_starting_with::(txn, &self.kernels_db, hash)?; + debug!(target: LOG_TARGET, "Deleted {} kernels...", kernels.len()); for kernel in kernels { trace!( target: LOG_TARGET, "Deleting excess `{}`", - 
to_hex(kernel.kernel.excess.as_bytes()) + kernel.kernel.excess.to_hex() ); - lmdb_delete(&write_txn, &self.kernel_excess_index, kernel.kernel.excess.as_bytes())?; + lmdb_delete( + txn, + &self.kernel_excess_index, + kernel.kernel.excess.as_bytes(), + "kernel_excess_index", + )?; let mut excess_sig_key = Vec::::new(); excess_sig_key.extend(kernel.kernel.excess_sig.get_public_nonce().as_bytes()); excess_sig_key.extend(kernel.kernel.excess_sig.get_signature().as_bytes()); @@ -728,10 +844,13 @@ impl LMDBDatabase { "Deleting excess signature `{}`", to_hex(&excess_sig_key) ); - lmdb_delete(&write_txn, &self.kernel_excess_sig_index, excess_sig_key.as_slice())?; + lmdb_delete( + txn, + &self.kernel_excess_sig_index, + excess_sig_key.as_slice(), + "kernel_excess_sig_index", + )?; } - debug!(target: LOG_TARGET, "Deleting Inputs..."); - lmdb_delete_keys_starting_with::(&write_txn, &self.inputs_db, &hash_hex)?; Ok(()) } @@ -742,7 +861,12 @@ impl LMDBDatabase { // Orphan is a tip hash if lmdb_exists(&txn, &self.orphan_chain_tips_db, hash.as_slice())? { - lmdb_delete(&txn, &self.orphan_chain_tips_db, hash.as_slice())?; + lmdb_delete( + &txn, + &self.orphan_chain_tips_db, + hash.as_slice(), + "orphan_chain_tips_db", + )?; // Parent becomes a tip hash if lmdb_exists(&txn, &self.orphans_db, parent_hash.as_slice())? { @@ -757,7 +881,12 @@ impl LMDBDatabase { } if lmdb_exists(&txn, &self.orphan_header_accumulated_data_db, hash.as_slice())? { - lmdb_delete(&txn, &self.orphan_header_accumulated_data_db, hash.as_slice())?; + lmdb_delete( + &txn, + &self.orphan_header_accumulated_data_db, + hash.as_slice(), + "orphan_header_accumulated_data_db", + )?; } if lmdb_get::<_, BlockHeaderAccumulatedData>( @@ -767,9 +896,14 @@ impl LMDBDatabase { )? 
.is_some() { - lmdb_delete(&txn, &self.orphan_header_accumulated_data_db, hash.as_slice())?; + lmdb_delete( + &txn, + &self.orphan_header_accumulated_data_db, + hash.as_slice(), + "orphan_header_accumulated_data_db", + )?; } - lmdb_delete(&txn, &self.orphans_db, hash.as_slice())?; + lmdb_delete(&txn, &self.orphans_db, hash.as_slice(), "orphans_db")?; } Ok(()) } @@ -813,8 +947,8 @@ impl LMDBDatabase { } else { self.fetch_block_accumulated_data(&*txn, header.height - 1)? .ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "BlockAccumulatedData".to_string(), - field: "prev_hash".to_string(), + entity: "BlockAccumulatedData", + field: "prev_hash", value: header.prev_hash.to_hex(), })? }; @@ -847,11 +981,7 @@ impl LMDBDatabase { total_utxo_sum = &total_utxo_sum + &output.commitment; output_mmr.push(output.hash())?; witness_mmr.push(output.witness_hash())?; - trace!( - target: LOG_TARGET, - "Inserting output `{}`", - to_hex(&output.commitment.as_bytes()) - ); + debug!(target: LOG_TARGET, "Inserting output `{}`", output.commitment.to_hex()); self.insert_output( txn, block_hash.clone(), @@ -872,11 +1002,7 @@ impl LMDBDatabase { index ))); } - trace!( - target: LOG_TARGET, - "Inserting input `{}`", - to_hex(&input.commitment.as_bytes()) - ); + debug!(target: LOG_TARGET, "Inserting input `{}`", input.commitment.to_hex()); self.insert_input(txn, block_hash.clone(), input, index)?; } @@ -1026,7 +1152,7 @@ impl LMDBDatabase { &((pos + 1) as u64).to_be_bytes(), ) .or_not_found("BlockHeader", "mmr_position", pos.to_string())?; - let key = OutputKey::new(hash, pos); + let key = OutputKey::new(&hash, pos); debug!(target: LOG_TARGET, "Pruning output: {}", key.get_key()); self.prune_output(&write_txn, &key)?; } @@ -1115,6 +1241,7 @@ pub fn create_lmdb_database>(path: P, config: LMDBConfig) -> Resu .add_database(LMDB_DB_KERNEL_EXCESS_SIG_INDEX, flags) .add_database(LMDB_DB_KERNEL_MMR_SIZE_INDEX, flags) .add_database(LMDB_DB_UTXO_MMR_SIZE_INDEX, flags) + 
.add_database(LMDB_DB_UTXO_COMMITMENT_INDEX, flags) .add_database(LMDB_DB_ORPHANS, flags) .add_database(LMDB_DB_ORPHAN_HEADER_ACCUMULATED_DATA, flags) .add_database(LMDB_DB_MONERO_SEED_HEIGHT, flags) @@ -1243,16 +1370,16 @@ impl BlockchainBackend for LMDBDatabase { let header: BlockHeader = lmdb_get(&txn, &self.headers_db, &height)?.ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "BlockHeader".to_string(), - field: "height".to_string(), + entity: "BlockHeader", + field: "height", value: height.to_string(), })?; let accum_data = self .fetch_header_accumulated_data_by_height(&txn, height)? .ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "BlockHeaderAccumulatedData".to_string(), - field: "height".to_string(), + entity: "BlockHeaderAccumulatedData", + field: "height", value: height.to_string(), })?; @@ -1312,8 +1439,8 @@ impl BlockchainBackend for LMDBDatabase { } Err(ChainStorageError::ValueNotFound { - entity: "chain_header_in_all_chains".to_string(), - field: "hash".to_string(), + entity: "chain_header_in_all_chains", + field: "hash", value: hash.to_hex(), }) } @@ -1323,23 +1450,23 @@ impl BlockchainBackend for LMDBDatabase { let height = lmdb_first_after::<_, u64>(&txn, &self.kernel_mmr_size_index, &mmr_position.to_be_bytes())? .ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "kernel_mmr_size_index".to_string(), - field: "mmr_position".to_string(), + entity: "kernel_mmr_size_index", + field: "mmr_position", value: mmr_position.to_string(), })?; let header: BlockHeader = lmdb_get(&txn, &self.headers_db, &height)?.ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "BlockHeader".to_string(), - field: "height".to_string(), + entity: "BlockHeader", + field: "height", value: height.to_string(), })?; let accum_data = self .fetch_header_accumulated_data_by_height(&txn, height)? 
.ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "BlockHeaderAccumulatedData".to_string(), - field: "height".to_string(), + entity: "BlockHeaderAccumulatedData", + field: "height", value: height.to_string(), })?; @@ -1359,22 +1486,22 @@ impl BlockchainBackend for LMDBDatabase { let (height, _hash) = lmdb_first_after::<_, (u64, Vec)>(&txn, &self.output_mmr_size_index, &mmr_position.to_be_bytes())? .ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "output_mmr_size_index".to_string(), - field: "mmr_position".to_string(), + entity: "output_mmr_size_index", + field: "mmr_position", value: mmr_position.to_string(), })?; let header: BlockHeader = lmdb_get(&txn, &self.headers_db, &height)?.ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "BlockHeader".to_string(), - field: "height".to_string(), + entity: "BlockHeader", + field: "height", value: height.to_string(), })?; let accum_data = self .fetch_header_accumulated_data_by_height(&txn, height)? .ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "BlockHeaderAccumulatedData".to_string(), - field: "height".to_string(), + entity: "BlockHeaderAccumulatedData", + field: "height", value: height.to_string(), })?; @@ -1491,8 +1618,8 @@ impl BlockchainBackend for LMDBDatabase { for height in start_height..=end_height { let hash = lmdb_get::<_, BlockHeaderAccumulatedData>(&txn, &self.header_accumulated_data_db, &height)? .ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "BlockHeader".to_string(), - field: "height".to_string(), + entity: "BlockHeader", + field: "height", value: height.to_string(), })? .hash; @@ -1569,8 +1696,8 @@ impl BlockchainBackend for LMDBDatabase { let accum_data = lmdb_get::<_, BlockHeaderAccumulatedData>(&txn, &self.header_accumulated_data_db, &height)? 
.ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "BlockHeader".to_string(), - field: "height".to_string(), + entity: "BlockHeader", + field: "height", value: height.to_string(), })?; @@ -1616,10 +1743,7 @@ impl BlockchainBackend for LMDBDatabase { Ok((result, difference_bitmap)) } - fn fetch_output( - &self, - output_hash: &HashOutput, - ) -> Result, ChainStorageError> { + fn fetch_output(&self, output_hash: &HashOutput) -> Result, ChainStorageError> { debug!(target: LOG_TARGET, "Fetch output: {}", output_hash.to_hex()); let txn = self.read_transaction()?; if let Some((index, key)) = @@ -1632,20 +1756,33 @@ impl BlockchainBackend for LMDBDatabase { index, key ); - if let Some(output) = lmdb_get::<_, TransactionOutputRowData>(&txn, &self.utxos_db, key.as_str())? { - if output.output.is_none() { - error!( - target: LOG_TARGET, - "Tried to fetch pruned output: {} ({}, {})", - output_hash.to_hex(), - index, - key - ); - unimplemented!("Output has been pruned"); - } - Ok(Some((output.output.unwrap(), output.mmr_position, output.mined_height))) - } else { - Ok(None) + match lmdb_get::<_, TransactionOutputRowData>(&txn, &self.utxos_db, key.as_str())? { + Some(TransactionOutputRowData { + output: Some(o), + mmr_position, + mined_height, + .. + }) => Ok(Some(( + PrunedOutput::NotPruned { output: o }, + mmr_position, + mined_height, + ))), + Some(TransactionOutputRowData { + output: None, + mmr_position, + mined_height, + hash, + witness_hash, + .. 
+ }) => Ok(Some(( + PrunedOutput::Pruned { + output_hash: hash, + witness_hash, + }, + mmr_position, + mined_height, + ))), + _ => Ok(None), } } else { debug!( @@ -1657,6 +1794,14 @@ impl BlockchainBackend for LMDBDatabase { } } + fn fetch_unspent_output_hash_by_commitment( + &self, + commitment: &Commitment, + ) -> Result, ChainStorageError> { + let txn = self.read_transaction()?; + lmdb_get::<_, HashOutput>(&*txn, &*self.utxo_commitment_index, commitment.as_bytes()) + } + fn fetch_outputs_in_block(&self, header_hash: &HashOutput) -> Result, ChainStorageError> { let txn = self.read_transaction()?; Ok( @@ -1721,15 +1866,15 @@ impl BlockchainBackend for LMDBDatabase { let metadata = self.fetch_chain_metadata()?; let height = metadata.height_of_longest_chain(); let header = lmdb_get(&txn, &self.headers_db, &height)?.ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "Header".to_string(), - field: "height".to_string(), + entity: "Header", + field: "height", value: height.to_string(), })?; let accumulated_data = self .fetch_header_accumulated_data_by_height(&txn, metadata.height_of_longest_chain())? 
.ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "BlockHeaderAccumulatedData".to_string(), - field: "height".to_string(), + entity: "BlockHeaderAccumulatedData", + field: "height", value: height.to_string(), })?; let chain_header = ChainHeader::try_construct(header, accumulated_data).ok_or_else(|| { @@ -1767,16 +1912,16 @@ impl BlockchainBackend for LMDBDatabase { let orphan: Block = lmdb_get(&txn, &self.orphans_db, hash.as_slice())?.ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "Orphan".to_string(), - field: "hash".to_string(), + entity: "Orphan", + field: "hash", value: hash.to_hex(), })?; let accumulated_data = lmdb_get(&txn, &self.orphan_header_accumulated_data_db, hash.as_slice())?.ok_or_else(|| { ChainStorageError::ValueNotFound { - entity: "Orphan accumulated data".to_string(), - field: "hash".to_string(), + entity: "Orphan accumulated data", + field: "hash", value: hash.to_hex(), } })?; @@ -1803,8 +1948,8 @@ impl BlockchainBackend for LMDBDatabase { for hash in orphan_hashes { res.push(lmdb_get(&txn, &self.orphans_db, hash.as_slice())?.ok_or_else(|| { ChainStorageError::ValueNotFound { - entity: "Orphan".to_string(), - field: "hash".to_string(), + entity: "Orphan", + field: "hash", value: hash.to_hex(), } })?) 
@@ -1919,8 +2064,8 @@ fn fetch_chain_height(txn: &ConstTransaction<'_>, db: &Database) -> Result Ok(height), _ => Err(ChainStorageError::ValueNotFound { - entity: "ChainMetadata".to_string(), - field: "ChainHeight".to_string(), + entity: "ChainMetadata", + field: "ChainHeight", value: "".to_string(), }), } @@ -1943,8 +2088,8 @@ fn fetch_horizon_data(txn: &ConstTransaction<'_>, db: &Database) -> Result Ok(Some(data)), None => Ok(None), _ => Err(ChainStorageError::ValueNotFound { - entity: "ChainMetadata".to_string(), - field: "HorizonData".to_string(), + entity: "ChainMetadata", + field: "HorizonData", value: "".to_string(), }), } @@ -1956,8 +2101,8 @@ fn fetch_best_block(txn: &ConstTransaction<'_>, db: &Database) -> Result Ok(best_block), _ => Err(ChainStorageError::ValueNotFound { - entity: "ChainMetadata".to_string(), - field: "BestBlock".to_string(), + entity: "ChainMetadata", + field: "BestBlock", value: "".to_string(), }), } @@ -1970,8 +2115,8 @@ fn fetch_accumulated_work(txn: &ConstTransaction<'_>, db: &Database) -> Result Ok(accumulated_difficulty), _ => Err(ChainStorageError::ValueNotFound { - entity: "ChainMetadata".to_string(), - field: "AccumulatedWork".to_string(), + entity: "ChainMetadata", + field: "AccumulatedWork", value: "".to_string(), }), } @@ -1985,8 +2130,8 @@ fn fetch_deleted_bitmap(txn: &ConstTransaction<'_>, db: &Database) -> Result Ok(bitmap), None => Ok(Bitmap::create().into()), _ => Err(ChainStorageError::ValueNotFound { - entity: "ChainMetadata".to_string(), - field: "DeletedBitmap".to_string(), + entity: "ChainMetadata", + field: "DeletedBitmap", value: "".to_string(), }), } diff --git a/base_layer/core/src/chain_storage/lmdb_db/mod.rs b/base_layer/core/src/chain_storage/lmdb_db/mod.rs index 42ab93ef6e..785b0363ee 100644 --- a/base_layer/core/src/chain_storage/lmdb_db/mod.rs +++ b/base_layer/core/src/chain_storage/lmdb_db/mod.rs @@ -44,6 +44,7 @@ pub const LMDB_DB_KERNEL_EXCESS_INDEX: &str = "kernel_excess_index"; pub const 
LMDB_DB_KERNEL_EXCESS_SIG_INDEX: &str = "kernel_excess_sig_index"; pub const LMDB_DB_KERNEL_MMR_SIZE_INDEX: &str = "kernel_mmr_size_index"; pub const LMDB_DB_UTXO_MMR_SIZE_INDEX: &str = "utxo_mmr_size_index"; +pub const LMDB_DB_UTXO_COMMITMENT_INDEX: &str = "utxo_commitment_index"; pub const LMDB_DB_ORPHANS: &str = "orphans"; pub const LMDB_DB_MONERO_SEED_HEIGHT: &str = "monero_seed_height"; pub const LMDB_DB_ORPHAN_HEADER_ACCUMULATED_DATA: &str = "orphan_accumulated_data"; diff --git a/base_layer/core/src/chain_storage/pruned_output.rs b/base_layer/core/src/chain_storage/pruned_output.rs index 2d6128eb1f..957c0e8c86 100644 --- a/base_layer/core/src/chain_storage/pruned_output.rs +++ b/base_layer/core/src/chain_storage/pruned_output.rs @@ -22,6 +22,7 @@ use crate::transactions::{transaction::TransactionOutput, types::HashOutput}; #[allow(clippy::large_enum_variant)] +#[derive(Debug, PartialEq)] pub enum PrunedOutput { Pruned { output_hash: HashOutput, diff --git a/base_layer/core/src/chain_storage/tests/blockchain_database.rs b/base_layer/core/src/chain_storage/tests/blockchain_database.rs index 41c34b9ef5..0a3807fad3 100644 --- a/base_layer/core/src/chain_storage/tests/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/tests/blockchain_database.rs @@ -28,6 +28,7 @@ use crate::{ blockchain::{create_new_blockchain, TempDatabase}, create_block, }, + transactions::transaction::{Transaction, UnblindedOutput}, }; use std::sync::Arc; use tari_test_utils::unpack_enum; @@ -36,20 +37,33 @@ fn setup() -> BlockchainDatabase { create_new_blockchain() } -fn add_many_chained_blocks(size: usize, db: &BlockchainDatabase) -> Vec> { +fn create_next_block(prev_block: &Block, transactions: Vec>) -> (Arc, UnblindedOutput) { + let (mut block, output) = create_block( + 1, + prev_block.header.height + 1, + transactions.into_iter().map(|t| (&*t).clone()).collect(), + ); + block.header.prev_hash = prev_block.hash(); + block.header.output_mmr_size = 
prev_block.header.output_mmr_size + block.body.outputs().len() as u64; + block.header.kernel_mmr_size = prev_block.header.kernel_mmr_size + block.body.kernels().len() as u64; + (Arc::new(block), output) +} + +fn add_many_chained_blocks( + size: usize, + db: &BlockchainDatabase, +) -> (Vec>, Vec) { let mut prev_block = Arc::new(db.fetch_block(0).unwrap().try_into_block().unwrap()); let mut blocks = Vec::with_capacity(size); - for i in 1..=size as u64 { - let mut block = create_block(1, i, vec![]); - block.header.prev_hash = prev_block.hash().clone(); - block.header.output_mmr_size = prev_block.header.output_mmr_size + block.body.outputs().len() as u64; - block.header.kernel_mmr_size = prev_block.header.kernel_mmr_size + block.body.kernels().len() as u64; - let block = Arc::new(block); - prev_block = block.clone(); + let mut outputs = Vec::with_capacity(size); + for _ in 1..=size as u64 { + let (block, coinbase_utxo) = create_next_block(&prev_block, vec![]); db.add_block(block.clone()).unwrap().assert_added(); + prev_block = block.clone(); blocks.push(block); + outputs.push(coinbase_utxo); } - blocks + (blocks, outputs) } mod fetch_blocks { @@ -76,7 +90,7 @@ mod fetch_blocks { #[test] fn it_returns_one() { let db = setup(); - let new_blocks = add_many_chained_blocks(1, &db); + let (new_blocks, _) = add_many_chained_blocks(1, &db); let blocks = db.fetch_blocks(1..=1).unwrap(); assert_eq!(blocks.len(), 1); assert_eq!(blocks[0].block().hash(), new_blocks[0].hash()); @@ -291,7 +305,7 @@ mod fetch_block_hashes_from_header_tip { #[test] fn it_returns_n_hashes_from_tip() { let db = setup(); - let blocks = add_many_chained_blocks(5, &db); + let (blocks, _) = add_many_chained_blocks(5, &db); let hashes = db.fetch_block_hashes_from_header_tip(3, 1).unwrap(); assert_eq!(hashes.len(), 3); assert_eq!(hashes[0], blocks[3].hash()); @@ -302,7 +316,7 @@ mod fetch_block_hashes_from_header_tip { #[test] fn it_returns_hashes_without_overlapping() { let db = setup(); - let blocks = 
add_many_chained_blocks(3, &db); + let (blocks, _) = add_many_chained_blocks(3, &db); let hashes = db.fetch_block_hashes_from_header_tip(2, 0).unwrap(); assert_eq!(hashes[0], blocks[2].hash()); assert_eq!(hashes[1], blocks[1].hash()); @@ -314,10 +328,81 @@ mod fetch_block_hashes_from_header_tip { fn it_returns_all_hashes_from_tip() { let db = setup(); let genesis = db.fetch_tip_header().unwrap(); - let blocks = add_many_chained_blocks(5, &db); + let (blocks, _) = add_many_chained_blocks(5, &db); let hashes = db.fetch_block_hashes_from_header_tip(10, 0).unwrap(); assert_eq!(hashes.len(), 6); assert_eq!(hashes[0], blocks[4].hash()); assert_eq!(&hashes[5], genesis.hash()); } } + +mod add_block { + use super::*; + use crate::{ + chain_storage::ChainStorageError, + crypto::tari_utilities::hex::Hex, + transactions::{ + helpers::{schema_to_transaction, TransactionSchema}, + tari_amount::T, + transaction::OutputFeatures, + }, + txn_schema, + }; + + #[test] + fn it_does_not_allow_duplicate_commitments_in_the_utxo_set() { + let db = setup(); + let (blocks, outputs) = add_many_chained_blocks(5, &db); + + let prev_block = blocks.last().unwrap(); + + let (txns, tx_outputs) = + schema_to_transaction(&[txn_schema!(from: vec![outputs[0].clone()], to: vec![500 * T])]); + let mut prev_utxo = tx_outputs[0].clone(); + + let (block, _) = create_next_block(&prev_block, txns); + db.add_block(block.clone()).unwrap().assert_added(); + + let prev_block = block; + let prev_output = prev_utxo.as_transaction_output(&Default::default()).unwrap(); + + let (txns, _) = schema_to_transaction(&[TransactionSchema { + from: vec![outputs[1].clone()], + to: vec![], + to_outputs: vec![prev_utxo.clone()], + fee: 25.into(), + lock_height: 0, + features: Default::default(), + script: tari_crypto::script![Nop], + input_data: None, + }]); + + let (block, _) = create_next_block(&prev_block, txns); + let err = db.add_block(block).unwrap_err(); + unpack_enum!(ChainStorageError::KeyExists { key, table_name } = 
err); + assert_eq!(table_name, "utxo_commitment_index"); + assert_eq!(key, prev_output.commitment.to_hex()); + + let (txns, _) = schema_to_transaction(&[txn_schema!(from: vec![prev_utxo.clone()], to: vec![50 * T])]); + let (block, _) = create_next_block(&prev_block, txns); + let block = db.add_block(block).unwrap().assert_added(); + let prev_block = block.to_arc_block(); + + // Different maturity so that the output hash is different in txo_hash_to_index_db + prev_utxo.features = OutputFeatures::with_maturity(1); + // Now we can reuse a commitment + let (txns, _) = schema_to_transaction(&[TransactionSchema { + from: vec![outputs[1].clone()], + to: vec![], + to_outputs: vec![prev_utxo], + fee: 25.into(), + lock_height: 0, + features: Default::default(), + script: tari_crypto::script![Nop], + input_data: None, + }]); + + let (block, _) = create_next_block(&prev_block, txns); + db.add_block(block).unwrap().assert_added(); + } +} diff --git a/base_layer/core/src/mempool/service/service.rs b/base_layer/core/src/mempool/service/service.rs index ed14d68410..b8ee487b9c 100644 --- a/base_layer/core/src/mempool/service/service.rs +++ b/base_layer/core/src/mempool/service/service.rs @@ -475,7 +475,7 @@ async fn handle_request_timeout( let _ = reply_tx.send(reply_msg.map_err(|e| { error!( target: LOG_TARGET, - "Failed to send outbound request (request key: {}): {:?}", &request_key, e + "Failed to process outbound request (request key: {}): {:?}", &request_key, e ); e })); diff --git a/base_layer/core/src/mempool/sync_protocol/test.rs b/base_layer/core/src/mempool/sync_protocol/test.rs index 5b3ef0eef1..dd77fe3c70 100644 --- a/base_layer/core/src/mempool/sync_protocol/test.rs +++ b/base_layer/core/src/mempool/sync_protocol/test.rs @@ -97,7 +97,7 @@ async fn empty_set() { let node1 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let node2 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let (_node1_conn, node1_mock, node2_conn, _) = - 
create_peer_connection_mock_pair(1, node1.to_peer(), node2.to_peer()).await; + create_peer_connection_mock_pair(node1.to_peer(), node2.to_peer()).await; // This node connected to a peer, so it should open the substream connectivity_events_tx @@ -127,7 +127,7 @@ async fn synchronise() { let node1 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let node2 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let (_node1_conn, node1_mock, node2_conn, _) = - create_peer_connection_mock_pair(1, node1.to_peer(), node2.to_peer()).await; + create_peer_connection_mock_pair(node1.to_peer(), node2.to_peer()).await; // This node connected to a peer, so it should open the substream connectivity_events_tx @@ -161,7 +161,7 @@ async fn duplicate_set() { let node1 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let node2 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let (_node1_conn, node1_mock, node2_conn, _) = - create_peer_connection_mock_pair(1, node1.to_peer(), node2.to_peer()).await; + create_peer_connection_mock_pair(node1.to_peer(), node2.to_peer()).await; // This node connected to a peer, so it should open the substream connectivity_events_tx @@ -267,7 +267,7 @@ async fn responder_messages() { let node1 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let node2 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let (_node1_conn, node1_mock, node2_conn, _) = - create_peer_connection_mock_pair(1, node1.to_peer(), node2.to_peer()).await; + create_peer_connection_mock_pair(node1.to_peer(), node2.to_peer()).await; // This node connected to a peer, so it should open the substream connectivity_events_tx diff --git a/base_layer/core/src/proof_of_work/monero_rx/pow_data.rs b/base_layer/core/src/proof_of_work/monero_rx/pow_data.rs index 80ab870c18..47691482e8 100644 --- a/base_layer/core/src/proof_of_work/monero_rx/pow_data.rs +++ b/base_layer/core/src/proof_of_work/monero_rx/pow_data.rs @@ -150,27 +150,15 @@ mod test { use super::*; use 
monero::TxIn; - #[ignore] #[test] + #[should_panic(expected = "capacity overflow")] fn simple_capacity_overflow_panic() { let data = &[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f]; let _ = deserialize::>(data); } - #[ignore] - #[test] - fn oom_moneroblock_deserialize() { - let data = [ - 0x09, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x95, 0x95, 0x95, - 0x95, 0x01, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x95, 0x00, 0x00, 0x00, 0x95, 0x95, 0x95, 0x95, 0x95, 0x95, 0x95, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x95, 0x95, 0x95, 0x95, 0x95, 0x95, 0x95, - ]; - let _ = deserialize::(&data); - } - - #[ignore] #[test] + #[should_panic(expected = "capacity overflow")] fn panic_alloc_capacity_overflow_moneroblock_deserialize() { let data = [ 0x0f, 0x9e, 0xa5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, diff --git a/base_layer/core/src/test_helpers/blockchain.rs b/base_layer/core/src/test_helpers/blockchain.rs index 320d77e269..e871ed10c9 100644 --- a/base_layer/core/src/test_helpers/blockchain.rs +++ b/base_layer/core/src/test_helpers/blockchain.rs @@ -44,8 +44,8 @@ use crate::{ }, consensus::{chain_strength_comparer::ChainStrengthComparerBuilder, ConsensusConstantsBuilder, ConsensusManager}, transactions::{ - transaction::{TransactionInput, TransactionKernel, TransactionOutput}, - types::{CryptoFactories, HashOutput, Signature}, + transaction::{TransactionInput, TransactionKernel}, + types::{Commitment, CryptoFactories, HashOutput, Signature}, }, validation::{ block_validators::{BodyOnlyValidator, OrphanBlockValidator}, @@ -244,13 +244,17 @@ impl BlockchainBackend for TempDatabase { self.db.fetch_utxos_by_mmr_position(start, end, deleted) } - fn fetch_output( - &self, - output_hash: &HashOutput, - ) -> Result, ChainStorageError> { + fn fetch_output(&self, output_hash: &HashOutput) -> Result, 
ChainStorageError> { self.db.fetch_output(output_hash) } + fn fetch_unspent_output_hash_by_commitment( + &self, + commitment: &Commitment, + ) -> Result, ChainStorageError> { + self.db.fetch_unspent_output_hash_by_commitment(commitment) + } + fn fetch_outputs_in_block(&self, header_hash: &HashOutput) -> Result, ChainStorageError> { self.db.fetch_outputs_in_block(header_hash) } diff --git a/base_layer/core/src/test_helpers/mod.rs b/base_layer/core/src/test_helpers/mod.rs index c32070ee3a..a1055b75da 100644 --- a/base_layer/core/src/test_helpers/mod.rs +++ b/base_layer/core/src/test_helpers/mod.rs @@ -31,7 +31,12 @@ use crate::{ consensus::ConsensusManager, crypto::tari_utilities::Hashable, proof_of_work::{sha3_difficulty, AchievedTargetDifficulty, Difficulty}, - transactions::{transaction::Transaction, types::CryptoFactories, CoinbaseBuilder}, + transactions::{ + tari_amount::T, + transaction::{Transaction, UnblindedOutput}, + types::CryptoFactories, + CoinbaseBuilder, + }, }; use rand::{distributions::Alphanumeric, Rng}; use std::{iter, path::Path, sync::Arc}; @@ -42,29 +47,29 @@ use tari_storage::{lmdb_store::LMDBBuilder, LMDBWrapper}; /// Create a partially constructed block using the provided set of transactions /// is chain_block, or rename it to `create_orphan_block` and drop the prev_block argument pub fn create_orphan_block(block_height: u64, transactions: Vec, consensus: &ConsensusManager) -> Block { - create_block( - consensus.consensus_constants(block_height).blockchain_version(), - block_height, - transactions, - ) + let mut header = BlockHeader::new(consensus.consensus_constants(block_height).blockchain_version()); + header.height = block_height; + header.into_builder().with_transactions(transactions).build() } -pub fn create_block(block_version: u16, block_height: u64, transactions: Vec) -> Block { +pub fn create_block(block_version: u16, block_height: u64, transactions: Vec) -> (Block, UnblindedOutput) { let mut header = 
BlockHeader::new(block_version); header.height = block_height; - if transactions.is_empty() { - let constants = ConsensusManager::builder(Network::LocalNet).build(); - let coinbase = CoinbaseBuilder::new(CryptoFactories::default()) - .with_block_height(block_height) - .with_fees(0.into()) - .with_nonce(0.into()) - .with_spend_key(block_height.into()) - .build_with_reward(constants.consensus_constants(block_height), 1.into()) - .unwrap(); - header.into_builder().with_transactions(vec![coinbase.0]).build() - } else { - header.into_builder().with_transactions(transactions).build() - } + let constants = ConsensusManager::builder(Network::LocalNet).build(); + let (coinbase, coinbase_output) = CoinbaseBuilder::new(CryptoFactories::default()) + .with_block_height(block_height) + .with_fees(0.into()) + .with_nonce(0.into()) + .with_spend_key(block_height.into()) + .build_with_reward(constants.consensus_constants(block_height), 5000 * T) + .unwrap(); + ( + header + .into_builder() + .with_transactions(iter::once(coinbase).chain(transactions).collect()) + .build(), + coinbase_output, + ) } pub fn mine_to_difficulty(mut block: Block, difficulty: Difficulty) -> Result { diff --git a/base_layer/core/src/transactions/coinbase_builder.rs b/base_layer/core/src/transactions/coinbase_builder.rs index db0597000d..52cb4ac0ba 100644 --- a/base_layer/core/src/transactions/coinbase_builder.rs +++ b/base_layer/core/src/transactions/coinbase_builder.rs @@ -200,7 +200,7 @@ impl CoinbaseBuilder { let unblinded_output = UnblindedOutput::new( total_reward, spending_key, - Some(output_features), + output_features, script, inputs!(PublicKey::from_secret_key(&script_private_key)), script_private_key, diff --git a/base_layer/core/src/transactions/helpers.rs b/base_layer/core/src/transactions/helpers.rs index 7f5aaa9a5d..8e6c4d4c7b 100644 --- a/base_layer/core/src/transactions/helpers.rs +++ b/base_layer/core/src/transactions/helpers.rs @@ -136,7 +136,7 @@ impl TestParams { UnblindedOutput::new( 
params.value, self.spend_key.clone(), - Some(params.output_features.clone()), + params.output_features, params.script.clone(), params .input_data @@ -263,6 +263,7 @@ macro_rules! txn_schema { $crate::transactions::helpers::TransactionSchema { from: $input.clone(), to: $outputs.clone(), + to_outputs: vec![], fee: $fee, lock_height: $lock, features: $features, @@ -307,6 +308,7 @@ macro_rules! txn_schema { pub struct TransactionSchema { pub from: Vec, pub to: Vec, + pub to_outputs: Vec, pub fee: MicroTari, pub lock_height: u64, pub features: OutputFeatures, @@ -455,7 +457,23 @@ pub fn spend_utxos(schema: TransactionSchema) -> (Transaction, Vec (Transaction, Vec (Transaction, Vec (), - Err(e) => panic!("{:?}", e), - } + stx_protocol.finalize(KernelFeatures::empty(), &factories).unwrap(); let txn = stx_protocol.get_transaction().unwrap().clone(); (txn, outputs, test_params_change_and_txn) } @@ -511,12 +526,11 @@ pub fn create_test_kernel(fee: MicroTari, lock_height: u64) -> TransactionKernel pub fn create_utxo( value: MicroTari, factories: &CryptoFactories, - features: Option, + features: OutputFeatures, script: &TariScript, ) -> (TransactionOutput, PrivateKey, PrivateKey) { let keys = generate_keys(); let offset_keys = generate_keys(); - let features = features.unwrap_or_default(); let commitment = factories.commitment.commit_value(&keys.k, value.into()); let proof = factories.range_proof.construct_proof(&keys.k, value.into()).unwrap(); let metadata_sig = diff --git a/base_layer/core/src/transactions/transaction.rs b/base_layer/core/src/transactions/transaction.rs index 94f7bc5448..81d972d90b 100644 --- a/base_layer/core/src/transactions/transaction.rs +++ b/base_layer/core/src/transactions/transaction.rs @@ -225,7 +225,7 @@ impl UnblindedOutput { pub fn new( value: MicroTari, spending_key: BlindingFactor, - features: Option, + features: OutputFeatures, script: TariScript, input_data: ExecutionStack, script_private_key: PrivateKey, @@ -235,7 +235,7 @@ impl 
UnblindedOutput { UnblindedOutput { value, spending_key, - features: features.unwrap_or_default(), + features, script, input_data, script_private_key, diff --git a/base_layer/core/src/transactions/transaction_protocol/sender.rs b/base_layer/core/src/transactions/transaction_protocol/sender.rs index 0a8ab7751d..0341dcbda1 100644 --- a/base_layer/core/src/transactions/transaction_protocol/sender.rs +++ b/base_layer/core/src/transactions/transaction_protocol/sender.rs @@ -65,7 +65,7 @@ pub(super) struct RawTransactionInfo { pub num_recipients: usize, // The sum of self-created outputs plus change pub amount_to_self: MicroTari, - pub ids: Vec, + pub tx_id: u64, pub amounts: Vec, pub recipient_scripts: Vec, pub recipient_output_features: Vec, @@ -209,7 +209,7 @@ impl SenderTransactionProtocol { match &self.state { SenderState::Finalizing(info) | SenderState::SingleRoundMessageReady(info) | - SenderState::CollectingSingleSignature(info) => info.ids[0] == tx_id, + SenderState::CollectingSingleSignature(info) => info.tx_id == tx_id, _ => false, } } @@ -218,7 +218,7 @@ impl SenderTransactionProtocol { match &self.state { SenderState::Finalizing(info) | SenderState::SingleRoundMessageReady(info) | - SenderState::CollectingSingleSignature(info) => Ok(info.ids[0]), + SenderState::CollectingSingleSignature(info) => Ok(info.tx_id), _ => Err(TPE::InvalidStateError), } } @@ -357,7 +357,7 @@ impl SenderTransactionProtocol { })?; Ok(SingleRoundSenderData { - tx_id: info.ids[0], + tx_id: info.tx_id, amount: self.get_total_amount()?, public_nonce: info.public_nonce.clone(), public_excess: info.public_excess.clone(), diff --git a/base_layer/core/src/transactions/transaction_protocol/transaction_initializer.rs b/base_layer/core/src/transactions/transaction_protocol/transaction_initializer.rs index 73ef9cca04..0d5beb738d 100644 --- a/base_layer/core/src/transactions/transaction_protocol/transaction_initializer.rs +++ 
b/base_layer/core/src/transactions/transaction_protocol/transaction_initializer.rs @@ -44,7 +44,6 @@ use digest::Digest; use log::*; use rand::rngs::OsRng; use std::{ - cmp::max, collections::HashMap, fmt::{Debug, Error, Formatter}, }; @@ -91,6 +90,7 @@ pub struct SenderTransactionInitializer { recipient_scripts: FixedSet, recipient_sender_offset_private_keys: FixedSet, private_commitment_nonces: FixedSet, + tx_id: Option, } pub struct BuildError { @@ -130,6 +130,7 @@ impl SenderTransactionInitializer { recipient_scripts: FixedSet::new(num_recipients), recipient_sender_offset_private_keys: FixedSet::new(num_recipients), private_commitment_nonces: FixedSet::new(num_recipients), + tx_id: None, } } @@ -314,7 +315,7 @@ impl SenderTransactionInitializer { let change_unblinded_output = UnblindedOutput::new( v, change_key.clone(), - Some(output_features), + output_features, script, self.change_input_data .as_ref() @@ -334,6 +335,12 @@ impl SenderTransactionInitializer { } } + /// Specify the tx_id of this transaction, if not provided it will be calculated on build + pub fn with_tx_id(&mut self, tx_id: u64) -> &mut Self { + self.tx_id = Some(tx_id); + self + } + fn check_value(name: &str, val: &Option, vec: &mut Vec) { if val.is_none() { vec.push(name.to_string()); @@ -491,23 +498,22 @@ impl SenderTransactionInitializer { 1 => RecipientInfo::Single(None), _ => RecipientInfo::Multiple(HashMap::new()), }; - let num_ids = max(1, self.num_recipients); - let mut ids = Vec::with_capacity(num_ids); - for i in 0..num_ids { - ids.push(calculate_tx_id::(&public_nonce, i)); - } + + let tx_id = match self.tx_id { + Some(id) => id, + None => calculate_tx_id::(&public_nonce, 0), + }; // The fee should be less than the amount being sent. This isn't a protocol requirement, but it's what you want // 99.999% of the time, however, always preventing this will also prevent spending dust in some edge // cases. 
if self.amounts.size() > 0 && total_fee > self.calculate_amount_to_others() { - let ids_clone = ids.to_vec(); warn!( target: LOG_TARGET, "Fee ({}) is greater than amount ({}) being sent for Transaction (TxId: {}).", total_fee, self.calculate_amount_to_others(), - ids_clone[0] + tx_id ); if self.prevent_fee_gt_amount { return self.build_err("Fee is greater than amount"); @@ -523,7 +529,7 @@ impl SenderTransactionInitializer { let sender_info = RawTransactionInfo { num_recipients: self.num_recipients, amount_to_self, - ids, + tx_id, amounts: self.amounts.into_vec(), recipient_output_features: self.recipient_output_features.into_vec(), recipient_scripts: self.recipient_scripts.into_vec(), @@ -643,7 +649,6 @@ mod test { if let SenderState::Finalizing(info) = result.state { assert_eq!(info.num_recipients, 0, "Number of receivers"); assert_eq!(info.signatures.len(), 0, "Number of signatures"); - assert_eq!(info.ids.len(), 1, "Number of tx_ids"); assert_eq!(info.amounts.len(), 0, "Number of external payment amounts"); assert_eq!(info.metadata.lock_height, 100, "Lock height"); assert_eq!(info.metadata.fee, expected_fee, "Fee"); @@ -685,7 +690,6 @@ mod test { if let SenderState::Finalizing(info) = result.state { assert_eq!(info.num_recipients, 0, "Number of receivers"); assert_eq!(info.signatures.len(), 0, "Number of signatures"); - assert_eq!(info.ids.len(), 1, "Number of tx_ids"); assert_eq!(info.amounts.len(), 0, "Number of external payment amounts"); assert_eq!(info.metadata.lock_height, 0, "Lock height"); assert_eq!(info.metadata.fee, expected_fee, "Fee"); @@ -728,7 +732,6 @@ mod test { if let SenderState::Finalizing(info) = result.state { assert_eq!(info.num_recipients, 0, "Number of receivers"); assert_eq!(info.signatures.len(), 0, "Number of signatures"); - assert_eq!(info.ids.len(), 1, "Number of tx_ids"); assert_eq!(info.amounts.len(), 0, "Number of external payment amounts"); assert_eq!(info.metadata.lock_height, 0, "Lock height"); assert_eq!(info.metadata.fee, 
expected_fee + MicroTari(50), "Fee"); @@ -918,7 +921,6 @@ mod test { if let SenderState::SingleRoundMessageReady(info) = result.state { assert_eq!(info.num_recipients, 1, "Number of receivers"); assert_eq!(info.signatures.len(), 0, "Number of signatures"); - assert_eq!(info.ids.len(), 1, "Number of tx_ids"); assert_eq!(info.amounts.len(), 1, "Number of external payment amounts"); assert_eq!(info.metadata.lock_height, 1234, "Lock height"); assert_eq!(info.metadata.fee, expected_fee, "Fee"); diff --git a/base_layer/core/src/validation/block_validators.rs b/base_layer/core/src/validation/block_validators.rs index d2b80eb1eb..0c4ee76bfd 100644 --- a/base_layer/core/src/validation/block_validators.rs +++ b/base_layer/core/src/validation/block_validators.rs @@ -22,7 +22,7 @@ use crate::{ blocks::{Block, BlockValidationError}, chain_storage, - chain_storage::{BlockchainBackend, ChainBlock, DeletedBitmap, MmrTree}, + chain_storage::{BlockchainBackend, ChainBlock, MmrTree}, consensus::ConsensusManager, transactions::{ aggregated_body::AggregateBody, @@ -130,7 +130,6 @@ impl PostOrphanBodyValidation for BodyOnlyValidator { block: &ChainBlock, backend: &B, metadata: &ChainMetadata, - deleted_bitmap: &DeletedBitmap, ) -> Result<(), ValidationError> { if block.header().height != metadata.height_of_longest_chain() + 1 { return Err(ValidationError::IncorrectNextTipHeight { @@ -146,7 +145,7 @@ impl PostOrphanBodyValidation for BodyOnlyValidator { } let block_id = format!("block #{} ({})", block.header().height, block.hash().to_hex()); - check_inputs_are_utxos(block.block(), backend, deleted_bitmap)?; + check_inputs_are_utxos(block.block(), backend)?; check_not_duplicate_txos(block.block(), backend)?; trace!( target: LOG_TARGET, @@ -176,51 +175,61 @@ fn check_sorting_and_duplicates(body: &AggregateBody) -> Result<(), ValidationEr Ok(()) } -/// This function checks that all inputs in the blocks are valid UTXO's to be spend -fn check_inputs_are_utxos( - block: &Block, - db: &B, - 
deleted: &DeletedBitmap, -) -> Result<(), ValidationError> { +/// This function checks that all inputs in the blocks are valid UTXO's to be spent +fn check_inputs_are_utxos(block: &Block, db: &B) -> Result<(), ValidationError> { for input in block.body.inputs() { - if let Some((_, index, _height)) = db.fetch_output(&input.output_hash())? { - if deleted.bitmap().contains(index) { - warn!( - target: LOG_TARGET, - "Block validation failed due to already spent input: {}", input - ); - return Err(ValidationError::ContainsSTxO); - } - } else { - // lets check if the input exists in the output field - if !block - .body - .outputs() - .iter() - .any(|output| output.hash() == input.output_hash()) - { - warn!( - target: LOG_TARGET, - "Block validation failed because the block has invalid input: {} which does not exist", input - ); - return Err(ValidationError::BlockError(BlockValidationError::InvalidInput)); + if let Some(utxo_hash) = db.fetch_unspent_output_hash_by_commitment(&input.commitment)? { + // We know that the commitment exists in the UTXO set. Check that the output hash matches i.e. all fields + // (output features etc.) match + if utxo_hash == input.output_hash() { + continue; } + + warn!( + target: LOG_TARGET, + "The input spends an unspent output but does not produce the same hash as the output it spends. 
{}", + input + ); + return Err(ValidationError::BlockError(BlockValidationError::InvalidInput)); + } + + // The input was not found in the UTXO/STXO set, lets check if the input spends an output in the current block + let output_hash = input.output_hash(); + if block.body.outputs().iter().any(|output| output.hash() == output_hash) { + continue; } + + // The input does not spend a known UTXO + warn!( + target: LOG_TARGET, + "Block validation failed due an input that does not spend a known UTXO: {}", input + ); + return Err(ValidationError::BlockError(BlockValidationError::InvalidInput)); } Ok(()) } -// This function checks that the inputs and outputs do not exist in the STxO set. +/// This function checks that the outputs do not already exist in the UTxO set. fn check_not_duplicate_txos(block: &Block, db: &B) -> Result<(), ValidationError> { for output in block.body.outputs() { - if db.fetch_mmr_leaf_index(MmrTree::Utxo, &output.hash())?.is_some() { + if let Some(index) = db.fetch_mmr_leaf_index(MmrTree::Utxo, &output.hash())? { warn!( target: LOG_TARGET, - "Block validation failed due to previously spent output: {}", output + "Block validation failed due to previously spent output: {} (MMR index = {})", output, index ); return Err(ValidationError::ContainsTxO); } + if db + .fetch_unspent_output_hash_by_commitment(&output.commitment)? 
+ .is_some() + { + warn!( + target: LOG_TARGET, + "Duplicate UTXO set commitment found for output: {}", output + ); + return Err(ValidationError::ContainsDuplicateUtxoCommitment); + } } Ok(()) } diff --git a/base_layer/core/src/validation/error.rs b/base_layer/core/src/validation/error.rs index d0952ad212..1a9ee2fab6 100644 --- a/base_layer/core/src/validation/error.rs +++ b/base_layer/core/src/validation/error.rs @@ -53,6 +53,8 @@ pub enum ValidationError { ContainsSTxO, #[error("Transaction contains already outputs that already exist")] ContainsTxO, + #[error("Transaction contains an output commitment that already exists")] + ContainsDuplicateUtxoCommitment, #[error("Final state validation failed: The UTXO set did not balance with the expected emission at height {0}")] ChainBalanceValidationFailed(u64), #[error("Proof of work error: {0}")] diff --git a/base_layer/core/src/validation/mocks.rs b/base_layer/core/src/validation/mocks.rs index 7d5bd71147..03c8951d3f 100644 --- a/base_layer/core/src/validation/mocks.rs +++ b/base_layer/core/src/validation/mocks.rs @@ -22,7 +22,7 @@ use crate::{ blocks::{Block, BlockHeader}, - chain_storage::{BlockchainBackend, ChainBlock, DeletedBitmap}, + chain_storage::{BlockchainBackend, ChainBlock}, proof_of_work::{sha3_difficulty, AchievedTargetDifficulty, Difficulty, PowAlgorithm}, transactions::{transaction::Transaction, types::Commitment}, validation::{ @@ -80,13 +80,7 @@ impl CandidateBlockBodyValidation for MockValidator { } impl PostOrphanBodyValidation for MockValidator { - fn validate_body_for_valid_orphan( - &self, - _: &ChainBlock, - _: &B, - _: &ChainMetadata, - _: &DeletedBitmap, - ) -> Result<(), ValidationError> { + fn validate_body_for_valid_orphan(&self, _: &ChainBlock, _: &B, _: &ChainMetadata) -> Result<(), ValidationError> { if self.is_valid.load(Ordering::SeqCst) { Ok(()) } else { diff --git a/base_layer/core/src/validation/test.rs b/base_layer/core/src/validation/test.rs index 9216f44cad..a5998fa3e2 100644 --- 
a/base_layer/core/src/validation/test.rs +++ b/base_layer/core/src/validation/test.rs @@ -22,11 +22,22 @@ use crate::{ blocks::BlockHeader, - consensus::ConsensusManagerBuilder, + chain_storage::{BlockHeaderAccumulatedData, ChainBlock, ChainHeader, DbTransaction}, + consensus::{ConsensusConstantsBuilder, ConsensusManagerBuilder}, + crypto::tari_utilities::Hashable, + proof_of_work::AchievedTargetDifficulty, test_helpers::{blockchain::create_store_with_consensus, create_chain_header}, - validation::header_iter::HeaderIter, + transactions::{ + helpers::{create_random_signature_from_s_key, create_utxo}, + tari_amount::{uT, MicroTari}, + transaction::{KernelBuilder, KernelFeatures, OutputFeatures, TransactionKernel}, + types::{Commitment, CryptoFactories}, + }, + validation::{header_iter::HeaderIter, ChainBalanceValidator, FinalHorizonStateValidation}, }; +use std::sync::Arc; use tari_common::configuration::Network; +use tari_crypto::{commitment::HomomorphicCommitment, script}; #[test] fn header_iter_empty_and_invalid_height() { @@ -77,182 +88,146 @@ fn header_iter_fetch_in_chunks() { } #[test] -#[ignore] // TODO: Fix this test with the new DB structure fn chain_balance_validation() { - // let factories = CryptoFactories::default(); - // let consensus_manager = ConsensusManagerBuilder::new(Network::Stibbons).build(); - // let mut genesis = consensus_manager.get_genesis_block(); - // let faucet_value = 5000 * uT; - // let (faucet_utxo, faucet_key) = create_utxo(faucet_value, &factories, None); - // let (pk, sig) = create_random_signature_from_s_key(faucet_key.clone(), 0.into(), 0); - // let excess = Commitment::from_public_key(&pk); - // let kernel = TransactionKernel { - // features: KernelFeatures::empty(), - // fee: 0 * uT, - // lock_height: 0, - // excess, - // excess_sig: sig, - // }; + let factories = CryptoFactories::default(); + let consensus_manager = ConsensusManagerBuilder::new(Network::Weatherwax).build(); + let genesis = 
consensus_manager.get_genesis_block(); + let faucet_value = 5000 * uT; + let (faucet_utxo, faucet_key, _) = create_utxo(faucet_value, &factories, OutputFeatures::default(), &script!(Nop)); + let (pk, sig) = create_random_signature_from_s_key(faucet_key, 0.into(), 0); + let excess = Commitment::from_public_key(&pk); + let kernel = TransactionKernel { + features: KernelFeatures::empty(), + fee: MicroTari::from(0), + lock_height: 0, + excess, + excess_sig: sig, + }; // let _faucet_hash = faucet_utxo.hash(); - // genesis.body.add_output(faucet_utxo); - // genesis.body.add_kernels(&mut vec![kernel]); - // let total_faucet = faucet_value + consensus_manager.consensus_constants(0).faucet_value(); - // let constants = ConsensusConstantsBuilder::new(Network::LocalNet) - // .with_consensus_constants(consensus_manager.consensus_constants(0).clone()) - // .with_faucet_value(total_faucet) - // .build(); - // // Create a LocalNet consensus manager that uses rincewind consensus constants and has a custom rincewind genesis - // // block that contains an extra faucet utxo - // let consensus_manager = ConsensusManagerBuilder::new(Network::LocalNet) - // .with_block(genesis.clone()) - // .with_consensus_constants(constants) - // .build(); - // - // let db = create_store_with_consensus(&consensus_manager); - // - // let validator = ChainBalanceValidator::new(db.clone(), consensus_manager.clone(), factories.clone()); - // // Validate the genesis state - // validator.validate(&genesis.header).unwrap(); - // - // //---------------------------------- Add a new coinbase and header --------------------------------------------// - // let mut txn = DbTransaction::new(); - // let coinbase_value = consensus_manager.emission_schedule().block_reward(1); - // let (coinbase, coinbase_key) = create_utxo(coinbase_value, &factories, Some(OutputFeatures::create_coinbase(1))); + let mut gen_block = genesis.block().clone(); + gen_block.body.add_output(faucet_utxo); + gen_block.body.add_kernels(&mut 
vec![kernel]); + let mut utxo_sum = HomomorphicCommitment::default(); + let mut kernel_sum = HomomorphicCommitment::default(); + for output in gen_block.body.outputs() { + utxo_sum = &output.commitment + &utxo_sum; + } + for kernel in gen_block.body.kernels() { + kernel_sum = &kernel.excess + &kernel_sum; + } + let genesis = ChainBlock::try_construct(Arc::new(gen_block), genesis.accumulated_data().clone()).unwrap(); + let total_faucet = faucet_value + consensus_manager.consensus_constants(0).faucet_value(); + let constants = ConsensusConstantsBuilder::new(Network::LocalNet) + .with_consensus_constants(consensus_manager.consensus_constants(0).clone()) + .with_faucet_value(total_faucet) + .build(); + // Create a LocalNet consensus manager that uses rincewind consensus constants and has a custom rincewind genesis + // block that contains an extra faucet utxo + let consensus_manager = ConsensusManagerBuilder::new(Network::LocalNet) + .with_block(genesis.clone()) + .with_consensus_constants(constants) + .build(); + + let db = create_store_with_consensus(consensus_manager.clone()); + + let validator = ChainBalanceValidator::new(consensus_manager.clone(), factories.clone()); + // Validate the genesis state + validator + .validate(0, &utxo_sum, &kernel_sum, &*db.db_read_access().unwrap()) + .unwrap(); + + //---------------------------------- Add a new coinbase and header --------------------------------------------// + let mut txn = DbTransaction::new(); + let coinbase_value = consensus_manager.get_block_reward_at(1); + let (coinbase, coinbase_key, _) = create_utxo( + coinbase_value, + &factories, + OutputFeatures::create_coinbase(1), + &script!(Nop), + ); // let _coinbase_hash = coinbase.hash(); - // let (pk, sig) = create_random_signature_from_s_key(coinbase_key.clone(), 0.into(), 0); - // let excess = Commitment::from_public_key(&pk); - // let kernel = KernelBuilder::new() - // .with_signature(&sig) - // .with_excess(&excess) - // 
.with_features(KernelFeatures::COINBASE_KERNEL) - // .build() - // .unwrap(); - // - // let header1 = BlockHeader::from_previous(&genesis.header).unwrap(); - // txn.insert_header(header1.clone()); - // - // let mut mmr_position = 0; - // let mut mmr_leaf_index = 0; - // - // txn.insert_kernel(kernel, header1.hash(), mmr_position); - // txn.insert_utxo(coinbase.clone(), header1.hash(), mmr_leaf_index); - // - // db.commit(txn).unwrap(); - // - // validator.validate(&header1).unwrap(); - // - // //---------------------------------- Spend coinbase from h=1 ----------------------------------// - // let mut txn = DbTransaction::new(); - // - // // txn.spend_utxo(coinbase_hash); - // - // let output = UnblindedOutput::new(coinbase_value, coinbase_key, None); - // let fee = Fee::calculate(25 * uT, 1, 1, 2); - // let schema = txn_schema!(from: vec![output], to: vec![coinbase_value - fee], fee: 25 * uT); - // let (tx, _, params) = spend_utxos(schema); - // - // let v = consensus_manager.emission_schedule().block_reward(2) + fee; - // let (coinbase, key) = create_utxo(v, &factories, Some(OutputFeatures::create_coinbase(1))); - // let (pk, sig) = create_random_signature_from_s_key(key, 0.into(), 0); - // let excess = Commitment::from_public_key(&pk); - // let kernel = KernelBuilder::new() - // .with_signature(&sig) - // .with_excess(&excess) - // .with_features(KernelFeatures::COINBASE_KERNEL) - // .build() - // .unwrap(); - // - // let mut header2 = BlockHeader::from_previous(&header1).unwrap(); - // header2.total_kernel_offset = params.offset; - // txn.insert_header(header2.clone()); - // - // let header2_hash = header2.hash(); - // mmr_leaf_index += 1; - // txn.insert_utxo(coinbase.clone(), header2_hash.clone(), mmr_leaf_index); - // for utxo in tx.body.outputs() { - // mmr_leaf_index += 1; - // txn.insert_utxo(utxo.clone(), header2_hash.clone(), mmr_leaf_index); - // } - // mmr_position += 1; - // txn.insert_kernel(kernel, header2_hash.clone(), mmr_position); - // for 
kernel in tx.body.kernels() { - // mmr_position += 1; - // txn.insert_kernel(kernel.clone(), header2_hash.clone(), mmr_position); - // } - // - // db.commit(txn).unwrap(); - // - // validator.validate(&header2).unwrap(); - // - // //---------------------------------- Spend faucet UTXO --------------------------------------------// - // let mut txn = DbTransaction::new(); - // - // // txn.spend_utxo(faucet_hash); - // - // let output = UnblindedOutput::new(faucet_value, faucet_key, None); - // let fee = Fee::calculate(25 * uT, 1, 1, 2); - // let schema = txn_schema!(from: vec![output], to: vec![faucet_value - fee], fee: 25 * uT); - // let (tx, _, params) = spend_utxos(schema); - // - // let v = consensus_manager.emission_schedule().block_reward(3) + fee; - // let (coinbase, key) = create_utxo(v, &factories, Some(OutputFeatures::create_coinbase(1))); - // let (pk, sig) = create_random_signature_from_s_key(key, 0.into(), 0); - // let excess = Commitment::from_public_key(&pk); - // let kernel = KernelBuilder::new() - // .with_signature(&sig) - // .with_excess(&excess) - // .with_features(KernelFeatures::COINBASE_KERNEL) - // .build() - // .unwrap(); - // - // let mut header3 = BlockHeader::from_previous(&header2).unwrap(); - // header3.total_kernel_offset = params.offset; - // txn.insert_header(header3.clone()); - // - // let header3_hash = header3.hash(); - // mmr_leaf_index += 1; - // txn.insert_utxo(coinbase.clone(), header3_hash.clone(), mmr_leaf_index); - // for utxo in tx.body.outputs() { - // mmr_leaf_index += 1; - // txn.insert_utxo(utxo.clone(), header3_hash.clone(), mmr_leaf_index); - // } - // - // mmr_position += 1; - // txn.insert_kernel(kernel, header3_hash.clone(), mmr_position); - // for kernel in tx.body.kernels() { - // mmr_position += 1; - // txn.insert_kernel(kernel.clone(), header3_hash.clone(), mmr_position); - // } - // db.commit(txn).unwrap(); - // - // validator.validate(&header3).unwrap(); - // - // //---------------------------------- Try to 
inflate --------------------------------------------// - // let mut txn = DbTransaction::new(); - // - // let v = consensus_manager.emission_schedule().block_reward(4) + 1 * uT; - // let (coinbase, key) = create_utxo(v, &factories, Some(OutputFeatures::create_coinbase(1))); - // let (pk, sig) = create_random_signature_from_s_key(key, 0.into(), 0); - // let excess = Commitment::from_public_key(&pk); - // let kernel = KernelBuilder::new() - // .with_signature(&sig) - // .with_excess(&excess) - // .with_features(KernelFeatures::COINBASE_KERNEL) - // .build() - // .unwrap(); - // - // let header4 = BlockHeader::from_previous(&header3).unwrap(); - // txn.insert_header(header4.clone()); - // let header4_hash = header4.hash(); - // - // mmr_leaf_index += 1; - // txn.insert_utxo(coinbase.clone(), header4_hash.clone(), mmr_leaf_index); - // mmr_position += 1; - // txn.insert_kernel(kernel, header4_hash.clone(), mmr_position); - // - // db.commit(txn).unwrap(); - // - // validator.validate(&header4).unwrap_err(); - unimplemented!(); + let (pk, sig) = create_random_signature_from_s_key(coinbase_key, 0.into(), 0); + let excess = Commitment::from_public_key(&pk); + let kernel = KernelBuilder::new() + .with_signature(&sig) + .with_excess(&excess) + .with_features(KernelFeatures::COINBASE_KERNEL) + .build() + .unwrap(); + + let mut header1 = BlockHeader::from_previous(genesis.header()); + header1.kernel_mmr_size += 1; + header1.output_mmr_size += 1; + let achieved_difficulty = AchievedTargetDifficulty::try_construct( + genesis.header().pow_algo(), + genesis.accumulated_data().target_difficulty, + genesis.accumulated_data().achieved_difficulty, + ) + .unwrap(); + let accumulated_data = BlockHeaderAccumulatedData::builder(genesis.accumulated_data()) + .with_hash(header1.hash()) + .with_achieved_target_difficulty(achieved_difficulty) + .with_total_kernel_offset(header1.total_kernel_offset.clone()) + .build() + .unwrap(); + let header1 = ChainHeader::try_construct(header1, 
accumulated_data).unwrap(); + txn.insert_chain_header(header1.clone()); + + let mut mmr_position = 4; + let mut mmr_leaf_index = 4; + + txn.insert_kernel(kernel.clone(), header1.hash().clone(), mmr_position); + txn.insert_utxo(coinbase.clone(), header1.hash().clone(), 1, mmr_leaf_index); + + db.commit(txn).unwrap(); + utxo_sum = &coinbase.commitment + &utxo_sum; + kernel_sum = &kernel.excess + &kernel_sum; + validator + .validate(1, &utxo_sum, &kernel_sum, &*db.db_read_access().unwrap()) + .unwrap(); + + //---------------------------------- Try to inflate --------------------------------------------// + let mut txn = DbTransaction::new(); + + let v = consensus_manager.get_block_reward_at(2) + uT; + let (coinbase, key, _) = create_utxo(v, &factories, OutputFeatures::create_coinbase(1), &script!(Nop)); + let (pk, sig) = create_random_signature_from_s_key(key, 0.into(), 0); + let excess = Commitment::from_public_key(&pk); + let kernel = KernelBuilder::new() + .with_signature(&sig) + .with_excess(&excess) + .with_features(KernelFeatures::COINBASE_KERNEL) + .build() + .unwrap(); + + let mut header2 = BlockHeader::from_previous(header1.header()); + header2.kernel_mmr_size += 1; + header2.output_mmr_size += 1; + let achieved_difficulty = AchievedTargetDifficulty::try_construct( + genesis.header().pow_algo(), + genesis.accumulated_data().target_difficulty, + genesis.accumulated_data().achieved_difficulty, + ) + .unwrap(); + let accumulated_data = BlockHeaderAccumulatedData::builder(genesis.accumulated_data()) + .with_hash(header2.hash()) + .with_achieved_target_difficulty(achieved_difficulty) + .with_total_kernel_offset(header2.total_kernel_offset.clone()) + .build() + .unwrap(); + let header2 = ChainHeader::try_construct(header2, accumulated_data).unwrap(); + txn.insert_chain_header(header2.clone()); + utxo_sum = &coinbase.commitment + &utxo_sum; + kernel_sum = &kernel.excess + &kernel_sum; + mmr_leaf_index += 1; + txn.insert_utxo(coinbase, header2.hash().clone(), 2, 
mmr_leaf_index); + mmr_position += 1; + txn.insert_kernel(kernel, header2.hash().clone(), mmr_position); + + db.commit(txn).unwrap(); + + validator + .validate(2, &utxo_sum, &kernel_sum, &*db.db_read_access().unwrap()) + .unwrap_err(); } diff --git a/base_layer/core/src/validation/traits.rs b/base_layer/core/src/validation/traits.rs index 6e55b85904..e7fabb449b 100644 --- a/base_layer/core/src/validation/traits.rs +++ b/base_layer/core/src/validation/traits.rs @@ -22,7 +22,7 @@ use crate::{ blocks::{Block, BlockHeader}, - chain_storage::{BlockchainBackend, ChainBlock, DeletedBitmap}, + chain_storage::{BlockchainBackend, ChainBlock}, proof_of_work::AchievedTargetDifficulty, transactions::{transaction::Transaction, types::Commitment}, validation::{error::ValidationError, DifficultyCalculator}, @@ -42,7 +42,6 @@ pub trait PostOrphanBodyValidation: Send + Sync { block: &ChainBlock, backend: &B, metadata: &ChainMetadata, - deleted_bitmap: &DeletedBitmap, ) -> Result<(), ValidationError>; } diff --git a/base_layer/core/src/validation/transaction_validators.rs b/base_layer/core/src/validation/transaction_validators.rs index 7e2433fbf8..59ff3bfc41 100644 --- a/base_layer/core/src/validation/transaction_validators.rs +++ b/base_layer/core/src/validation/transaction_validators.rs @@ -21,6 +21,7 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
use crate::{ + blocks::BlockValidationError, chain_storage::{BlockchainBackend, BlockchainDatabase, MmrTree}, crypto::tari_utilities::Hashable, transactions::{transaction::Transaction, types::CryptoFactories}, @@ -97,7 +98,7 @@ impl TxInputAndMaturityValidator { impl MempoolTransactionValidation for TxInputAndMaturityValidator { fn validate(&self, tx: &Transaction) -> Result<(), ValidationError> { let db = self.db.db_read_access()?; - verify_not_stxos(tx, &*db)?; + verify_inputs_are_spendable(tx, &*db)?; check_not_duplicate_txos(tx, &*db)?; let tip_height = db.fetch_chain_metadata()?.height_of_longest_chain(); @@ -116,31 +117,47 @@ fn verify_timelocks(tx: &Transaction, current_height: u64) -> Result<(), Validat Ok(()) } -// This function checks that the inputs exists in the UTXO set but do not exist in the STXO set. -fn verify_not_stxos(tx: &Transaction, db: &B) -> Result<(), ValidationError> { - let deleted = db.fetch_deleted_bitmap()?; +/// This function checks that the inputs exists in the UTXO set but do not exist in the STXO set. +fn verify_inputs_are_spendable(tx: &Transaction, db: &B) -> Result<(), ValidationError> { let mut not_found_input = Vec::new(); for input in tx.body.inputs() { - if let Some((_, index, _height)) = db.fetch_output(&input.output_hash())? { - if deleted.bitmap().contains(index) { - warn!( - target: LOG_TARGET, - "Transaction validation failed due to already spent input: {}", input - ); - return Err(ValidationError::ContainsSTxO); + let output_hash = input.output_hash(); + if let Some(utxo_hash) = db.fetch_unspent_output_hash_by_commitment(&input.commitment)? { + // We know that the commitment exists in the UTXO set. Check that the output hash matches (i.e. 
all fields + // like output features match) + if utxo_hash == output_hash { + continue; } - } else if !tx - .body - .outputs() - .iter() - .any(|output| output.hash() == input.output_hash()) - { + warn!( target: LOG_TARGET, - "Transaction uses input: {} which does not exist yet", input + "Input spends a UTXO but does not produce the same hash as the output it spends: + {}", + input ); - not_found_input.push(input.output_hash()); + return Err(ValidationError::BlockError(BlockValidationError::InvalidInput)); } + + // Wallet needs to know if a transaction has already been mined and uses this error variant to do so. + if db.fetch_output(&output_hash)?.is_some() { + warn!( + target: LOG_TARGET, + "Transaction validation failed due to already spent input: {}", input + ); + // We know that the output here must be spent because `fetch_unspent_output_hash_by_commitment` would have + // been Some + return Err(ValidationError::ContainsSTxO); + } + + if tx.body.outputs().iter().any(|output| output.hash() == output_hash) { + continue; + } + + warn!( + target: LOG_TARGET, + "Transaction uses input: {} which does not exist yet", input + ); + not_found_input.push(output_hash); } if !not_found_input.is_empty() { return Err(ValidationError::UnknownInputs(not_found_input)); @@ -149,7 +166,7 @@ fn verify_not_stxos(tx: &Transaction, db: &B) -> Result<() Ok(()) } -// This function checks that the inputs and outputs do not exist in the STxO set. +/// This function checks that the outputs do not exist in the TxO set. 
fn check_not_duplicate_txos(transaction: &Transaction, db: &B) -> Result<(), ValidationError> { for output in transaction.body.outputs() { if db.fetch_mmr_leaf_index(MmrTree::Utxo, &output.hash())?.is_some() { diff --git a/base_layer/core/tests/async_db.rs b/base_layer/core/tests/async_db.rs index a69e8c4c14..afedd9c7d9 100644 --- a/base_layer/core/tests/async_db.rs +++ b/base_layer/core/tests/async_db.rs @@ -33,7 +33,7 @@ use std::ops::Deref; use tari_common::configuration::Network; use tari_core::{ blocks::Block, - chain_storage::{async_db::AsyncBlockchainDb, BlockAddResult}, + chain_storage::{async_db::AsyncBlockchainDb, BlockAddResult, PrunedOutput}, transactions::{ helpers::schema_to_transaction, tari_amount::T, @@ -103,11 +103,11 @@ fn fetch_async_utxo() { let db2 = AsyncBlockchainDb::new(adb); rt.spawn(async move { let utxo_check = db.fetch_utxo(utxo.hash()).await.unwrap().unwrap(); - assert_eq!(utxo_check, utxo); + assert_eq!(utxo_check, PrunedOutput::NotPruned { output: utxo }); }); rt.spawn(async move { let stxo_check = db2.fetch_utxo(stxo.hash()).await.unwrap().unwrap(); - assert_eq!(stxo_check, stxo); + assert_eq!(stxo_check, PrunedOutput::NotPruned { output: stxo }); }); }); } diff --git a/base_layer/core/tests/chain_storage_tests/chain_backend.rs b/base_layer/core/tests/chain_storage_tests/chain_backend.rs index 77ee4747e9..33eddc0867 100644 --- a/base_layer/core/tests/chain_storage_tests/chain_backend.rs +++ b/base_layer/core/tests/chain_storage_tests/chain_backend.rs @@ -32,59 +32,6 @@ use tari_crypto::tari_utilities::Hashable; use tari_storage::lmdb_store::LMDBConfig; use tari_test_utils::paths::create_temporary_data_path; -#[test] -#[ignore = "Required for pruned mode"] -fn lmdb_insert_contains_delete_and_fetch_utxo() { - let _db = create_test_db(); - unimplemented!() - // let factories = CryptoFactories::default(); - // let (utxo, _) = create_utxo(MicroTari(10_000), &factories, None); - // let hash = utxo.hash(); - // 
assert_eq!(db.contains(&DbKey::UnspentOutput(hash.clone())).unwrap(), false); - // - // let mut txn = DbTransaction::new(); - // txn.insert_utxo(utxo.clone()); - // assert!(db.write(txn).is_ok()); - // assert_eq!(db.contains(&DbKey::UnspentOutput(hash.clone())).unwrap(), true); - // if let Some(DbValue::UnspentOutput(retrieved_utxo)) = db.fetch(&DbKey::UnspentOutput(hash.clone())).unwrap() { - // assert_eq!(*retrieved_utxo, utxo); - // } else { - // panic!(); - // } - // - // let mut txn = DbTransaction::new(); - // txn.delete(DbKey::UnspentOutput(hash.clone())); - // assert!(db.write(txn).is_ok()); - // assert_eq!(db.contains(&DbKey::UnspentOutput(hash)).unwrap(), false);; -} - -#[test] -#[ignore = "Requires pruned mode"] -fn lmdb_insert_contains_delete_and_fetch_kernel() { - let _db = create_test_db(); - unimplemented!(); - // let kernel = create_test_kernel(5.into(), 0); - // let hash = kernel.hash(); - // assert_eq!(db.contains(&DbKey::TransactionKernel(hash.clone())).unwrap(), false); - // - // let mut txn = DbTransaction::new(); - // txn.insert_kernel(kernel.clone()); - // assert!(db.write(txn).is_ok()); - // assert_eq!(db.contains(&DbKey::TransactionKernel(hash.clone())).unwrap(), true); - // if let Some(DbValue::TransactionKernel(retrieved_kernel)) = - // db.fetch(&DbKey::TransactionKernel(hash.clone())).unwrap() - // { - // assert_eq!(*retrieved_kernel, kernel); - // } else { - // panic!(); - // } - // - // let mut txn = DbTransaction::new(); - // txn.delete(DbKey::TransactionKernel(hash.clone())); - // assert!(db.write(txn).is_ok()); - // assert_eq!(db.contains(&DbKey::TransactionKernel(hash)).unwrap(), false); -} - #[test] fn lmdb_insert_contains_delete_and_fetch_orphan() { let network = Network::LocalNet; @@ -115,184 +62,6 @@ fn lmdb_insert_contains_delete_and_fetch_orphan() { assert!(!db.contains(&DbKey::OrphanBlock(hash)).unwrap()); } -#[test] -#[ignore = "Needs to be moved to chain storage"] -fn lmdb_duplicate_utxo() { - let _db = create_test_db(); - 
unimplemented!("This test should probably be done in chain_storage rather"); - // let factories = CryptoFactories::default(); - // let (utxo1, _) = create_utxo(MicroTari(10_000), &factories, None); - // let (utxo2, _) = create_utxo(MicroTari(15_000), &factories, None); - // let hash1 = utxo1.hash(); - // let block_builder = - // - // let mut txn = DbTransaction::new(); - // txn.insert_utxo_with_hash(hash1.clone(), utxo1.clone()); - // assert!(db.write(txn).is_ok()); - // assert_eq!(db.contains(&DbKey::UnspentOutput(hash1.clone())).unwrap(), true); - // if let Some(DbValue::UnspentOutput(retrieved_utxo)) = db.fetch(&DbKey::UnspentOutput(hash1.clone())).unwrap() { - // assert_eq!(*retrieved_utxo, utxo1); - // } else { - // panic!(); - // } - // let mut txn = DbTransaction::new(); - // txn.insert_utxo_with_hash(hash1.clone(), utxo2.clone()); - // assert!(db.write(txn).is_err()); // This should fail - // if let Some(DbValue::UnspentOutput(retrieved_utxo)) = db.fetch(&DbKey::UnspentOutput(hash1.clone())).unwrap() { - // assert_eq!(*retrieved_utxo, utxo1); // original data should still be there - // } else { - // panic!(); - // } -} - -#[test] -#[ignore = "To be completed with pruned mode"] -fn lmdb_fetch_utxo_rp_nodes_and_count() { - let _db = create_test_db(); - // let factories = CryptoFactories::default(); - // - // let (utxo1, _) = create_utxo(MicroTari(10_000), &factories, None); - // let (utxo2, _) = create_utxo(MicroTari(20_000), &factories, None); - // let (utxo3, _) = create_utxo(MicroTari(30_000), &factories, None); - // let (utxo4, _) = create_utxo(MicroTari(40_000), &factories, None); - // let (utxo5, _) = create_utxo(MicroTari(50_000), &factories, None); - // let (utxo6, _) = create_utxo(MicroTari(60_000), &factories, None); - // let utxo_hash1 = utxo1.hash(); - // let utxo_hash2 = utxo2.hash(); - // let utxo_hash3 = utxo3.hash(); - // let utxo_hash4 = utxo4.hash(); - // let utxo_hash5 = utxo5.hash(); - // let utxo_hash6 = utxo6.hash(); - // let 
utxo_leaf_nodes = vec![ - // (utxo_hash1.clone(), true), - // (utxo_hash2.clone(), false), - // (utxo_hash3.clone(), true), - // (utxo_hash4.clone(), true), - // (utxo_hash5.clone(), false), - // (utxo_hash6.clone(), false), - // ]; - // let rp_leaf_nodes = vec![ - // (utxo1.proof.hash(), false), - // (utxo2.proof.hash(), false), - // (utxo3.proof.hash(), false), - // (utxo4.proof.hash(), false), - // (utxo5.proof.hash(), false), - // (utxo6.proof.hash(), false), - // ]; - // - // let mut txn = DbTransaction::new(); - // txn.insert_utxo(utxo1); - // txn.operations.push(WriteOperation::CreateMmrCheckpoint(MmrTree::Utxo)); - // txn.operations - // .push(WriteOperation::CreateMmrCheckpoint(MmrTree::RangeProof)); - // assert!(db.write(txn).is_ok()); - // let mut txn = DbTransaction::new(); - // txn.insert_utxo(utxo2); - // txn.insert_utxo(utxo3); - // txn.spend_utxo(utxo_hash1.clone()); - // txn.operations.push(WriteOperation::CreateMmrCheckpoint(MmrTree::Utxo)); - // txn.operations - // .push(WriteOperation::CreateMmrCheckpoint(MmrTree::RangeProof)); - // assert!(db.write(txn).is_ok()); - // let mut txn = DbTransaction::new(); - // txn.insert_utxo(utxo4); - // txn.insert_utxo(utxo5); - // txn.spend_utxo(utxo_hash3.clone()); - // txn.operations.push(WriteOperation::CreateMmrCheckpoint(MmrTree::Utxo)); - // txn.operations - // .push(WriteOperation::CreateMmrCheckpoint(MmrTree::RangeProof)); - // assert!(db.write(txn).is_ok()); - // let mut txn = DbTransaction::new(); - // txn.insert_utxo(utxo6); - // txn.spend_utxo(utxo_hash4.clone()); - // txn.operations.push(WriteOperation::CreateMmrCheckpoint(MmrTree::Utxo)); - // txn.operations - // .push(WriteOperation::CreateMmrCheckpoint(MmrTree::RangeProof)); - // assert!(db.write(txn).is_ok()); - // - // for i in 0..=3 { - // let mmr_node = db.fetch_mmr_node(MmrTree::Utxo, i, None).unwrap(); - // assert_eq!(mmr_node, utxo_leaf_nodes[i as usize]); - // let mmr_node = db.fetch_mmr_node(MmrTree::RangeProof, i, None).unwrap(); - // 
assert_eq!(mmr_node, rp_leaf_nodes[i as usize]); - // - // let mmr_node = db.fetch_mmr_nodes(MmrTree::Utxo, i, 3, None).unwrap(); - // assert_eq!(mmr_node.len(), 3); - // assert_eq!(mmr_node[0], utxo_leaf_nodes[i as usize]); - // assert_eq!(mmr_node[1], utxo_leaf_nodes[(i + 1) as usize]); - // assert_eq!(mmr_node[2], utxo_leaf_nodes[(i + 2) as usize]); - // let mmr_node = db.fetch_mmr_nodes(MmrTree::RangeProof, i, 3, None).unwrap(); - // assert_eq!(mmr_node.len(), 3); - // assert_eq!(mmr_node[0], rp_leaf_nodes[i as usize]); - // assert_eq!(mmr_node[1], rp_leaf_nodes[(i + 1) as usize]); - // assert_eq!(mmr_node[2], rp_leaf_nodes[(i + 2) as usize]); - // } - // - // assert!(db.fetch_mmr_node(MmrTree::Utxo, 7, None).is_err()); - // assert!(db.fetch_mmr_nodes(MmrTree::Utxo, 5, 4, None).is_err()); - // assert!(db.fetch_mmr_node(MmrTree::RangeProof, 7, None).is_err()); - // assert!(db.fetch_mmr_nodes(MmrTree::RangeProof, 5, 4, None).is_err()); - unimplemented!() -} - -#[test] -#[ignore = "To be completed with pruned mode"] -fn lmdb_fetch_kernel_nodes_and_count() { - let _db = create_test_db(); - // let kernel1 = create_test_kernel(100.into(), 0); - // let kernel2 = create_test_kernel(200.into(), 1); - // let kernel3 = create_test_kernel(300.into(), 1); - // let kernel4 = create_test_kernel(400.into(), 2); - // let kernel5 = create_test_kernel(500.into(), 2); - // let kernel6 = create_test_kernel(600.into(), 3); - // let leaf_nodes = vec![ - // (kernel1.hash(), false), - // (kernel2.hash(), false), - // (kernel3.hash(), false), - // (kernel4.hash(), false), - // (kernel5.hash(), false), - // (kernel6.hash(), false), - // ]; - // - // let mut txn = DbTransaction::new(); - // txn.insert_kernel(kernel1); - // txn.operations - // .push(WriteOperation::CreateMmrCheckpoint(MmrTree::Kernel)); - // assert!(db.write(txn).is_ok()); - // let mut txn = DbTransaction::new(); - // txn.insert_kernel(kernel2); - // txn.insert_kernel(kernel3); - // txn.operations - // 
.push(WriteOperation::CreateMmrCheckpoint(MmrTree::Kernel)); - // assert!(db.write(txn).is_ok()); - // let mut txn = DbTransaction::new(); - // txn.insert_kernel(kernel4); - // txn.insert_kernel(kernel5); - // txn.operations - // .push(WriteOperation::CreateMmrCheckpoint(MmrTree::Kernel)); - // assert!(db.write(txn).is_ok()); - // let mut txn = DbTransaction::new(); - // txn.insert_kernel(kernel6); - // txn.operations - // .push(WriteOperation::CreateMmrCheckpoint(MmrTree::Kernel)); - // assert!(db.write(txn).is_ok()); - // - // for i in 0..=3 { - // let mmr_node = db.fetch_mmr_node(MmrTree::Kernel, i, None).unwrap(); - // assert_eq!(mmr_node, leaf_nodes[i as usize]); - // - // let mmr_node = db.fetch_mmr_nodes(MmrTree::Kernel, i, 3, None).unwrap(); - // assert_eq!(mmr_node.len(), 3); - // assert_eq!(mmr_node[0], leaf_nodes[i as usize]); - // assert_eq!(mmr_node[1], leaf_nodes[(i + 1) as usize]); - // assert_eq!(mmr_node[2], leaf_nodes[(i + 2) as usize]); - // } - // - // assert!(db.fetch_mmr_node(MmrTree::Kernel, 7, None).is_err()); - // assert!(db.fetch_mmr_nodes(MmrTree::Kernel, 5, 4, None).is_err()); - unimplemented!() -} - #[test] fn lmdb_file_lock() { // Create temporary test folder diff --git a/base_layer/core/tests/chain_storage_tests/chain_storage.rs b/base_layer/core/tests/chain_storage_tests/chain_storage.rs index 7ca1e4b465..b716858d4c 100644 --- a/base_layer/core/tests/chain_storage_tests/chain_storage.rs +++ b/base_layer/core/tests/chain_storage_tests/chain_storage.rs @@ -1779,398 +1779,3 @@ fn input_malleability() { let mod_block_hash = mod_block.hash(); assert_ne!(*block_hash, mod_block_hash); } - -#[test] -#[ignore = "To be completed with pruned mode"] -fn pruned_mode_is_stxo() { - // let network = Network::LocalNet; - // let factories = CryptoFactories::default(); - // let consensus_constants = ConsensusConstantsBuilder::new(network) - // .with_emission_amounts(100_000_000.into(), &EMISSION, 100.into()) - // .build(); - // let (block0, output) = 
create_genesis_block(&factories, &consensus_constants); - // let consensus_manager = ConsensusManagerBuilder::new(network) - // .with_consensus_constants(consensus_constants.clone()) - // .with_block(block0.clone()) - // .build(); - // let validators = Validators::new(MockValidator::new(true), MockValidator::new(true)); - // let db = create_test_db(); - // let config = BlockchainDatabaseConfig { - // orphan_storage_capacity: 3, - // pruning_horizon: 2, - // pruning_interval: 2, - // }; - // let mut store = BlockchainDatabase::new(db, &consensus_manager, validators, config, false).unwrap(); - // let mut blocks = vec![block0]; - // let mut outputs = vec![vec![output]]; - // let txo_hash1 = blocks[0].body.outputs()[0].hash(); - // assert!(store.is_utxo(txo_hash1.clone()).unwrap()); - // - // // Block 1 - // let txs = vec![txn_schema!(from: vec![outputs[0][0].clone()], to: vec![50 * T])]; - // let coinbase_value = consensus_manager.emission_schedule().block_reward(1); - // assert_eq!( - // generate_new_block_with_coinbase( - // &mut store, - // &factories, - // &mut blocks, - // &mut outputs, - // txs, - // coinbase_value, - // &consensus_manager - // ) - // .unwrap(), - // BlockAddResult::Ok - // ); - // let metadata = store.get_chain_metadata().unwrap(); - // assert_eq!(metadata.height_of_longest_chain, Some(1)); - // let txo_hash2 = outputs[1][0].as_transaction_output(&factories).unwrap().hash(); - // let txo_hash3 = outputs[1][1].as_transaction_output(&factories).unwrap().hash(); - // let txo_hash4 = outputs[1][2].as_transaction_output(&factories).unwrap().hash(); - // assert!(store.is_stxo(txo_hash1.clone()).unwrap()); - // assert!(store.is_utxo(txo_hash2.clone()).unwrap()); - // assert!(store.is_utxo(txo_hash3.clone()).unwrap()); - // assert!(store.is_utxo(txo_hash4.clone()).unwrap()); - // - // // Block 2 - // let txs = vec![txn_schema!(from: vec![outputs[1][1].clone()], to: vec![40 * T])]; - // let coinbase_value = 
consensus_manager.emission_schedule().block_reward(2); - // assert_eq!( - // generate_new_block_with_coinbase( - // &mut store, - // &factories, - // &mut blocks, - // &mut outputs, - // txs, - // coinbase_value, - // &consensus_manager - // ) - // .unwrap(), - // BlockAddResult::Ok - // ); - // let metadata = store.get_chain_metadata().unwrap(); - // assert_eq!(metadata.height_of_longest_chain, Some(2)); - // let txo_hash5 = outputs[2][0].as_transaction_output(&factories).unwrap().hash(); - // let txo_hash6 = outputs[2][1].as_transaction_output(&factories).unwrap().hash(); - // let txo_hash7 = outputs[2][2].as_transaction_output(&factories).unwrap().hash(); - // assert!(store.is_stxo(txo_hash1.clone()).unwrap()); - // assert!(store.is_utxo(txo_hash2.clone()).unwrap()); - // assert!(store.is_stxo(txo_hash3.clone()).unwrap()); - // assert!(store.is_utxo(txo_hash4.clone()).unwrap()); - // assert!(store.is_utxo(txo_hash5.clone()).unwrap()); - // assert!(store.is_utxo(txo_hash6.clone()).unwrap()); - // assert!(store.is_utxo(txo_hash7.clone()).unwrap()); - // - // // Block 3 - // let txs = vec![txn_schema!(from: vec![outputs[2][2].clone()], to: vec![30 * T])]; - // let coinbase_value = consensus_manager.emission_schedule().block_reward(3); - // assert_eq!( - // generate_new_block_with_coinbase( - // &mut store, - // &factories, - // &mut blocks, - // &mut outputs, - // txs, - // coinbase_value, - // &consensus_manager - // ) - // .unwrap(), - // BlockAddResult::Ok - // ); - // let metadata = store.get_chain_metadata().unwrap(); - // assert_eq!(metadata.height_of_longest_chain, Some(3)); - // let txo_hash8 = outputs[3][0].as_transaction_output(&factories).unwrap().hash(); - // let txo_hash9 = outputs[3][1].as_transaction_output(&factories).unwrap().hash(); - // let txo_hash10 = outputs[3][2].as_transaction_output(&factories).unwrap().hash(); - // assert!(store.is_stxo(txo_hash1.clone()).unwrap()); - // assert!(store.is_utxo(txo_hash2.clone()).unwrap()); - // 
assert!(store.is_stxo(txo_hash3.clone()).unwrap()); - // assert!(store.is_utxo(txo_hash4.clone()).unwrap()); - // assert!(store.is_utxo(txo_hash5.clone()).unwrap()); - // assert!(store.is_utxo(txo_hash6.clone()).unwrap()); - // assert!(store.is_stxo(txo_hash7.clone()).unwrap()); - // assert!(store.is_utxo(txo_hash8.clone()).unwrap()); - // assert!(store.is_utxo(txo_hash9.clone()).unwrap()); - // assert!(store.is_utxo(txo_hash10.clone()).unwrap()); - // - // // Block 4 - // let txs = vec![txn_schema!(from: vec![outputs[3][1].clone()], to: vec![20 * T])]; - // let coinbase_value = consensus_manager.emission_schedule().block_reward(4); - // assert_eq!( - // generate_new_block_with_coinbase( - // &mut store, - // &factories, - // &mut blocks, - // &mut outputs, - // txs, - // coinbase_value, - // &consensus_manager - // ) - // .unwrap(), - // BlockAddResult::Ok - // ); - // let metadata = store.get_chain_metadata().unwrap(); - // assert_eq!(metadata.height_of_longest_chain, Some(4)); - // let txo_hash11 = outputs[4][0].as_transaction_output(&factories).unwrap().hash(); - // let txo_hash12 = outputs[4][1].as_transaction_output(&factories).unwrap().hash(); - // let txo_hash13 = outputs[4][2].as_transaction_output(&factories).unwrap().hash(); - // assert!(store.is_stxo(txo_hash1.clone()).unwrap()); - // assert!(store.is_utxo(txo_hash2.clone()).unwrap()); - // assert!(store.is_stxo(txo_hash3.clone()).unwrap()); - // assert!(store.is_utxo(txo_hash4.clone()).unwrap()); - // assert!(store.is_utxo(txo_hash5.clone()).unwrap()); - // assert!(store.is_utxo(txo_hash6.clone()).unwrap()); - // assert!(store.is_stxo(txo_hash7.clone()).unwrap()); - // assert!(store.is_utxo(txo_hash8.clone()).unwrap()); - // assert!(store.is_stxo(txo_hash9.clone()).unwrap()); - // assert!(store.is_utxo(txo_hash10.clone()).unwrap()); - // assert!(store.is_utxo(txo_hash11.clone()).unwrap()); - // assert!(store.is_utxo(txo_hash12.clone()).unwrap()); - // 
assert!(store.is_utxo(txo_hash13.clone()).unwrap()); - unimplemented!() -} - -#[test] -#[ignore = "To be completed with pruned mode"] -fn pruned_mode_fetch_insert_and_commit() { - // // This test demonstrates the basic steps involved in horizon syncing without any of the comms requests. - // let network = Network::LocalNet; - // // Create an archival chain for Alice - // let (mut alice_store, mut blocks, mut outputs, consensus_manager) = create_new_blockchain(network); - // // Block1 - // let txs = vec![txn_schema!( - // from: vec![outputs[0][0].clone()], - // to: vec![10 * T, 10 * T, 10 * T, 10 * T] - // )]; - // assert!(generate_new_block(&mut alice_store, &mut blocks, &mut outputs, txs, &consensus_manager).is_ok()); - // // Block2 - // let txs = vec![txn_schema!(from: vec![outputs[1][3].clone()], to: vec![6 * T])]; - // assert!(generate_new_block(&mut alice_store, &mut blocks, &mut outputs, txs, &consensus_manager).is_ok()); - // // Block3 - // let txs = vec![txn_schema!(from: vec![outputs[2][0].clone()], to: vec![2 * T])]; - // assert!(generate_new_block(&mut alice_store, &mut blocks, &mut outputs, txs, &consensus_manager).is_ok()); - // // Block4 - // let txs = vec![txn_schema!(from: vec![outputs[1][0].clone()], to: vec![2 * T])]; - // assert!(generate_new_block(&mut alice_store, &mut blocks, &mut outputs, txs, &consensus_manager).is_ok()); - // - // // Perform a manual horizon state sync between Alice and Bob - // let validators = Validators::new(MockValidator::new(true), MockValidator::new(true)); - // let config = BlockchainDatabaseConfig { - // orphan_storage_capacity: 3, - // pruning_horizon: 2, - // pruning_interval: 2, - // }; - // let bob_store = BlockchainDatabase::new( - // create_test_db(), - // &consensus_manager, - // validators, - // config, - // false, - // ) - // .unwrap(); - // let network_tip_height = alice_store - // .get_chain_metadata() - // .unwrap() - // .height_of_longest_chain - // .unwrap(); - // let bob_metadata = 
bob_store.get_chain_metadata().unwrap(); - // let sync_horizon_height = bob_metadata.horizon_block(network_tip_height) + 1; - // let state = bob_store.horizon_sync_begin().unwrap(); - // assert_eq!(state.metadata, bob_metadata); - // assert_eq!(state.initial_kernel_checkpoint_count, 1); - // assert_eq!(state.initial_utxo_checkpoint_count, 1); - // assert_eq!(state.initial_rangeproof_checkpoint_count, 1); - // - // // Sync headers - // let bob_height = bob_metadata.height_of_longest_chain.unwrap(); - // let headers = alice_store.fetch_headers(bob_height + 1, sync_horizon_height).unwrap(); - // assert!(bob_store.insert_valid_headers(headers).is_ok()); - // - // // Sync kernels - // let alice_num_kernels = alice_store - // .fetch_mmr_node_count(MmrTree::Kernel, sync_horizon_height) - // .unwrap(); - // let bob_num_kernels = bob_store - // .fetch_mmr_node_count(MmrTree::Kernel, sync_horizon_height) - // .unwrap(); - // let kernel_hashes = alice_store - // .fetch_mmr_nodes( - // MmrTree::Kernel, - // bob_num_kernels, - // alice_num_kernels - bob_num_kernels, - // Some(sync_horizon_height), - // ) - // .unwrap() - // .iter() - // .map(|n| n.0.clone()) - // .collect::>(); - // assert_eq!(kernel_hashes.len(), 3); - // let kernels = alice_store.fetch_kernels(kernel_hashes).unwrap(); - // assert_eq!(kernels.len(), 3); - // assert!(bob_store.horizon_sync_insert_kernels(kernels).is_ok()); - // bob_store.horizon_sync_create_mmr_checkpoint(MmrTree::Kernel).unwrap(); - // - // // Sync Utxos and RangeProofs - // let alice_num_utxos = alice_store - // .fetch_mmr_node_count(MmrTree::Utxo, sync_horizon_height) - // .unwrap(); - // let bob_num_utxos = bob_store - // .fetch_mmr_node_count(MmrTree::Utxo, sync_horizon_height) - // .unwrap(); - // let alice_num_rps = alice_store - // .fetch_mmr_node_count(MmrTree::RangeProof, sync_horizon_height) - // .unwrap(); - // let bob_num_rps = bob_store - // .fetch_mmr_node_count(MmrTree::RangeProof, sync_horizon_height) - // .unwrap(); - // 
assert_eq!(alice_num_utxos, alice_num_rps); - // assert_eq!(bob_num_utxos, bob_num_rps); - // // Check if some of the existing UTXOs need to be marked as deleted. - // let alice_utxo_nodes = alice_store - // .fetch_mmr_nodes(MmrTree::Utxo, 0, bob_num_utxos, Some(sync_horizon_height)) - // .unwrap(); - // let bob_utxo_nodes = bob_store - // .fetch_mmr_nodes(MmrTree::Utxo, 0, bob_num_utxos, Some(sync_horizon_height)) - // .unwrap(); - // assert_eq!(alice_utxo_nodes.len(), bob_utxo_nodes.len()); - // for index in 0..alice_utxo_nodes.len() { - // let (alice_utxo_hash, alice_utxo_deleted) = alice_utxo_nodes[index].clone(); - // let (bob_utxo_hash, bob_utxo_deleted) = bob_utxo_nodes[index].clone(); - // assert_eq!(alice_utxo_hash, bob_utxo_hash); - // if alice_utxo_deleted && !bob_utxo_deleted { - // assert!(bob_store.delete_mmr_node(MmrTree::Utxo, &bob_utxo_hash).is_ok()); - // assert!(bob_store.spend_utxo(bob_utxo_hash).is_ok()); - // } - // } - // - // // Continue with syncing of missing MMR nodes - // let utxo_mmr_nodes = alice_store - // .fetch_mmr_nodes( - // MmrTree::Utxo, - // bob_num_utxos, - // alice_num_utxos - bob_num_utxos, - // Some(sync_horizon_height), - // ) - // .unwrap(); - // let rp_hashes = alice_store - // .fetch_mmr_nodes( - // MmrTree::RangeProof, - // bob_num_rps, - // alice_num_rps - bob_num_rps, - // Some(sync_horizon_height), - // ) - // .unwrap() - // .iter() - // .map(|n| n.0.clone()) - // .collect::>(); - // assert_eq!(utxo_mmr_nodes.len(), 9); - // assert_eq!(rp_hashes.len(), 9); - // for (index, (utxo_hash, is_stxo)) in utxo_mmr_nodes.into_iter().enumerate() { - // if is_stxo { - // assert!(bob_store.insert_mmr_node(MmrTree::Utxo, utxo_hash, is_stxo).is_ok()); - // assert!(bob_store - // .insert_mmr_node(MmrTree::RangeProof, rp_hashes[index].clone(), false) - // .is_ok()); - // } else { - // let txo = alice_store.fetch_txo(utxo_hash).unwrap().unwrap(); - // assert!(bob_store.insert_utxo(txo).is_ok()); - // } - // } - // - // 
bob_store.horizon_sync_create_mmr_checkpoint(MmrTree::Utxo).unwrap(); - // bob_store - // .horizon_sync_create_mmr_checkpoint(MmrTree::RangeProof) - // .unwrap(); - // - // // Finalize horizon state sync - // bob_store.horizon_sync_commit().unwrap(); - // assert!(bob_store.get_horizon_sync_state().unwrap().is_none()); - // - // // Check Metadata - // let bob_metadata = bob_store.get_chain_metadata().unwrap(); - // let sync_height_header = blocks[sync_horizon_height as usize].header.clone(); - // assert_eq!(bob_metadata.height_of_longest_chain, Some(sync_horizon_height)); - // assert_eq!(bob_metadata.best_block, Some(sync_height_header.hash())); - // - // // Check headers - // let alice_headers = alice_store - // .fetch_headers(0, bob_metadata.height_of_longest_chain()) - // .unwrap(); - // let bob_headers = bob_store - // .fetch_headers(0, bob_metadata.height_of_longest_chain()) - // .unwrap(); - // assert_eq!(alice_headers, bob_headers); - // // Check Kernel MMR nodes - // let alice_num_kernels = alice_store - // .fetch_mmr_node_count(MmrTree::Kernel, sync_horizon_height) - // .unwrap(); - // let bob_num_kernels = bob_store - // .fetch_mmr_node_count(MmrTree::Kernel, sync_horizon_height) - // .unwrap(); - // assert_eq!(alice_num_kernels, bob_num_kernels); - // let alice_kernel_nodes = alice_store - // .fetch_mmr_nodes(MmrTree::Kernel, 0, alice_num_kernels, Some(sync_horizon_height)) - // .unwrap(); - // let bob_kernel_nodes = bob_store - // .fetch_mmr_nodes(MmrTree::Kernel, 0, bob_num_kernels, Some(sync_horizon_height)) - // .unwrap(); - // assert_eq!(alice_kernel_nodes, bob_kernel_nodes); - // // Check Kernels - // let alice_kernel_hashes = alice_kernel_nodes.iter().map(|n| n.0.clone()).collect::>(); - // let bob_kernels_hashes = bob_kernel_nodes.iter().map(|n| n.0.clone()).collect::>(); - // let alice_kernels = alice_store.fetch_kernels(alice_kernel_hashes).unwrap(); - // let bob_kernels = bob_store.fetch_kernels(bob_kernels_hashes).unwrap(); - // 
assert_eq!(alice_kernels, bob_kernels); - // // Check UTXO MMR nodes - // let alice_num_utxos = alice_store - // .fetch_mmr_node_count(MmrTree::Utxo, sync_horizon_height) - // .unwrap(); - // let bob_num_utxos = bob_store - // .fetch_mmr_node_count(MmrTree::Utxo, sync_horizon_height) - // .unwrap(); - // assert_eq!(alice_num_utxos, bob_num_utxos); - // let alice_utxo_nodes = alice_store - // .fetch_mmr_nodes(MmrTree::Utxo, 0, alice_num_utxos, Some(sync_horizon_height)) - // .unwrap(); - // let bob_utxo_nodes = bob_store - // .fetch_mmr_nodes(MmrTree::Utxo, 0, bob_num_utxos, Some(sync_horizon_height)) - // .unwrap(); - // assert_eq!(alice_utxo_nodes, bob_utxo_nodes); - // // Check RangeProof MMR nodes - // let alice_num_rps = alice_store - // .fetch_mmr_node_count(MmrTree::RangeProof, sync_horizon_height) - // .unwrap(); - // let bob_num_rps = bob_store - // .fetch_mmr_node_count(MmrTree::RangeProof, sync_horizon_height) - // .unwrap(); - // assert_eq!(alice_num_rps, bob_num_rps); - // let alice_rps_nodes = alice_store - // .fetch_mmr_nodes(MmrTree::RangeProof, 0, alice_num_rps, Some(sync_horizon_height)) - // .unwrap(); - // let bob_rps_nodes = bob_store - // .fetch_mmr_nodes(MmrTree::RangeProof, 0, bob_num_rps, Some(sync_horizon_height)) - // .unwrap(); - // assert_eq!(alice_rps_nodes, bob_rps_nodes); - // // Check UTXOs - // let mut alice_utxos = Vec::::new(); - // for (hash, deleted) in alice_utxo_nodes { - // if !deleted { - // alice_utxos.push(alice_store.fetch_txo(hash).unwrap().unwrap()); - // } - // } - // let mut bob_utxos = Vec::::new(); - // for (hash, deleted) in bob_utxo_nodes { - // if !deleted { - // bob_utxos.push(bob_store.fetch_utxo(hash).unwrap()); - // } - // } - // assert_eq!(alice_utxos, bob_utxos); - // - // // Check if chain can be extending using blocks after horizon state - // let height = sync_horizon_height as usize + 1; - // assert_eq!( - // bob_store.add_block(blocks[height].clone().into()).unwrap(), - // BlockAddResult::Ok - // ); - 
unimplemented!() -} diff --git a/base_layer/core/tests/helpers/block_builders.rs b/base_layer/core/tests/helpers/block_builders.rs index 66c929fb97..6ff5c2102a 100644 --- a/base_layer/core/tests/helpers/block_builders.rs +++ b/base_layer/core/tests/helpers/block_builders.rs @@ -63,7 +63,6 @@ use tari_core::{ use tari_crypto::{ keys::PublicKey as PublicKeyTrait, script, - script::TariScript, tari_utilities::{hash::Hashable, hex::Hex}, }; use tari_mmr::MutableMmr; @@ -114,8 +113,7 @@ pub fn _create_act_gen_block() { let factories = CryptoFactories::default(); let mut header = BlockHeader::new(consensus_manager.consensus_constants(0).blockchain_version()); let value = consensus_manager.emission_schedule().block_reward(0); - let (mut utxo, key, _) = create_utxo(value, &factories, None, &TariScript::default()); - utxo.features = OutputFeatures::create_coinbase(1); + let (utxo, key, _) = create_utxo(value, &factories, OutputFeatures::create_coinbase(1), &script![Nop]); let (pk, sig) = create_random_signature_from_s_key(key.clone(), 0.into(), 0); let excess = Commitment::from_public_key(&pk); let kernel = KernelBuilder::new() diff --git a/base_layer/core/tests/horizon_state_sync.rs b/base_layer/core/tests/horizon_state_sync.rs deleted file mode 100644 index b689e64c29..0000000000 --- a/base_layer/core/tests/horizon_state_sync.rs +++ /dev/null @@ -1,749 +0,0 @@ -// Copyright 2019. The Tari Project -// -// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the -// following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following -// disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the -// following disclaimer in the documentation and/or other materials provided with the distribution. -// -// 3. 
Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote -// products derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// #[allow(dead_code)] -// mod helpers; -// -// use crate::helpers::block_builders::append_block_with_coinbase; -// use futures::StreamExt; -// use helpers::{block_builders::create_genesis_block, nodes::create_network_with_2_base_nodes_with_config}; -// use tari_core::{ -// base_node::{ -// comms_interface::BlockEvent, -// service::BaseNodeServiceConfig, -// state_machine_service::{ -// states::{HorizonStateSync, HorizonSyncConfig, StateEvent, StatusInfo, SyncPeerConfig}, -// BaseNodeStateMachine, -// BaseNodeStateMachineConfig, -// }, -// BlockSyncConfig, -// }, -// chain_storage::{BlockchainBackend, BlockchainDatabase, BlockchainDatabaseConfig, MmrTree}, -// consensus::{ConsensusConstantsBuilder, ConsensusManagerBuilder, Network}, -// mempool::MempoolServiceConfig, -// transactions::{ -// fee::Fee, -// helpers::{create_utxo, spend_utxos}, -// tari_amount::uT, -// transaction::UnblindedOutput, -// types::CryptoFactories, -// }, -// txn_schema, -// validation::mocks::MockValidator, -// }; -// use 
tari_mmr::MmrCacheConfig; -// use tari_p2p::services::liveness::LivenessConfig; -// use tari_shutdown::Shutdown; -// use tari_test_utils::unpack_enum; -// use tempfile::tempdir; -// use tokio::{ -// runtime::Runtime, -// sync::{broadcast, watch}, -// }; -// -// static EMISSION: [u64; 2] = [10, 10]; -// #[test] -// fn test_pruned_mode_sync_with_future_horizon_sync_height() { -// // Number of blocks to create in addition to the genesis -// const NUM_BLOCKS: u64 = 10; -// const SYNC_OFFSET: u64 = 0; -// const PRUNING_HORIZON: u64 = 4; -// let mut runtime = Runtime::new().unwrap(); -// let factories = CryptoFactories::default(); -// let temp_dir = tempdir().unwrap(); -// let network = Network::LocalNet; -// let consensus_constants = ConsensusConstantsBuilder::new(network) -// .with_emission_amounts(100_000_000.into(), &EMISSION, 100.into()) -// .build(); -// let (genesis_block, _) = create_genesis_block(&factories, &consensus_constants); -// let consensus_manager = ConsensusManagerBuilder::new(network) -// .with_consensus_constants(consensus_constants) -// .with_block(genesis_block.clone()) -// .build(); -// let blockchain_db_config = BlockchainDatabaseConfig { -// orphan_storage_capacity: 3, -// pruning_horizon: PRUNING_HORIZON, -// pruning_interval: 5, -// }; -// let (alice_node, bob_node, consensus_manager) = create_network_with_2_base_nodes_with_config( -// &mut runtime, -// blockchain_db_config, -// BaseNodeServiceConfig::default(), -// MmrCacheConfig::default(), -// MempoolServiceConfig::default(), -// LivenessConfig::default(), -// consensus_manager, -// temp_dir.path().to_str().unwrap(), -// ); -// let mut horizon_sync_config = HorizonSyncConfig::default(); -// horizon_sync_config.horizon_sync_height_offset = SYNC_OFFSET; -// let state_machine_config = BaseNodeStateMachineConfig { -// block_sync_config: Default::default(), -// horizon_sync_config, -// sync_peer_config: Default::default(), -// orphan_db_clean_out_threshold: 100, -// }; -// let shutdown = 
Shutdown::new(); -// let (state_change_event_publisher, _) = broadcast::channel(10); -// let (status_event_sender, _status_event_receiver) = tokio::sync::watch::channel(StatusInfo::new()); -// let mut alice_state_machine = BaseNodeStateMachine::new( -// alice_node.blockchain_db.clone(), -// alice_node.local_nci.clone(), -// alice_node.outbound_nci.clone(), -// alice_node.comms.connectivity(), -// alice_node.comms.peer_manager(), -// alice_node.chain_metadata_handle.get_event_stream(), -// state_machine_config, -// SyncValidators::new(MockValidator::new(true), MockValidator::new(true)), -// status_event_sender, -// state_change_event_publisher, -// alice_node.blockchain_state.clone(), -// consensus_manager, -// shutdown.to_signal(), -// ); -// -// runtime.block_on(async { -// let alice_db = &alice_node.blockchain_db; -// let bob_db = &bob_node.blockchain_db; -// let mut prev_block = genesis_block.clone(); -// for _ in 0..NUM_BLOCKS { -// // Need coinbases for kernels and utxos -// let (block, _) = -// append_block_with_coinbase(&factories, bob_db, &prev_block, vec![], &consensus_manager, 1.into()) -// .unwrap(); -// prev_block = block; -// } -// -// let node_count = bob_db.fetch_mmr_node_count(MmrTree::Kernel, 6).unwrap(); -// assert_eq!(node_count, 7); -// // Both nodes are running in pruned mode and can not use block sync to synchronize state. Sync horizon state -// // from genesis block to horizon_sync_height and then block sync to the tip. 
-// let network_tip = bob_db.get_chain_metadata().unwrap(); -// assert_eq!(network_tip.effective_pruned_height(), 6); -// let mut sync_peers = vec![SyncPeer { -// node_id: bob_node.node_identity.node_id().clone(), -// chain_metadata: network_tip.clone(), -// }]; -// -// // Synchronize headers -// let state_event = HeaderSync::new(network_tip.clone(), sync_peers.clone()) -// .next_event(&mut alice_state_machine) -// .await; -// unpack_enum!(StateEvent::HeadersSynchronized(local_metadata, sync_height) = state_event); -// -// // Synchronize Kernels and UTXOs -// assert_eq!(sync_height, NUM_BLOCKS - PRUNING_HORIZON + SYNC_OFFSET); -// let state_event = HorizonStateSync::new(local_metadata, network_tip.clone(), sync_peers.clone(), sync_height) -// .next_event(&mut alice_state_machine) -// .await; -// assert_eq!(state_event, StateEvent::HorizonStateSynchronized); -// let alice_metadata = alice_db.get_chain_metadata().unwrap(); -// // Local height should now be at the horizon sync height -// assert_eq!(alice_metadata.height_of_longest_chain(), sync_height); -// assert_eq!(alice_metadata.effective_pruned_height(), sync_height); -// -// // Check Kernel MMR nodes after horizon sync -// let alice_num_kernels = alice_db.fetch_mmr_node_count(MmrTree::Kernel, sync_height).unwrap(); -// let bob_num_kernels = bob_db.fetch_mmr_node_count(MmrTree::Kernel, sync_height).unwrap(); -// assert_eq!(alice_num_kernels, bob_num_kernels); -// let alice_kernel_nodes = alice_db -// .fetch_mmr_nodes(MmrTree::Kernel, 0, alice_num_kernels, Some(sync_height)) -// .unwrap(); -// let bob_kernel_nodes = bob_db -// .fetch_mmr_nodes(MmrTree::Kernel, 0, bob_num_kernels, Some(sync_height)) -// .unwrap(); -// assert_eq!(alice_kernel_nodes, bob_kernel_nodes); -// -// // Synchronize full blocks -// let state_event = BestChainMetadataBlockSync -// .next_event(&mut alice_state_machine, &network_tip, &mut sync_peers) -// .await; -// assert_eq!(state_event, StateEvent::BlocksSynchronized); -// let alice_metadata 
= alice_db.get_chain_metadata().unwrap(); -// // Local height should now be at the horizon sync height -// assert_eq!( -// alice_metadata.effective_pruned_height(), -// network_tip.height_of_longest_chain() - network_tip.pruning_horizon() -// ); -// -// check_final_state(&alice_db, &bob_db); -// }); -// } -// -// #[test] -// fn test_pruned_mode_sync_with_spent_utxos() { -// let mut runtime = Runtime::new().unwrap(); -// let factories = CryptoFactories::default(); -// let temp_dir = tempdir().unwrap(); -// let network = Network::LocalNet; -// let consensus_constants = ConsensusConstantsBuilder::new(network) -// .with_emission_amounts(100_000_000.into(), &EMISSION, 100.into()) -// .build(); -// let (genesis_block, output) = create_genesis_block(&factories, &consensus_constants); -// let consensus_manager = ConsensusManagerBuilder::new(network) -// .with_consensus_constants(consensus_constants) -// .with_block(genesis_block.clone()) -// .build(); -// let blockchain_db_config = BlockchainDatabaseConfig { -// orphan_storage_capacity: 3, -// pruning_horizon: 4, -// pruning_interval: 5, -// }; -// let (alice_node, bob_node, consensus_manager) = create_network_with_2_base_nodes_with_config( -// &mut runtime, -// blockchain_db_config, -// BaseNodeServiceConfig::default(), -// MmrCacheConfig::default(), -// MempoolServiceConfig::default(), -// LivenessConfig::default(), -// consensus_manager, -// temp_dir.path().to_str().unwrap(), -// ); -// let mut horizon_sync_config = HorizonSyncConfig::default(); -// horizon_sync_config.horizon_sync_height_offset = 0; -// let state_machine_config = BaseNodeStateMachineConfig { -// block_sync_config: BlockSyncConfig::default(), -// horizon_sync_config, -// sync_peer_config: SyncPeerConfig::default(), -// }; -// let shutdown = Shutdown::new(); -// let (state_change_event_publisher, _) = broadcast::channel(10); -// let (status_event_sender, _status_event_receiver) = tokio::sync::watch::channel(StatusInfo::new()); -// let mut 
alice_state_machine = BaseNodeStateMachine::new( -// &alice_node.blockchain_db, -// &alice_node.local_nci, -// &alice_node.outbound_nci, -// alice_node.comms.connectivity(), -// alice_node.comms.peer_manager(), -// alice_node.chain_metadata_handle.get_event_stream(), -// state_machine_config, -// SyncValidators::new( -// MockValidator::new(true), -// // TODO: Need a test helper which adds the correct reward to a coinbase UTXO as per consensus to use the -// // ChainBalanceValidator -// MockValidator::new(true), -// ), -// shutdown.to_signal(), -// status_event_sender, -// state_change_event_publisher, -// ); -// -// runtime.block_on(async { -// let mut outputs = vec![output]; -// let mut prev_block = genesis_block; -// for _ in 0..4 { -// // Need coinbases for kernels and utxos -// let (block, coinbase) = append_block_with_coinbase( -// &factories, -// &bob_node.blockchain_db, -// &prev_block, -// vec![], -// &consensus_manager, -// 1.into(), -// ) -// .unwrap(); -// prev_block = block; -// outputs.push(coinbase); -// } -// -// // Spend coinbases before horizon height -// { -// let supply = consensus_manager.emission_schedule().supply_at_block(4); -// let fee = Fee::calculate(25 * uT, 5, 5, 2); -// let schema = txn_schema!(from: outputs, to: vec![supply - fee], fee: 25 * uT); -// let (tx, _, _) = spend_utxos(schema); -// -// let (block, _) = append_block_with_coinbase( -// &factories, -// &bob_node.blockchain_db, -// &prev_block, -// vec![tx], -// &consensus_manager, -// 1.into(), -// ) -// .unwrap(); -// prev_block = block; -// } -// -// let mut outputs = vec![]; -// for _ in 0..6 { -// // Need coinbases for kernels and utxos -// let (block, coinbase) = append_block_with_coinbase( -// &factories, -// &bob_node.blockchain_db, -// &prev_block, -// vec![], -// &consensus_manager, -// 1.into(), -// ) -// .unwrap(); -// prev_block = block; -// outputs.push(coinbase); -// } -// -// // Spend the other coinbases (why not?) 
-// { -// let supply = consensus_manager.emission_schedule().supply_at_block(4); -// let fee = Fee::calculate(25 * uT, 5, 5, 2); -// let schema = txn_schema!(from: outputs, to: vec![supply - fee], fee: 25 * uT); -// let (tx, _, _) = spend_utxos(schema); -// -// let (_, _) = append_block_with_coinbase( -// &factories, -// &bob_node.blockchain_db, -// &prev_block, -// vec![tx], -// &consensus_manager, -// 1.into(), -// ) -// .unwrap(); -// } -// -// // Both nodes are running in pruned mode and can not use block sync to synchronize state. Sync horizon state -// // from genesis block to horizon_sync_height and then block sync to the tip. -// let alice_db = &alice_node.blockchain_db; -// let bob_db = &bob_node.blockchain_db; -// let network_tip = bob_db.get_chain_metadata().unwrap(); -// // effective_pruned_height is 6 because the interval is 5 - we have 12 blocks but the last time the node was -// // pruned was at 10 (10 - 4 = 6) -// assert_eq!(network_tip.effective_pruned_height(), 6); -// assert_eq!(network_tip.height_of_longest_chain(), 12); -// let mut sync_peers = vec![SyncPeer { -// node_id: bob_node.node_identity.node_id().clone(), -// chain_metadata: network_tip.clone(), -// }]; -// let state_event = HeaderSync::new(network_tip.clone(), sync_peers.clone()) -// .next_event(&mut alice_state_machine) -// .await; -// unpack_enum!(StateEvent::HeadersSynchronized(local_metadata, sync_height) = state_event); -// // network tip - pruning horizon + offset -// assert_eq!(sync_height, 12 - 4 + 0); -// let state_event = HorizonStateSync::new(local_metadata, network_tip.clone(), sync_peers.clone(), sync_height) -// .next_event(&mut alice_state_machine) -// .await; -// assert_eq!(state_event, StateEvent::HorizonStateSynchronized); -// let state_event = BestChainMetadataBlockSync -// .next_event(&mut alice_state_machine, &network_tip, &mut sync_peers) -// .await; -// assert_eq!(state_event, StateEvent::BlocksSynchronized); -// -// check_final_state(&alice_db, &bob_db); -// 
}); -// } -// -// #[test] -// fn test_pruned_mode_sync_with_spent_faucet_utxo_before_horizon() { -// let mut runtime = Runtime::new().unwrap(); -// let factories = CryptoFactories::default(); -// let temp_dir = tempdir().unwrap(); -// -// let consensus_manager = ConsensusManagerBuilder::new(Network::Rincewind).build(); -// let mut genesis_block = consensus_manager.get_genesis_block(); -// let faucet_value = 5000 * uT; -// let (faucet_utxo, faucet_key) = create_utxo(faucet_value, &factories, None); -// genesis_block.body.add_output(faucet_utxo); -// // Create a LocalNet consensus manager that uses rincewind consensus constants and has a custom rincewind genesis -// // block that contains an extra faucet utxo -// let consensus_manager = ConsensusManagerBuilder::new(Network::LocalNet) -// .with_block(genesis_block.clone()) -// .with_consensus_constants(consensus_manager.consensus_constants(0).clone()) -// .build(); -// -// let blockchain_db_config = BlockchainDatabaseConfig { -// orphan_storage_capacity: 3, -// pruning_horizon: 4, -// pruning_interval: 4, -// }; -// let (alice_node, bob_node, consensus_manager) = create_network_with_2_base_nodes_with_config( -// &mut runtime, -// blockchain_db_config, -// BaseNodeServiceConfig::default(), -// MmrCacheConfig::default(), -// MempoolServiceConfig::default(), -// LivenessConfig::default(), -// consensus_manager, -// temp_dir.path().to_str().unwrap(), -// ); -// let mut horizon_sync_config = HorizonSyncConfig::default(); -// horizon_sync_config.horizon_sync_height_offset = 0; -// let state_machine_config = BaseNodeStateMachineConfig { -// block_sync_config: BlockSyncConfig::default(), -// horizon_sync_config, -// sync_peer_config: SyncPeerConfig::default(), -// }; -// let shutdown = Shutdown::new(); -// let (state_change_event_publisher, _) = broadcast::channel(10); -// let (status_event_sender, _status_event_receiver) = watch::channel(StatusInfo::new()); -// let mut alice_state_machine = BaseNodeStateMachine::new( -// 
&alice_node.blockchain_db, -// &alice_node.local_nci, -// &alice_node.outbound_nci, -// alice_node.comms.connectivity(), -// alice_node.comms.peer_manager(), -// alice_node.chain_metadata_handle.get_event_stream(), -// state_machine_config, -// SyncValidators::new( -// MockValidator::new(true), -// // TODO: Need a test helper which adds the correct reward to a coinbase UTXO as per consensus to use the -// // ChainBalanceValidator -// MockValidator::new(true), -// ), -// shutdown.to_signal(), -// status_event_sender, -// state_change_event_publisher, -// ); -// -// runtime.block_on(async { -// let mut prev_block = genesis_block; -// for _ in 0..4 { -// // Need coinbases for kernels and utxos -// let (block, _) = append_block_with_coinbase( -// &factories, -// &bob_node.blockchain_db, -// &prev_block, -// vec![], -// &consensus_manager, -// 1.into(), -// ) -// .unwrap(); -// -// prev_block = block; -// } -// -// // Spend faucet UTXO -// { -// let fee = Fee::calculate(25 * uT, 1, 1, 2); -// let output = UnblindedOutput::new(faucet_value, faucet_key, None); -// let schema = txn_schema!(from: vec![output], to: vec![faucet_value - fee], fee: 25 * uT); -// let (tx, _, _) = spend_utxos(schema); -// -// // Need coinbases for kernels and utxos -// let (block, _) = append_block_with_coinbase( -// &factories, -// &bob_node.blockchain_db, -// &prev_block, -// vec![tx], -// &consensus_manager, -// 1.into(), -// ) -// .unwrap(); -// prev_block = block; -// } -// -// for _ in 0..6 { -// // Need coinbases for kernels and utxos -// let (block, _) = append_block_with_coinbase( -// &factories, -// &bob_node.blockchain_db, -// &prev_block, -// vec![], -// &consensus_manager, -// 1.into(), -// ) -// .unwrap(); -// prev_block = block; -// } -// -// // Both nodes are running in pruned mode and can not use block sync to synchronize state. Sync horizon state -// // from genesis block to horizon_sync_height and then block sync to the tip. 
-// let alice_db = &alice_node.blockchain_db; -// let bob_db = &bob_node.blockchain_db; -// let network_tip = bob_db.get_chain_metadata().unwrap(); -// assert_eq!(network_tip.height_of_longest_chain(), 11); -// let mut sync_peers = vec![SyncPeer { -// node_id: bob_node.node_identity.node_id().clone(), -// chain_metadata: network_tip.clone(), -// }]; -// let state_event = HeaderSync::new(network_tip.clone(), sync_peers.clone()) -// .next_event(&mut alice_state_machine) -// .await; -// unpack_enum!(StateEvent::HeadersSynchronized(local_metadata, sync_height) = state_event); -// // network tip - pruning horizon + offset -// assert_eq!(sync_height, 11 - 4 + 0); -// let state_event = HorizonStateSync::new(local_metadata, network_tip.clone(), sync_peers.clone(), sync_height) -// .next_event(&mut alice_state_machine) -// .await; -// assert_eq!(state_event, StateEvent::HorizonStateSynchronized); -// let state_event = BestChainMetadataBlockSync -// .next_event(&mut alice_state_machine, &network_tip, &mut sync_peers) -// .await; -// assert_eq!(state_event, StateEvent::BlocksSynchronized); -// -// check_final_state(&alice_db, &bob_db); -// }); -// } -// -// fn check_final_state(_alice_db: &BlockchainDatabase, _bob_db: &BlockchainDatabase) { -// // let network_tip = bob_db.get_chain_metadata().unwrap(); -// // -// // let alice_metadata = alice_db.get_chain_metadata().unwrap(); -// // assert_eq!( -// // alice_metadata.height_of_longest_chain(), -// // network_tip.height_of_longest_chain() -// // ); -// // assert_eq!( -// // alice_metadata.best_block.as_ref().unwrap(), -// // network_tip.best_block.as_ref().unwrap() -// // ); -// // assert_eq!( -// // alice_metadata.accumulated_difficulty.as_ref().unwrap(), -// // network_tip.accumulated_difficulty.as_ref().unwrap() -// // ); -// // -// // // Check headers -// // let network_tip_height = network_tip.height_of_longest_chain.unwrap_or(0); -// // let alice_headers = alice_db.fetch_headers(0, network_tip_height).unwrap(); -// // let 
bob_headers = bob_db.fetch_headers(0, network_tip_height).unwrap(); -// // assert_eq!(alice_headers, bob_headers); -// // -// // // Check Kernel MMR nodes -// // let alice_num_kernels = alice_db -// // .fetch_mmr_node_count(MmrTree::Kernel, network_tip_height) -// // .unwrap(); -// // let bob_num_kernels = bob_db -// // .fetch_mmr_node_count(MmrTree::Kernel, network_tip_height) -// // .unwrap(); -// // assert_eq!(alice_num_kernels, bob_num_kernels); -// // let alice_kernel_nodes = alice_db -// // .fetch_mmr_nodes(MmrTree::Kernel, 0, alice_num_kernels, Some(network_tip_height)) -// // .unwrap(); -// // let bob_kernel_nodes = bob_db -// // .fetch_mmr_nodes(MmrTree::Kernel, 0, bob_num_kernels, Some(network_tip_height)) -// // .unwrap(); -// // assert_eq!(alice_kernel_nodes, bob_kernel_nodes); -// // -// // // Check Kernels -// // let alice_kernel_hashes = alice_kernel_nodes.iter().map(|n| n.0.clone()).collect::>(); -// // let bob_kernels_hashes = bob_kernel_nodes.iter().map(|n| n.0.clone()).collect::>(); -// // let alice_kernels = alice_db.fetch_kernels(alice_kernel_hashes).unwrap(); -// // let bob_kernels = bob_db.fetch_kernels(bob_kernels_hashes).unwrap(); -// // assert_eq!(alice_kernels, bob_kernels); -// // -// // // Check UTXO MMR nodes -// // let alice_num_utxos = alice_db -// // .fetch_mmr_node_count(MmrTree::Utxo, network_tip_height) -// // .unwrap(); -// // let bob_num_utxos = bob_db.fetch_mmr_node_count(MmrTree::Utxo, network_tip_height).unwrap(); -// // assert_eq!(alice_num_utxos, bob_num_utxos); -// // let alice_utxo_nodes = alice_db -// // .fetch_mmr_nodes(MmrTree::Utxo, 0, alice_num_utxos, Some(network_tip_height)) -// // .unwrap(); -// // let bob_utxo_nodes = bob_db -// // .fetch_mmr_nodes(MmrTree::Utxo, 0, bob_num_utxos, Some(network_tip_height)) -// // .unwrap(); -// // assert_eq!(alice_utxo_nodes, bob_utxo_nodes); -// // // Check UTXOs -// // let mut alice_utxos = Vec::new(); -// // for (hash, deleted) in alice_utxo_nodes { -// // if !deleted { -// 
// alice_utxos.push(alice_db.fetch_utxo(hash).unwrap()); -// // } -// // } -// // let mut bob_utxos = Vec::new(); -// // for (hash, deleted) in bob_utxo_nodes { -// // if !deleted { -// // bob_utxos.push(bob_db.fetch_utxo(hash).unwrap()); -// // } -// // } -// // assert_eq!(alice_utxos, bob_utxos); -// // -// // // Check RangeProof MMR nodes -// // let alice_num_rps = alice_db -// // .fetch_mmr_node_count(MmrTree::RangeProof, network_tip_height) -// // .unwrap(); -// // let bob_num_rps = bob_db -// // .fetch_mmr_node_count(MmrTree::RangeProof, network_tip_height) -// // .unwrap(); -// // assert_eq!(alice_num_rps, bob_num_rps); -// // let alice_rps_nodes = alice_db -// // .fetch_mmr_nodes(MmrTree::RangeProof, 0, alice_num_rps, Some(network_tip_height)) -// // .unwrap(); -// // let bob_rps_nodes = bob_db -// // .fetch_mmr_nodes(MmrTree::RangeProof, 0, bob_num_rps, Some(network_tip_height)) -// // .unwrap(); -// // assert_eq!(alice_rps_nodes, bob_rps_nodes); -// // -// // let block = alice_db.fetch_block(network_tip_height).unwrap(); -// // assert_eq!(block.block.header.height, network_tip_height); -// // assert_eq!(block.block.header.hash(), network_tip.best_block.unwrap()); -// unimplemented!() -// } -// -// #[test] -// fn test_pruned_mode_sync_fail_final_validation() { -// // Number of blocks to create in addition to the genesis -// const NUM_BLOCKS: u64 = 10; -// const SYNC_OFFSET: u64 = 0; -// const PRUNING_HORIZON: u64 = 4; -// let mut runtime = Runtime::new().unwrap(); -// let factories = CryptoFactories::default(); -// let temp_dir = tempdir().unwrap(); -// let network = Network::LocalNet; -// let consensus_constants = ConsensusConstantsBuilder::new(network) -// .with_emission_amounts(100_000_000.into(), &EMISSION, 100.into()) -// .build(); -// let (genesis_block, _) = create_genesis_block(&factories, &consensus_constants); -// let consensus_manager = ConsensusManagerBuilder::new(network) -// .with_consensus_constants(consensus_constants) -// 
.with_block(genesis_block.clone()) -// .build(); -// let blockchain_db_config = BlockchainDatabaseConfig { -// orphan_storage_capacity: 3, -// pruning_horizon: PRUNING_HORIZON, -// pruning_interval: 5, -// }; -// let (alice_node, bob_node, consensus_manager) = create_network_with_2_base_nodes_with_config( -// &mut runtime, -// blockchain_db_config, -// BaseNodeServiceConfig::default(), -// MmrCacheConfig::default(), -// MempoolServiceConfig::default(), -// LivenessConfig::default(), -// consensus_manager, -// temp_dir.path().to_str().unwrap(), -// ); -// let mut horizon_sync_config = HorizonSyncConfig::default(); -// horizon_sync_config.horizon_sync_height_offset = SYNC_OFFSET; -// let state_machine_config = BaseNodeStateMachineConfig { -// block_sync_config: BlockSyncConfig::default(), -// horizon_sync_config, -// sync_peer_config: SyncPeerConfig::default(), -// }; -// let shutdown = Shutdown::new(); -// let (state_change_event_publisher, _) = broadcast::channel(10); -// let (status_event_sender, _) = watch::channel(StatusInfo::new()); -// let mut alice_state_machine = BaseNodeStateMachine::new( -// &alice_node.blockchain_db, -// &alice_node.local_nci, -// &alice_node.outbound_nci, -// alice_node.comms.connectivity(), -// alice_node.comms.peer_manager(), -// alice_node.chain_metadata_handle.get_event_stream(), -// state_machine_config, -// SyncValidators::new(MockValidator::new(true), MockValidator::new(false)), -// shutdown.to_signal(), -// status_event_sender, -// state_change_event_publisher, -// ); -// -// runtime.block_on(async { -// let alice_db = &alice_node.blockchain_db; -// let bob_db = &bob_node.blockchain_db; -// let mut prev_block = genesis_block.clone(); -// for _ in 0..NUM_BLOCKS { -// // Need coinbases for kernels and utxos -// let (block, _) = -// append_block_with_coinbase(&factories, bob_db, &prev_block, vec![], &consensus_manager, 1.into()) -// .unwrap(); -// prev_block = block; -// } -// -// // Both nodes are running in pruned mode and can not 
use block sync to synchronize state. Sync horizon state -// // from genesis block to horizon_sync_height and then block sync to the tip. -// let network_tip = bob_db.get_chain_metadata().unwrap(); -// assert_eq!(network_tip.effective_pruned_height(), 6); -// let mut sync_peers = vec![SyncPeer { -// node_id: bob_node.node_identity.node_id().clone(), -// chain_metadata: network_tip.clone(), -// }]; -// -// // Synchronize headers -// let state_event = HeaderSync::new(network_tip.clone(), sync_peers.clone()) -// .next_event(&mut alice_state_machine) -// .await; -// unpack_enum!(StateEvent::HeadersSynchronized(local_metadata, sync_height) = state_event); -// -// // Sync horizon state. Final state validation will fail (MockValidator::new(false)) -// assert_eq!(sync_height, NUM_BLOCKS - PRUNING_HORIZON + SYNC_OFFSET); -// let state_event = HorizonStateSync::new(local_metadata, network_tip.clone(), sync_peers.clone(), sync_height) -// .next_event(&mut alice_state_machine) -// .await; -// assert_eq!(state_event, StateEvent::HorizonStateSyncFailure); -// -// // Check the state was rolled back -// let node_count = alice_db.fetch_mmr_node_count(MmrTree::Kernel, sync_height).unwrap(); -// assert_eq!(node_count, 1); -// let node_count = alice_db.fetch_mmr_node_count(MmrTree::Utxo, sync_height).unwrap(); -// assert_eq!(node_count, 1); -// let node_count = alice_db.fetch_mmr_node_count(MmrTree::RangeProof, sync_height).unwrap(); -// assert_eq!(node_count, 1); -// -// assert!(alice_db.get_horizon_sync_state().unwrap().is_none()); -// let (state_change_event_publisher, _) = broadcast::channel(10); -// let (status_event_sender, _) = watch::channel(StatusInfo::new()); -// let mut alice_state_machine = BaseNodeStateMachine::new( -// &alice_node.blockchain_db, -// &alice_node.local_nci, -// &alice_node.outbound_nci, -// alice_node.comms.connectivity(), -// alice_node.comms.peer_manager(), -// alice_node.chain_metadata_handle.get_event_stream(), -// state_machine_config, -// 
SyncValidators::new(MockValidator::new(true), MockValidator::new(true)), -// shutdown.to_signal(), -// status_event_sender, -// state_change_event_publisher, -// ); -// -// // Synchronize Kernels and UTXOs -// let local_metadata = alice_db.get_chain_metadata().unwrap(); -// let state_event = HorizonStateSync::new(local_metadata, network_tip.clone(), sync_peers.clone(), sync_height) -// .next_event(&mut alice_state_machine) -// .await; -// assert_eq!(state_event, StateEvent::HorizonStateSynchronized); -// -// let alice_metadata = alice_db.get_chain_metadata().unwrap(); -// // Local height should now be at the horizon sync height -// assert_eq!(alice_metadata.height_of_longest_chain(), sync_height); -// assert_eq!(alice_metadata.effective_pruned_height(), sync_height); -// -// // Check Kernel MMR nodes after horizon sync -// let alice_num_kernels = alice_db.fetch_mmr_node_count(MmrTree::Kernel, sync_height).unwrap(); -// let bob_num_kernels = bob_db.fetch_mmr_node_count(MmrTree::Kernel, sync_height).unwrap(); -// assert_eq!(alice_num_kernels, bob_num_kernels); -// let alice_kernel_nodes = alice_db -// .fetch_mmr_nodes(MmrTree::Kernel, 0, alice_num_kernels, Some(sync_height)) -// .unwrap(); -// let bob_kernel_nodes = bob_db -// .fetch_mmr_nodes(MmrTree::Kernel, 0, bob_num_kernels, Some(sync_height)) -// .unwrap(); -// assert_eq!(alice_kernel_nodes, bob_kernel_nodes); -// -// // Synchronize full blocks -// let mut block_events = alice_node.local_nci.get_block_event_stream(); -// let state_event = BestChainMetadataBlockSync -// .next_event(&mut alice_state_machine, &network_tip, &mut sync_peers) -// .await; -// assert_eq!(state_event, StateEvent::BlocksSynchronized); -// let next_event = block_events.next().await.unwrap().unwrap(); -// unpack_enum!(BlockEvent::BlockSyncComplete(block) = &*next_event); -// assert_eq!(block.header.height, network_tip.height_of_longest_chain()); -// let alice_metadata = alice_db.get_chain_metadata().unwrap(); -// // Local height should now 
be at the horizon sync height -// assert_eq!( -// alice_metadata.effective_pruned_height(), -// network_tip.height_of_longest_chain() - network_tip.pruning_horizon() -// ); -// -// check_final_state(&alice_db, &bob_db); -// }); -// } diff --git a/base_layer/core/tests/mempool.rs b/base_layer/core/tests/mempool.rs index b0ef42dbd8..80e187a99a 100644 --- a/base_layer/core/tests/mempool.rs +++ b/base_layer/core/tests/mempool.rs @@ -268,10 +268,10 @@ fn test_retrieve() { txn_schema!(from: vec![outputs[1][4].clone()], to: vec![], fee: 20*uT, lock: 2, features: OutputFeatures::default()), txn_schema!(from: vec![outputs[1][5].clone()], to: vec![], fee: 20*uT, lock: 3, features: OutputFeatures::default()), // Will be time locked when a tx is added to mempool with this as an input: - txn_schema!(from: vec![outputs[1][6].clone()], to: vec![800_000*uT], fee: 60*uT, lock: 0, + txn_schema!(from: vec![outputs[1][6].clone()], to: vec![800_000*uT], fee: 60*uT, lock: 0, features: OutputFeatures::with_maturity(4)), // Will be time locked when a tx is added to mempool with this as an input: - txn_schema!(from: vec![outputs[1][7].clone()], to: vec![800_000*uT], fee: 25*uT, lock: 0, + txn_schema!(from: vec![outputs[1][7].clone()], to: vec![800_000*uT], fee: 25*uT, lock: 0, features: OutputFeatures::with_maturity(3)), ]; let (tx, utxos) = schema_to_transaction(&txs); diff --git a/base_layer/core/tests/node_comms_interface.rs b/base_layer/core/tests/node_comms_interface.rs index 7677b76860..532d102bbe 100644 --- a/base_layer/core/tests/node_comms_interface.rs +++ b/base_layer/core/tests/node_comms_interface.rs @@ -22,7 +22,6 @@ #[allow(dead_code)] mod helpers; - use futures::{channel::mpsc, StreamExt}; use helpers::block_builders::append_block; use std::sync::Arc; @@ -39,10 +38,22 @@ use tari_core::{ consensus::{ConsensusManager, NetworkConsensus}, mempool::{Mempool, MempoolConfig}, test_helpers::blockchain::{create_store_with_consensus_and_validators_and_config, 
create_test_blockchain_db}, - transactions::{helpers::create_utxo, tari_amount::MicroTari, types::CryptoFactories}, + transactions::{ + helpers::{create_utxo, spend_utxos}, + tari_amount::MicroTari, + transaction::{OutputFeatures, TransactionOutput, UnblindedOutput}, + types::{CryptoFactories, PublicKey}, + }, + txn_schema, validation::{mocks::MockValidator, transaction_validators::TxInputAndMaturityValidator}, }; -use tari_crypto::{script::TariScript, tari_utilities::hash::Hashable}; +use tari_crypto::{ + inputs, + keys::PublicKey as PublicKeyTrait, + script, + script::TariScript, + tari_utilities::hash::Hashable, +}; use tari_service_framework::{reply_channel, reply_channel::Receiver}; use tokio::sync::broadcast; // use crate::helpers::database::create_test_db; @@ -193,7 +204,12 @@ async fn outbound_fetch_utxos() { let (block_sender, _) = mpsc::unbounded(); let mut outbound_nci = OutboundNodeCommsInterface::new(request_sender, block_sender); - let (utxo, _, _) = create_utxo(MicroTari(10_000), &factories, None, &TariScript::default()); + let (utxo, _, _) = create_utxo( + MicroTari(10_000), + &factories, + Default::default(), + &TariScript::default(), + ); let hash = utxo.hash(); let utxo_response = NodeCommsResponse::TransactionOutputs(vec![utxo.clone()]); let (received_utxos, _) = futures::join!( @@ -228,7 +244,12 @@ async fn inbound_fetch_utxos() { let utxo_1 = block.body.outputs()[0].clone(); let hash_1 = utxo_1.hash(); - let (utxo_2, _, _) = create_utxo(MicroTari(10_000), &factories, None, &TariScript::default()); + let (utxo_2, _, _) = create_utxo( + MicroTari(10_000), + &factories, + Default::default(), + &TariScript::default(), + ); let hash_2 = utxo_2.hash(); // Only retrieve a subset of the actual hashes, including a fake hash in the list @@ -250,8 +271,18 @@ async fn outbound_fetch_txos() { let (block_sender, _) = mpsc::unbounded(); let mut outbound_nci = OutboundNodeCommsInterface::new(request_sender, block_sender); - let (txo1, _, _) = 
create_utxo(MicroTari(10_000), &factories, None, &TariScript::default()); - let (txo2, _, _) = create_utxo(MicroTari(15_000), &factories, None, &TariScript::default()); + let (txo1, _, _) = create_utxo( + MicroTari(10_000), + &factories, + Default::default(), + &TariScript::default(), + ); + let (txo2, _, _) = create_utxo( + MicroTari(15_000), + &factories, + Default::default(), + &TariScript::default(), + ); let hash1 = txo1.hash(); let hash2 = txo2.hash(); let txo_response = NodeCommsResponse::TransactionOutputs(vec![txo1.clone(), txo2.clone()]); @@ -284,22 +315,47 @@ async fn inbound_fetch_txos() { outbound_nci, ); - let (utxo, _, _) = create_utxo(MicroTari(10_000), &factories, None, &TariScript::default()); - let (stxo, _, _) = create_utxo(MicroTari(10_000), &factories, None, &TariScript::default()); + let (utxo, _, _) = create_utxo( + MicroTari(10_000), + &factories, + Default::default(), + &TariScript::default(), + ); + let (pruned_utxo, _, _) = create_utxo( + MicroTari(10_000), + &factories, + Default::default(), + &TariScript::default(), + ); + let (stxo, _, _) = create_utxo( + MicroTari(10_000), + &factories, + Default::default(), + &TariScript::default(), + ); let utxo_hash = utxo.hash(); let stxo_hash = stxo.hash(); + let pruned_utxo_hash = pruned_utxo.hash(); let block = store.fetch_block(0).unwrap().block().clone(); let header_hash = block.header.hash(); let mut txn = DbTransaction::new(); txn.insert_utxo(utxo.clone(), header_hash.clone(), block.header.height, 6000); txn.insert_utxo(stxo.clone(), header_hash.clone(), block.header.height, 6001); + txn.insert_pruned_utxo( + pruned_utxo_hash.clone(), + pruned_utxo.witness_hash(), + header_hash.clone(), + 5, + 6002, + ); assert!(store.commit(txn).is_ok()); - // let mut txn = DbTransaction::new(); - // txn.insert_input(stxo.clone().into(), header_hash.clone(), 1); - // assert!(store.commit(txn).is_ok()); if let Ok(NodeCommsResponse::TransactionOutputs(received_txos)) = inbound_nch - 
.handle_request(NodeCommsRequest::FetchMatchingTxos(vec![utxo_hash, stxo_hash])) + .handle_request(NodeCommsRequest::FetchMatchingTxos(vec![ + utxo_hash, + stxo_hash, + pruned_utxo_hash, + ])) .await { assert_eq!(received_txos.len(), 2); @@ -360,9 +416,9 @@ async fn inbound_fetch_blocks() { } #[tokio_macros::test] -#[ignore] // Test needs to be updated to new pruned structure. async fn inbound_fetch_blocks_before_horizon_height() { + let factories = CryptoFactories::default(); let network = Network::LocalNet; let consensus_manager = ConsensusManager::builder(network).build(); let block0 = consensus_manager.get_genesis_block(); @@ -390,8 +446,36 @@ async fn inbound_fetch_blocks_before_horizon_height() { consensus_manager.clone(), outbound_nci, ); + let script = script!(Nop); + let (utxo, key, offset) = create_utxo(MicroTari(10_000), &factories, Default::default(), &script); + let metadata_signature = TransactionOutput::create_final_metadata_signature( + &MicroTari(10_000), + &key, + &script, + &OutputFeatures::default(), + &offset, + ) + .unwrap(); + let unblinded_output = UnblindedOutput::new( + MicroTari(10_000), + key.clone(), + Default::default(), + script, + inputs!(PublicKey::from_secret_key(&key)), + key, + PublicKey::from_secret_key(&offset), + metadata_signature, + ); + let mut txn = DbTransaction::new(); + txn.insert_utxo(utxo.clone(), block0.hash().clone(), 0, 4002); + assert!(store.commit(txn).is_ok()); - let block1 = append_block(&store, &block0, vec![], &consensus_manager, 1.into()).unwrap(); + let txn = txn_schema!( + from: vec![unblinded_output], + to: vec![MicroTari(5_000), MicroTari(4_000)] + ); + let (txn, _, _) = spend_utxos(txn); + let block1 = append_block(&store, &block0, vec![txn], &consensus_manager, 1.into()).unwrap(); let block2 = append_block(&store, &block1, vec![], &consensus_manager, 1.into()).unwrap(); let block3 = append_block(&store, &block2, vec![], &consensus_manager, 1.into()).unwrap(); let block4 = append_block(&store, &block3, 
vec![], &consensus_manager, 1.into()).unwrap(); @@ -401,7 +485,8 @@ async fn inbound_fetch_blocks_before_horizon_height() { .handle_request(NodeCommsRequest::FetchMatchingBlocks(vec![1])) .await { - assert_eq!(received_blocks.len(), 0); + assert_eq!(received_blocks.len(), 1); + assert_eq!(received_blocks[0].pruned_outputs().len(), 1) } else { panic!(); } diff --git a/base_layer/core/tests/node_service.rs b/base_layer/core/tests/node_service.rs index a7b7c4ddc3..14f277f19b 100644 --- a/base_layer/core/tests/node_service.rs +++ b/base_layer/core/tests/node_service.rs @@ -643,7 +643,7 @@ fn local_get_new_block_template_and_get_new_block() { assert_eq!(block.header.height, 1); assert_eq!(block.body, block_template.body); - assert!(node.blockchain_db.add_block(block.clone().into()).is_ok()); + node.blockchain_db.add_block(block.clone().into()).unwrap(); node.shutdown().await; }); @@ -722,7 +722,7 @@ fn local_get_new_block_with_zero_conf() { assert_eq!(block.body, block_template.body); assert_eq!(block_template.body.kernels().len(), 5); - assert!(node.blockchain_db.add_block(block.clone().into()).is_ok()); + node.blockchain_db.add_block(block.clone().into()).unwrap(); node.shutdown().await; }); @@ -796,7 +796,7 @@ fn local_get_new_block_with_combined_transaction() { assert_eq!(block.body, block_template.body); assert_eq!(block_template.body.kernels().len(), 5); - assert!(node.blockchain_db.add_block(block.clone().into()).is_ok()); + node.blockchain_db.add_block(block.clone().into()).unwrap(); node.shutdown().await; }); diff --git a/base_layer/key_manager/Cargo.toml b/base_layer/key_manager/Cargo.toml index 84f6835036..e57d0bf8b4 100644 --- a/base_layer/key_manager/Cargo.toml +++ b/base_layer/key_manager/Cargo.toml @@ -4,7 +4,7 @@ authors = ["The Tari Development Community"] description = "Tari cryptocurrency wallet key management" repository = "https://github.com/tari-project/tari" license = "BSD-3-Clause" -version = "0.9.1" +version = "0.9.5" edition = "2018" 
[dependencies] diff --git a/base_layer/mmr/Cargo.toml b/base_layer/mmr/Cargo.toml index 26f0841d18..38b723a5f2 100644 --- a/base_layer/mmr/Cargo.toml +++ b/base_layer/mmr/Cargo.toml @@ -4,7 +4,7 @@ authors = ["The Tari Development Community"] description = "A Merkle Mountain Range implementation" repository = "https://github.com/tari-project/tari" license = "BSD-3-Clause" -version = "0.9.1" +version = "0.9.5" edition = "2018" [features] diff --git a/base_layer/p2p/Cargo.toml b/base_layer/p2p/Cargo.toml index 91304a5f5e..e9d3a0291c 100644 --- a/base_layer/p2p/Cargo.toml +++ b/base_layer/p2p/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tari_p2p" -version = "0.9.1" +version = "0.9.5" authors = ["The Tari Development community"] description = "Tari base layer-specific peer-to-peer communication features" repository = "https://github.com/tari-project/tari" diff --git a/base_layer/service_framework/Cargo.toml b/base_layer/service_framework/Cargo.toml index c0e9f41b78..49e829c949 100644 --- a/base_layer/service_framework/Cargo.toml +++ b/base_layer/service_framework/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tari_service_framework" -version = "0.9.1" +version = "0.9.5" authors = ["The Tari Development Community"] description = "The Tari communication stack service framework" repository = "https://github.com/tari-project/tari" diff --git a/base_layer/tari_stratum_ffi/Cargo.toml b/base_layer/tari_stratum_ffi/Cargo.toml new file mode 100644 index 0000000000..6598df9f06 --- /dev/null +++ b/base_layer/tari_stratum_ffi/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "tari_stratum_ffi" +authors = ["The Tari Development Community"] +description = "Tari cryptocurrency miningcore C FFI bindings" +license = "BSD-3-Clause" +version = "0.0.1" +edition = "2018" + +[dependencies] +tari_comms = { version = "^0.9", path = "../../comms" } +tari_crypto = "^0.11.1" +tari_common = { path = "../../common" } +tari_app_grpc = { path = "../../applications/tari_app_grpc" } +tari_core = { path = 
"../../base_layer/core", default-features = false, features = ["transactions"]} +tari_utilities = "^0.3" +libc = "0.2.65" +thiserror = "1.0.20" +hex = "0.4.2" +serde = { version="1.0.106", features = ["derive"] } +serde_json = "1.0.57" + +[lib] +crate-type = ["staticlib","cdylib"] diff --git a/base_layer/tari_stratum_ffi/src/error.rs b/base_layer/tari_stratum_ffi/src/error.rs new file mode 100644 index 0000000000..00d0a39bfe --- /dev/null +++ b/base_layer/tari_stratum_ffi/src/error.rs @@ -0,0 +1,87 @@ +// Copyright 2021. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +use tari_crypto::tari_utilities::hex::HexError; +use thiserror::Error; + +#[derive(Debug, Error, PartialEq)] +pub enum InterfaceError { + #[error("An error has occurred due to one of the parameters being null: `{0}`")] + NullError(String), + #[error("An error has occurred due to conversion failing for: `{0}`")] + ConversionError(String), + #[error("An error has occurred due to validation failing for: `{0}`")] + InvalidHashError(String), + #[error("An error has occurred due to difficulty being too low for share: `{0}`")] + LowDifficultyError(String), +} + +/// This struct is meant to hold an error for use by Miningcore. The error has an integer code and string +/// message +#[derive(Debug, Clone)] +pub struct StratumTranscoderError { + pub code: i32, + pub message: String, +} + +impl From for StratumTranscoderError { + fn from(v: InterfaceError) -> Self { + match v { + InterfaceError::NullError(_) => Self { + code: 1, + message: format!("{:?}", v), + }, + InterfaceError::ConversionError(_) => Self { + code: 2, + message: format!("{:?}", v), + }, + InterfaceError::InvalidHashError(_) => Self { + code: 3, + message: format!("{:?}", v), + }, + InterfaceError::LowDifficultyError(_) => Self { + code: 4, + message: format!("{:?}", v), + }, + } + } +} + +/// This implementation maps the internal HexError to a set of StratumTranscoderErrors. +/// The mapping is explicitly managed here. 
+impl From for StratumTranscoderError { + fn from(h: HexError) -> Self { + match h { + HexError::HexConversionError => Self { + code: 404, + message: format!("{:?}", h), + }, + HexError::LengthError => Self { + code: 501, + message: format!("{:?}", h), + }, + HexError::InvalidCharacter(_) => Self { + code: 503, + message: format!("{:?}", h), + }, + } + } +} diff --git a/base_layer/tari_stratum_ffi/src/lib.rs b/base_layer/tari_stratum_ffi/src/lib.rs new file mode 100644 index 0000000000..33e523f39d --- /dev/null +++ b/base_layer/tari_stratum_ffi/src/lib.rs @@ -0,0 +1,409 @@ +// Copyright 2021. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +#![cfg_attr(not(debug_assertions), deny(unused_variables))] +#![cfg_attr(not(debug_assertions), deny(unused_imports))] +#![cfg_attr(not(debug_assertions), deny(dead_code))] +#![cfg_attr(not(debug_assertions), deny(unused_extern_crates))] +#![deny(unused_must_use)] +#![deny(unreachable_patterns)] +#![deny(unknown_lints)] + +mod error; + +use crate::error::{InterfaceError, StratumTranscoderError}; +use core::ptr; +use libc::{c_char, c_int, c_ulonglong}; +use std::ffi::CString; +use tari_core::{ + blocks::Block, + crypto::tari_utilities::{message_format::MessageFormat, Hashable}, + proof_of_work::{sha3_difficulty, Difficulty}, +}; +use tari_crypto::tari_utilities::hex::Hex; +pub type TariPublicKey = tari_comms::types::CommsPublicKey; + +/// Validates a hex string is convertible into a TariPublicKey +/// +/// ## Arguments +/// `hex` - The hex formatted cstring to be validated +/// +/// ## Returns +/// `bool` - Returns true/false +/// `error_out` - Error code returned, 0 means no error +/// +/// # Safety +/// None +#[no_mangle] +pub unsafe extern "C" fn public_key_hex_validate(hex: *const c_char, error_out: *mut c_int) -> bool { + let mut error = 0; + ptr::swap(error_out, &mut error as *mut c_int); + let native; + + if hex.is_null() { + error = StratumTranscoderError::from(InterfaceError::NullError("hex".to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return false; + } else { + native = CString::from_raw(hex as *mut 
i8).to_str().unwrap().to_owned(); + } + let pk = TariPublicKey::from_hex(&native); + match pk { + Ok(_pk) => true, + Err(e) => { + error = StratumTranscoderError::from(e).code; + ptr::swap(error_out, &mut error as *mut c_int); + false + }, + } +} + +/// Injects a nonce into a blocktemplate +/// +/// ## Arguments +/// `hex` - The hex formatted cstring +/// `nonce` - The nonce to be injected +/// +/// ## Returns +/// `c_char` - The updated hex formatted cstring or null on error +/// `error_out` - Error code returned, 0 means no error +/// +/// # Safety +/// None +#[no_mangle] +pub unsafe extern "C" fn inject_nonce(hex: *const c_char, nonce: c_ulonglong, error_out: *mut c_int) -> *const c_char { + let mut error = 0; + ptr::swap(error_out, &mut error as *mut c_int); + let native; + + if hex.is_null() { + error = StratumTranscoderError::from(InterfaceError::NullError("hex".to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + ptr::null() + } else { + native = CString::from_raw(hex as *mut i8).to_str().unwrap().to_owned(); + let block_hex = hex::decode(native); + match block_hex { + Ok(block_hex) => { + let block: Result = + serde_json::from_str(&String::from_utf8_lossy(&block_hex).to_string()); + match block { + Ok(mut block) => { + block.header.nonce = nonce; + let block_json = block.to_json().unwrap(); + let block_hex = hex::encode(block_json); + let result = CString::new(block_hex).unwrap(); + CString::into_raw(result) + }, + Err(_) => { + error = StratumTranscoderError::from(InterfaceError::ConversionError("block".to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + ptr::null() + }, + } + }, + Err(_) => { + error = StratumTranscoderError::from(InterfaceError::ConversionError("hex".to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + ptr::null() + }, + } + } +} + +/// Returns the difficulty of a share +/// +/// ## Arguments +/// `hex` - The hex formatted cstring to be validated +/// +/// ## Returns +/// 
`c_ulonglong` - Difficulty, 0 on error +/// `error_out` - Error code returned, 0 means no error +/// +/// # Safety +/// None +#[no_mangle] +pub unsafe extern "C" fn share_difficulty(hex: *const c_char, error_out: *mut c_int) -> c_ulonglong { + let mut error = 0; + ptr::swap(error_out, &mut error as *mut c_int); + let block_hex_string; + + if hex.is_null() { + error = StratumTranscoderError::from(InterfaceError::NullError("hex".to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return 0; + } else { + block_hex_string = CString::from_raw(hex as *mut i8).to_str().unwrap().to_owned(); + } + + let block_hex = hex::decode(block_hex_string); + match block_hex { + Ok(block_hex) => { + let block: Result = + serde_json::from_str(&String::from_utf8_lossy(&block_hex).to_string()); + match block { + Ok(block) => { + let difficulty = sha3_difficulty(&block.header); + difficulty.as_u64() + }, + Err(_) => { + error = StratumTranscoderError::from(InterfaceError::ConversionError("block".to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + 0 + }, + } + }, + Err(_) => { + error = StratumTranscoderError::from(InterfaceError::ConversionError("hex".to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + 0 + }, + } +} + +/// Validates a share submission +/// +/// ## Arguments +/// `hex` - The hex representation of the share to be validated +/// `hash` - The hash of the share to be validated +/// `nonce` - The nonce for the share to be validated +/// `stratum_difficulty` - The stratum difficulty to be checked against (meeting this means that the share is valid for +/// payout) `template_difficulty` - The difficulty to be checked against (meeting this means the share is also a block +/// to be submitted to the chain) +/// +/// ## Returns +/// `c_uint` - Returns one of the following: +/// 0: Valid Block +/// 1: Valid Share +/// 2: Invalid Share +/// `error_out` - Error code returned, 0 means no error +/// +/// # Safety +/// None 
+#[no_mangle] +pub unsafe extern "C" fn share_validate( + hex: *const c_char, + hash: *const c_char, + stratum_difficulty: c_ulonglong, + template_difficulty: c_ulonglong, + error_out: *mut c_int, +) -> c_int { + let mut error = 0; + ptr::swap(error_out, &mut error as *mut c_int); + let block_hex_string; + let block_hash_string; + + if hex.is_null() { + error = StratumTranscoderError::from(InterfaceError::NullError("hex".to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return 2; + } else { + block_hex_string = CString::from_raw(hex as *mut i8).to_str().unwrap().to_owned(); + } + + if hash.is_null() { + error = StratumTranscoderError::from(InterfaceError::NullError("hash".to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return 2; + } else { + block_hash_string = CString::from_raw(hash as *mut i8).to_str().unwrap().to_owned(); + } + + let block_hex = hex::decode(block_hex_string); + match block_hex { + Ok(block_hex) => { + let block: Result = + serde_json::from_str(&String::from_utf8_lossy(&block_hex).to_string()); + match block { + Ok(block) => { + if block.header.hash().to_hex() == block_hash_string { + // Hash submitted by miner is the same hash produced for the nonce submitted by miner + let mut result = 2; + let difficulty = sha3_difficulty(&block.header); + if difficulty >= Difficulty::from(template_difficulty) { + // Valid block + result = 0; + } else if difficulty >= Difficulty::from(stratum_difficulty) { + // Valid share + result = 1; + } else { + // Difficulty not reached + error = StratumTranscoderError::from(InterfaceError::LowDifficultyError(block_hash_string)) + .code; + ptr::swap(error_out, &mut error as *mut c_int); + } + result + } else { + error = StratumTranscoderError::from(InterfaceError::InvalidHashError(block_hash_string)).code; + ptr::swap(error_out, &mut error as *mut c_int); + 2 + } + }, + Err(_) => { + error = 
StratumTranscoderError::from(InterfaceError::ConversionError("block".to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + 2 + }, + } + }, + Err(_) => { + error = StratumTranscoderError::from(InterfaceError::ConversionError("hex".to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + 2 + }, + } +} + +#[cfg(test)] +mod tests { + use crate::{inject_nonce, public_key_hex_validate, share_difficulty, share_validate}; + use libc::{c_char, c_int}; + use std::{ffi::CString, str}; + + const BLOCK_HEX: &str = "7b22686561646572223a7b2276657273696f6e223a312c22686569676874223a343333382c22707265765f68617368223a2237663665626130376432373964366464316566656263376564346431386163396436666564663366613536303131363835636361326465336562656232633266222c2274696d657374616d70223a313632363138353739372c226f75747075745f6d72223a2237336230306466393130353263383831343061393765613831343138396239356335313634303662633434323238666562393262326563333238386534366564222c227769746e6573735f6d72223a2236326665643734633863633531633032363338356638626434663330326638306263353034393635656363363930393033646565623765613836303331376531222c226f75747075745f6d6d725f73697a65223a3130303439382c226b65726e656c5f6d72223a2263653233656430623561663938323236653936353533636631616539646538346230333432363665316164366435623231383531356431306663613930393132222c226b65726e656c5f6d6d725f73697a65223a32303438332c22696e7075745f6d72223a2232363134366135343335656631356538636637646333333534636237323638313337653862653231313739346539336430343535313537366336353631353635222c22746f74616c5f6b65726e656c5f6f6666736574223a2230303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030222c22746f74616c5f7363726970745f6f6666736574223a2230303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030222c226e6f6e6365223a302c22706f77223a7b22706f775f616c676f223a2253686133222c22706f775f64617461223a5b5
d7d7d2c22626f6479223a7b22736f72746564223a66616c73652c22696e70757473223a5b5d2c226f757470757473223a5b7b226665617475726573223a7b22666c616773223a7b2262697473223a317d2c226d61747572697479223a343334347d2c22636f6d6d69746d656e74223a2264656138316332663165353461336465323035363764656665623335613730643530666165343730313438626532666335316134303330666335653764373036222c2270726f6f66223a2233633964316435303265316531363765613236633638353837393166613865383637383330306261633465626461663538626133343537656633373566643239313866356639303430663862353461636331326437303138346333623433336264323666316131623435633531373965323336653563343463666561333633336232376463303135666364326630613933386139386432663336316462346638623165633533346630626263613562666137373166383837343031376432313235633137383933343731623563346231366562626234616561613731343463666665333232636161343861343632643662646234366137353461313261633633353233336565653035346336633734376662313235343663666464656132356234636533623064336433333265376339636337613764636564633438366230653331386633313237623331373530633634663465353366633939323936666638336530633263323263623533346439626261353331656239336462643361303438663562643136656364323961393963663062343638613361633266623339343936373530396462303331633266636361663661383165303030376635333035656362373061363865353739316635646239623737616231363439343462643039666535643939356466633763393366386531643530663964336261663936333065316430373764356532323735643634383432383365646131316337313939333034363736303764376130663163356161313938646334333163373664373265343630373666303031373863346636353531343236616165343763326365626338616537393239396631373230316362626131383739656561623834663763643030373735363964386432393330643762346436326130333731376561373137663238636361636663343863613564366538303731323930623438613234373661643066356262303963376230393037623161653362313365326265313664353161346530383238636439336635373433653432393930383561393666303235633865663338343662343063363438656363373366643164306563353537616631363237636262653862663
9643430333232303833336138353633343337316334666639663636363663313239303436616263323939633633643064313532626437306464303336306265396339383961396133643930653639613031366164633064663937373664323661343434303237633033623263303639643438613031383762313365643236386430366530313961363733663163643636613436623838333335663663313562363566663232383737346334383536653564323466336465363633633636333739663639376162323039323537326265663434346363306361366433396562383732616538363765373536356131626539613731396231613130613833363937656133333666333438613033373864613365373036303534356434323233396138313438303632303564306466376138663961613438633834383362353432663862303564346330626235333039363534373032306137663366316362333137633733346532373866303232396234396263333635666539373935393730613662666163326462626537633337616436666337373266323038613463333637653634333030663963623136363332643034346333626436386237613939383830663533336630346465613030633761343637303035613261316432313766343261323935623264393565646664393632346463636535343432653763393039663661333834363036346466643765373538303066222c22736372697074223a223733222c2273656e6465725f6f66667365745f7075626c69635f6b6579223a2264616133376465323133323038636462323237623431666435313830643530306130643138356462346565353461646666643033386436346233386136353764222c226d657461646174615f7369676e6174757265223a7b227075626c69635f6e6f6e6365223a2261306565623636383862613363313331616565343538363435396662336533323463303537316535656639643937316462303461313331643061636435343331222c2275223a2262383633666563386336396361313136393166383363656165633531653839393833613235363334666563306438383035326232363066383862313835353032222c2276223a2264396535323238346662393536666665343837636238376538353666373837343939356366616162393034373264376432616537616539623431373537393032227d7d5d2c226b65726e656c73223a5b7b226665617475726573223a7b2262697473223a317d2c22666565223a302c226c6f636b5f686569676874223a302c22657863657373223a2263366263386263643162623836353964666664356537363634653263363265646333383639333566396
230633033333130353265383836623235623264373465222c226578636573735f736967223a7b227075626c69635f6e6f6e6365223a2236326264336539663631643362633031323738386130373134633461666134353332383136663562616664613138303465623963643333616536356538323465222c227369676e6174757265223a2234643662323666383433623837623737393734343233613764656563303365663933653930326563633131393734303837646264643234333362643936363061227d7d5d7d7d"; + const HASH_HEX: &str = "3a9ea717ca7b2598d900e2ef98c270ac98ce993bce8a9e058929967ba37fbc6b"; + const NONCE: u64 = 15810795621223647638; + + #[test] + fn check_difficulty() { + // Difficulty 20025 + unsafe { + let mut error = -1; + let error_ptr = &mut error as *mut c_int; + let block_hex = CString::new(BLOCK_HEX).unwrap(); + let block_hex_ptr: *const c_char = CString::into_raw(block_hex) as *const c_char; + let block_hex_ptr2 = inject_nonce(block_hex_ptr, NONCE, error_ptr); + let result = share_difficulty(block_hex_ptr2, error_ptr); + assert_eq!(result, 20025); + } + } + + #[test] + fn check_invalid_share() { + // Difficulty 20025 + unsafe { + let mut error = -1; + let error_ptr = &mut error as *mut c_int; + let block_hex = CString::new(BLOCK_HEX).unwrap(); + let hash_hex = CString::new(HASH_HEX).unwrap(); + let block_hex_ptr: *const c_char = CString::into_raw(block_hex) as *const c_char; + let hash_hex_ptr: *const c_char = CString::into_raw(hash_hex) as *const c_char; + let template_difficulty = 30000; + let stratum_difficulty = 22200; + let block_hex_ptr2 = inject_nonce(block_hex_ptr, NONCE, error_ptr); + let result = share_validate( + block_hex_ptr2, + hash_hex_ptr, + stratum_difficulty, + template_difficulty, + error_ptr, + ); + assert_eq!(result, 2); + assert_eq!(error, 4); + } + } + + #[test] + fn check_valid_share() { + // Difficulty 20025 + unsafe { + let mut error = -1; + let error_ptr = &mut error as *mut c_int; + let block_hex = CString::new(BLOCK_HEX).unwrap(); + let hash_hex = CString::new(HASH_HEX).unwrap(); + let block_hex_ptr: *const c_char = 
CString::into_raw(block_hex) as *const c_char; + let hash_hex_ptr: *const c_char = CString::into_raw(hash_hex) as *const c_char; + let template_difficulty = 30000; + let stratum_difficulty = 20000; + let block_hex_ptr2 = inject_nonce(block_hex_ptr, NONCE, error_ptr); + let result = share_validate( + block_hex_ptr2, + hash_hex_ptr, + stratum_difficulty, + template_difficulty, + error_ptr, + ); + assert_eq!(result, 1); + assert_eq!(error, 0); + } + } + + #[test] + fn check_valid_block() { + // Difficulty 20025 + unsafe { + let mut error = -1; + let error_ptr = &mut error as *mut c_int; + let block_hex = CString::new(BLOCK_HEX).unwrap(); + let hash_hex = CString::new(HASH_HEX).unwrap(); + let block_hex_ptr: *const c_char = CString::into_raw(block_hex) as *const c_char; + let hash_hex_ptr: *const c_char = CString::into_raw(hash_hex) as *const c_char; + let template_difficulty = 20000; + let stratum_difficulty = 15000; + let block_hex_ptr2 = inject_nonce(block_hex_ptr, NONCE, error_ptr); + let result = share_validate( + block_hex_ptr2, + hash_hex_ptr, + stratum_difficulty, + template_difficulty, + error_ptr, + ); + assert_eq!(result, 0); + assert_eq!(error, 0); + } + } + + #[test] + fn check_valid_address() { + unsafe { + let mut error = -1; + let error_ptr = &mut error as *mut c_int; + let test_pk = CString::new("5ce83bf62521629ca185098ac24c7b02b184c2e0a2b01455f3a5957d5df94126").unwrap(); + let test_pk_ptr: *const c_char = CString::into_raw(test_pk) as *const c_char; + let success = public_key_hex_validate(test_pk_ptr, error_ptr); + assert_eq!(error, 0); + assert!(success); + } + } + + #[test] + fn check_invalid_address() { + unsafe { + let mut error = -1; + let error_ptr = &mut error as *mut c_int; + let test_pk = CString::new("5fe83bf62521629ca185098ac24c7b02b184c2e0a2b01455f3a5957d5df94126").unwrap(); + let test_pk_ptr: *const c_char = CString::into_raw(test_pk) as *const c_char; + let success = public_key_hex_validate(test_pk_ptr, error_ptr); + assert!(!success); + 
assert_ne!(error, 0); + } + unsafe { + let mut error = -1; + let error_ptr = &mut error as *mut c_int; + let test_pk = CString::new("5fe83bf62521629ca185098ac24c7b02b184c2e0a2b01455f3a5957d5d").unwrap(); + let test_pk_ptr: *const c_char = CString::into_raw(test_pk) as *const c_char; + let success = public_key_hex_validate(test_pk_ptr, error_ptr); + assert!(!success); + assert_ne!(error, 0); + } + } +} diff --git a/base_layer/wallet/Cargo.toml b/base_layer/wallet/Cargo.toml index 3b7ab37105..c2cf12ca22 100644 --- a/base_layer/wallet/Cargo.toml +++ b/base_layer/wallet/Cargo.toml @@ -3,7 +3,7 @@ name = "tari_wallet" authors = ["The Tari Development Community"] description = "Tari cryptocurrency wallet library" license = "BSD-3-Clause" -version = "0.9.1" +version = "0.9.5" edition = "2018" [dependencies] @@ -16,7 +16,6 @@ tari_p2p = { version = "^0.9", path = "../p2p" } tari_service_framework = { version = "^0.9", path = "../service_framework"} tari_shutdown = { version = "^0.9", path = "../../infrastructure/shutdown" } tari_storage = { version = "^0.9", path = "../../infrastructure/storage"} -tari_test_utils = { version = "^0.9", path = "../../infrastructure/test_utils", optional = true} aes-gcm = "^0.8" blake2 = "0.9.0" @@ -30,7 +29,7 @@ fs2 = "0.3.0" futures = { version = "^0.3.1", features =["compat", "std"]} lazy_static = "1.4.0" log = "0.4.6" -log4rs = {version = "0.8.3", features = ["console_appender", "file_appender", "file", "yaml_format"]} +log4rs = {version = "1.0.0", features = ["console_appender", "file_appender", "yaml_format"]} lmdb-zero = "0.4.4" rand = "0.8" serde = {version = "1.0.89", features = ["derive"] } @@ -58,6 +57,5 @@ prost = "0.6.1" tokio-macros = "0.2.4" [features] -test_harness = ["tari_test_utils"] c_integration = [] avx2 = ["tari_crypto/avx2", "tari_core/avx2"] diff --git a/base_layer/wallet/src/base_node_service/config.rs b/base_layer/wallet/src/base_node_service/config.rs index 8140f10a7f..e7468a809e 100644 --- 
a/base_layer/wallet/src/base_node_service/config.rs +++ b/base_layer/wallet/src/base_node_service/config.rs @@ -28,29 +28,37 @@ const LOG_TARGET: &str = "wallet::base_node_service::config"; #[derive(Clone, Debug)] pub struct BaseNodeServiceConfig { pub base_node_monitor_refresh_interval: Duration, + pub base_node_rpc_pool_size: usize, pub request_max_age: Duration, + pub event_channel_size: usize, } impl Default for BaseNodeServiceConfig { fn default() -> Self { Self { base_node_monitor_refresh_interval: Duration::from_secs(5), + base_node_rpc_pool_size: 10, request_max_age: Duration::from_secs(60), + event_channel_size: 250, } } } impl BaseNodeServiceConfig { - pub fn new(refresh_interval: u64, request_max_age: u64) -> Self { + pub fn new(refresh_interval: u64, request_max_age: u64, event_channel_size: usize) -> Self { info!( target: LOG_TARGET, - "Setting new wallet base node service config, refresh interval: {}s, request max age: {}s", + "Setting new wallet base node service config, refresh interval: {}s, request max age: {}s, event channel \ + size : {}", refresh_interval, - request_max_age + request_max_age, + event_channel_size ); Self { base_node_monitor_refresh_interval: Duration::from_secs(refresh_interval), request_max_age: Duration::from_secs(request_max_age), + event_channel_size, + ..Default::default() } } } diff --git a/base_layer/wallet/src/base_node_service/error.rs b/base_layer/wallet/src/base_node_service/error.rs index 65ab411667..e737663f1f 100644 --- a/base_layer/wallet/src/base_node_service/error.rs +++ b/base_layer/wallet/src/base_node_service/error.rs @@ -20,7 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use crate::error::WalletStorageError; +use crate::{connectivity_service::WalletConnectivityError, error::WalletStorageError}; use tari_comms::{connectivity::ConnectivityError, protocol::rpc::RpcError}; use tari_comms_dht::outbound::DhtOutboundError; use tari_service_framework::reply_channel::TransportChannelError; @@ -46,4 +46,6 @@ pub enum BaseNodeServiceError { InvalidBaseNodeResponse(String), #[error("Wallet storage error: `{0}`")] WalletStorageError(#[from] WalletStorageError), + #[error("Wallet connectivity error: `{0}`")] + WalletConnectivityError(#[from] WalletConnectivityError), } diff --git a/base_layer/wallet/src/base_node_service/handle.rs b/base_layer/wallet/src/base_node_service/handle.rs index 0ba5ae0032..4957823c72 100644 --- a/base_layer/wallet/src/base_node_service/handle.rs +++ b/base_layer/wallet/src/base_node_service/handle.rs @@ -22,11 +22,9 @@ use super::{error::BaseNodeServiceError, service::BaseNodeState}; use futures::{stream::Fuse, StreamExt}; -use std::sync::Arc; -use tari_comms::peer_manager::Peer; - -use std::time::Duration; +use std::{sync::Arc, time::Duration}; use tari_common_types::chain_metadata::ChainMetadata; +use tari_comms::peer_manager::Peer; use tari_service_framework::reply_channel::SenderService; use tokio::sync::broadcast; use tower::Service; diff --git a/base_layer/wallet/src/base_node_service/mock_base_node_service.rs b/base_layer/wallet/src/base_node_service/mock_base_node_service.rs index e226602247..1bc57ed9d2 100644 --- a/base_layer/wallet/src/base_node_service/mock_base_node_service.rs +++ b/base_layer/wallet/src/base_node_service/mock_base_node_service.rs @@ -20,10 +20,13 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use crate::base_node_service::{ - error::BaseNodeServiceError, - handle::{BaseNodeServiceRequest, BaseNodeServiceResponse}, - service::{BaseNodeState, OnlineState}, +use crate::{ + base_node_service::{ + error::BaseNodeServiceError, + handle::{BaseNodeServiceRequest, BaseNodeServiceResponse}, + service::BaseNodeState, + }, + connectivity_service::OnlineStatus, }; use futures::StreamExt; use tari_common_types::chain_metadata::ChainMetadata; @@ -81,9 +84,9 @@ impl MockBaseNodeService { let (chain_metadata, is_synced, online) = match height { Some(height) => { let metadata = ChainMetadata::new(height, Vec::new(), 0, 0, 0); - (Some(metadata), Some(true), OnlineState::Online) + (Some(metadata), Some(true), OnlineStatus::Online) }, - None => (None, None, OnlineState::Offline), + None => (None, None, OnlineStatus::Offline), }; self.state = BaseNodeState { chain_metadata, @@ -91,7 +94,6 @@ impl MockBaseNodeService { updated: None, latency: None, online, - base_node_peer: self.state.base_node_peer.clone(), } } @@ -102,13 +104,12 @@ impl MockBaseNodeService { is_synced: Some(true), updated: None, latency: None, - online: OnlineState::Online, - base_node_peer: None, + online: OnlineStatus::Online, } } fn set_base_node_peer(&mut self, peer: Peer) { - self.state.base_node_peer = Some(peer); + self.base_node_peer = Some(peer); } /// This handler is called when requests arrive from the various streams @@ -122,7 +123,7 @@ impl MockBaseNodeService { Ok(BaseNodeServiceResponse::BaseNodePeerSet) }, BaseNodeServiceRequest::GetBaseNodePeer => { - let peer = self.state.base_node_peer.clone(); + let peer = self.base_node_peer.clone(); Ok(BaseNodeServiceResponse::BaseNodePeer(peer.map(Box::new))) }, BaseNodeServiceRequest::GetChainMetadata => Ok(BaseNodeServiceResponse::ChainMetadata( diff --git a/base_layer/wallet/src/base_node_service/mod.rs b/base_layer/wallet/src/base_node_service/mod.rs index 3c067dd712..94ff82aaf6 100644 --- a/base_layer/wallet/src/base_node_service/mod.rs +++ 
b/base_layer/wallet/src/base_node_service/mod.rs @@ -30,10 +30,10 @@ mod monitor; use crate::{ base_node_service::{config::BaseNodeServiceConfig, handle::BaseNodeServiceHandle, service::BaseNodeService}, + connectivity_service::WalletConnectivityHandle, storage::database::{WalletBackend, WalletDatabase}, }; use log::*; -use tari_comms::connectivity::ConnectivityRequester; use tari_service_framework::{ async_trait, reply_channel, @@ -69,7 +69,7 @@ where T: WalletBackend + 'static let (sender, request_stream) = reply_channel::unbounded(); - let (event_publisher, _) = broadcast::channel(200); + let (event_publisher, _) = broadcast::channel(self.config.event_channel_size); let basenode_service_handle = BaseNodeServiceHandle::new(sender, event_publisher.clone()); @@ -80,12 +80,12 @@ where T: WalletBackend + 'static let db = self.db.clone(); context.spawn_when_ready(move |handles| async move { - let connectivity_manager = handles.expect_handle::(); + let wallet_connectivity = handles.expect_handle::(); let service = BaseNodeService::new( config, request_stream, - connectivity_manager, + wallet_connectivity, event_publisher, handles.get_shutdown_signal(), db, diff --git a/base_layer/wallet/src/base_node_service/monitor.rs b/base_layer/wallet/src/base_node_service/monitor.rs index 201a27e01b..5a2c3a7e76 100644 --- a/base_layer/wallet/src/base_node_service/monitor.rs +++ b/base_layer/wallet/src/base_node_service/monitor.rs @@ -23,29 +23,18 @@ use crate::{ base_node_service::{ handle::{BaseNodeEvent, BaseNodeEventSender}, - service::{BaseNodeState, OnlineState}, + service::BaseNodeState, }, + connectivity_service::{OnlineStatus, WalletConnectivityHandle}, error::WalletStorageError, storage::database::{WalletBackend, WalletDatabase}, }; use chrono::Utc; -use futures::{future, future::Either}; use log::*; use std::{convert::TryFrom, sync::Arc, time::Duration}; use tari_common_types::chain_metadata::ChainMetadata; -use tari_comms::{ - connectivity::{ConnectivityError, 
ConnectivityRequester}, - peer_manager::NodeId, - protocol::rpc::RpcError, - PeerConnection, -}; -use tari_core::base_node::rpc::BaseNodeWalletRpcClient; -use tari_shutdown::ShutdownSignal; -use tokio::{ - stream::StreamExt, - sync::{broadcast, RwLock}, - time, -}; +use tari_comms::{peer_manager::NodeId, protocol::rpc::RpcError}; +use tokio::{sync::RwLock, time}; const LOG_TARGET: &str = "wallet::base_node_service::chain_metadata_monitor"; @@ -53,9 +42,8 @@ pub struct BaseNodeMonitor { interval: Duration, state: Arc>, db: WalletDatabase, - connectivity_manager: ConnectivityRequester, + wallet_connectivity: WalletConnectivityHandle, event_publisher: BaseNodeEventSender, - shutdown_signal: ShutdownSignal, } impl BaseNodeMonitor { @@ -63,24 +51,22 @@ impl BaseNodeMonitor { interval: Duration, state: Arc>, db: WalletDatabase, - connectivity_manager: ConnectivityRequester, + wallet_connectivity: WalletConnectivityHandle, event_publisher: BaseNodeEventSender, - shutdown_signal: ShutdownSignal, ) -> Self { Self { interval, state, db, - connectivity_manager, + wallet_connectivity, event_publisher, - shutdown_signal, } } pub async fn run(mut self) { loop { trace!(target: LOG_TARGET, "Beginning new base node monitoring round"); - match self.process().await { + match self.monitor_node().await { Ok(_) => continue, Err(BaseNodeMonitorError::NodeShuttingDown) => { debug!( @@ -90,34 +76,16 @@ impl BaseNodeMonitor { ); break; }, - Err(e @ BaseNodeMonitorError::RpcFailed(_)) | Err(e @ BaseNodeMonitorError::DialFailed(_)) => { - debug!(target: LOG_TARGET, "Connectivity failure to base node: {}", e,); - debug!( - target: LOG_TARGET, - "Setting as OFFLINE and retrying after {:.2?}", self.interval - ); + Err(e @ BaseNodeMonitorError::RpcFailed(_)) => { + warn!(target: LOG_TARGET, "Connectivity failure to base node: {}", e); + debug!(target: LOG_TARGET, "Setting as OFFLINE and retrying...",); self.set_offline().await; - if self.sleep_or_shutdown().await.is_err() { - break; - } - 
continue; - }, - Err(BaseNodeMonitorError::BaseNodeChanged) => { - debug!( - target: LOG_TARGET, - "Base node has changed. Connecting to new base node...", - ); - - self.set_connecting().await; continue; }, Err(e @ BaseNodeMonitorError::InvalidBaseNodeResponse(_)) | Err(e @ BaseNodeMonitorError::WalletStorageError(_)) => { error!(target: LOG_TARGET, "{}", e); - if self.sleep_or_shutdown().await.is_err() { - break; - } continue; }, } @@ -128,79 +96,35 @@ impl BaseNodeMonitor { ); } - async fn process(&mut self) -> Result<(), BaseNodeMonitorError> { - let peer = self.wait_for_peer_to_be_set().await?; - let connection = self.attempt_dial(peer.clone()).await?; - debug!( - target: LOG_TARGET, - "Base node connected. Establishing RPC connection...", - ); - let client = self.connect_client(connection).await?; - debug!(target: LOG_TARGET, "RPC established",); - self.monitor_node(peer, client).await?; - Ok(()) - } - - async fn wait_for_peer_to_be_set(&mut self) -> Result { - // We aren't worried about late subscription here because we also check the state for a set base node peer, as - // long as we subscribe before checking state. - let mut event_subscription = self.event_publisher.subscribe(); + async fn update_connectivity_status(&self) -> NodeId { + let mut watcher = self.wallet_connectivity.get_connectivity_status_watch(); loop { - let peer = self - .state - .read() - .await - .base_node_peer - .as_ref() - .map(|p| p.node_id.clone()); - - match peer { - Some(peer) => return Ok(peer), - None => { - trace!(target: LOG_TARGET, "Base node peer not set yet. 
Waiting for event"); - let either = future::select(event_subscription.next(), &mut self.shutdown_signal).await; - match either { - Either::Left((Some(Ok(_)), _)) | - Either::Left((Some(Err(broadcast::RecvError::Lagged(_))), _)) => { - trace!(target: LOG_TARGET, "Base node monitor got event"); - // If we get any event (or some were missed), let's check base node peer has been set - continue; - }, - // All of these indicate that the node has been shut down - Either::Left((Some(Err(broadcast::RecvError::Closed)), _)) | - Either::Left((None, _)) | - Either::Right((_, _)) => return Err(BaseNodeMonitorError::NodeShuttingDown), - } + use OnlineStatus::*; + match watcher.recv().await.unwrap_or(Offline) { + Online => match self.wallet_connectivity.get_current_base_node_id() { + Some(node_id) => return node_id, + _ => continue, + }, + Connecting => { + self.set_connecting().await; + }, + Offline => { + self.set_offline().await; }, } } } - async fn attempt_dial(&mut self, peer: NodeId) -> Result { - let conn = self.connectivity_manager.dial_peer(peer).await?; - Ok(conn) - } - - async fn connect_client(&self, mut conn: PeerConnection) -> Result { - let client = conn.connect_rpc().await?; - Ok(client) - } - - async fn monitor_node( - &self, - peer_node_id: NodeId, - mut client: BaseNodeWalletRpcClient, - ) -> Result<(), BaseNodeMonitorError> { + async fn monitor_node(&mut self) -> Result<(), BaseNodeMonitorError> { loop { - let latency = client.get_last_request_latency().await?; - trace!( - target: LOG_TARGET, - "Base node latency: {} ms", - latency.unwrap_or_default().as_millis() - ); + let peer_node_id = self.update_connectivity_status().await; + let mut client = self + .wallet_connectivity + .obtain_base_node_wallet_rpc_client() + .await + .ok_or(BaseNodeMonitorError::NodeShuttingDown)?; let tip_info = client.get_tip_info().await?; - let is_synced = tip_info.is_synced; let chain_metadata = tip_info .metadata @@ -209,20 +133,29 @@ impl BaseNodeMonitor { 
ChainMetadata::try_from(metadata).map_err(BaseNodeMonitorError::InvalidBaseNodeResponse) })?; + let latency = client.ping().await?; + let is_synced = tip_info.is_synced; + debug!( + target: LOG_TARGET, + "Base node {} Tip: {} ({}) Latency: {} ms", + peer_node_id, + chain_metadata.height_of_longest_chain(), + if is_synced { "Synced" } else { "Syncing..." }, + latency.as_millis() + ); + self.db.set_chain_metadata(chain_metadata.clone()).await?; - self.map_state(move |state| BaseNodeState { + self.map_state(move |_| BaseNodeState { chain_metadata: Some(chain_metadata), is_synced: Some(is_synced), updated: Some(Utc::now().naive_utc()), - latency, - online: OnlineState::Online, - base_node_peer: state.base_node_peer.clone(), + latency: Some(latency), + online: OnlineStatus::Online, }) .await; - self.sleep_or_shutdown().await?; - self.check_if_base_node_changed(&peer_node_id).await?; + time::delay_for(self.interval).await } // loop only exits on shutdown/error @@ -230,43 +163,24 @@ impl BaseNodeMonitor { Ok(()) } - async fn check_if_base_node_changed(&self, peer_node_id: &NodeId) -> Result<(), BaseNodeMonitorError> { - // Check if the base node peer is no longer set or has changed - if self - .state - .read() - .await - .base_node_peer - .as_ref() - .filter(|p| &p.node_id == peer_node_id) - .is_some() - { - Ok(()) - } else { - Err(BaseNodeMonitorError::BaseNodeChanged) - } - } - async fn set_connecting(&self) { - self.map_state(|state| BaseNodeState { + self.map_state(|_| BaseNodeState { chain_metadata: None, is_synced: None, updated: Some(Utc::now().naive_utc()), latency: None, - online: OnlineState::Connecting, - base_node_peer: state.base_node_peer.clone(), + online: OnlineStatus::Connecting, }) .await; } async fn set_offline(&self) { - self.map_state(|state| BaseNodeState { + self.map_state(|_| BaseNodeState { chain_metadata: None, is_synced: None, updated: Some(Utc::now().naive_utc()), latency: None, - online: OnlineState::Offline, - base_node_peer: 
state.base_node_peer.clone(), + online: OnlineStatus::Offline, }) .await; } @@ -282,23 +196,8 @@ impl BaseNodeMonitor { self.publish_event(BaseNodeEvent::BaseNodeStateChanged(new_state)); } - async fn sleep_or_shutdown(&self) -> Result<(), BaseNodeMonitorError> { - let delay = time::delay_for(self.interval); - let mut shutdown_signal = self.shutdown_signal.clone(); - if let Either::Right(_) = future::select(delay, &mut shutdown_signal).await { - return Err(BaseNodeMonitorError::NodeShuttingDown); - } - Ok(()) - } - fn publish_event(&self, event: BaseNodeEvent) { - trace!(target: LOG_TARGET, "Publishing event: {:?}", event); - let _ = self.event_publisher.send(Arc::new(event)).map_err(|_| { - trace!( - target: LOG_TARGET, - "Could not publish BaseNodeEvent as there are no subscribers" - ) - }); + let _ = self.event_publisher.send(Arc::new(event)); } } @@ -306,14 +205,10 @@ impl BaseNodeMonitor { enum BaseNodeMonitorError { #[error("Node is shutting down")] NodeShuttingDown, - #[error("Failed to dial base node: {0}")] - DialFailed(#[from] ConnectivityError), #[error("Rpc error: {0}")] RpcFailed(#[from] RpcError), #[error("Invalid base node response: {0}")] InvalidBaseNodeResponse(String), #[error("Wallet storage error: {0}")] WalletStorageError(#[from] WalletStorageError), - #[error("Base node changed")] - BaseNodeChanged, } diff --git a/base_layer/wallet/src/base_node_service/service.rs b/base_layer/wallet/src/base_node_service/service.rs index 279186cee4..3da987c8b1 100644 --- a/base_layer/wallet/src/base_node_service/service.rs +++ b/base_layer/wallet/src/base_node_service/service.rs @@ -27,14 +27,15 @@ use super::{ }; use crate::{ base_node_service::monitor::BaseNodeMonitor, + connectivity_service::{OnlineStatus, WalletConnectivityHandle}, storage::database::{WalletBackend, WalletDatabase}, }; use chrono::NaiveDateTime; -use futures::StreamExt; +use futures::{future, StreamExt}; use log::*; use std::{sync::Arc, time::Duration}; use 
tari_common_types::chain_metadata::ChainMetadata; -use tari_comms::{connectivity::ConnectivityRequester, peer_manager::Peer}; +use tari_comms::peer_manager::Peer; use tari_service_framework::reply_channel::Receiver; use tari_shutdown::ShutdownSignal; use tokio::sync::RwLock; @@ -48,16 +49,8 @@ pub struct BaseNodeState { pub is_synced: Option, pub updated: Option, pub latency: Option, - pub online: OnlineState, - pub base_node_peer: Option, -} - -/// Connection state of the Base Node -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub enum OnlineState { - Connecting, - Online, - Offline, + pub online: OnlineStatus, + // pub base_node_peer: Option, } impl Default for BaseNodeState { @@ -67,8 +60,7 @@ impl Default for BaseNodeState { is_synced: None, updated: None, latency: None, - online: OnlineState::Connecting, - base_node_peer: None, + online: OnlineStatus::Connecting, } } } @@ -79,7 +71,7 @@ where T: WalletBackend + 'static { config: BaseNodeServiceConfig, request_stream: Option>>, - connectivity_manager: ConnectivityRequester, + wallet_connectivity: WalletConnectivityHandle, event_publisher: BaseNodeEventSender, shutdown_signal: Option, state: Arc>, @@ -92,7 +84,7 @@ where T: WalletBackend + 'static pub fn new( config: BaseNodeServiceConfig, request_stream: Receiver>, - connectivity_manager: ConnectivityRequester, + wallet_connectivity: WalletConnectivityHandle, event_publisher: BaseNodeEventSender, shutdown_signal: ShutdownSignal, db: WalletDatabase, @@ -100,7 +92,7 @@ where T: WalletBackend + 'static Self { config, request_stream: Some(request_stream), - connectivity_manager, + wallet_connectivity, event_publisher, shutdown_signal: Some(shutdown_signal), state: Default::default(), @@ -124,12 +116,18 @@ where T: WalletBackend + 'static self.config.base_node_monitor_refresh_interval, self.state.clone(), self.db.clone(), - self.connectivity_manager.clone(), + self.wallet_connectivity.clone(), self.event_publisher.clone(), - shutdown_signal.clone(), ); - 
tokio::spawn(monitor.run()); + tokio::spawn({ + let shutdown_signal = shutdown_signal.clone(); + async move { + let monitor_fut = monitor.run(); + futures::pin_mut!(monitor_fut); + future::select(shutdown_signal, monitor_fut).await; + } + }); let mut request_stream = self .request_stream @@ -158,19 +156,10 @@ where T: WalletBackend + 'static Ok(()) } - async fn set_base_node_peer(&self, peer: Peer) { - let new_state = BaseNodeState { - base_node_peer: Some(peer.clone()), - ..Default::default() - }; - - { - let mut lock = self.state.write().await; - *lock = new_state.clone(); - }; - - self.publish_event(BaseNodeEvent::BaseNodeStateChanged(new_state)); + async fn set_base_node_peer(&mut self, peer: Peer) -> Result<(), BaseNodeServiceError> { + self.wallet_connectivity.set_base_node(peer.clone()).await?; self.publish_event(BaseNodeEvent::BaseNodePeerSet(Box::new(peer))); + Ok(()) } /// This handler is called when requests arrive from the various streams @@ -184,11 +173,11 @@ where T: WalletBackend + 'static ); match request { BaseNodeServiceRequest::SetBaseNodePeer(peer) => { - self.set_base_node_peer(*peer).await; + self.set_base_node_peer(*peer).await?; Ok(BaseNodeServiceResponse::BaseNodePeerSet) }, BaseNodeServiceRequest::GetBaseNodePeer => { - let peer = self.get_state().await.base_node_peer.map(Box::new); + let peer = self.wallet_connectivity.get_current_base_node_peer().map(Box::new); Ok(BaseNodeServiceResponse::BaseNodePeer(peer)) }, BaseNodeServiceRequest::GetChainMetadata => match self.get_state().await.chain_metadata.clone() { diff --git a/comms/src/protocol/messaging/consts.rs b/base_layer/wallet/src/connectivity_service/error.rs similarity index 62% rename from comms/src/protocol/messaging/consts.rs rename to base_layer/wallet/src/connectivity_service/error.rs index 483512a39f..a151003faf 100644 --- a/comms/src/protocol/messaging/consts.rs +++ b/base_layer/wallet/src/connectivity_service/error.rs @@ -1,4 +1,4 @@ -// Copyright 2020, The Tari Project +// 
Copyright 2021, The Tari Project // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the // following conditions are met: @@ -20,14 +20,26 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -/// Buffer size for inbound messages from _all_ peers. This should be large enough to buffer quite a few incoming -/// messages before creating backpressure on peers speaking the messaging protocol. -pub const INBOUND_MESSAGE_BUFFER_SIZE: usize = 100; -/// Buffer size notifications that a peer wants to speak /tari/messaging. This buffer is used for all peers, but a low -/// value is ok because this events happen once (or less) per connecting peer. For e.g. a value of 10 would allow 10 -/// peers to concurrently request to speak /tari/messaging. -pub const MESSAGING_PROTOCOL_EVENTS_BUFFER_SIZE: usize = 10; +use futures::channel::{mpsc, oneshot}; +use tari_comms::connectivity::ConnectivityError; -/// Buffer size for requests to the messaging protocol. All outbound messages will be sent along this channel. Some -/// buffering may be required if the node needs to send many messages out at the same time. 
-pub const MESSAGING_REQUEST_BUFFER_SIZE: usize = 50; +#[derive(Debug, thiserror::Error)] +pub enum WalletConnectivityError { + #[error("Base node has not been set")] + BaseNodeNotSet, + #[error("Connectivity error: {0}")] + ConnectivityError(#[from] ConnectivityError), + #[error("Service is terminated and can no longer response to requests")] + ServiceTerminated, +} + +impl From for WalletConnectivityError { + fn from(_: mpsc::SendError) -> Self { + WalletConnectivityError::ServiceTerminated + } +} +impl From for WalletConnectivityError { + fn from(_: oneshot::Canceled) -> Self { + WalletConnectivityError::ServiceTerminated + } +} diff --git a/base_layer/wallet/src/connectivity_service/handle.rs b/base_layer/wallet/src/connectivity_service/handle.rs new file mode 100644 index 0000000000..ac218edc5e --- /dev/null +++ b/base_layer/wallet/src/connectivity_service/handle.rs @@ -0,0 +1,120 @@ +// Copyright 2021, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use super::service::OnlineStatus; +use crate::connectivity_service::{error::WalletConnectivityError, watch::Watch}; +use futures::{ + channel::{mpsc, oneshot}, + SinkExt, +}; +use tari_comms::{ + peer_manager::{NodeId, Peer}, + protocol::rpc::RpcClientLease, +}; +use tari_core::base_node::{rpc::BaseNodeWalletRpcClient, sync::rpc::BaseNodeSyncRpcClient}; +use tokio::sync::watch; + +pub enum WalletConnectivityRequest { + ObtainBaseNodeWalletRpcClient(oneshot::Sender>), + ObtainBaseNodeSyncRpcClient(oneshot::Sender>), +} + +#[derive(Clone)] +pub struct WalletConnectivityHandle { + sender: mpsc::Sender, + base_node_watch: Watch>, + online_status_rx: watch::Receiver, +} + +impl WalletConnectivityHandle { + pub(super) fn new( + sender: mpsc::Sender, + base_node_watch: Watch>, + online_status_rx: watch::Receiver, + ) -> Self { + Self { + sender, + base_node_watch, + online_status_rx, + } + } + + pub async fn set_base_node(&mut self, base_node_peer: Peer) -> Result<(), WalletConnectivityError> { + self.base_node_watch.broadcast(Some(base_node_peer)); + Ok(()) + } + + /// Obtain a BaseNodeWalletRpcClient. + /// + /// This can be relied on to obtain a pooled BaseNodeWalletRpcClient rpc session from a currently selected base + /// node/nodes. It will block until this happens. The ONLY other time it will return is if the node is + /// shutting down, where it will return None. 
Use this function whenever no work can be done without a + /// BaseNodeWalletRpcClient RPC session. + pub async fn obtain_base_node_wallet_rpc_client(&mut self) -> Option> { + let (reply_tx, reply_rx) = oneshot::channel(); + // Under what conditions do the (1) mpsc channel and (2) oneshot channel error? + // (1) when the receiver has been dropped + // (2) when the sender has been dropped + // When can this happen? + // Only when the service is shutdown (or there is a bug in the service that should be fixed) + // None is returned in these cases, which we say means that you will never ever get a client connection + // because the node is shutting down. + self.sender + .send(WalletConnectivityRequest::ObtainBaseNodeWalletRpcClient(reply_tx)) + .await + .ok()?; + + reply_rx.await.ok() + } + + /// Obtain a BaseNodeSyncRpcClient. + /// + /// This can be relied on to obtain a pooled BaseNodeSyncRpcClient rpc session from a currently selected base + /// node/nodes. It will block until this happens. The ONLY other time it will return is if the node is + /// shutting down, where it will return None. Use this function whenever no work can be done without a + /// BaseNodeSyncRpcClient RPC session. 
+ pub async fn obtain_base_node_sync_rpc_client(&mut self) -> Option> { + let (reply_tx, reply_rx) = oneshot::channel(); + self.sender + .send(WalletConnectivityRequest::ObtainBaseNodeSyncRpcClient(reply_tx)) + .await + .ok()?; + + reply_rx.await.ok() + } + + pub async fn get_connectivity_status(&mut self) -> OnlineStatus { + self.online_status_rx.recv().await.unwrap_or(OnlineStatus::Offline) + } + + pub fn get_connectivity_status_watch(&self) -> watch::Receiver { + self.online_status_rx.clone() + } + + pub fn get_current_base_node_peer(&self) -> Option { + self.base_node_watch.borrow().clone() + } + + pub fn get_current_base_node_id(&self) -> Option { + self.base_node_watch.borrow().as_ref().map(|p| p.node_id.clone()) + } +} diff --git a/base_layer/wallet/src/connectivity_service/initializer.rs b/base_layer/wallet/src/connectivity_service/initializer.rs new file mode 100644 index 0000000000..d0c2b94126 --- /dev/null +++ b/base_layer/wallet/src/connectivity_service/initializer.rs @@ -0,0 +1,69 @@ +// Copyright 2021, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +use super::{handle::WalletConnectivityHandle, service::WalletConnectivityService, watch::Watch}; +use crate::{base_node_service::config::BaseNodeServiceConfig, connectivity_service::service::OnlineStatus}; +use futures::channel::mpsc; +use tari_service_framework::{async_trait, ServiceInitializationError, ServiceInitializer, ServiceInitializerContext}; + +pub struct WalletConnectivityInitializer { + config: BaseNodeServiceConfig, +} + +impl WalletConnectivityInitializer { + pub fn new(config: BaseNodeServiceConfig) -> Self { + Self { config } + } +} + +#[async_trait] +impl ServiceInitializer for WalletConnectivityInitializer { + async fn initialize(&mut self, context: ServiceInitializerContext) -> Result<(), ServiceInitializationError> { + let (sender, receiver) = mpsc::channel(5); + let base_node_watch = Watch::new(None); + let online_status_watch = Watch::new(OnlineStatus::Offline); + context.register_handle(WalletConnectivityHandle::new( + sender, + base_node_watch.clone(), + online_status_watch.get_receiver(), + )); + + let config = self.config.clone(); + + context.spawn_until_shutdown(move |handles| { + let connectivity = handles.expect_handle(); + let service = + WalletConnectivityService::new(config, receiver, base_node_watch, online_status_watch, connectivity); + service.start() + }); + + Ok(()) + } +} diff --git a/base_layer/wallet/src/connectivity_service/mod.rs b/base_layer/wallet/src/connectivity_service/mod.rs new file mode 100644 index 0000000000..035bd34d64 --- /dev/null +++ b/base_layer/wallet/src/connectivity_service/mod.rs @@ -0,0 +1,38 @@ +// Copyright 2021, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +mod error; +pub use error::WalletConnectivityError; + +mod handle; +pub use handle::WalletConnectivityHandle; + +mod initializer; +pub use initializer::WalletConnectivityInitializer; + +mod service; +pub use service::OnlineStatus; + +mod watch; + +#[cfg(test)] +mod test; diff --git a/base_layer/wallet/src/connectivity_service/service.rs b/base_layer/wallet/src/connectivity_service/service.rs new file mode 100644 index 0000000000..c0cf474b96 --- /dev/null +++ b/base_layer/wallet/src/connectivity_service/service.rs @@ -0,0 +1,325 @@ +// Copyright 2021, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +use crate::{ + base_node_service::config::BaseNodeServiceConfig, + connectivity_service::{error::WalletConnectivityError, handle::WalletConnectivityRequest, watch::Watch}, +}; +use core::mem; +use futures::{ + channel::{mpsc, oneshot}, + future, + future::Either, + stream::Fuse, + StreamExt, +}; +use log::*; +use tari_comms::{ + connectivity::ConnectivityRequester, + peer_manager::{NodeId, Peer}, + protocol::rpc::{RpcClientLease, RpcClientPool}, + PeerConnection, +}; +use tari_core::base_node::{rpc::BaseNodeWalletRpcClient, sync::rpc::BaseNodeSyncRpcClient}; +use tokio::time; + +const LOG_TARGET: &str = "wallet::connectivity"; + +/// Connection status of the Base Node +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub enum OnlineStatus { + Connecting, + Online, + Offline, +} + +pub struct WalletConnectivityService { + config: BaseNodeServiceConfig, + request_stream: Fuse>, + connectivity: ConnectivityRequester, + base_node_watch: Watch>, + pools: Option, + online_status_watch: Watch, + pending_requests: Vec, +} + +struct ClientPoolContainer { + pub base_node_wallet_rpc_client: RpcClientPool, + pub base_node_sync_rpc_client: RpcClientPool, +} + +impl WalletConnectivityService { + pub(super) fn new( + config: BaseNodeServiceConfig, + request_stream: mpsc::Receiver, + base_node_watch: Watch>, + online_status_watch: Watch, + connectivity: ConnectivityRequester, + ) -> Self { + Self { + config, + request_stream: request_stream.fuse(), + connectivity, + base_node_watch, + pools: None, + pending_requests: Vec::new(), + online_status_watch, + } + } + + pub async fn start(mut self) { + debug!(target: LOG_TARGET, "Wallet connectivity service has started."); + let mut base_node_watch_rx = self.base_node_watch.get_receiver().fuse(); + loop { + futures::select! 
{ + req = self.request_stream.select_next_some() => { + self.handle_request(req).await; + }, + maybe_peer = base_node_watch_rx.select_next_some() => { + if maybe_peer.is_some() { + // This will block the rest until the connection is established. This is what we want. + self.setup_base_node_connection().await; + } + } + } + } + } + + async fn handle_request(&mut self, request: WalletConnectivityRequest) { + use WalletConnectivityRequest::*; + match request { + ObtainBaseNodeWalletRpcClient(reply) => { + self.handle_pool_request(reply.into()).await; + }, + ObtainBaseNodeSyncRpcClient(reply) => { + self.handle_pool_request(reply.into()).await; + }, + } + } + + async fn handle_pool_request(&mut self, reply: ReplyOneshot) { + use ReplyOneshot::*; + match reply { + WalletRpc(tx) => self.handle_get_base_node_wallet_rpc_client(tx).await, + SyncRpc(tx) => self.handle_get_base_node_sync_rpc_client(tx).await, + } + } + + async fn handle_get_base_node_wallet_rpc_client( + &mut self, + reply: oneshot::Sender>, + ) { + match self.pools { + Some(ref pools) => match pools.base_node_wallet_rpc_client.get().await { + Ok(client) => { + let _ = reply.send(client); + }, + Err(e) => { + warn!( + target: LOG_TARGET, + "Base node connection failed: {}. Reconnecting...", e + ); + self.trigger_reconnect(); + self.pending_requests.push(reply.into()); + }, + }, + None => { + self.pending_requests.push(reply.into()); + if self.base_node_watch.borrow().is_none() { + warn!( + target: LOG_TARGET, + "{} requests are waiting for base node to be set", + self.pending_requests.len() + ); + } + }, + } + } + + async fn handle_get_base_node_sync_rpc_client( + &mut self, + reply: oneshot::Sender>, + ) { + match self.pools { + Some(ref pools) => match pools.base_node_sync_rpc_client.get().await { + Ok(client) => { + let _ = reply.send(client); + }, + Err(e) => { + warn!( + target: LOG_TARGET, + "Base node connection failed: {}. 
Reconnecting...", e + ); + self.trigger_reconnect(); + self.pending_requests.push(reply.into()); + }, + }, + None => { + self.pending_requests.push(reply.into()); + if self.base_node_watch.borrow().is_none() { + warn!( + target: LOG_TARGET, + "{} requests are waiting for base node to be set", + self.pending_requests.len() + ); + } + }, + } + } + + fn trigger_reconnect(&mut self) { + let peer = self + .base_node_watch + .borrow() + .clone() + .expect("trigger_reconnect called before base node is set"); + // Trigger the watch so that a peer connection is reinitiated + self.set_base_node_peer(peer); + } + + fn set_base_node_peer(&mut self, peer: Peer) { + self.pools = None; + self.base_node_watch.broadcast(Some(peer)); + } + + fn current_base_node(&self) -> Option { + self.base_node_watch.borrow().as_ref().map(|p| p.node_id.clone()) + } + + async fn setup_base_node_connection(&mut self) { + self.pools = None; + loop { + let node_id = match self.current_base_node() { + Some(n) => n, + None => return, + }; + debug!( + target: LOG_TARGET, + "Attempting to connect to base node peer {}...", node_id + ); + self.set_online_status(OnlineStatus::Connecting); + match self.try_setup_rpc_pool(node_id.clone()).await { + Ok(true) => { + self.set_online_status(OnlineStatus::Online); + debug!( + target: LOG_TARGET, + "Wallet is ONLINE and connected to base node {}", node_id + ); + break; + }, + Ok(false) => { + // Retry with updated peer + continue; + }, + Err(e) => { + if self.current_base_node() != Some(node_id) { + self.set_online_status(OnlineStatus::Connecting); + } else { + self.set_online_status(OnlineStatus::Offline); + } + error!(target: LOG_TARGET, "{}", e); + time::delay_for(self.config.base_node_monitor_refresh_interval).await; + continue; + }, + } + } + } + + fn set_online_status(&self, status: OnlineStatus) { + let _ = self.online_status_watch.broadcast(status); + } + + async fn try_setup_rpc_pool(&mut self, peer: NodeId) -> Result { + 
self.connectivity.add_managed_peers(vec![peer.clone()]).await?; + let conn = match self.try_dial_peer(peer).await? { + Some(peer) => peer, + None => return Ok(false), + }; + debug!( + target: LOG_TARGET, + "Successfully established peer connection to base node {}", + conn.peer_node_id() + ); + self.pools = Some(ClientPoolContainer { + base_node_sync_rpc_client: conn + .create_rpc_client_pool(self.config.base_node_rpc_pool_size, Default::default()), + base_node_wallet_rpc_client: conn + .create_rpc_client_pool(self.config.base_node_rpc_pool_size, Default::default()), + }); + self.notify_pending_requests().await?; + debug!( + target: LOG_TARGET, + "Successfully established RPC connection {}", + conn.peer_node_id() + ); + Ok(true) + } + + async fn try_dial_peer(&mut self, peer: NodeId) -> Result, WalletConnectivityError> { + let recv_fut = self.base_node_watch.recv(); + futures::pin_mut!(recv_fut); + let dial_fut = self.connectivity.dial_peer(peer); + futures::pin_mut!(dial_fut); + match future::select(recv_fut, dial_fut).await { + Either::Left(_) => Ok(None), + Either::Right((conn, _)) => Ok(Some(conn?)), + } + } + + async fn notify_pending_requests(&mut self) -> Result<(), WalletConnectivityError> { + let current_pending = mem::take(&mut self.pending_requests); + for reply in current_pending { + if reply.is_canceled() { + continue; + } + + self.handle_pool_request(reply).await; + } + Ok(()) + } +} + +enum ReplyOneshot { + WalletRpc(oneshot::Sender>), + SyncRpc(oneshot::Sender>), +} + +impl ReplyOneshot { + pub fn is_canceled(&self) -> bool { + use ReplyOneshot::*; + match self { + WalletRpc(tx) => tx.is_canceled(), + SyncRpc(tx) => tx.is_canceled(), + } + } +} + +impl From>> for ReplyOneshot { + fn from(tx: oneshot::Sender>) -> Self { + ReplyOneshot::WalletRpc(tx) + } +} +impl From>> for ReplyOneshot { + fn from(tx: oneshot::Sender>) -> Self { + ReplyOneshot::SyncRpc(tx) + } +} diff --git a/base_layer/wallet/src/connectivity_service/test.rs 
b/base_layer/wallet/src/connectivity_service/test.rs new file mode 100644 index 0000000000..7c24ef5b46 --- /dev/null +++ b/base_layer/wallet/src/connectivity_service/test.rs @@ -0,0 +1,235 @@ +// Copyright 2021, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +use super::service::WalletConnectivityService; +use crate::connectivity_service::{watch::Watch, OnlineStatus, WalletConnectivityHandle}; +use core::convert; +use futures::{channel::mpsc, future}; +use std::{iter, sync::Arc}; +use tari_comms::{ + peer_manager::PeerFeatures, + protocol::rpc::{ + mock::{MockRpcImpl, MockRpcServer}, + RpcPoolClient, + }, + test_utils::{ + mocks::{create_connectivity_mock, ConnectivityManagerMockState}, + node_identity::build_node_identity, + }, + Substream, +}; +use tari_shutdown::Shutdown; +use tari_test_utils::runtime::spawn_until_shutdown; +use tokio::{sync::Barrier, task}; + +async fn setup() -> ( + WalletConnectivityHandle, + MockRpcServer, + ConnectivityManagerMockState, + Shutdown, +) { + let (tx, rx) = mpsc::channel(1); + let base_node_watch = Watch::new(None); + let online_status_watch = Watch::new(OnlineStatus::Offline); + let handle = WalletConnectivityHandle::new(tx, base_node_watch.clone(), online_status_watch.get_receiver()); + let (connectivity, mock) = create_connectivity_mock(); + let mock_state = mock.spawn(); + // let peer_manager = create_peer_manager(tempdir().unwrap()); + let service = WalletConnectivityService::new( + Default::default(), + rx, + base_node_watch, + online_status_watch, + connectivity, + ); + let shutdown = spawn_until_shutdown(service.start()); + + let mock_svc = MockRpcImpl::new(); + let mut mock_server = MockRpcServer::new(mock_svc, build_node_identity(PeerFeatures::COMMUNICATION_NODE)); + mock_server.serve(); + + (handle, mock_server, mock_state, shutdown) +} + +#[tokio_macros::test] +async fn it_dials_peer_when_base_node_is_set() { + let (mut handle, mock_server, mock_state, _shutdown) = setup().await; + let base_node_peer = build_node_identity(PeerFeatures::COMMUNICATION_NODE); + let conn = mock_server.create_mockimpl_connection(base_node_peer.to_peer()).await; + + // Set the mock to defer returning a result for the peer connection + 
mock_state.set_pending_connection(base_node_peer.node_id()).await; + // Initiate a connection to the base node + handle.set_base_node(base_node_peer.to_peer()).await.unwrap(); + + // Wait for connection request + mock_state.await_call_count(1).await; + mock_state.expect_dial_peer(base_node_peer.node_id()).await; + + // Now a connection will given to the service + mock_state.add_active_connection(conn).await; + + let rpc_client = handle.obtain_base_node_wallet_rpc_client().await.unwrap(); + assert!(rpc_client.is_connected()); +} + +#[tokio_macros::test] +async fn it_resolves_many_pending_rpc_session_requests() { + let (mut handle, mock_server, mock_state, _shutdown) = setup().await; + let base_node_peer = build_node_identity(PeerFeatures::COMMUNICATION_NODE); + let conn = mock_server.create_mockimpl_connection(base_node_peer.to_peer()).await; + + // Set the mock to defer returning a result for the peer connection + mock_state.set_pending_connection(base_node_peer.node_id()).await; + + // Initiate a connection to the base node + handle.set_base_node(base_node_peer.to_peer()).await.unwrap(); + + let pending_requests = iter::repeat_with(|| { + let mut handle = handle.clone(); + task::spawn(async move { + let rpc_client = handle.obtain_base_node_wallet_rpc_client().await.unwrap(); + rpc_client.is_connected() + }) + }) + .take(10) + // Eagerly call `obtain_base_node_rpc_client` + .collect::>(); + + // Now a connection will given to the service + mock_state.add_active_connection(conn).await; + + let results = future::join_all(pending_requests).await; + assert!(results.into_iter().map(Result::unwrap).all(convert::identity)); +} + +#[tokio_macros::test] +async fn it_changes_to_a_new_base_node() { + let (mut handle, mock_server, mock_state, _shutdown) = setup().await; + let base_node_peer1 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); + let conn1 = mock_server.create_mockimpl_connection(base_node_peer1.to_peer()).await; + let base_node_peer2 = 
build_node_identity(PeerFeatures::COMMUNICATION_NODE); + let conn2 = mock_server.create_mockimpl_connection(base_node_peer2.to_peer()).await; + + mock_state.add_active_connection(conn1).await; + mock_state.add_active_connection(conn2).await; + + // Initiate a connection to the base node + handle.set_base_node(base_node_peer1.to_peer()).await.unwrap(); + + mock_state.await_call_count(2).await; + mock_state.expect_dial_peer(base_node_peer1.node_id()).await; + assert_eq!(mock_state.count_calls_containing("AddManagedPeer").await, 2); + let _ = mock_state.take_calls().await; + + let rpc_client = handle.obtain_base_node_wallet_rpc_client().await.unwrap(); + assert!(rpc_client.is_connected()); + + // Initiate a connection to the base node + handle.set_base_node(base_node_peer2.to_peer()).await.unwrap(); + + mock_state.await_call_count(2).await; + mock_state.expect_dial_peer(base_node_peer2.node_id()).await; + assert_eq!(mock_state.count_calls_containing("AddManagedPeer").await, 2); + + let rpc_client = handle.obtain_base_node_wallet_rpc_client().await.unwrap(); + assert!(rpc_client.is_connected()); +} + +#[tokio_macros::test] +async fn it_gracefully_handles_connect_fail_reconnect() { + let (mut handle, mock_server, mock_state, _shutdown) = setup().await; + let base_node_peer = build_node_identity(PeerFeatures::COMMUNICATION_NODE); + let mut conn = mock_server.create_mockimpl_connection(base_node_peer.to_peer()).await; + + // Set the mock to defer returning a result for the peer connection + mock_state.set_pending_connection(base_node_peer.node_id()).await; + + // Initiate a connection to the base node + handle.set_base_node(base_node_peer.to_peer()).await.unwrap(); + + // Now a connection will given to the service + mock_state.add_active_connection(conn.clone()).await; + // Empty out all the calls + let _ = mock_state.take_calls().await; + conn.disconnect().await.unwrap(); + + let barrier = Arc::new(Barrier::new(2)); + let pending_request = task::spawn({ + let mut handle 
= handle.clone(); + let barrier = barrier.clone(); + async move { + barrier.wait().await; + let rpc_client = handle.obtain_base_node_wallet_rpc_client().await.unwrap(); + assert!(rpc_client.is_connected()); + } + }); + + mock_state.await_call_count(2).await; + mock_state.expect_dial_peer(base_node_peer.node_id()).await; + + // Make sure that the task has actually started before continuing, otherwise we may not be testing the client asking + // for a client session before a connection is made + barrier.wait().await; + + // "Establish" a new connections + let conn = mock_server.create_mockimpl_connection(base_node_peer.to_peer()).await; + mock_state.add_active_connection(conn).await; + + pending_request.await.unwrap(); +} + +#[tokio_macros::test] +async fn it_gracefully_handles_multiple_connection_failures() { + let (mut handle, mock_server, mock_state, _shutdown) = setup().await; + let base_node_peer = build_node_identity(PeerFeatures::COMMUNICATION_NODE); + let conn = mock_server.create_mockimpl_connection(base_node_peer.to_peer()).await; + + // Initiate a connection to the base node + handle.set_base_node(base_node_peer.to_peer()).await.unwrap(); + + // Now a connection will given to the service + mock_state.add_active_connection(conn.clone()).await; + let barrier = Arc::new(Barrier::new(2)); + + let pending_request = task::spawn({ + let mut handle = handle.clone(); + let barrier = barrier.clone(); + async move { + barrier.wait().await; + let rpc_client = handle.obtain_base_node_wallet_rpc_client().await.unwrap(); + assert!(rpc_client.is_connected()); + } + }); + + mock_state.await_call_count(2).await; + mock_state.expect_dial_peer(base_node_peer.node_id()).await; + + barrier.wait().await; + + // Peer has failed up until this point, but finally the base node "comes online" + let conn = mock_server.create_mockimpl_connection(base_node_peer.to_peer()).await; + mock_state.add_active_connection(conn).await; + + // Still able to get a base node rpc client + 
pending_request.await.unwrap(); +} diff --git a/base_layer/wallet/src/connectivity_service/watch.rs b/base_layer/wallet/src/connectivity_service/watch.rs new file mode 100644 index 0000000000..1f1e868d47 --- /dev/null +++ b/base_layer/wallet/src/connectivity_service/watch.rs @@ -0,0 +1,67 @@ +// Copyright 2021, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +use std::sync::Arc; +use tokio::sync::watch; + +#[derive(Clone)] +pub struct Watch(Arc>, watch::Receiver); + +impl Watch { + pub fn new(initial: T) -> Self { + let (tx, rx) = watch::channel(initial); + Self(Arc::new(tx), rx) + } + + #[allow(dead_code)] + pub async fn recv(&mut self) -> Option { + self.receiver_mut().recv().await + } + + pub fn borrow(&self) -> watch::Ref<'_, T> { + self.receiver().borrow() + } + + pub fn broadcast(&self, item: T) { + // SAFETY: broadcast becomes infallible because the receiver is owned in Watch and so has the same lifetime + if self.sender().broadcast(item).is_err() { + // Result::expect requires E: fmt::Debug and `watch::SendError` is not, this is equivalent + panic!("watch internal receiver is dropped"); + } + } + + fn sender(&self) -> &watch::Sender { + &self.0 + } + + fn receiver_mut(&mut self) -> &mut watch::Receiver { + &mut self.1 + } + + pub fn receiver(&self) -> &watch::Receiver { + &self.1 + } + + pub fn get_receiver(&self) -> watch::Receiver { + self.receiver().clone() + } +} diff --git a/base_layer/wallet/src/lib.rs b/base_layer/wallet/src/lib.rs index 53e06b041a..bc0b4c1a04 100644 --- a/base_layer/wallet/src/lib.rs +++ b/base_layer/wallet/src/lib.rs @@ -6,11 +6,11 @@ #![deny(unreachable_patterns)] #![deny(unknown_lints)] #![recursion_limit = "2048"] -#![feature(drain_filter)] #[macro_use] mod macros; pub mod base_node_service; +pub mod connectivity_service; pub mod contacts_service; pub mod error; pub mod output_manager_service; @@ -21,9 +21,6 @@ pub mod types; pub mod util; pub mod wallet; -#[cfg(feature = "test_harness")] -pub mod testnet_utils; - #[macro_use] extern crate diesel; #[macro_use] diff --git a/base_layer/wallet/src/output_manager_service/config.rs b/base_layer/wallet/src/output_manager_service/config.rs index fb5158e5ce..bd44a02761 100644 --- a/base_layer/wallet/src/output_manager_service/config.rs +++ b/base_layer/wallet/src/output_manager_service/config.rs @@ -30,6 +30,8 @@ pub struct 
OutputManagerServiceConfig { pub prevent_fee_gt_amount: bool, pub peer_dial_retry_timeout: Duration, pub seed_word_language: MnemonicLanguage, + pub event_channel_size: usize, + pub base_node_update_publisher_channel_size: usize, } impl Default for OutputManagerServiceConfig { @@ -40,6 +42,8 @@ impl Default for OutputManagerServiceConfig { prevent_fee_gt_amount: true, peer_dial_retry_timeout: Duration::from_secs(20), seed_word_language: MnemonicLanguage::English, + event_channel_size: 250, + base_node_update_publisher_channel_size: 50, } } } diff --git a/base_layer/wallet/src/output_manager_service/handle.rs b/base_layer/wallet/src/output_manager_service/handle.rs index 8b4ae008f7..659fab4a42 100644 --- a/base_layer/wallet/src/output_manager_service/handle.rs +++ b/base_layer/wallet/src/output_manager_service/handle.rs @@ -57,8 +57,8 @@ pub enum OutputManagerRequest { GetCoinbaseTransaction((u64, MicroTari, MicroTari, u64)), ConfirmPendingTransaction(u64), ConfirmTransaction((u64, Vec, Vec)), - PrepareToSendTransaction((MicroTari, MicroTari, Option, String, TariScript)), - CreatePayToSelfTransaction((MicroTari, MicroTari, Option, String)), + PrepareToSendTransaction((TxId, MicroTari, MicroTari, Option, String, TariScript)), + CreatePayToSelfTransaction((TxId, MicroTari, MicroTari, Option, String)), CancelTransaction(u64), TimeoutTransactions(Duration), GetPendingTransactions, @@ -76,6 +76,7 @@ pub enum OutputManagerRequest { ScanForRecoverableOutputs(Vec), ScanOutputs(Vec), AddKnownOneSidedPaymentScript(KnownOneSidedPaymentScript), + ReinstateCancelledInboundTx(TxId), } impl fmt::Display for OutputManagerRequest { @@ -95,8 +96,8 @@ impl fmt::Display for OutputManagerRequest { GetRecipientTransaction(_) => write!(f, "GetRecipientTransaction"), ConfirmTransaction(v) => write!(f, "ConfirmTransaction ({})", v.0), ConfirmPendingTransaction(v) => write!(f, "ConfirmPendingTransaction ({})", v), - PrepareToSendTransaction((_, _, _, msg, _)) => write!(f, 
"PrepareToSendTransaction ({})", msg), - CreatePayToSelfTransaction((_, _, _, msg)) => write!(f, "CreatePayToSelfTransaction ({})", msg), + PrepareToSendTransaction((_, _, _, _, msg, _)) => write!(f, "PrepareToSendTransaction ({})", msg), + CreatePayToSelfTransaction((_, _, _, _, msg)) => write!(f, "CreatePayToSelfTransaction ({})", msg), CancelTransaction(v) => write!(f, "CancelTransaction ({})", v), TimeoutTransactions(d) => write!(f, "TimeoutTransactions ({}s)", d.as_secs()), GetPendingTransactions => write!(f, "GetPendingTransactions"), @@ -113,8 +114,9 @@ impl fmt::Display for OutputManagerRequest { GetPublicRewindKeys => write!(f, "GetPublicRewindKeys"), FeeEstimate(_) => write!(f, "FeeEstimate"), ScanForRecoverableOutputs(_) => write!(f, "ScanForRecoverableOutputs"), - ScanOutputs(_) => write!(f, "ScanRewindAndImportOutputs"), + ScanOutputs(_) => write!(f, "ScanOutputs"), AddKnownOneSidedPaymentScript(_) => write!(f, "AddKnownOneSidedPaymentScript"), + ReinstateCancelledInboundTx(_) => write!(f, "ReinstateCancelledInboundTx"), } } } @@ -129,7 +131,7 @@ pub enum OutputManagerResponse { CoinbaseTransaction(Transaction), OutputConfirmed, PendingTransactionConfirmed, - PayToSelfTransaction((TxId, MicroTari, Transaction)), + PayToSelfTransaction((MicroTari, Transaction)), TransactionConfirmed, TransactionToSend(SenderTransactionProtocol), TransactionCancelled, @@ -149,6 +151,7 @@ pub enum OutputManagerResponse { RewoundOutputs(Vec), ScanOutputs(Vec), AddKnownOneSidedPaymentScript, + ReinstatedCancelledInboundTx, } pub type OutputManagerEventSender = broadcast::Sender>; @@ -277,6 +280,7 @@ impl OutputManagerHandle { pub async fn prepare_transaction_to_send( &mut self, + tx_id: TxId, amount: MicroTari, fee_per_gram: MicroTari, lock_height: Option, @@ -286,6 +290,7 @@ impl OutputManagerHandle { match self .handle .call(OutputManagerRequest::PrepareToSendTransaction(( + tx_id, amount, fee_per_gram, lock_height, @@ -526,14 +531,16 @@ impl OutputManagerHandle { pub 
async fn create_pay_to_self_transaction( &mut self, + tx_id: TxId, amount: MicroTari, fee_per_gram: MicroTari, lock_height: Option, message: String, - ) -> Result<(TxId, MicroTari, Transaction), OutputManagerError> { + ) -> Result<(MicroTari, Transaction), OutputManagerError> { match self .handle .call(OutputManagerRequest::CreatePayToSelfTransaction(( + tx_id, amount, fee_per_gram, lock_height, @@ -545,4 +552,15 @@ impl OutputManagerHandle { _ => Err(OutputManagerError::UnexpectedApiResponse), } } + + pub async fn reinstate_cancelled_inbound_transaction(&mut self, tx_id: TxId) -> Result<(), OutputManagerError> { + match self + .handle + .call(OutputManagerRequest::ReinstateCancelledInboundTx(tx_id)) + .await?? + { + OutputManagerResponse::ReinstatedCancelledInboundTx => Ok(()), + _ => Err(OutputManagerError::UnexpectedApiResponse), + } + } } diff --git a/base_layer/wallet/src/output_manager_service/mod.rs b/base_layer/wallet/src/output_manager_service/mod.rs index aac9c7b882..ce6fd70699 100644 --- a/base_layer/wallet/src/output_manager_service/mod.rs +++ b/base_layer/wallet/src/output_manager_service/mod.rs @@ -106,7 +106,7 @@ where T: OutputManagerBackend + 'static ); let (sender, receiver) = reply_channel::unbounded(); - let (publisher, _) = broadcast::channel(200); + let (publisher, _) = broadcast::channel(self.config.event_channel_size); // Register handle before waiting for handles to be ready let oms_handle = OutputManagerHandle::new(sender, publisher.clone()); diff --git a/base_layer/wallet/src/output_manager_service/recovery/standard_outputs_recoverer.rs b/base_layer/wallet/src/output_manager_service/recovery/standard_outputs_recoverer.rs index 68de7e400f..64e4d510d2 100644 --- a/base_layer/wallet/src/output_manager_service/recovery/standard_outputs_recoverer.rs +++ b/base_layer/wallet/src/output_manager_service/recovery/standard_outputs_recoverer.rs @@ -90,7 +90,7 @@ where TBackend: OutputManagerBackend + 'static UnblindedOutput::new( 
output.committed_value, output.blinding_factor.clone(), - Some(features), + features, script, inputs!(PublicKey::from_secret_key(&output.blinding_factor)), output.blinding_factor, diff --git a/base_layer/wallet/src/output_manager_service/service.rs b/base_layer/wallet/src/output_manager_service/service.rs index 0cddf579c4..c8946b9ff9 100644 --- a/base_layer/wallet/src/output_manager_service/service.rs +++ b/base_layer/wallet/src/output_manager_service/service.rs @@ -40,6 +40,7 @@ use crate::{ types::{HashDigest, ValidationRetryStrategy}, }; use blake2::Digest; +use chrono::Utc; use diesel::result::{DatabaseErrorKind, Error as DieselError}; use futures::{pin_mut, StreamExt}; use log::*; @@ -142,7 +143,8 @@ where TBackend: OutputManagerBackend + 'static shutdown_signal, }; - let (base_node_update_publisher, _) = broadcast::channel(50); + let (base_node_update_publisher, _) = + broadcast::channel(resources.config.base_node_update_publisher_channel_size); Ok(OutputManagerService { resources, @@ -228,19 +230,21 @@ where TBackend: OutputManagerBackend + 'static .await .map(OutputManagerResponse::CoinbaseTransaction), OutputManagerRequest::PrepareToSendTransaction(( + tx_id, amount, fee_per_gram, lock_height, message, recipient_script, )) => self - .prepare_transaction_to_send(amount, fee_per_gram, lock_height, message, recipient_script) + .prepare_transaction_to_send(tx_id, amount, fee_per_gram, lock_height, message, recipient_script) .await .map(OutputManagerResponse::TransactionToSend), - OutputManagerRequest::CreatePayToSelfTransaction((amount, fee_per_gram, lock_height, message)) => self - .create_pay_to_self_transaction(amount, fee_per_gram, lock_height, message) - .await - .map(OutputManagerResponse::PayToSelfTransaction), + OutputManagerRequest::CreatePayToSelfTransaction((tx_id, amount, fee_per_gram, lock_height, message)) => { + self.create_pay_to_self_transaction(tx_id, amount, fee_per_gram, lock_height, message) + .await + 
.map(OutputManagerResponse::PayToSelfTransaction) + }, OutputManagerRequest::FeeEstimate((amount, fee_per_gram, num_kernels, num_outputs)) => self .fee_estimate(amount, fee_per_gram, num_kernels, num_outputs) .await @@ -343,6 +347,10 @@ where TBackend: OutputManagerBackend + 'static .add_known_script(known_script) .await .map(|_| OutputManagerResponse::AddKnownOneSidedPaymentScript), + OutputManagerRequest::ReinstateCancelledInboundTx(tx_id) => self + .reinstate_cancelled_inbound_transaction(tx_id) + .await + .map(|_| OutputManagerResponse::ReinstatedCancelledInboundTx), } } @@ -441,7 +449,7 @@ where TBackend: OutputManagerBackend + 'static UnblindedOutput::new( single_round_sender_data.amount, spending_key.clone(), - Some(single_round_sender_data.features.clone()), + single_round_sender_data.features.clone(), single_round_sender_data.script.clone(), // TODO: The input data should be variable; this will only work for a Nop script inputs!(PublicKey::from_secret_key(&script_private_key)), @@ -554,6 +562,7 @@ where TBackend: OutputManagerBackend + 'static /// will be produced. 
pub async fn prepare_transaction_to_send( &mut self, + tx_id: TxId, amount: MicroTari, fee_per_gram: MicroTari, lock_height: Option, @@ -584,7 +593,8 @@ where TBackend: OutputManagerBackend + 'static PrivateKey::random(&mut OsRng), ) .with_message(message) - .with_prevent_fee_gt_amount(self.resources.config.prevent_fee_gt_amount); + .with_prevent_fee_gt_amount(self.resources.config.prevent_fee_gt_amount) + .with_tx_id(tx_id); for uo in outputs.iter() { builder.with_input( @@ -635,7 +645,6 @@ where TBackend: OutputManagerBackend + 'static )?); } - let tx_id = stp.get_tx_id()?; // The Transaction Protocol built successfully so we will pull the unspent outputs out of the unspent list and // store them until the transaction times out OR is confirmed self.resources @@ -663,6 +672,11 @@ where TBackend: OutputManagerBackend + 'static fees: MicroTari, block_height: u64, ) -> Result { + debug!( + target: LOG_TARGET, + "Building coinbase transaction for block_height {} with TxId: {}", block_height, tx_id + ); + let (spending_key, script_key) = self .resources .master_key_manager @@ -713,11 +727,12 @@ where TBackend: OutputManagerBackend + 'static async fn create_pay_to_self_transaction( &mut self, + tx_id: TxId, amount: MicroTari, fee_per_gram: MicroTari, lock_height: Option, message: String, - ) -> Result<(TxId, MicroTari, Transaction), OutputManagerError> { + ) -> Result<(MicroTari, Transaction), OutputManagerError> { let (inputs, _, total) = self.select_utxos(amount, fee_per_gram, 1, None).await?; let offset = PrivateKey::random(&mut OsRng); @@ -732,7 +747,8 @@ where TBackend: OutputManagerBackend + 'static .with_offset(offset.clone()) .with_private_nonce(nonce.clone()) .with_message(message) - .with_prevent_fee_gt_amount(self.resources.config.prevent_fee_gt_amount); + .with_prevent_fee_gt_amount(self.resources.config.prevent_fee_gt_amount) + .with_tx_id(tx_id); for uo in &inputs { builder.with_input( @@ -760,7 +776,7 @@ where TBackend: OutputManagerBackend + 'static 
UnblindedOutput::new( amount, spending_key.clone(), - Some(output_features), + output_features, script, inputs!(PublicKey::from_secret_key(&script_private_key)), script_private_key, @@ -808,7 +824,6 @@ where TBackend: OutputManagerBackend + 'static outputs.push(change_output); } - let tx_id = stp.get_tx_id()?; trace!( target: LOG_TARGET, "Encumber send to self transaction ({}) outputs.", @@ -821,7 +836,7 @@ where TBackend: OutputManagerBackend + 'static stp.finalize(KernelFeatures::empty(), &factories)?; let tx = stp.take_transaction()?; - Ok((tx_id, fee, tx)) + Ok((fee, tx)) } /// Confirm that a transaction has finished being negotiated between parties so the short-term encumberance can be @@ -891,6 +906,27 @@ where TBackend: OutputManagerBackend + 'static Ok(self.resources.db.cancel_pending_transaction_outputs(tx_id).await?) } + /// Restore the pending transaction encumberance and output for an inbound transaction that was previously + /// cancelled. + async fn reinstate_cancelled_inbound_transaction(&mut self, tx_id: TxId) -> Result<(), OutputManagerError> { + self.resources.db.reinstate_inbound_output(tx_id).await?; + + self.resources + .db + .add_pending_transaction_outputs(PendingTransactionOutputs { + tx_id, + outputs_to_be_spent: Vec::new(), + outputs_to_be_received: Vec::new(), + timestamp: Utc::now().naive_utc(), + coinbase_block_height: None, + }) + .await?; + + self.confirm_encumberance(tx_id).await?; + + Ok(()) + } + /// Go through the pending transaction and if any have existed longer than the specified duration, cancel them async fn timeout_pending_transactions(&mut self, period: Duration) -> Result<(), OutputManagerError> { Ok(self.resources.db.timeout_pending_transaction_outputs(period).await?) 
@@ -1002,7 +1038,7 @@ where TBackend: OutputManagerBackend + 'static break; } fee_with_change = Fee::calculate(fee_per_gram, 1, utxos.len(), output_count + 1); - if utxos_total_value >= amount + fee_with_change { + if utxos_total_value > amount + fee_with_change { require_change_output = true; break; } @@ -1149,7 +1185,7 @@ where TBackend: OutputManagerBackend + 'static UnblindedOutput::new( output_amount, spending_key.clone(), - Some(output_features), + output_features, script, inputs!(PublicKey::from_secret_key(&script_private_key)), script_private_key, @@ -1231,7 +1267,7 @@ where TBackend: OutputManagerBackend + 'static let rewound_output = UnblindedOutput::new( rewound_result.committed_value, rewound_result.blinding_factor.clone(), - Some(output.features), + output.features, known_one_sided_payment_scripts[i].script.clone(), known_one_sided_payment_scripts[i].input.clone(), known_one_sided_payment_scripts[i].private_key.clone(), diff --git a/base_layer/wallet/src/output_manager_service/storage/database.rs b/base_layer/wallet/src/output_manager_service/storage/database.rs index d67a2a8219..52d552e016 100644 --- a/base_layer/wallet/src/output_manager_service/storage/database.rs +++ b/base_layer/wallet/src/output_manager_service/storage/database.rs @@ -23,7 +23,7 @@ use crate::output_manager_service::{ error::OutputManagerStorageError, service::Balance, - storage::models::{DbUnblindedOutput, KnownOneSidedPaymentScript}, + storage::models::{DbUnblindedOutput, KnownOneSidedPaymentScript, OutputStatus}, TxId, }; use aes_gcm::Aes256Gcm; @@ -135,6 +135,7 @@ pub enum DbKey { KeyManagerState, InvalidOutputs, KnownOneSidedPaymentScripts, + OutputsByTxIdAndStatus(TxId, OutputStatus), } #[derive(Debug)] @@ -149,6 +150,7 @@ pub enum DbValue { KeyManagerState(KeyManagerState), KnownOneSidedPaymentScripts(Vec), AnyOutput(Box), + AnyOutputs(Vec), } pub enum DbKeyValuePair { @@ -158,6 +160,7 @@ pub enum DbKeyValuePair { PendingTransactionOutputs(TxId, Box), 
KeyManagerState(KeyManagerState), KnownOneSidedPaymentScripts(KnownOneSidedPaymentScript), + UpdateOutputStatus(Commitment, OutputStatus), } pub enum WriteOperation { @@ -700,10 +703,7 @@ where T: OutputManagerBackend + 'static let db_clone = self.db.clone(); tokio::task::spawn_blocking(move || { match db_clone.write(WriteOperation::Remove(DbKey::AnyOutputByCommitment(commitment.clone()))) { - Ok(None) => log_error( - DbKey::AnyOutputByCommitment(commitment.clone()), - OutputManagerStorageError::ValueNotFound, - ), + Ok(None) => Ok(()), Ok(Some(DbValue::AnyOutput(_))) => Ok(()), Ok(Some(other)) => unexpected_result(DbKey::AnyOutputByCommitment(commitment.clone()), other), Err(e) => log_error(DbKey::AnyOutputByCommitment(commitment), e), @@ -713,6 +713,48 @@ where T: OutputManagerBackend + 'static .map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??; Ok(()) } + + /// Check if a single cancelled inbound output exists that matches this TxID, if it does then return its status to + /// EncumberedToBeReceived + pub async fn reinstate_inbound_output(&self, tx_id: TxId) -> Result<(), OutputManagerStorageError> { + let db_clone = self.db.clone(); + let outputs = tokio::task::spawn_blocking(move || { + match db_clone.fetch(&DbKey::OutputsByTxIdAndStatus(tx_id, OutputStatus::CancelledInbound)) { + Ok(None) => Err(OutputManagerStorageError::ValueNotFound), + Ok(Some(DbValue::AnyOutputs(o))) => Ok(o), + Ok(Some(other)) => unexpected_result( + DbKey::OutputsByTxIdAndStatus(tx_id, OutputStatus::CancelledInbound), + other, + ), + Err(e) => log_error(DbKey::OutputsByTxIdAndStatus(tx_id, OutputStatus::CancelledInbound), e), + } + }) + .await + .map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string())) + .and_then(|inner_result| inner_result)?; + + if outputs.len() != 1 { + return Err(OutputManagerStorageError::UnexpectedResult( + "There should be only 1 output for a cancelled inbound transaction but more were 
found".to_string(), + )); + } + let db_clone2 = self.db.clone(); + + tokio::task::spawn_blocking(move || { + db_clone2.write(WriteOperation::Insert(DbKeyValuePair::UpdateOutputStatus( + outputs + .first() + .expect("Must be only one element in outputs") + .commitment + .clone(), + OutputStatus::EncumberedToBeReceived, + ))) + }) + .await + .map_err(|err| OutputManagerStorageError::BlockingTaskSpawnError(err.to_string()))??; + + Ok(()) + } } fn unexpected_result(req: DbKey, res: DbValue) -> Result { @@ -737,6 +779,7 @@ impl Display for DbKey { DbKey::TimeLockedUnspentOutputs(_t) => f.write_str(&"Timelocked Outputs"), DbKey::KnownOneSidedPaymentScripts => f.write_str(&"Known claiming scripts"), DbKey::AnyOutputByCommitment(_) => f.write_str(&"AnyOutputByCommitment"), + DbKey::OutputsByTxIdAndStatus(_, _) => f.write_str(&"OutputsByTxIdAndStatus"), } } } @@ -754,6 +797,7 @@ impl Display for DbValue { DbValue::InvalidOutputs(_) => f.write_str("Invalid Outputs"), DbValue::KnownOneSidedPaymentScripts(_) => f.write_str(&"Known claiming scripts"), DbValue::AnyOutput(_) => f.write_str(&"Any Output"), + DbValue::AnyOutputs(_) => f.write_str(&"Any Outputs"), } } } diff --git a/base_layer/wallet/src/output_manager_service/storage/models.rs b/base_layer/wallet/src/output_manager_service/storage/models.rs index f276f1d2df..dd36eb6934 100644 --- a/base_layer/wallet/src/output_manager_service/storage/models.rs +++ b/base_layer/wallet/src/output_manager_service/storage/models.rs @@ -105,3 +105,14 @@ impl PartialEq for KnownOneSidedPaymentScript { self.script_hash == other.script_hash } } + +/// The status of a given output +#[derive(Copy, Clone, Debug, PartialEq)] +pub enum OutputStatus { + Unspent, + Spent, + EncumberedToBeReceived, + EncumberedToBeSpent, + Invalid, + CancelledInbound, +} diff --git a/base_layer/wallet/src/output_manager_service/storage/sqlite_db.rs b/base_layer/wallet/src/output_manager_service/storage/sqlite_db.rs index df5b2c89da..665c408bdf 100644 --- 
a/base_layer/wallet/src/output_manager_service/storage/sqlite_db.rs +++ b/base_layer/wallet/src/output_manager_service/storage/sqlite_db.rs @@ -33,7 +33,7 @@ use crate::{ PendingTransactionOutputs, WriteOperation, }, - models::{DbUnblindedOutput, KnownOneSidedPaymentScript}, + models::{DbUnblindedOutput, KnownOneSidedPaymentScript, OutputStatus}, }, TxId, }, @@ -105,6 +105,7 @@ impl OutputManagerSqliteDatabase { } } impl OutputManagerBackend for OutputManagerSqliteDatabase { + #[allow(clippy::cognitive_complexity)] fn fetch(&self, key: &DbKey) -> Result, OutputManagerStorageError> { let conn = self.database_connection.acquire_lock(); @@ -135,6 +136,7 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { None }, }, + DbKey::AnyOutputByCommitment(commitment) => { match OutputSql::find_by_commitment(&commitment.to_vec(), &(*conn)) { Ok(mut o) => { @@ -173,6 +175,18 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { None }, }, + DbKey::OutputsByTxIdAndStatus(tx_id, status) => { + let mut outputs = OutputSql::find_by_tx_id_and_status(*tx_id, *status, &(*conn))?; + for o in outputs.iter_mut() { + self.decrypt_if_necessary(o)?; + } + Some(DbValue::AnyOutputs( + outputs + .iter() + .map(|o| DbUnblindedOutput::try_from(o.clone())) + .collect::, _>>()?, + )) + }, DbKey::UnspentOutputs => { let mut outputs = OutputSql::index_status(OutputStatus::Unspent, &(*conn))?; for o in outputs.iter_mut() { @@ -273,6 +287,7 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { Ok(result) } + #[allow(clippy::cognitive_complexity)] fn write(&self, op: WriteOperation) -> Result, OutputManagerStorageError> { let conn = self.database_connection.acquire_lock(); @@ -337,6 +352,20 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { self.encrypt_if_necessary(&mut script_sql)?; script_sql.commit(&(*conn))? 
}, + DbKeyValuePair::UpdateOutputStatus(commitment, status) => { + let output = OutputSql::find_by_commitment(&commitment.to_vec(), &(*conn))?; + output.update( + UpdateOutput { + status: Some(status), + tx_id: None, + spending_key: None, + script_private_key: None, + metadata_signature_nonce: None, + metadata_signature_u_key: None, + }, + &(*conn), + )?; + }, }, WriteOperation::Remove(k) => match k { DbKey::SpentOutput(s) => match OutputSql::find_status(&s.to_vec(), OutputStatus::Spent, &(*conn)) { @@ -409,6 +438,7 @@ impl OutputManagerBackend for OutputManagerSqliteDatabase { DbKey::InvalidOutputs => return Err(OutputManagerStorageError::OperationNotSupported), DbKey::TimeLockedUnspentOutputs(_) => return Err(OutputManagerStorageError::OperationNotSupported), DbKey::KnownOneSidedPaymentScripts => return Err(OutputManagerStorageError::OperationNotSupported), + DbKey::OutputsByTxIdAndStatus(_, _) => return Err(OutputManagerStorageError::OperationNotSupported), }, } @@ -840,17 +870,6 @@ fn pending_transaction_outputs_from_sql_outputs( }) } -/// The status of a given output -#[derive(PartialEq)] -enum OutputStatus { - Unspent, - Spent, - EncumberedToBeReceived, - EncumberedToBeSpent, - Invalid, - CancelledInbound, -} - impl TryFrom for OutputStatus { type Error = OutputManagerStorageError; @@ -1011,6 +1030,17 @@ impl OutputSql { Ok(request.first::(conn)?) } + pub fn find_by_tx_id_and_status( + tx_id: TxId, + status: OutputStatus, + conn: &SqliteConnection, + ) -> Result, OutputManagerStorageError> { + Ok(outputs::table + .filter(outputs::tx_id.eq(Some(tx_id as i64))) + .filter(outputs::status.eq(status as i32)) + .load(conn)?) + } + /// Find outputs via tx_id that are encumbered. Any outputs that are encumbered cannot be marked as spent. 
pub fn find_by_tx_id_and_encumbered( tx_id: TxId, @@ -1117,10 +1147,10 @@ impl TryFrom for DbUnblindedOutput { ); OutputManagerStorageError::ConversionError })?, - Some(OutputFeatures { + OutputFeatures { flags: OutputFlags::from_bits(o.flags as u8).ok_or(OutputManagerStorageError::ConversionError)?, maturity: o.maturity as u64, - }), + }, TariScript::from_bytes(o.script.as_slice())?, ExecutionStack::from_bytes(o.input_data.as_slice())?, PrivateKey::from_vec(&o.script_private_key).map_err(|_| { diff --git a/base_layer/wallet/src/output_manager_service/tasks/txo_validation_task.rs b/base_layer/wallet/src/output_manager_service/tasks/txo_validation_task.rs index 0eebf7b6db..e3e022e4fb 100644 --- a/base_layer/wallet/src/output_manager_service/tasks/txo_validation_task.rs +++ b/base_layer/wallet/src/output_manager_service/tasks/txo_validation_task.rs @@ -264,7 +264,9 @@ where TBackend: OutputManagerBackend + 'static let mut client = match base_node_connection .connect_rpc_using_builder( - BaseNodeWalletRpcClient::builder().with_deadline(self.resources.config.base_node_query_timeout), + BaseNodeWalletRpcClient::builder() + .with_deadline(self.resources.config.base_node_query_timeout) + .with_handshake_timeout(self.resources.config.base_node_query_timeout), ) .await { diff --git a/base_layer/wallet/src/testnet_utils.rs b/base_layer/wallet/src/testnet_utils.rs deleted file mode 100644 index 1f6e29da2f..0000000000 --- a/base_layer/wallet/src/testnet_utils.rs +++ /dev/null @@ -1,859 +0,0 @@ -// Copyright 2019. The Tari Project -// -// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the -// following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following -// disclaimer. -// -// 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the -// following disclaimer in the documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote -// products derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use crate::{ - contacts_service::storage::{ - database::{Contact, ContactsBackend}, - sqlite_db::ContactsServiceSqliteDatabase, - }, - error::{WalletError, WalletStorageError}, - output_manager_service::{ - storage::{database::OutputManagerBackend, sqlite_db::OutputManagerSqliteDatabase}, - TxId, - }, - storage::{ - database::{DbKeyValuePair, WalletBackend, WalletDatabase, WriteOperation}, - sqlite_db::WalletSqliteDatabase, - }, - test_utils::make_wallet_databases, - transaction_service::{ - handle::TransactionEvent, - storage::{ - database::TransactionBackend, - models::{CompletedTransaction, TransactionDirection, TransactionStatus}, - sqlite_db::TransactionServiceSqliteDatabase, - }, - }, - Wallet, - WalletConfig, -}; -use chrono::{Duration as ChronoDuration, Utc}; -use futures::{FutureExt, StreamExt}; -use log::*; -use rand::{rngs::OsRng, CryptoRng, Rng, RngCore}; -use std::{ - path::{Path, PathBuf}, - sync::Arc, - time::Duration, -}; -use tari_common_types::chain_metadata::ChainMetadata; -use tari_comms::{ - multiaddr::Multiaddr, - peer_manager::{NodeIdentity, PeerFeatures}, - transports::MemoryTransport, - types::{CommsPublicKey, CommsSecretKey}, -}; -use tari_comms_dht::DhtConfig; -use tari_core::transactions::{ - helpers::{create_unblinded_output, TestParams as TestParamsHelpers}, - tari_amount::MicroTari, - transaction::{OutputFeatures, Transaction, TransactionInput, UnblindedOutput}, - types::{BlindingFactor, CryptoFactories, PrivateKey, PublicKey}, -}; -use tari_crypto::{ - keys::{PublicKey as PublicKeyTrait, SecretKey as SecretKeyTrait}, - script, - tari_utilities::hex::Hex, -}; -use tari_p2p::{initialization::CommsConfig, transport::TransportType, Network}; -use tari_shutdown::{Shutdown, ShutdownSignal}; -use tari_test_utils::random; -use tokio::{runtime::Handle, time::delay_for}; - -// Used to generate test wallet data - -const LOG_TARGET: &str = "wallet::test_utils"; - -pub struct TestParams { - pub spend_key: PrivateKey, - pub change_key: 
PrivateKey, - pub offset: PrivateKey, - pub nonce: PrivateKey, - pub public_nonce: PublicKey, -} -impl TestParams { - pub fn new(rng: &mut R) -> TestParams { - let r = PrivateKey::random(rng); - TestParams { - spend_key: PrivateKey::random(rng), - change_key: PrivateKey::random(rng), - offset: PrivateKey::random(rng), - public_nonce: PublicKey::from_secret_key(&r), - nonce: r, - } - } -} -pub fn make_input(val: MicroTari, factories: &CryptoFactories) -> (TransactionInput, UnblindedOutput) { - let test_params = TestParamsHelpers::new(); - let utxo = create_unblinded_output(script!(Nop), OutputFeatures::default(), test_params, val); - ( - utxo.as_transaction_input(&factories.commitment) - .expect("Should be able to make transaction input"), - utxo, - ) -} - -/// Create a wallet for testing purposes -pub async fn create_wallet( - public_address: Multiaddr, - datastore_path: PathBuf, - shutdown_signal: ShutdownSignal, -) -> Wallet< - WalletSqliteDatabase, - TransactionServiceSqliteDatabase, - OutputManagerSqliteDatabase, - ContactsServiceSqliteDatabase, -> { - let factories = CryptoFactories::default(); - - let node_identity = Arc::new(NodeIdentity::new( - CommsSecretKey::random(&mut OsRng), - public_address.clone(), - PeerFeatures::COMMUNICATION_NODE, - )); - let comms_config = CommsConfig { - network: Network::Weatherwax, - transport_type: TransportType::Memory { - listener_address: public_address, - }, - auxilary_tcp_listener_address: None, - node_identity, - datastore_path: datastore_path.clone(), - peer_database_name: random::string(8), - max_concurrent_inbound_tasks: 100, - outbound_buffer_size: 100, - user_agent: "/tari/wallet/test".to_string(), - dht: DhtConfig { - discovery_request_timeout: Duration::from_secs(30), - allow_test_addresses: true, - ..Default::default() - }, - allow_test_addresses: true, - listener_liveness_allowlist_cidrs: Vec::new(), - listener_liveness_max_sessions: 0, - dns_seeds: Default::default(), - dns_seeds_name_server: 
"1.1.1.1:53".parse().unwrap(), - dns_seeds_use_dnssec: false, - peer_seeds: Default::default(), - }; - - let config = WalletConfig::new( - comms_config, - factories, - None, - None, - Network::Weatherwax.into(), - None, - None, - None, - None, - ); - - let (db, backend, oms_backend, contacts_backend, _) = - make_wallet_databases(Some(datastore_path.to_str().unwrap().to_string())); - - let metadata = ChainMetadata::new(std::u64::MAX, Vec::new(), 0, 0, 0); - - db.write(WriteOperation::Insert(DbKeyValuePair::BaseNodeChainMetadata(metadata))) - .unwrap(); - Wallet::start( - config, - WalletDatabase::new(db), - backend, - oms_backend, - contacts_backend, - shutdown_signal, - None, - ) - .await - .expect("Could not create Wallet") -} - -pub fn get_next_memory_address() -> Multiaddr { - let port = MemoryTransport::acquire_next_memsocket_port(); - format!("/memory/{}", port).parse().unwrap() -} - -/// This function will generate a set of test data for the supplied wallet. Takes a few seconds to complete -pub async fn generate_wallet_test_data< - T: WalletBackend, - U: TransactionBackend, - V: OutputManagerBackend, - W: ContactsBackend, - P: AsRef, ->( - wallet: &mut Wallet, - data_path: P, - transaction_service_backend: U, -) -> Result<(), WalletError> { - let factories = CryptoFactories::default(); - let names = ["Alice", "Bob", "Carol", "Dave"]; - let private_keys = [ - "3264e7a05ff669c1b71f691ab181ba3dd915306114a26c4a84c8da1dc1c40209", - "fdad65858c7e7985168972f3117e31f7cee5a1d961fce690bd05a2a15ca6f00e", - "07beb0d0d1eef08c246b70da8b060f7f8e885f5c0f2fd04b10607dc744b5f502", - "bb2dcd0b477c8d709afe2547122a7199d6d4516bc6f35c2adb1a8afedbf97e0e", - ]; - - let messages: Vec = vec![ - "My half of dinner", - "Cheers", - "April's rent", - "Thanks for the Skywalker skin", - "Here you go", - "💰💰💰", - "For the 'Tacos' 😉", - "😀", - "My share of the movie tickets", - "Enjoy!", - "😎", - "Tickets!!", - "For the cab fare", - "👍👍", - "🥡", - ] - .iter() - .map(|i| (*i).to_string()) - 
.collect(); - let mut message_index = 0; - - let mut wallet_event_stream = wallet.transaction_service.get_event_stream_fused(); - - // Generate contacts - let mut generated_contacts = Vec::new(); - for i in 0..names.len() { - let secret_key = CommsSecretKey::from_hex(private_keys[i]).expect("Could not parse hex key"); - let public_key = CommsPublicKey::from_secret_key(&secret_key); - wallet - .contacts_service - .upsert_contact(Contact { - alias: names[i].to_string(), - public_key: public_key.clone(), - }) - .await?; - - let addr = get_next_memory_address(); - generated_contacts.push((secret_key, addr)); - } - let mut contacts = wallet.contacts_service.get_contacts().await?; - assert_eq!(contacts.len(), names.len()); - info!(target: LOG_TARGET, "Added test contacts to wallet"); - - // Generate outputs - let num_outputs = 75; - for i in 0..num_outputs { - let (_ti, uo) = make_input(MicroTari::from(5_000_000 + i * 35_000), &factories); - wallet.output_manager_service.add_output(uo).await?; - } - info!(target: LOG_TARGET, "Added test outputs to wallet"); - // Generate some Tx history - info!( - target: LOG_TARGET, - "Spinning up Alice wallet to generate test transactions" - ); - let alice_temp_dir = data_path.as_ref().join(random::string(8)); - let _ = std::fs::create_dir(&alice_temp_dir); - - let mut shutdown_a = Shutdown::new(); - let mut shutdown_b = Shutdown::new(); - let mut wallet_alice = create_wallet( - generated_contacts[0].1.clone(), - alice_temp_dir.clone(), - shutdown_a.to_signal(), - ) - .await; - let mut alice_event_stream = wallet_alice.transaction_service.get_event_stream_fused(); - contacts[0].public_key = wallet_alice.comms.node_identity().public_key().clone(); - - for i in 0..20 { - let (_ti, uo) = make_input(MicroTari::from(1_500_000 + i * 530_500), &factories); - wallet_alice.output_manager_service.add_output(uo).await?; - } - info!(target: LOG_TARGET, "Alice Wallet created"); - info!( - target: LOG_TARGET, - "Spinning up Bob wallet to generate 
test transactions" - ); - let bob_temp_dir = data_path.as_ref().join(random::string(8)); - let _ = std::fs::create_dir(&bob_temp_dir); - - let mut wallet_bob = create_wallet( - generated_contacts[1].1.clone(), - bob_temp_dir.clone(), - shutdown_b.to_signal(), - ) - .await; - let mut bob_event_stream = wallet_bob.transaction_service.get_event_stream_fused(); - contacts[1].public_key = wallet_bob.comms.node_identity().public_key().clone(); - - for i in 0..20 { - let (_ti, uo) = make_input(MicroTari::from(2_000_000 + i * i * 61_050), &factories); - wallet_bob.output_manager_service.add_output(uo).await?; - } - info!(target: LOG_TARGET, "Bob Wallet created"); - - let alice_peer = wallet_alice.comms.node_identity().to_peer(); - - wallet.comms.peer_manager().add_peer(alice_peer).await?; - - let bob_peer = wallet_bob.comms.node_identity().to_peer(); - - wallet.comms.peer_manager().add_peer(bob_peer).await?; - - wallet - .comms - .connectivity() - .dial_peer(wallet_alice.comms.node_identity().node_id().clone()) - .await - .unwrap(); - - wallet - .comms - .connectivity() - .dial_peer(wallet_bob.comms.node_identity().node_id().clone()) - .await - .unwrap(); - info!(target: LOG_TARGET, "Starting to execute test transactions"); - - // Grab the first 2 outbound tx_ids for later - let mut outbound_tx_ids = Vec::new(); - - // Completed TX - let tx_id = wallet - .transaction_service - .send_transaction( - contacts[0].public_key.clone(), - MicroTari::from(1_100_000), - MicroTari::from(100), - messages[message_index].clone(), - ) - .await?; - outbound_tx_ids.push(tx_id); - message_index = (message_index + 1) % messages.len(); - - let tx_id = wallet - .transaction_service - .send_transaction( - contacts[0].public_key.clone(), - MicroTari::from(2_010_500), - MicroTari::from(110), - messages[message_index].clone(), - ) - .await?; - outbound_tx_ids.push(tx_id); - message_index = (message_index + 1) % messages.len(); - - let mut delay = delay_for(Duration::from_secs(60)).fuse(); - let 
mut count = 0; - loop { - futures::select! { - event = alice_event_stream.select_next_some() => { - match &*event.unwrap() { - TransactionEvent::ReceivedTransaction(_) => { - count +=1; - }, - TransactionEvent::ReceivedFinalizedTransaction(_) => { - count +=1; - }, - _ => (), - } - if count >=4 { - break; - } - }, - () = delay => { - break; - }, - } - } - assert!(count >= 4, "Event waiting timed out before receiving expected events 1"); - - wallet - .transaction_service - .send_transaction( - contacts[0].public_key.clone(), - MicroTari::from(10_000_000), - MicroTari::from(110), - messages[message_index].clone(), - ) - .await?; - message_index = (message_index + 1) % messages.len(); - - wallet - .transaction_service - .send_transaction( - contacts[1].public_key.clone(), - MicroTari::from(3_441_000), - MicroTari::from(105), - messages[message_index].clone(), - ) - .await?; - message_index = (message_index + 1) % messages.len(); - - wallet - .transaction_service - .send_transaction( - contacts[1].public_key.clone(), - MicroTari::from(14_100_000), - MicroTari::from(100), - messages[message_index].clone(), - ) - .await?; - message_index = (message_index + 1) % messages.len(); - wallet - .transaction_service - .send_transaction( - contacts[0].public_key.clone(), - MicroTari::from(22_010_500), - MicroTari::from(110), - messages[message_index].clone(), - ) - .await?; - message_index = (message_index + 1) % messages.len(); - - wallet - .transaction_service - .send_transaction( - contacts[0].public_key.clone(), - MicroTari::from(17_000_000), - MicroTari::from(110), - messages[message_index].clone(), - ) - .await?; - message_index = (message_index + 1) % messages.len(); - - wallet - .transaction_service - .send_transaction( - contacts[1].public_key.clone(), - MicroTari::from(31_441_000), - MicroTari::from(105), - messages[message_index].clone(), - ) - .await?; - message_index = (message_index + 1) % messages.len(); - - wallet - .transaction_service - .send_transaction( - 
contacts[0].public_key.clone(), - MicroTari::from(12_100_000), - MicroTari::from(100), - messages[message_index].clone(), - ) - .await?; - message_index = (message_index + 1) % messages.len(); - wallet - .transaction_service - .send_transaction( - contacts[1].public_key.clone(), - MicroTari::from(28_010_500), - MicroTari::from(110), - messages[message_index].clone(), - ) - .await?; - message_index = (message_index + 1) % messages.len(); - - // Pending Outbound - let _ = wallet - .transaction_service - .send_transaction( - contacts[2].public_key.clone(), - MicroTari::from(2_500_000), - MicroTari::from(107), - messages[message_index].clone(), - ) - .await; - message_index = (message_index + 1) % messages.len(); - - let _ = wallet - .transaction_service - .send_transaction( - contacts[3].public_key.clone(), - MicroTari::from(3_512_000), - MicroTari::from(117), - messages[message_index].clone(), - ) - .await; - message_index = (message_index + 1) % messages.len(); - - let mut delay = delay_for(Duration::from_secs(60)).fuse(); - let mut count = 0; - loop { - futures::select! { - event = wallet_event_stream.select_next_some() => { - if let TransactionEvent::TransactionDirectSendResult(_,_) = &*event.unwrap() { - count+=1; - if count >= 10 { - break; - } - } - }, - () = delay => { - break; - }, - } - } - assert!( - count >= 10, - "Event waiting timed out before receiving expected events 2" - ); - - let mut delay = delay_for(Duration::from_secs(60)).fuse(); - let mut count = 0; - loop { - futures::select! 
{ - event = bob_event_stream.select_next_some() => { - match &*event.unwrap() { - TransactionEvent::ReceivedTransaction(_) => { - count+=1; - }, - TransactionEvent::ReceivedFinalizedTransaction(_) => { - count+=1; - }, - _ => (), - } - if count >= 8 { - break; - } - }, - () = delay => { - break; - }, - } - } - assert!(count >= 8, "Event waiting timed out before receiving expected events 3"); - - log::error!("Inbound Transactions starting"); - // Pending Inbound - wallet_alice - .transaction_service - .send_transaction( - wallet.comms.node_identity().public_key().clone(), - MicroTari::from(1_235_000), - MicroTari::from(117), - messages[message_index].clone(), - ) - .await?; - message_index = (message_index + 1) % messages.len(); - - wallet_alice - .transaction_service - .send_transaction( - wallet.comms.node_identity().public_key().clone(), - MicroTari::from(3_500_000), - MicroTari::from(117), - messages[message_index].clone(), - ) - .await?; - message_index = (message_index + 1) % messages.len(); - - wallet_alice - .transaction_service - .send_transaction( - wallet.comms.node_identity().public_key().clone(), - MicroTari::from(2_335_000), - MicroTari::from(117), - messages[message_index].clone(), - ) - .await?; - message_index = (message_index + 1) % messages.len(); - - wallet_bob - .transaction_service - .send_transaction( - wallet.comms.node_identity().public_key().clone(), - MicroTari::from(8_035_000), - MicroTari::from(117), - messages[message_index].clone(), - ) - .await?; - message_index = (message_index + 1) % messages.len(); - - wallet_bob - .transaction_service - .send_transaction( - wallet.comms.node_identity().public_key().clone(), - MicroTari::from(5_135_000), - MicroTari::from(117), - messages[message_index].clone(), - ) - .await?; - - let mut delay = delay_for(Duration::from_secs(60)).fuse(); - let mut count = 0; - loop { - futures::select! 
{ - event = wallet_event_stream.select_next_some() => { - if let TransactionEvent::ReceivedFinalizedTransaction(_) = &*event.unwrap() { - count+=1; - if count >= 5 { - break; - } - } - }, - () = delay => { - break; - }, - } - } - assert!(count >= 5, "Event waiting timed out before receiving expected events 4"); - - let txs = wallet.transaction_service.get_completed_transactions().await.unwrap(); - - let timestamps = vec![ - Utc::now() - .naive_utc() - .checked_sub_signed(ChronoDuration::seconds(60)) - .unwrap(), - Utc::now() - .naive_utc() - .checked_sub_signed(ChronoDuration::minutes(5)) - .unwrap(), - Utc::now() - .naive_utc() - .checked_sub_signed(ChronoDuration::minutes(11)) - .unwrap(), - Utc::now() - .naive_utc() - .checked_sub_signed(ChronoDuration::hours(2)) - .unwrap(), - Utc::now() - .naive_utc() - .checked_sub_signed(ChronoDuration::hours(3)) - .unwrap(), - Utc::now() - .naive_utc() - .checked_sub_signed(ChronoDuration::hours(8)) - .unwrap(), - Utc::now() - .naive_utc() - .checked_sub_signed(ChronoDuration::hours(27)) - .unwrap(), - Utc::now() - .naive_utc() - .checked_sub_signed(ChronoDuration::hours(34)) - .unwrap(), - Utc::now() - .naive_utc() - .checked_sub_signed(ChronoDuration::hours(51)) - .unwrap(), - Utc::now() - .naive_utc() - .checked_sub_signed(ChronoDuration::hours(59)) - .unwrap(), - Utc::now() - .naive_utc() - .checked_sub_signed(ChronoDuration::days(9)) - .unwrap() - .checked_sub_signed(ChronoDuration::hours(3)) - .unwrap(), - Utc::now() - .naive_utc() - .checked_sub_signed(ChronoDuration::days(10)) - .unwrap() - .checked_sub_signed(ChronoDuration::hours(6)) - .unwrap(), - Utc::now() - .naive_utc() - .checked_sub_signed(ChronoDuration::days(12)) - .unwrap() - .checked_sub_signed(ChronoDuration::hours(2)) - .unwrap(), - Utc::now() - .naive_utc() - .checked_sub_signed(ChronoDuration::days(15)) - .unwrap() - .checked_sub_signed(ChronoDuration::hours(2)) - .unwrap(), - Utc::now() - .naive_utc() - .checked_sub_signed(ChronoDuration::days(16)) 
- .unwrap() - .checked_sub_signed(ChronoDuration::hours(2)) - .unwrap(), - ]; - let mut timestamp_index = 0; - - for k in txs.keys() { - let _ = transaction_service_backend.update_completed_transaction_timestamp(*k, timestamps[timestamp_index]); - timestamp_index = (timestamp_index + 1) % timestamps.len(); - } - - // Broadcast a tx - - wallet - .transaction_service - .test_broadcast_transaction(outbound_tx_ids[0]) - .await - .unwrap(); - - // Mine a tx - wallet - .transaction_service - .test_mine_transaction(outbound_tx_ids[1]) - .await - .unwrap(); - - delay_for(Duration::from_secs(1)).await; - - shutdown_a.trigger().unwrap(); - shutdown_b.trigger().unwrap(); - wallet_alice.wait_until_shutdown().await; - wallet_bob.wait_until_shutdown().await; - - let _ = std::fs::remove_dir_all(&alice_temp_dir); - let _ = std::fs::remove_dir_all(&bob_temp_dir); - - info!(target: LOG_TARGET, "Finished generating test data"); - - Ok(()) -} - -/// This function is only available for testing and development by the client of LibWallet. It simulates a this node, -/// who sent a transaction out, accepting a reply to the Pending Outbound Transaction. 
That transaction then becomes a -/// CompletedTransaction with the Broadcast status indicating it is in a base node Mempool but not yet mined -pub async fn complete_sent_transaction< - T: WalletBackend, - U: TransactionBackend, - V: OutputManagerBackend, - W: ContactsBackend, ->( - wallet: &mut Wallet, - tx_id: TxId, -) -> Result<(), WalletError> { - let pending_outbound_tx = wallet.transaction_service.get_pending_outbound_transactions().await?; - match pending_outbound_tx.get(&tx_id) { - Some(p) => { - let completed_tx: CompletedTransaction = CompletedTransaction::new( - p.tx_id, - wallet.comms.node_identity().public_key().clone(), - p.destination_public_key.clone(), - p.amount, - p.fee, - Transaction::new( - Vec::new(), - Vec::new(), - Vec::new(), - BlindingFactor::default(), - BlindingFactor::default(), - ), - TransactionStatus::Completed, - p.message.clone(), - Utc::now().naive_utc(), - TransactionDirection::Outbound, - None, - ); - - wallet - .transaction_service - .test_complete_pending_transaction(completed_tx) - .await?; - }, - None => { - return Err(WalletError::WalletStorageError(WalletStorageError::UnexpectedResult( - "Pending outbound transaction does not exist".to_string(), - ))) - }, - } - - Ok(()) -} - -/// This function is only available for testing by the client of LibWallet. 
This function simulates an external -/// wallet sending a transaction to this wallet which will become a PendingInboundTransaction -pub async fn receive_test_transaction< - T: WalletBackend, - U: TransactionBackend, - V: OutputManagerBackend, - W: ContactsBackend, ->( - wallet: &mut Wallet, - handle: &Handle, -) -> Result<(), WalletError> { - let contacts = wallet.contacts_service.get_contacts().await.unwrap(); - let (_secret_key, mut public_key): (CommsSecretKey, CommsPublicKey) = PublicKey::random_keypair(&mut OsRng); - - if !contacts.is_empty() { - public_key = contacts[0].public_key.clone(); - } - - wallet - .transaction_service - .test_accept_transaction( - OsRng.next_u64(), - MicroTari::from(10_000 + OsRng.next_u64() % 101_000), - public_key, - handle, - ) - .await?; - - Ok(()) -} - -/// This function is only available for testing and development by the client of LibWallet. It simulates this node, -/// who received a prior inbound transaction, accepting the Finalized Completed transaction from the Sender. That -/// transaction then becomes a CompletedTransaction with the Broadcast status indicating it is in a base node Mempool -/// but not yet mined -pub async fn finalize_received_transaction< - T: WalletBackend, - U: TransactionBackend, - V: OutputManagerBackend, - W: ContactsBackend, ->( - wallet: &mut Wallet, - tx_id: TxId, -) -> Result<(), WalletError> { - wallet.transaction_service.test_finalize_transaction(tx_id).await?; - - Ok(()) -} - -/// This function is only available for testing and development by the client of LibWallet. This function will simulate -/// the event when a CompletedTransaction that is in the Complete status is broadcast to the Mempool and its status -/// moves to Broadcast. After this function is called the status of the CompletedTransaction becomes `Mined` and the -/// funds that were pending become spent and available respectively. 
-pub async fn broadcast_transaction< - T: WalletBackend, - U: TransactionBackend, - V: OutputManagerBackend, - W: ContactsBackend, ->( - wallet: &mut Wallet, - tx_id: TxId, -) -> Result<(), WalletError> { - wallet.transaction_service.test_broadcast_transaction(tx_id).await?; - - Ok(()) -} - -/// This function is only available for testing and development by the client of LibWallet. This function will simulate -/// the event when a CompletedTransaction that is in the Broadcast status, is in a mempool but not mined, beocmes -/// mined/confirmed. After this function is called the status of the CompletedTransaction becomes `Mined` and the funds -/// that were pending become spent and available respectively. -pub async fn mine_transaction( - wallet: &mut Wallet, - tx_id: TxId, -) -> Result<(), WalletError> { - wallet.transaction_service.test_mine_transaction(tx_id).await?; - - Ok(()) -} diff --git a/base_layer/wallet/src/transaction_service/config.rs b/base_layer/wallet/src/transaction_service/config.rs index 7d3faa4f33..b109b62969 100644 --- a/base_layer/wallet/src/transaction_service/config.rs +++ b/base_layer/wallet/src/transaction_service/config.rs @@ -38,6 +38,7 @@ pub struct TransactionServiceConfig { pub num_confirmations_required: u64, pub max_tx_query_batch_size: usize, pub transaction_routing_mechanism: TransactionRoutingMechanism, + pub transaction_event_channel_size: usize, } impl Default for TransactionServiceConfig { @@ -54,6 +55,7 @@ impl Default for TransactionServiceConfig { num_confirmations_required: 3, max_tx_query_batch_size: 5000, transaction_routing_mechanism: TransactionRoutingMechanism::default(), + transaction_event_channel_size: 1000, } } } diff --git a/base_layer/wallet/src/transaction_service/error.rs b/base_layer/wallet/src/transaction_service/error.rs index d7fc27243c..c197dd2024 100644 --- a/base_layer/wallet/src/transaction_service/error.rs +++ b/base_layer/wallet/src/transaction_service/error.rs @@ -80,7 +80,7 @@ pub enum 
TransactionServiceError { InvalidCompletedTransaction, #[error("No Base Node public keys are provided for Base chain broadcast and monitoring")] NoBaseNodeKeysProvided, - #[error("Error sending data to Protocol via register channels")] + #[error("Error sending data to Protocol via registered channels")] ProtocolChannelError, #[error("Transaction detected as rejected by mempool")] MempoolRejection, @@ -102,9 +102,6 @@ pub enum TransactionServiceError { TransactionStorageError(#[from] TransactionStorageError), #[error("Invalid message error: `{0}`")] InvalidMessageError(String), - #[cfg(feature = "test_harness")] - #[error("Test harness error: `{0}`")] - TestHarnessError(String), #[error("Transaction error: `{0}`")] TransactionError(#[from] TransactionError), #[error("Conversion error: `{0}`")] @@ -141,6 +138,8 @@ pub enum TransactionServiceError { MaximumAttemptsExceeded, #[error("Byte array error")] ByteArrayError(#[from] tari_crypto::tari_utilities::ByteArrayError), + #[error("Transaction Service Error: `{0}`")] + ServiceError(String), } #[derive(Debug, Error)] diff --git a/base_layer/wallet/src/transaction_service/handle.rs b/base_layer/wallet/src/transaction_service/handle.rs index 3bb156e78d..f34a5f667f 100644 --- a/base_layer/wallet/src/transaction_service/handle.rs +++ b/base_layer/wallet/src/transaction_service/handle.rs @@ -37,8 +37,6 @@ use tokio::sync::broadcast; use tower::Service; use crate::types::ValidationRetryStrategy; -#[cfg(feature = "test_harness")] -use tokio::runtime::Handle; /// API Request enum #[allow(clippy::large_enum_variant)] @@ -68,16 +66,6 @@ pub enum TransactionServiceRequest { SetNumConfirmationsRequired(u64), SetCompletedTransactionValidity(u64, bool), ValidateTransactions(ValidationRetryStrategy), - #[cfg(feature = "test_harness")] - CompletePendingOutboundTransaction(CompletedTransaction), - #[cfg(feature = "test_harness")] - FinalizePendingInboundTransaction(TxId), - #[cfg(feature = "test_harness")] - AcceptTestTransaction((TxId, 
MicroTari, CommsPublicKey, Handle)), - #[cfg(feature = "test_harness")] - MineTransaction(TxId), - #[cfg(feature = "test_harness")] - BroadcastTransaction(TxId), } impl fmt::Display for TransactionServiceRequest { @@ -117,20 +105,6 @@ impl fmt::Display for TransactionServiceRequest { Self::RestartBroadcastProtocols => f.write_str("RestartBroadcastProtocols"), Self::GetNumConfirmationsRequired => f.write_str("GetNumConfirmationsRequired"), Self::SetNumConfirmationsRequired(_) => f.write_str("SetNumConfirmationsRequired"), - #[cfg(feature = "test_harness")] - Self::CompletePendingOutboundTransaction(tx) => { - f.write_str(&format!("CompletePendingOutboundTransaction ({})", tx.tx_id)) - }, - #[cfg(feature = "test_harness")] - Self::FinalizePendingInboundTransaction(id) => { - f.write_str(&format!("FinalizePendingInboundTransaction ({})", id)) - }, - #[cfg(feature = "test_harness")] - Self::AcceptTestTransaction((id, _, _, _)) => f.write_str(&format!("AcceptTestTransaction ({})", id)), - #[cfg(feature = "test_harness")] - Self::MineTransaction(id) => f.write_str(&format!("MineTransaction ({})", id)), - #[cfg(feature = "test_harness")] - Self::BroadcastTransaction(id) => f.write_str(&format!("BroadcastTransaction ({})", id)), Self::GetAnyTransaction(t) => f.write_str(&format!("GetAnyTransaction({})", t)), TransactionServiceRequest::ValidateTransactions(t) => f.write_str(&format!("ValidateTransaction({:?})", t)), TransactionServiceRequest::SetCompletedTransactionValidity(tx_id, s) => f.write_str(&format!( @@ -164,16 +138,6 @@ pub enum TransactionServiceResponse { NumConfirmationsSet, ValidationStarted(u64), CompletedTransactionValidityChanged, - #[cfg(feature = "test_harness")] - CompletedPendingTransaction, - #[cfg(feature = "test_harness")] - FinalizedPendingInboundTransaction, - #[cfg(feature = "test_harness")] - AcceptedTestTransaction, - #[cfg(feature = "test_harness")] - TransactionMined, - #[cfg(feature = "test_harness")] - TransactionBroadcast, } /// Events that 
can be published on the Text Message Service Event Stream @@ -568,80 +532,4 @@ impl TransactionServiceHandle { _ => Err(TransactionServiceError::UnexpectedApiResponse), } } - - #[cfg(feature = "test_harness")] - pub async fn test_complete_pending_transaction( - &mut self, - completed_tx: CompletedTransaction, - ) -> Result<(), TransactionServiceError> { - match self - .handle - .call(TransactionServiceRequest::CompletePendingOutboundTransaction( - completed_tx, - )) - .await?? - { - TransactionServiceResponse::CompletedPendingTransaction => Ok(()), - _ => Err(TransactionServiceError::UnexpectedApiResponse), - } - } - - #[cfg(feature = "test_harness")] - pub async fn test_accept_transaction( - &mut self, - tx_id: TxId, - amount: MicroTari, - source_public_key: CommsPublicKey, - handle: &Handle, - ) -> Result<(), TransactionServiceError> { - match self - .handle - .call(TransactionServiceRequest::AcceptTestTransaction(( - tx_id, - amount, - source_public_key, - handle.clone(), - ))) - .await?? - { - TransactionServiceResponse::AcceptedTestTransaction => Ok(()), - _ => Err(TransactionServiceError::UnexpectedApiResponse), - } - } - - #[cfg(feature = "test_harness")] - pub async fn test_finalize_transaction(&mut self, tx_id: TxId) -> Result<(), TransactionServiceError> { - match self - .handle - .call(TransactionServiceRequest::FinalizePendingInboundTransaction(tx_id)) - .await?? - { - TransactionServiceResponse::FinalizedPendingInboundTransaction => Ok(()), - _ => Err(TransactionServiceError::UnexpectedApiResponse), - } - } - - #[cfg(feature = "test_harness")] - pub async fn test_broadcast_transaction(&mut self, tx_id: TxId) -> Result<(), TransactionServiceError> { - match self - .handle - .call(TransactionServiceRequest::BroadcastTransaction(tx_id)) - .await?? 
- { - TransactionServiceResponse::TransactionBroadcast => Ok(()), - _ => Err(TransactionServiceError::UnexpectedApiResponse), - } - } - - #[cfg(feature = "test_harness")] - pub async fn test_mine_transaction(&mut self, tx_id: TxId) -> Result<(), TransactionServiceError> { - match self - .handle - .call(TransactionServiceRequest::MineTransaction(tx_id)) - .await?? - { - TransactionServiceResponse::TransactionMined => Ok(()), - _ => Err(TransactionServiceError::UnexpectedApiResponse), - } - } } diff --git a/base_layer/wallet/src/transaction_service/mod.rs b/base_layer/wallet/src/transaction_service/mod.rs index 2ee17b43e1..3efbade3c3 100644 --- a/base_layer/wallet/src/transaction_service/mod.rs +++ b/base_layer/wallet/src/transaction_service/mod.rs @@ -172,7 +172,7 @@ where T: TransactionBackend + 'static let base_node_response_stream = self.base_node_response_stream(); let transaction_cancelled_stream = self.transaction_cancelled_stream(); - let (publisher, _) = broadcast::channel(200); + let (publisher, _) = broadcast::channel(self.config.transaction_event_channel_size); let transaction_handle = TransactionServiceHandle::new(sender, publisher.clone()); diff --git a/base_layer/wallet/src/transaction_service/protocols/transaction_broadcast_protocol.rs b/base_layer/wallet/src/transaction_service/protocols/transaction_broadcast_protocol.rs index 92ad42c789..05191c8f8d 100644 --- a/base_layer/wallet/src/transaction_service/protocols/transaction_broadcast_protocol.rs +++ b/base_layer/wallet/src/transaction_service/protocols/transaction_broadcast_protocol.rs @@ -230,7 +230,8 @@ where TBackend: TransactionBackend + 'static let mut client = match base_node_connection .connect_rpc_using_builder( BaseNodeWalletRpcClient::builder() - .with_deadline(self.resources.config.broadcast_monitoring_timeout), + .with_deadline(self.resources.config.broadcast_monitoring_timeout) + .with_handshake_timeout(self.resources.config.broadcast_monitoring_timeout), ) .await { diff --git 
a/base_layer/wallet/src/transaction_service/protocols/transaction_coinbase_monitoring_protocol.rs b/base_layer/wallet/src/transaction_service/protocols/transaction_coinbase_monitoring_protocol.rs index aaf8b9dd3c..65fb58f601 100644 --- a/base_layer/wallet/src/transaction_service/protocols/transaction_coinbase_monitoring_protocol.rs +++ b/base_layer/wallet/src/transaction_service/protocols/transaction_coinbase_monitoring_protocol.rs @@ -121,17 +121,23 @@ where TBackend: TransactionBackend + 'static let completed_tx = match self.resources.db.get_completed_transaction(self.tx_id).await { Ok(tx) => tx, Err(e) => { - error!( + info!( target: LOG_TARGET, - "Cannot find Completed Transaction (TxId: {}) referred to by this Coinbase Monitoring \ - Protocol: {:?}", - self.tx_id, - e + "Cannot find Coinbase Transaction (TxId: {}) likely due to being cancelled: {}", self.tx_id, e ); - return Err(TransactionServiceProtocolError::new( - self.tx_id, - TransactionServiceError::TransactionDoesNotExistError, - )); + let _ = self + .resources + .event_publisher + .send(Arc::new(TransactionEvent::TransactionCancelled(self.tx_id))) + .map_err(|e| { + trace!( + target: LOG_TARGET, + "Error sending event, usually because there are no subscribers: {:?}", + e + ); + e + }); + return Ok(self.tx_id); }, }; debug!( @@ -278,7 +284,9 @@ where TBackend: TransactionBackend + 'static }; let mut client = match base_node_connection .connect_rpc_using_builder( - BaseNodeWalletRpcClient::builder().with_deadline(self.resources.config.chain_monitoring_timeout), + BaseNodeWalletRpcClient::builder() + .with_deadline(self.resources.config.chain_monitoring_timeout) + .with_handshake_timeout(self.resources.config.chain_monitoring_timeout), ) .await { @@ -332,9 +340,7 @@ where TBackend: TransactionBackend + 'static } } } - result = self.query_coinbase_transaction( - signature.clone(), completed_tx.clone(), &mut client - ).fuse() => { + result = self.query_coinbase_transaction(signature.clone(), 
completed_tx.clone(), &mut client).fuse() => { let (coinbase_kernel_found, metadata) = match result { Ok(r) => r, _ => (false, None), diff --git a/base_layer/wallet/src/transaction_service/protocols/transaction_send_protocol.rs b/base_layer/wallet/src/transaction_service/protocols/transaction_send_protocol.rs index ff0c19c58c..cf30a7f928 100644 --- a/base_layer/wallet/src/transaction_service/protocols/transaction_send_protocol.rs +++ b/base_layer/wallet/src/transaction_service/protocols/transaction_send_protocol.rs @@ -29,7 +29,7 @@ use log::*; use crate::transaction_service::{ config::TransactionRoutingMechanism, error::{TransactionServiceError, TransactionServiceProtocolError}, - handle::TransactionEvent, + handle::{TransactionEvent, TransactionServiceResponse}, service::TransactionServiceResources, storage::{ database::TransactionBackend, @@ -53,6 +53,7 @@ use tari_core::transactions::{ transaction_protocol::{proto, recipient::RecipientSignedMessage, sender::SingleRoundSenderData}, SenderTransactionProtocol, }; +use tari_crypto::script; use tari_p2p::tari_message::TariMessageType; use tokio::time::delay_for; @@ -71,8 +72,9 @@ where TBackend: TransactionBackend + 'static id: u64, dest_pubkey: CommsPublicKey, amount: MicroTari, + fee_per_gram: MicroTari, message: String, - sender_protocol: SenderTransactionProtocol, + service_request_reply_channel: Option>>, stage: TransactionSendProtocolStage, resources: TransactionServiceResources, transaction_reply_receiver: Option>, @@ -90,8 +92,11 @@ where TBackend: TransactionBackend + 'static cancellation_receiver: oneshot::Receiver<()>, dest_pubkey: CommsPublicKey, amount: MicroTari, + fee_per_gram: MicroTari, message: String, - sender_protocol: SenderTransactionProtocol, + service_request_reply_channel: Option< + oneshot::Sender>, + >, stage: TransactionSendProtocolStage, ) -> Self { Self { @@ -101,8 +106,9 @@ where TBackend: TransactionBackend + 'static cancellation_receiver: Some(cancellation_receiver), dest_pubkey, 
amount, + fee_per_gram, message, - sender_protocol, + service_request_reply_channel, stage, } } @@ -116,7 +122,8 @@ where TBackend: TransactionBackend + 'static match self.stage { TransactionSendProtocolStage::Initial => { - self.initial_send_transaction().await?; + let sender_protocol = self.prepare_transaction().await?; + self.initial_send_transaction(sender_protocol).await?; self.wait_for_reply().await?; }, TransactionSendProtocolStage::WaitForReply => { @@ -127,8 +134,64 @@ where TBackend: TransactionBackend + 'static Ok(self.id) } - async fn initial_send_transaction(&mut self) -> Result<(), TransactionServiceProtocolError> { - if !self.sender_protocol.is_single_round_message_ready() { + async fn prepare_transaction(&mut self) -> Result { + let service_reply_channel = match self.service_request_reply_channel.take() { + Some(src) => src, + None => { + error!( + target: LOG_TARGET, + "Service Reply Channel not provided for new Send Transaction Protocol" + ); + return Err(TransactionServiceProtocolError::new( + self.id, + TransactionServiceError::ProtocolChannelError, + )); + }, + }; + + match self + .resources + .output_manager_service + .prepare_transaction_to_send( + self.id, + self.amount, + self.fee_per_gram, + None, + self.message.clone(), + script!(Nop), + ) + .await + { + Ok(sp) => { + let _ = service_reply_channel + .send(Ok(TransactionServiceResponse::TransactionSent(self.id))) + .map_err(|e| { + warn!(target: LOG_TARGET, "Failed to send service reply"); + e + }); + Ok(sp) + }, + Err(e) => { + let error_string = e.to_string(); + let _ = service_reply_channel + .send(Err(TransactionServiceError::from(e))) + .map_err(|e| { + warn!(target: LOG_TARGET, "Failed to send service reply"); + e + }); + Err(TransactionServiceProtocolError::new( + self.id, + TransactionServiceError::ServiceError(error_string), + )) + }, + } + } + + async fn initial_send_transaction( + &mut self, + mut sender_protocol: SenderTransactionProtocol, + ) -> Result<(), 
TransactionServiceProtocolError> { + if !sender_protocol.is_single_round_message_ready() { error!(target: LOG_TARGET, "Sender Transaction Protocol is in an invalid state"); return Err(TransactionServiceProtocolError::new( self.id, @@ -136,8 +199,7 @@ where TBackend: TransactionBackend + 'static )); } - let msg = self - .sender_protocol + let msg = sender_protocol .build_single_round_message() .map_err(|e| TransactionServiceProtocolError::new(self.id, TransactionServiceError::from(e)))?; let tx_id = msg.tx_id; @@ -161,8 +223,7 @@ where TBackend: TransactionBackend + 'static .await .map_err(|e| TransactionServiceProtocolError::new(self.id, TransactionServiceError::from(e)))?; - let fee = self - .sender_protocol + let fee = sender_protocol .get_fee_amount() .map_err(|e| TransactionServiceProtocolError::new(self.id, TransactionServiceError::from(e)))?; let outbound_tx = OutboundTransaction::new( @@ -170,7 +231,7 @@ where TBackend: TransactionBackend + 'static self.dest_pubkey.clone(), self.amount, fee, - self.sender_protocol.clone(), + sender_protocol.clone(), TransactionStatus::Pending, self.message.clone(), Utc::now().naive_utc(), diff --git a/base_layer/wallet/src/transaction_service/protocols/transaction_validation_protocol.rs b/base_layer/wallet/src/transaction_service/protocols/transaction_validation_protocol.rs index d94e4a22df..d0b2f7f6ac 100644 --- a/base_layer/wallet/src/transaction_service/protocols/transaction_validation_protocol.rs +++ b/base_layer/wallet/src/transaction_service/protocols/transaction_validation_protocol.rs @@ -258,7 +258,11 @@ where TBackend: TransactionBackend + 'static }; let mut client = match base_node_connection - .connect_rpc_using_builder(BaseNodeWalletRpcClient::builder().with_deadline(self.timeout)) + .connect_rpc_using_builder( + BaseNodeWalletRpcClient::builder() + .with_deadline(self.timeout) + .with_handshake_timeout(self.timeout), + ) .await { Ok(c) => c, diff --git a/base_layer/wallet/src/transaction_service/service.rs 
b/base_layer/wallet/src/transaction_service/service.rs index 2068d96eac..c30fb7f412 100644 --- a/base_layer/wallet/src/transaction_service/service.rs +++ b/base_layer/wallet/src/transaction_service/service.rs @@ -65,8 +65,6 @@ use std::{ }; use tari_comms::{connectivity::ConnectivityRequester, peer_manager::NodeIdentity, types::CommsPublicKey}; use tari_comms_dht::outbound::OutboundMessageRequester; -#[cfg(feature = "test_harness")] -use tari_core::transactions::{tari_amount::uT, types::BlindingFactor}; use tari_core::{ crypto::keys::SecretKey, proto::base_node as base_node_proto, @@ -286,20 +284,17 @@ where let (request, reply_tx) = request_context.split(); let event = format!("Handling Service API Request ({})", request); trace!(target: LOG_TARGET, "{}", event); - let response = self.handle_request(request, + let _ = self.handle_request(request, &mut send_transaction_protocol_handles, &mut receive_transaction_protocol_handles, &mut transaction_broadcast_protocol_handles, &mut coinbase_transaction_monitoring_protocol_handles, &mut transaction_validation_protocol_handles, + reply_tx, ).await.map_err(|e| { warn!(target: LOG_TARGET, "Error handling request: {:?}", e); e }); - let _ = reply_tx.send(response).map_err(|e| { - warn!(target: LOG_TARGET, "Failed to send reply"); - e - }); let finish = Instant::now(); trace!(target: LOG_TARGET, "{}, processed in {}ms", @@ -377,7 +372,11 @@ where "Handling Transaction Finalized Message, Trace: {}", msg.dht_header.message_tag.as_value() ); - let result = self.accept_finalized_transaction(origin_public_key, inner_msg, ).await; + let result = self.accept_finalized_transaction( + origin_public_key, + inner_msg, + &mut receive_transaction_protocol_handles, + ).await; match result { Err(TransactionServiceError::TransactionDoesNotExistError) => { @@ -510,20 +509,26 @@ where transaction_validation_join_handles: &mut FuturesUnordered< JoinHandle>, >, - ) -> Result { + reply_channel: oneshot::Sender>, + ) -> Result<(), 
TransactionServiceError> { + let mut reply_channel = Some(reply_channel); + trace!(target: LOG_TARGET, "Handling Service Request: {}", request); - match request { - TransactionServiceRequest::SendTransaction(dest_pubkey, amount, fee_per_gram, message) => self - .send_transaction( + let response = match request { + TransactionServiceRequest::SendTransaction(dest_pubkey, amount, fee_per_gram, message) => { + let rp = reply_channel.take().expect("Cannot be missing"); + self.send_transaction( dest_pubkey, amount, fee_per_gram, message, send_transaction_join_handles, transaction_broadcast_join_handles, + rp, ) - .await - .map(TransactionServiceResponse::TransactionSent), + .await?; + return Ok(()); + }, TransactionServiceRequest::SendOneSidedTransaction(dest_pubkey, amount, fee_per_gram, message) => self .send_one_sided_transaction( dest_pubkey, @@ -591,33 +596,6 @@ where .generate_coinbase_transaction(reward, fees, block_height, coinbase_monitoring_join_handles) .await .map(|tx| TransactionServiceResponse::CoinbaseTransactionGenerated(Box::new(tx))), - #[cfg(feature = "test_harness")] - TransactionServiceRequest::CompletePendingOutboundTransaction(completed_transaction) => { - self.complete_pending_outbound_transaction(completed_transaction) - .await?; - Ok(TransactionServiceResponse::CompletedPendingTransaction) - }, - #[cfg(feature = "test_harness")] - TransactionServiceRequest::FinalizePendingInboundTransaction(tx_id) => { - self.finalize_received_test_transaction(tx_id).await?; - Ok(TransactionServiceResponse::FinalizedPendingInboundTransaction) - }, - #[cfg(feature = "test_harness")] - TransactionServiceRequest::AcceptTestTransaction((tx_id, amount, source_pubkey, handle)) => { - self.receive_test_transaction(tx_id, amount, source_pubkey, handle) - .await?; - Ok(TransactionServiceResponse::AcceptedTestTransaction) - }, - #[cfg(feature = "test_harness")] - TransactionServiceRequest::BroadcastTransaction(tx_id) => { - self.broadcast_transaction(tx_id).await?; - 
Ok(TransactionServiceResponse::TransactionBroadcast) - }, - #[cfg(feature = "test_harness")] - TransactionServiceRequest::MineTransaction(tx_id) => { - self.mine_transaction(tx_id).await?; - Ok(TransactionServiceResponse::TransactionMined) - }, TransactionServiceRequest::SetLowPowerMode => { self.set_power_mode(PowerMode::Low).await?; Ok(TransactionServiceResponse::LowPowerModeSet) @@ -664,7 +642,16 @@ where .set_completed_transaction_validity(tx_id, validity) .await .map(|_| TransactionServiceResponse::CompletedTransactionValidityChanged), + }; + + // If the individual handlers did not already send the API response then do it here. + if let Some(rp) = reply_channel { + let _ = rp.send(response).map_err(|e| { + warn!(target: LOG_TARGET, "Failed to send reply"); + e + }); } + Ok(()) } /// Sends a new transaction to a recipient @@ -682,7 +669,10 @@ where transaction_broadcast_join_handles: &mut FuturesUnordered< JoinHandle>, >, - ) -> Result { + reply_channel: oneshot::Sender>, + ) -> Result<(), TransactionServiceError> { + let tx_id = OsRng.next_u64(); + // If we're paying ourselves, let's complete and submit the transaction immediately if self.node_identity.public_key() == &dest_pubkey { debug!( @@ -690,9 +680,9 @@ where "Received transaction with spend-to-self transaction" ); - let (tx_id, fee, transaction) = self + let (fee, transaction) = self .output_manager_service - .create_pay_to_self_transaction(amount, fee_per_gram, None, message.clone()) + .create_pay_to_self_transaction(tx_id, amount, fee_per_gram, None, message.clone()) .await?; // Notify that the transaction was successfully resolved. 
@@ -718,22 +708,22 @@ where ) .await?; - return Ok(tx_id); - } - - let sender_protocol = self - .output_manager_service - .prepare_transaction_to_send(amount, fee_per_gram, None, message.clone(), script!(Nop)) - .await?; + let _ = reply_channel + .send(Ok(TransactionServiceResponse::TransactionSent(tx_id))) + .map_err(|e| { + warn!(target: LOG_TARGET, "Failed to send service reply"); + e + }); - let tx_id = sender_protocol.get_tx_id()?; + return Ok(()); + } let (tx_reply_sender, tx_reply_receiver) = mpsc::channel(100); let (cancellation_sender, cancellation_receiver) = oneshot::channel(); self.pending_transaction_reply_senders.insert(tx_id, tx_reply_sender); - self.send_transaction_cancellation_senders .insert(tx_id, cancellation_sender); + let protocol = TransactionSendProtocol::new( tx_id, self.resources.clone(), @@ -741,15 +731,16 @@ where cancellation_receiver, dest_pubkey, amount, + fee_per_gram, message, - sender_protocol, + Some(reply_channel), TransactionSendProtocolStage::Initial, ); let join_handle = tokio::spawn(protocol.execute()); join_handles.push(join_handle); - Ok(tx_id) + Ok(()) } /// Sends a one side payment transaction to a recipient @@ -774,11 +765,13 @@ where )); } - // Prepare sender part of the transaction + let tx_id = OsRng.next_u64(); + // Prepare sender part of the transaction let mut stp = self .output_manager_service .prepare_transaction_to_send( + tx_id, amount, fee_per_gram, None, @@ -786,7 +779,6 @@ where script!(PushPubKey(Box::new(dest_pubkey.clone()))), ) .await?; - let tx_id = stp.get_tx_id()?; // This call is needed to advance the state from `SingleRoundMessageReady` to `SingleRoundMessageReady`, // but the returned value is not used @@ -1170,8 +1162,9 @@ where cancellation_receiver, tx.destination_public_key, tx.amount, + tx.fee, tx.message, - tx.sender_protocol, + None, TransactionSendProtocolStage::WaitForReply, ); @@ -1300,6 +1293,7 @@ where &mut self, source_pubkey: CommsPublicKey, finalized_transaction: 
proto::TransactionFinalizedMessage, + join_handles: &mut FuturesUnordered>>, ) -> Result<(), TransactionServiceError> { let tx_id = finalized_transaction.tx_id; let transaction: Transaction = finalized_transaction @@ -1317,7 +1311,39 @@ where })?; let sender = match self.finalized_transaction_senders.get_mut(&tx_id) { - None => return Err(TransactionServiceError::TransactionDoesNotExistError), + None => { + // First check if perhaps we know about this inbound transaction but it was cancelled + match self.db.get_cancelled_pending_inbound_transaction(tx_id).await { + Ok(t) => { + if t.source_public_key != source_pubkey { + debug!( + target: LOG_TARGET, + "Received Finalized Transaction for a cancelled pending Inbound Transaction (TxId: \ + {}) but Source Public Key did not match", + tx_id + ); + return Err(TransactionServiceError::TransactionDoesNotExistError); + } + info!( + target: LOG_TARGET, + "Received Finalized Transaction for a cancelled pending Inbound Transaction (TxId: {}). \ + Restarting protocol", + tx_id + ); + self.db.uncancel_pending_transaction(tx_id).await?; + self.output_manager_service + .reinstate_cancelled_inbound_transaction(tx_id) + .await?; + + self.restart_receive_transaction_protocol(tx_id, source_pubkey.clone(), join_handles); + match self.finalized_transaction_senders.get_mut(&tx_id) { + None => return Err(TransactionServiceError::TransactionDoesNotExistError), + Some(s) => s, + } + }, + Err(_) => return Err(TransactionServiceError::TransactionDoesNotExistError), + } + }, Some(s) => s, }; @@ -1401,34 +1427,43 @@ where ) -> Result<(), TransactionServiceError> { let inbound_txs = self.db.get_pending_inbound_transactions().await?; for (tx_id, tx) in inbound_txs { - if !self.pending_transaction_reply_senders.contains_key(&tx_id) { - debug!( - target: LOG_TARGET, - "Restarting listening for Transaction Finalize for Pending Inbound Transaction TxId: {}", tx_id - ); - let (tx_finalized_sender, tx_finalized_receiver) = mpsc::channel(100); - let 
(cancellation_sender, cancellation_receiver) = oneshot::channel(); - self.finalized_transaction_senders.insert(tx_id, tx_finalized_sender); - self.receiver_transaction_cancellation_senders - .insert(tx_id, cancellation_sender); - let protocol = TransactionReceiveProtocol::new( - tx_id, - tx.source_public_key, - TransactionSenderMessage::None, - TransactionReceiveProtocolStage::WaitForFinalize, - self.resources.clone(), - tx_finalized_receiver, - cancellation_receiver, - ); - - let join_handle = tokio::spawn(protocol.execute()); - join_handles.push(join_handle); - } + self.restart_receive_transaction_protocol(tx_id, tx.source_public_key.clone(), join_handles); } Ok(()) } + fn restart_receive_transaction_protocol( + &mut self, + tx_id: TxId, + source_public_key: CommsPublicKey, + join_handles: &mut FuturesUnordered>>, + ) { + if !self.pending_transaction_reply_senders.contains_key(&tx_id) { + debug!( + target: LOG_TARGET, + "Restarting listening for Transaction Finalize for Pending Inbound Transaction TxId: {}", tx_id + ); + let (tx_finalized_sender, tx_finalized_receiver) = mpsc::channel(100); + let (cancellation_sender, cancellation_receiver) = oneshot::channel(); + self.finalized_transaction_senders.insert(tx_id, tx_finalized_sender); + self.receiver_transaction_cancellation_senders + .insert(tx_id, cancellation_sender); + let protocol = TransactionReceiveProtocol::new( + tx_id, + source_public_key, + TransactionSenderMessage::None, + TransactionReceiveProtocolStage::WaitForFinalize, + self.resources.clone(), + tx_finalized_receiver, + cancellation_receiver, + ); + + let join_handle = tokio::spawn(protocol.execute()); + join_handles.push(join_handle); + } + } + /// Add a base node public key to the list that will be used to broadcast transactions and monitor the base chain /// for the presence of spendable outputs. 
If this is the first time the base node public key is set do the initial /// mempool broadcast @@ -1996,288 +2031,6 @@ where Ok(()) } - - /// This function is only available for testing by the client of LibWallet. It simulates a receiver accepting and - /// replying to a Pending Outbound Transaction. This results in that transaction being "completed" and it's status - /// set to `Broadcast` which indicated it is in a base_layer mempool. - #[cfg(feature = "test_harness")] - pub async fn complete_pending_outbound_transaction( - &mut self, - completed_tx: CompletedTransaction, - ) -> Result<(), TransactionServiceError> { - self.db - .complete_outbound_transaction(completed_tx.tx_id, completed_tx.clone()) - .await?; - Ok(()) - } - - /// This function is only available for testing by the client of LibWallet. This function will simulate the process - /// when a completed transaction is broadcast in a mempool on the base layer. The function will update the status of - /// the completed transaction. - #[cfg(feature = "test_harness")] - pub async fn broadcast_transaction(&mut self, tx_id: TxId) -> Result<(), TransactionServiceError> { - let completed_txs = self.db.get_completed_transactions().await?; - completed_txs.get(&tx_id).ok_or_else(|| { - TransactionServiceError::TestHarnessError("Could not find Completed TX to broadcast.".to_string()) - })?; - - self.db.broadcast_completed_transaction(tx_id).await?; - - let _ = self - .event_publisher - .send(Arc::new(TransactionEvent::TransactionBroadcast(tx_id))) - .map_err(|e| { - trace!( - target: LOG_TARGET, - "Error sending event, usually because there are no subscribers: {:?}", - e - ); - e - }); - - Ok(()) - } - - /// This function is only available for testing by the client of LibWallet. This function will simulate the process - /// when a completed transaction is detected as mined on the base layer. 
The function will update the status of the - /// completed transaction AND complete the transaction on the Output Manager Service which will update the status of - /// the outputs - #[cfg(feature = "test_harness")] - pub async fn mine_transaction(&mut self, tx_id: TxId) -> Result<(), TransactionServiceError> { - let completed_txs = self.db.get_completed_transactions().await?; - let _found_tx = completed_txs.get(&tx_id).ok_or_else(|| { - TransactionServiceError::TestHarnessError("Could not find Completed TX to mine.".to_string()) - })?; - - let pending_tx_outputs = self.output_manager_service.get_pending_transactions().await?; - let pending_tx = pending_tx_outputs.get(&tx_id).ok_or_else(|| { - TransactionServiceError::TestHarnessError("Could not find Pending TX to complete.".to_string()) - })?; - - self.output_manager_service - .confirm_transaction( - tx_id, - pending_tx - .outputs_to_be_spent - .iter() - .map(|o| { - o.unblinded_output - .as_transaction_input(&self.resources.factories.commitment) - .expect("Should be able to make transaction input") - }) - .collect(), - pending_tx - .outputs_to_be_received - .iter() - .map(|o| { - o.unblinded_output - .as_transaction_output(&self.resources.factories) - .expect("Failed to convert to Transaction Output") - }) - .collect(), - ) - .await?; - - self.db.mine_completed_transaction(tx_id).await?; - - let _ = self - .event_publisher - .send(Arc::new(TransactionEvent::TransactionMined(tx_id))) - .map_err(|e| { - trace!( - target: LOG_TARGET, - "Error sending event, usually because there are no subscribers: {:?}", - e - ); - e - }); - - Ok(()) - } - - /// This function is only available for testing by the client of LibWallet. 
This function simulates an external - /// wallet sending a transaction to this wallet which will become a PendingInboundTransaction - #[cfg(feature = "test_harness")] - pub async fn receive_test_transaction( - &mut self, - _tx_id: TxId, - amount: MicroTari, - source_public_key: CommsPublicKey, - handle: tokio::runtime::Handle, - ) -> Result<(), TransactionServiceError> { - use crate::{ - base_node_service::{handle::BaseNodeServiceHandle, mock_base_node_service::MockBaseNodeService}, - output_manager_service::{ - config::OutputManagerServiceConfig, - error::OutputManagerError, - service::OutputManagerService, - storage::{database::OutputManagerDatabase, sqlite_db::OutputManagerSqliteDatabase}, - }, - storage::sqlite_utilities::run_migration_and_create_sqlite_connection, - transaction_service::{handle::TransactionServiceHandle, storage::models::InboundTransaction}, - }; - use tari_comms::types::CommsSecretKey; - use tari_core::consensus::ConsensusConstantsBuilder; - use tari_p2p::Network; - use tari_test_utils::random; - use tempfile::tempdir; - - let (_sender, receiver) = reply_channel::unbounded(); - let (oms_event_publisher, _oms_event_subscriber) = broadcast::channel(100); - let (ts_request_sender, _ts_request_receiver) = reply_channel::unbounded(); - let (event_publisher, _) = broadcast::channel(100); - let ts_handle = TransactionServiceHandle::new(ts_request_sender, event_publisher.clone()); - let constants = ConsensusConstantsBuilder::new(Network::Weatherwax).build(); - let shutdown_signal = self.resources.shutdown_signal.clone(); - let (sender, receiver_bns) = reply_channel::unbounded(); - let (event_publisher_bns, _) = broadcast::channel(100); - let (connectivity_tx_publisher, _) = broadcast::channel(100); - let (connectivity_tx, _) = mpsc::channel(20); - - let connectivity_manager = ConnectivityRequester::new(connectivity_tx, connectivity_tx_publisher); - - let basenode_service_handle = BaseNodeServiceHandle::new(sender, event_publisher_bns); - let mut 
mock_base_node_service = MockBaseNodeService::new(receiver_bns, shutdown_signal.clone()); - mock_base_node_service.set_default_base_node_state(); - - let db_name = format!("{}.sqlite3", random::string(8).as_str()); - let db_tempdir = tempdir().unwrap(); - let db_folder = db_tempdir.path().to_str().unwrap().to_string(); - let db_path = format!("{}/{}", db_folder, db_name); - let connection = run_migration_and_create_sqlite_connection(&db_path).unwrap(); - let backend = OutputManagerSqliteDatabase::new(connection, None); - - handle.spawn(mock_base_node_service.run()); - let mut fake_oms = OutputManagerService::new( - OutputManagerServiceConfig::default(), - ts_handle, - receiver, - OutputManagerDatabase::new(backend), - oms_event_publisher, - self.resources.factories.clone(), - constants, - shutdown_signal, - basenode_service_handle, - connectivity_manager, - CommsSecretKey::default(), - ) - .await?; - - use crate::testnet_utils::make_input; - let (_ti, uo) = make_input(amount + 100000 * uT, &self.resources.factories); - - fake_oms.add_output(None, uo).await?; - - let mut stp = fake_oms - .prepare_transaction_to_send(amount, MicroTari::from(25), None, "".to_string(), script!(Nop)) - .await?; - - let msg = stp.build_single_round_message()?; - let proto_msg = proto::TransactionSenderMessage::single(msg.into()); - let sender_message: TransactionSenderMessage = proto_msg - .try_into() - .map_err(TransactionServiceError::InvalidMessageError)?; - - let (tx_id, _amount) = match sender_message.clone() { - TransactionSenderMessage::Single(data) => (data.tx_id, data.amount), - _ => { - return Err(TransactionServiceError::OutputManagerError( - OutputManagerError::InvalidSenderMessage, - )) - }, - }; - - let rtp = self - .output_manager_service - .get_recipient_transaction(sender_message) - .await?; - - let inbound_transaction = InboundTransaction::new( - tx_id, - source_public_key, - amount, - rtp, - TransactionStatus::Pending, - "".to_string(), - Utc::now().naive_utc(), - ); - 
- self.db - .add_pending_inbound_transaction(tx_id, inbound_transaction.clone()) - .await?; - - let _ = self - .event_publisher - .send(Arc::new(TransactionEvent::ReceivedTransaction(tx_id))) - .map_err(|e| { - trace!( - target: LOG_TARGET, - "Error sending event, usually because there are no subscribers: {:?}", - e - ); - e - }); - - Ok(()) - } - - /// This function is only available for testing by the client of LibWallet. This function simulates an external - /// wallet sending a transaction to this wallet which will become a PendingInboundTransaction - #[cfg(feature = "test_harness")] - pub async fn finalize_received_test_transaction(&mut self, tx_id: TxId) -> Result<(), TransactionServiceError> { - use tari_core::transactions::{transaction::KernelBuilder, types::Signature}; - use tari_crypto::commitment::HomomorphicCommitmentFactory; - - let factories = CryptoFactories::default(); - - let inbound_txs = self.db.get_pending_inbound_transactions().await?; - - let found_tx = inbound_txs.get(&tx_id).ok_or_else(|| { - TransactionServiceError::TestHarnessError("Could not find Pending Inbound TX to finalize.".to_string()) - })?; - - let kernel = KernelBuilder::new() - .with_excess(&factories.commitment.zero()) - .with_signature(&Signature::default()) - .build() - .unwrap(); - - let completed_transaction = CompletedTransaction::new( - tx_id, - found_tx.source_public_key.clone(), - self.node_identity.public_key().clone(), - found_tx.amount, - MicroTari::from(2000), // a placeholder fee for this test function - Transaction::new( - Vec::new(), - Vec::new(), - vec![kernel], - BlindingFactor::default(), - BlindingFactor::default(), - ), - TransactionStatus::Completed, - found_tx.message.clone(), - found_tx.timestamp, - TransactionDirection::Inbound, - None, - ); - - self.db - .complete_inbound_transaction(tx_id, completed_transaction.clone()) - .await?; - let _ = self - .event_publisher - .send(Arc::new(TransactionEvent::ReceivedFinalizedTransaction(tx_id))) - .map_err(|e| { 
- trace!( - target: LOG_TARGET, - "Error sending event, usually because there are no subscribers: {:?}", - e - ); - e - }); - Ok(()) - } } /// This struct is a collection of the common resources that a protocol in the service requires. diff --git a/base_layer/wallet/src/transaction_service/storage/database.rs b/base_layer/wallet/src/transaction_service/storage/database.rs index b6844a7c44..aaad0b0618 100644 --- a/base_layer/wallet/src/transaction_service/storage/database.rs +++ b/base_layer/wallet/src/transaction_service/storage/database.rs @@ -34,8 +34,6 @@ use crate::{ }, }; use aes_gcm::Aes256Gcm; -#[cfg(feature = "test_harness")] -use chrono::NaiveDateTime; use chrono::Utc; use log::*; @@ -90,8 +88,12 @@ pub trait TransactionBackend: Send + Sync + Clone { fn set_completed_transaction_validity(&self, tx_id: TxId, valid: bool) -> Result<(), TransactionStorageError>; /// Cancel Completed transaction, this will update the transaction status fn cancel_completed_transaction(&self, tx_id: TxId) -> Result<(), TransactionStorageError>; - /// Cancel Completed transaction, this will update the transaction status - fn cancel_pending_transaction(&self, tx_id: TxId) -> Result<(), TransactionStorageError>; + /// Set cancellation on Pending transaction, this will update the transaction status + fn set_pending_transaction_cancellation_status( + &self, + tx_id: TxId, + cancelled: bool, + ) -> Result<(), TransactionStorageError>; /// Search all pending transaction for the provided tx_id and if it exists return the public key of the counterparty fn get_pending_transaction_counterparty_pub_key_by_tx_id( &self, @@ -107,13 +109,6 @@ pub trait TransactionBackend: Send + Sync + Clone { block_height: u64, amount: MicroTari, ) -> Result, TransactionStorageError>; - /// Update a completed transactions timestamp for use in test data generation - #[cfg(feature = "test_harness")] - fn update_completed_transaction_timestamp( - &self, - tx_id: TxId, - timestamp: NaiveDateTime, - ) -> 
Result<(), TransactionStorageError>; /// Apply encryption to the backend. fn apply_encryption(&self, cipher: Aes256Gcm) -> Result<(), TransactionStorageError>; /// Remove encryption from the backend. @@ -563,7 +558,15 @@ where T: TransactionBackend + 'static pub async fn cancel_pending_transaction(&self, tx_id: TxId) -> Result<(), TransactionStorageError> { let db_clone = self.db.clone(); - tokio::task::spawn_blocking(move || db_clone.cancel_pending_transaction(tx_id)) + tokio::task::spawn_blocking(move || db_clone.set_pending_transaction_cancellation_status(tx_id, true)) + .await + .map_err(|err| TransactionStorageError::BlockingTaskSpawnError(err.to_string()))??; + Ok(()) + } + + pub async fn uncancel_pending_transaction(&self, tx_id: TxId) -> Result<(), TransactionStorageError> { + let db_clone = self.db.clone(); + tokio::task::spawn_blocking(move || db_clone.set_pending_transaction_cancellation_status(tx_id, false)) .await .map_err(|err| TransactionStorageError::BlockingTaskSpawnError(err.to_string()))??; Ok(()) diff --git a/base_layer/wallet/src/transaction_service/storage/sqlite_db.rs b/base_layer/wallet/src/transaction_service/storage/sqlite_db.rs index 841eb96510..0cd55fc7f2 100644 --- a/base_layer/wallet/src/transaction_service/storage/sqlite_db.rs +++ b/base_layer/wallet/src/transaction_service/storage/sqlite_db.rs @@ -576,16 +576,20 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { Ok(()) } - fn cancel_pending_transaction(&self, tx_id: u64) -> Result<(), TransactionStorageError> { + fn set_pending_transaction_cancellation_status( + &self, + tx_id: u64, + cancelled: bool, + ) -> Result<(), TransactionStorageError> { let conn = self.database_connection.acquire_lock(); - match InboundTransactionSql::find_by_cancelled(tx_id, false, &(*conn)) { + match InboundTransactionSql::find(tx_id, &(*conn)) { Ok(v) => { - v.cancel(&(*conn))?; + v.set_cancelled(cancelled, &(*conn))?; }, Err(_) => { - match OutboundTransactionSql::find_by_cancelled(tx_id, 
false, &(*conn)) { + match OutboundTransactionSql::find(tx_id, &(*conn)) { Ok(v) => { - v.cancel(&(*conn))?; + v.set_cancelled(cancelled, &(*conn))?; }, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { return Err(TransactionStorageError::ValuesNotFound); @@ -636,34 +640,6 @@ impl TransactionBackend for TransactionServiceSqliteDatabase { Ok(()) } - #[cfg(feature = "test_harness")] - fn update_completed_transaction_timestamp( - &self, - tx_id: u64, - timestamp: NaiveDateTime, - ) -> Result<(), TransactionStorageError> { - let conn = self.database_connection.acquire_lock(); - - if let Ok(tx) = CompletedTransactionSql::find_by_cancelled(tx_id, false, &(*conn)) { - tx.update( - UpdateCompletedTransactionSql::from(UpdateCompletedTransaction { - status: None, - timestamp: Some(timestamp), - cancelled: None, - direction: None, - send_count: None, - last_send_timestamp: None, - valid: None, - confirmations: None, - mined_height: None, - }), - &(*conn), - )?; - } - - Ok(()) - } - fn apply_encryption(&self, cipher: Aes256Gcm) -> Result<(), TransactionStorageError> { let mut current_cipher = acquire_write_lock!(self.cipher); @@ -1019,10 +995,10 @@ impl InboundTransactionSql { Ok(()) } - pub fn cancel(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { + pub fn set_cancelled(&self, cancelled: bool, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { self.update( UpdateInboundTransactionSql { - cancelled: Some(1i32), + cancelled: Some(cancelled as i32), direct_send_success: None, receiver_protocol: None, send_count: None, @@ -1202,10 +1178,10 @@ impl OutboundTransactionSql { Ok(()) } - pub fn cancel(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { + pub fn set_cancelled(&self, cancelled: bool, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { self.update( UpdateOutboundTransactionSql { - cancelled: Some(1i32), + cancelled: Some(cancelled as i32), direct_send_success: None, 
sender_protocol: None, send_count: None, @@ -1674,8 +1650,6 @@ impl From for UpdateCompletedTransactionSql { #[cfg(test)] mod test { - #[cfg(feature = "test_harness")] - use crate::transaction_service::storage::sqlite_db::UpdateCompletedTransactionSql; use crate::{ storage::sqlite_utilities::WalletDbConnection, transaction_service::storage::{ @@ -1929,7 +1903,7 @@ mod test { .commit(&conn) .is_err()); - CompletedTransactionSql::try_from(completed_tx2.clone()) + CompletedTransactionSql::try_from(completed_tx2) .unwrap() .commit(&conn) .unwrap(); @@ -1986,23 +1960,34 @@ mod test { assert!(InboundTransactionSql::find_by_cancelled(inbound_tx1.tx_id, true, &conn).is_err()); InboundTransactionSql::try_from(inbound_tx1.clone()) .unwrap() - .cancel(&conn) + .set_cancelled(true, &conn) .unwrap(); assert!(InboundTransactionSql::find_by_cancelled(inbound_tx1.tx_id, false, &conn).is_err()); assert!(InboundTransactionSql::find_by_cancelled(inbound_tx1.tx_id, true, &conn).is_ok()); - + InboundTransactionSql::try_from(inbound_tx1.clone()) + .unwrap() + .set_cancelled(false, &conn) + .unwrap(); + assert!(InboundTransactionSql::find_by_cancelled(inbound_tx1.tx_id, true, &conn).is_err()); + assert!(InboundTransactionSql::find_by_cancelled(inbound_tx1.tx_id, false, &conn).is_ok()); OutboundTransactionSql::try_from(outbound_tx1.clone()) .unwrap() .commit(&conn) .unwrap(); assert!(OutboundTransactionSql::find_by_cancelled(outbound_tx1.tx_id, true, &conn).is_err()); - OutboundTransactionSql::try_from(outbound_tx1) + OutboundTransactionSql::try_from(outbound_tx1.clone()) .unwrap() - .cancel(&conn) + .set_cancelled(true, &conn) .unwrap(); - assert!(InboundTransactionSql::find_by_cancelled(inbound_tx1.tx_id, false, &conn).is_err()); - assert!(InboundTransactionSql::find_by_cancelled(inbound_tx1.tx_id, true, &conn).is_ok()); + assert!(OutboundTransactionSql::find_by_cancelled(outbound_tx1.tx_id, false, &conn).is_err()); + assert!(OutboundTransactionSql::find_by_cancelled(outbound_tx1.tx_id, 
true, &conn).is_ok()); + OutboundTransactionSql::try_from(outbound_tx1.clone()) + .unwrap() + .set_cancelled(false, &conn) + .unwrap(); + assert!(OutboundTransactionSql::find_by_cancelled(outbound_tx1.tx_id, true, &conn).is_err()); + assert!(OutboundTransactionSql::find_by_cancelled(outbound_tx1.tx_id, false, &conn).is_ok()); CompletedTransactionSql::try_from(completed_tx1.clone()) .unwrap() @@ -2096,26 +2081,6 @@ mod test { assert!(coinbase_txs.iter().any(|c| c.tx_id == 101)); assert!(coinbase_txs.iter().any(|c| c.tx_id == 102)); assert!(!coinbase_txs.iter().any(|c| c.tx_id == 103)); - - #[cfg(feature = "test_harness")] - CompletedTransactionSql::find_by_cancelled(completed_tx2.tx_id, false, &conn) - .unwrap() - .update( - UpdateCompletedTransactionSql { - status: Some(TransactionStatus::MinedUnconfirmed as i32), - timestamp: None, - cancelled: None, - direction: None, - transaction_protocol: None, - send_count: None, - last_send_timestamp: None, - valid: None, - confirmations: None, - mined_height: None, - }, - &conn, - ) - .unwrap(); } #[test] diff --git a/base_layer/wallet/src/utxo_scanner_service/handle.rs b/base_layer/wallet/src/utxo_scanner_service/handle.rs index b735c7e904..c7a2e4fdeb 100644 --- a/base_layer/wallet/src/utxo_scanner_service/handle.rs +++ b/base_layer/wallet/src/utxo_scanner_service/handle.rs @@ -49,6 +49,7 @@ pub enum UtxoScannerEvent { ScanningRoundFailed { num_retries: usize, retry_limit: usize, + error: String, }, /// Progress of the recovery process (current_block, current_chain_height) Progress { diff --git a/base_layer/wallet/src/utxo_scanner_service/mod.rs b/base_layer/wallet/src/utxo_scanner_service/mod.rs index 41d5eb379a..b0b475b96b 100644 --- a/base_layer/wallet/src/utxo_scanner_service/mod.rs +++ b/base_layer/wallet/src/utxo_scanner_service/mod.rs @@ -105,7 +105,7 @@ where T: WalletBackend + 'static let scanning_service = UtxoScannerService::::builder() .with_peers(vec![]) - .with_retry_limit(10) + .with_retry_limit(2) 
.with_scanning_interval(interval) .with_mode(UtxoScannerMode::Scanning) .build_with_resources( diff --git a/base_layer/wallet/src/utxo_scanner_service/utxo_scanning.rs b/base_layer/wallet/src/utxo_scanner_service/utxo_scanning.rs index f8467dc8f8..6aaa38363f 100644 --- a/base_layer/wallet/src/utxo_scanner_service/utxo_scanning.rs +++ b/base_layer/wallet/src/utxo_scanner_service/utxo_scanning.rs @@ -307,6 +307,11 @@ where TBackend: WalletBackend + 'static } let num_scanned = self.scan_utxos(&mut client, start_index, tip_header).await?; + if num_scanned == 0 { + return Err(UtxoScannerError::UtxoScanningError( + "Peer returned 0 UTXOs to scan".to_string(), + )); + } debug!( target: LOG_TARGET, "Scanning round completed UTXO #{} in {:.2?} ({} scanned)", @@ -579,7 +584,7 @@ where TBackend: WalletBackend + 'static match self.get_next_peer() { Some(peer) => match self.attempt_sync(peer.clone()).await { Ok((total_scanned, final_utxo_pos, elapsed)) => { - debug!(target: LOG_TARGET, "Scanning to UTXO #{}", final_utxo_pos); + debug!(target: LOG_TARGET, "Scanned to UTXO #{}", final_utxo_pos); self.finalize(total_scanned, final_utxo_pos, elapsed).await?; return Ok(()); }, @@ -591,6 +596,7 @@ where TBackend: WalletBackend + 'static self.publish_event(UtxoScannerEvent::ScanningRoundFailed { num_retries: self.num_retries, retry_limit: self.retry_limit, + error: e.to_string(), }); continue; }, @@ -599,6 +605,7 @@ where TBackend: WalletBackend + 'static self.publish_event(UtxoScannerEvent::ScanningRoundFailed { num_retries: self.num_retries, retry_limit: self.retry_limit, + error: "No new peers to try after this round".to_string(), }); if self.num_retries >= self.retry_limit { @@ -679,7 +686,7 @@ where TBackend: WalletBackend + 'static event_sender: self.event_sender.clone(), retry_limit: self.retry_limit, peer_index: 0, - num_retries: 0, + num_retries: 1, mode: self.mode.clone(), run_flag: self.is_running.clone(), } @@ -709,21 +716,34 @@ where TBackend: WalletBackend + 'static let 
mut shutdown = self.shutdown_signal.clone(); let start_at = Instant::now() + Duration::from_secs(1); let mut work_interval = time::interval_at(start_at.into(), self.scan_for_utxo_interval).fuse(); + let mut previous = Instant::now(); loop { futures::select! { _ = work_interval.select_next_some() => { - let running_flag = self.is_running.clone(); - if !running_flag.load(Ordering::SeqCst) { - let task = self.create_task(); - debug!(target: LOG_TARGET, "UTXO scanning service starting scan for utxos"); - task::spawn(async move { - if let Err(err) = task.run().await { - error!(target: LOG_TARGET, "Error scanning UTXOs: {}", err); - } - //we make sure the flag is set to false here - running_flag.store(false, Ordering::Relaxed); - }); + // This bit of code prevents bottled up tokio interval events to be fired successively for the edge + // case where a computer wakes up from sleep. + if start_at.elapsed() > self.scan_for_utxo_interval && + previous.elapsed() < self.scan_for_utxo_interval.mul_f32(0.9) + { + debug!( + target: LOG_TARGET, + "UTXO scanning work interval event fired too quickly, not running the task" + ); + } else { + let running_flag = self.is_running.clone(); + if !running_flag.load(Ordering::SeqCst) { + let task = self.create_task(); + debug!(target: LOG_TARGET, "UTXO scanning service starting scan for utxos"); + task::spawn(async move { + if let Err(err) = task.run().await { + error!(target: LOG_TARGET, "Error scanning UTXOs: {}", err); + } + //we make sure the flag is set to false here + running_flag.store(false, Ordering::Relaxed); + }); + } } + previous = Instant::now(); }, request_context = request_stream.select_next_some() => { trace!(target: LOG_TARGET, "Handling Service API Request"); diff --git a/base_layer/wallet/src/wallet.rs b/base_layer/wallet/src/wallet.rs index aa13c4d561..1f91f3d625 100644 --- a/base_layer/wallet/src/wallet.rs +++ b/base_layer/wallet/src/wallet.rs @@ -23,6 +23,7 @@ use crate::{ 
base_node_service::{handle::BaseNodeServiceHandle, BaseNodeServiceInitializer}, config::{WalletConfig, KEY_MANAGER_COMMS_SECRET_KEY_BRANCH_KEY}, + connectivity_service::{WalletConnectivityHandle, WalletConnectivityInitializer}, contacts_service::{handle::ContactsServiceHandle, storage::database::ContactsBackend, ContactsServiceInitializer}, error::WalletError, output_manager_service::{ @@ -95,13 +96,12 @@ where pub store_and_forward_requester: StoreAndForwardRequester, pub output_manager_service: OutputManagerHandle, pub transaction_service: TransactionServiceHandle, + pub wallet_connectivity: WalletConnectivityHandle, pub contacts_service: ContactsServiceHandle, pub base_node_service: BaseNodeServiceHandle, pub utxo_scanner_service: UtxoScannerHandle, pub db: WalletDatabase, pub factories: CryptoFactories, - #[cfg(feature = "test_harness")] - pub transaction_backend: U, _u: PhantomData, _v: PhantomData, _w: PhantomData, @@ -137,8 +137,6 @@ where comms_config.node_identity = node_identity.clone(); let bn_service_db = wallet_database.clone(); - #[cfg(feature = "test_harness")] - let transaction_backend_handle = transaction_backend.clone(); let factories = config.clone().factories; let (publisher, subscription_factory) = @@ -183,9 +181,10 @@ where )) .add_initializer(ContactsServiceInitializer::new(contacts_backend)) .add_initializer(BaseNodeServiceInitializer::new( - config.base_node_service_config, + config.base_node_service_config.clone(), bn_service_db, )) + .add_initializer(WalletConnectivityInitializer::new(config.base_node_service_config)) .add_initializer(UtxoScannerServiceInitializer::new( config.scan_for_utxo_interval, wallet_database.clone(), @@ -208,6 +207,7 @@ where let base_node_service_handle = handles.expect_handle::(); let utxo_scanner_service_handle = handles.expect_handle::(); + let wallet_connectivity = handles.expect_handle::(); persist_one_sided_payment_script_for_node_identity(&mut output_manager_handle, comms.node_identity()) .await @@ -234,10 
+234,9 @@ where contacts_service: contacts_handle, base_node_service: base_node_service_handle, utxo_scanner_service: utxo_scanner_service_handle, + wallet_connectivity, db: wallet_database, factories, - #[cfg(feature = "test_harness")] - transaction_backend: transaction_backend_handle, _u: PhantomData, _v: PhantomData, _w: PhantomData, @@ -274,10 +273,6 @@ where ); self.comms.peer_manager().add_peer(peer.clone()).await?; - self.comms - .connectivity() - .add_managed_peers(vec![peer.node_id.clone()]) - .await?; self.transaction_service .set_base_node_public_key(peer.public_key.clone()) @@ -323,7 +318,7 @@ where let unblinded_output = UnblindedOutput::new( amount, spending_key.clone(), - Some(features.clone()), + features.clone(), script, input_data, script_private_key.clone(), diff --git a/base_layer/wallet/tests/output_manager_service/service.rs b/base_layer/wallet/tests/output_manager_service/service.rs index c3110e59f2..c6c23da53e 100644 --- a/base_layer/wallet/tests/output_manager_service/service.rs +++ b/base_layer/wallet/tests/output_manager_service/service.rs @@ -84,6 +84,8 @@ use tari_wallet::{ types::ValidationRetryStrategy, }; +use tari_comms::protocol::rpc::RpcClientConfig; +use tari_wallet::output_manager_service::storage::models::OutputStatus; use tokio::{ runtime::Runtime, sync::{broadcast, broadcast::channel}, @@ -368,7 +370,14 @@ fn test_utxo_selection_no_chain_metadata() { let amount = MicroTari::from(1000); let fee_per_gram = MicroTari::from(10); let err = runtime - .block_on(oms.prepare_transaction_to_send(amount, fee_per_gram, None, "".to_string(), script!(Nop))) + .block_on(oms.prepare_transaction_to_send( + OsRng.next_u64(), + amount, + fee_per_gram, + None, + "".to_string(), + script!(Nop), + )) .unwrap_err(); assert!(matches!(err, OutputManagerError::NotEnoughFunds)); @@ -385,7 +394,14 @@ fn test_utxo_selection_no_chain_metadata() { // but we have no chain state so the lowest maturity should be used let stp = runtime - 
.block_on(oms.prepare_transaction_to_send(amount, fee_per_gram, None, "".to_string(), script!(Nop))) + .block_on(oms.prepare_transaction_to_send( + OsRng.next_u64(), + amount, + fee_per_gram, + None, + "".to_string(), + script!(Nop), + )) .unwrap(); assert!(stp.get_tx_id().is_ok()); @@ -453,7 +469,14 @@ fn test_utxo_selection_with_chain_metadata() { let amount = MicroTari::from(1000); let fee_per_gram = MicroTari::from(10); let err = runtime - .block_on(oms.prepare_transaction_to_send(amount, fee_per_gram, None, "".to_string(), script!(Nop))) + .block_on(oms.prepare_transaction_to_send( + OsRng.next_u64(), + amount, + fee_per_gram, + None, + "".to_string(), + script!(Nop), + )) .unwrap_err(); assert!(matches!(err, OutputManagerError::NotEnoughFunds)); @@ -498,7 +521,14 @@ fn test_utxo_selection_with_chain_metadata() { // test transactions let stp = runtime - .block_on(oms.prepare_transaction_to_send(amount, fee_per_gram, None, "".to_string(), script!(Nop))) + .block_on(oms.prepare_transaction_to_send( + OsRng.next_u64(), + amount, + fee_per_gram, + None, + "".to_string(), + script!(Nop), + )) .unwrap(); assert!(stp.get_tx_id().is_ok()); @@ -514,7 +544,14 @@ fn test_utxo_selection_with_chain_metadata() { // when the amount is greater than the largest utxo, then "Largest" selection strategy is used let stp = runtime - .block_on(oms.prepare_transaction_to_send(6 * amount, fee_per_gram, None, "".to_string(), script!(Nop))) + .block_on(oms.prepare_transaction_to_send( + OsRng.next_u64(), + 6 * amount, + fee_per_gram, + None, + "".to_string(), + script!(Nop), + )) .unwrap(); assert!(stp.get_tx_id().is_ok()); @@ -560,6 +597,7 @@ fn sending_transaction_and_confirmation() { let stp = runtime .block_on(oms.prepare_transaction_to_send( + OsRng.next_u64(), MicroTari::from(1000), MicroTari::from(20), None, @@ -626,7 +664,12 @@ fn sending_transaction_and_confirmation() { ); if let DbValue::KeyManagerState(km) = backend.fetch(&DbKey::KeyManagerState).unwrap().unwrap() { - 
assert_eq!(km.primary_key_index, 1); + // if we dont have change, we did not move the index forward + if tx.body.outputs().len() > 1 { + assert_eq!(km.primary_key_index, 1); + } else { + assert_eq!(km.primary_key_index, 0); + } } else { panic!("No Key Manager set"); } @@ -653,6 +696,7 @@ fn send_not_enough_funds() { } match runtime.block_on(oms.prepare_transaction_to_send( + OsRng.next_u64(), MicroTari::from(num_outputs * 2000), MicroTari::from(20), None, @@ -698,6 +742,7 @@ fn send_no_change() { let mut stp = runtime .block_on(oms.prepare_transaction_to_send( + OsRng.next_u64(), MicroTari::from(value1 + value2) - fee_without_change, fee_per_gram, None, @@ -774,6 +819,7 @@ fn send_not_enough_for_change() { .unwrap(); match runtime.block_on(oms.prepare_transaction_to_send( + OsRng.next_u64(), MicroTari::from(value1 + value2 + 1) - fee_without_change, MicroTari::from(20), None, @@ -835,6 +881,7 @@ fn cancel_transaction() { } let stp = runtime .block_on(oms.prepare_transaction_to_send( + OsRng.next_u64(), MicroTari::from(1000), MicroTari::from(20), None, @@ -855,6 +902,59 @@ fn cancel_transaction() { assert_eq!(runtime.block_on(oms.get_unspent_outputs()).unwrap().len(), num_outputs); } +#[test] +fn cancel_transaction_and_reinstate_inbound_tx() { + let mut runtime = Runtime::new().unwrap(); + + let (connection, _tempdir) = get_temp_sqlite_database_connection(); + let backend = OutputManagerSqliteDatabase::new(connection, None); + + let (mut oms, _shutdown, _, _, _, _, _) = setup_output_manager_service(&mut runtime, backend.clone(), true); + + let value = MicroTari::from(5000); + let (tx_id, sender_message) = generate_sender_transaction_message(value); + let _rtp = runtime.block_on(oms.get_recipient_transaction(sender_message)).unwrap(); + assert_eq!(runtime.block_on(oms.get_unspent_outputs()).unwrap().len(), 0); + + let pending_txs = runtime.block_on(oms.get_pending_transactions()).unwrap(); + + assert_eq!(pending_txs.len(), 1); + + let output = pending_txs + 
.get(&tx_id) + .unwrap() + .outputs_to_be_received + .first() + .unwrap() + .clone(); + + runtime.block_on(oms.cancel_transaction(tx_id)).unwrap(); + + let cancelled_output = backend + .fetch(&DbKey::OutputsByTxIdAndStatus(tx_id, OutputStatus::CancelledInbound)) + .unwrap() + .unwrap(); + + if let DbValue::AnyOutputs(o) = cancelled_output { + let o = o.first().expect("Should be one output in here"); + assert_eq!(o.commitment, output.commitment); + } else { + panic!("Should have found cancelled output"); + } + + assert_eq!(runtime.block_on(oms.get_pending_transactions()).unwrap().len(), 0); + + runtime + .block_on(oms.reinstate_cancelled_inbound_transaction(tx_id)) + .unwrap(); + + assert_eq!(runtime.block_on(oms.get_pending_transactions()).unwrap().len(), 1); + + let balance = runtime.block_on(oms.get_balance()).unwrap(); + + assert_eq!(balance.pending_incoming_balance, value); +} + #[test] fn timeout_transaction() { let factories = CryptoFactories::default(); @@ -876,6 +976,7 @@ fn timeout_transaction() { } let _stp = runtime .block_on(oms.prepare_transaction_to_send( + OsRng.next_u64(), MicroTari::from(1000), MicroTari::from(20), None, @@ -930,7 +1031,14 @@ fn test_get_balance() { let send_value = MicroTari::from(1000); let stp = runtime - .block_on(oms.prepare_transaction_to_send(send_value, MicroTari::from(20), None, "".to_string(), script!(Nop))) + .block_on(oms.prepare_transaction_to_send( + OsRng.next_u64(), + send_value, + MicroTari::from(20), + None, + "".to_string(), + script!(Nop), + )) .unwrap(); let change_val = stp.get_change_amount().unwrap(); @@ -999,6 +1107,7 @@ fn sending_transaction_with_short_term_clear() { // Check that funds are encumbered and then unencumbered if the pending tx is not confirmed before restart let _stp = runtime .block_on(oms.prepare_transaction_to_send( + OsRng.next_u64(), MicroTari::from(1000), MicroTari::from(20), None, @@ -1020,6 +1129,7 @@ fn sending_transaction_with_short_term_clear() { // Check that a unconfirm Pending 
Transaction can be cancelled let stp = runtime .block_on(oms.prepare_transaction_to_send( + OsRng.next_u64(), MicroTari::from(1000), MicroTari::from(20), None, @@ -1039,6 +1149,7 @@ fn sending_transaction_with_short_term_clear() { // Check that is the pending tx is confirmed that the encumberance persists after restart let stp = runtime .block_on(oms.prepare_transaction_to_send( + OsRng.next_u64(), MicroTari::from(1000), MicroTari::from(20), None, @@ -1706,14 +1817,19 @@ fn test_txo_validation_rpc_timeout() { .unwrap(); runtime.block_on(async { - let mut delay = delay_for(Duration::from_secs(60)).fuse(); + let mut delay = delay_for( + RpcClientConfig::default().deadline.unwrap() + + RpcClientConfig::default().deadline_grace_period + + Duration::from_secs(30), + ) + .fuse(); let mut failed = 0; loop { futures::select! { event = event_stream.select_next_some() => { if let Ok(msg) = event { if let OutputManagerEvent::TxoValidationFailure(_,_) = (*msg).clone() { - failed+=1; + failed+=1; } } diff --git a/base_layer/wallet/tests/support/rpc.rs b/base_layer/wallet/tests/support/rpc.rs index bba8b32858..0f7009bdd0 100644 --- a/base_layer/wallet/tests/support/rpc.rs +++ b/base_layer/wallet/tests/support/rpc.rs @@ -31,6 +31,8 @@ use tari_core::{ proto::wallet_rpc::{TxLocation, TxQueryResponse, TxSubmissionRejectionReason, TxSubmissionResponse}, rpc::BaseNodeWalletService, }, + blocks::BlockHeader, + proto, proto::{ base_node::{ ChainMetadata, @@ -86,6 +88,7 @@ pub struct BaseNodeWalletRpcMockState { fetch_utxos_calls: Arc>>>>, response_delay: Arc>>, rpc_status_error: Arc>>, + get_header_response: Arc>>, synced: Arc>, utxos: Arc>>, } @@ -121,6 +124,7 @@ impl BaseNodeWalletRpcMockState { fetch_utxos_calls: Arc::new(Mutex::new(Vec::new())), response_delay: Arc::new(Mutex::new(None)), rpc_status_error: Arc::new(Mutex::new(None)), + get_header_response: Arc::new(Mutex::new(None)), synced: Arc::new(Mutex::new(true)), utxos: Arc::new(Mutex::new(Vec::new())), } @@ -458,6 +462,15 @@ 
impl BaseNodeWalletService for BaseNodeWalletRpcMockService { Ok(Response::new(tip_info_response_lock.clone())) } + + async fn get_header(&self, _: Request) -> Result, RpcStatus> { + let lock = acquire_lock!(self.state.get_header_response); + let resp = lock + .as_ref() + .cloned() + .ok_or_else(|| RpcStatus::not_found("get_header_response set to None"))?; + Ok(Response::new(resp.into())) + } } #[cfg(test)] diff --git a/base_layer/wallet/tests/transaction_service/service.rs b/base_layer/wallet/tests/transaction_service/service.rs index cbc661d0a2..dc508f7321 100644 --- a/base_layer/wallet/tests/transaction_service/service.rs +++ b/base_layer/wallet/tests/transaction_service/service.rs @@ -36,7 +36,7 @@ use futures::{ StreamExt, }; use prost::Message; -use rand::rngs::OsRng; +use rand::{rngs::OsRng, RngCore}; use std::{ convert::{TryFrom, TryInto}, path::Path, @@ -99,6 +99,7 @@ use tari_wallet::{ mock_base_node_service::MockBaseNodeService, BaseNodeServiceInitializer, }, + connectivity_service::WalletConnectivityInitializer, output_manager_service::{ config::OutputManagerServiceConfig, handle::OutputManagerHandle, @@ -211,6 +212,7 @@ pub fn setup_transaction_service< factories, )) .add_initializer(BaseNodeServiceInitializer::new(BaseNodeServiceConfig::default(), db)) + .add_initializer(WalletConnectivityInitializer::new(BaseNodeServiceConfig::default())) .build(); let handles = runtime.block_on(fut).expect("Service initialization failed"); @@ -1439,6 +1441,7 @@ fn finalize_tx_with_incorrect_pubkey() { let mut stp = runtime .block_on(bob_output_manager.prepare_transaction_to_send( + OsRng.next_u64(), MicroTari::from(5000), MicroTari::from(25), None, @@ -1565,6 +1568,7 @@ fn finalize_tx_with_missing_output() { let mut stp = runtime .block_on(bob_output_manager.prepare_transaction_to_send( + OsRng.next_u64(), MicroTari::from(5000), MicroTari::from(20), None, diff --git a/base_layer/wallet/tests/transaction_service/storage.rs 
b/base_layer/wallet/tests/transaction_service/storage.rs index 911bad2c8d..6ea420e6a4 100644 --- a/base_layer/wallet/tests/transaction_service/storage.rs +++ b/base_layer/wallet/tests/transaction_service/storage.rs @@ -331,38 +331,6 @@ pub fn test_db_backend(backend: T) { panic!("Should have found completed tx"); } - if cfg!(feature = "test_harness") { - let retrieved_completed_txs = runtime.block_on(db.get_completed_transactions()).unwrap(); - assert!(retrieved_completed_txs.contains_key(&completed_txs[0].tx_id)); - assert_eq!( - retrieved_completed_txs.get(&completed_txs[0].tx_id).unwrap().status, - TransactionStatus::Completed - ); - #[cfg(feature = "test_harness")] - runtime - .block_on(db.broadcast_completed_transaction(completed_txs[0].tx_id)) - .unwrap(); - let retrieved_completed_txs = runtime.block_on(db.get_completed_transactions()).unwrap(); - - assert!(retrieved_completed_txs.contains_key(&completed_txs[0].tx_id)); - assert_eq!( - retrieved_completed_txs.get(&completed_txs[0].tx_id).unwrap().status, - TransactionStatus::Broadcast - ); - - #[cfg(feature = "test_harness")] - runtime - .block_on(db.mine_completed_transaction(completed_txs[0].tx_id)) - .unwrap(); - let retrieved_completed_txs = runtime.block_on(db.get_completed_transactions()).unwrap(); - - assert!(retrieved_completed_txs.contains_key(&completed_txs[0].tx_id)); - assert_eq!( - retrieved_completed_txs.get(&completed_txs[0].tx_id).unwrap().status, - TransactionStatus::MinedUnconfirmed - ); - } - let completed_txs = runtime.block_on(db.get_completed_transactions()).unwrap(); let num_completed_txs = completed_txs.len(); assert_eq!( diff --git a/base_layer/wallet/tests/wallet/mod.rs b/base_layer/wallet/tests/wallet/mod.rs index adb3c3b4a3..6aa231b09c 100644 --- a/base_layer/wallet/tests/wallet/mod.rs +++ b/base_layer/wallet/tests/wallet/mod.rs @@ -504,6 +504,8 @@ fn test_20_store_and_forward_send_tx() { } #[test] +#[ignore = "Flakey on CI, theory is that it is due to SAF neighbourhoods. 
Retry after Kademlia style neighbourhoods \ + are included"] fn test_store_and_forward_send_tx() { let mut shutdown_a = Shutdown::new(); let mut shutdown_b = Shutdown::new(); @@ -755,95 +757,6 @@ async fn test_import_utxo() { assert_eq!(stored_utxo, utxo); } -#[cfg(feature = "test_harness")] -#[tokio_macros::test] -async fn test_data_generation() { - let mut shutdown = Shutdown::new(); - use tari_wallet::testnet_utils::generate_wallet_test_data; - let factories = CryptoFactories::default(); - let node_id = NodeIdentity::random(&mut OsRng, get_next_memory_address(), PeerFeatures::COMMUNICATION_NODE); - let temp_dir = tempdir().unwrap(); - let comms_config = CommsConfig { - network: Network::Weatherwax, - node_identity: Arc::new(node_id.clone()), - transport_type: TransportType::Memory { - listener_address: node_id.public_address(), - }, - auxilary_tcp_listener_address: None, - datastore_path: temp_dir.path().to_path_buf(), - peer_database_name: random::string(8), - max_concurrent_inbound_tasks: 100, - outbound_buffer_size: 100, - dht: DhtConfig { - discovery_request_timeout: Duration::from_millis(500), - allow_test_addresses: true, - ..Default::default() - }, - allow_test_addresses: true, - listener_liveness_allowlist_cidrs: Vec::new(), - listener_liveness_max_sessions: 0, - user_agent: "tari/test-wallet".to_string(), - dns_seeds_name_server: DEFAULT_DNS_NAME_SERVER.parse().unwrap(), - peer_seeds: Default::default(), - dns_seeds: Default::default(), - dns_seeds_use_dnssec: false, - }; - - let config = WalletConfig::new( - comms_config, - factories, - None, - None, - Network::Weatherwax.into(), - None, - None, - None, - None, - ); - - let (db, transaction_backend, oms_backend, contacts_backend, _temp_dir) = make_wallet_databases(None); - - let metadata = ChainMetadata::new(std::u64::MAX, Vec::new(), 0, 0, 0); - - db.write(WriteOperation::Insert(DbKeyValuePair::BaseNodeChainMetadata(metadata))) - .unwrap(); - - let mut wallet = Wallet::start( - config, - 
WalletDatabase::new(db), - transaction_backend.clone(), - oms_backend, - contacts_backend, - shutdown.to_signal(), - None, - ) - .await - .unwrap(); - - generate_wallet_test_data(&mut wallet, temp_dir.path(), transaction_backend) - .await - .unwrap(); - - let contacts = wallet.contacts_service.get_contacts().await.unwrap(); - assert!(!contacts.is_empty()); - - let balance = wallet.output_manager_service.get_balance().await.unwrap(); - assert!(balance.available_balance > MicroTari::from(0)); - - // TODO Put this back when the new comms goes in and we use the new Event bus - // let outbound_tx = wallet - // .runtime - // .block_on(wallet.transaction_service.get_pending_outbound_transactions()) - // .unwrap(); - // assert!(outbound_tx.len() > 0); - - let completed_tx = wallet.transaction_service.get_completed_transactions().await.unwrap(); - assert!(!completed_tx.is_empty()); - - shutdown.trigger().unwrap(); - wallet.wait_until_shutdown().await; -} - #[test] fn test_db_file_locking() { let db_tempdir = tempdir().unwrap(); diff --git a/base_layer/wallet_ffi/Cargo.toml b/base_layer/wallet_ffi/Cargo.toml index b89b6d31e8..d7381c0d8e 100644 --- a/base_layer/wallet_ffi/Cargo.toml +++ b/base_layer/wallet_ffi/Cargo.toml @@ -3,7 +3,7 @@ name = "tari_wallet_ffi" authors = ["The Tari Development Community"] description = "Tari cryptocurrency wallet C FFI bindings" license = "BSD-3-Clause" -version = "0.17.1" +version = "0.17.4" edition = "2018" [dependencies] @@ -12,7 +12,7 @@ tari_comms_dht = { version = "^0.9", path = "../../comms/dht", default-features tari_crypto = "0.11.1" tari_key_manager = { version = "^0.9", path = "../key_manager" } tari_p2p = { version = "^0.9", path = "../p2p" } -tari_wallet = { version = "^0.9", path = "../wallet", features = ["test_harness", "c_integration"]} +tari_wallet = { version = "^0.9", path = "../wallet", features = ["c_integration"]} tari_shutdown = { version = "^0.9", path = "../../infrastructure/shutdown" } tari_utilities = "^0.3" @@ 
-23,7 +23,7 @@ rand = "0.8" chrono = { version = "0.4.6", features = ["serde"]} thiserror = "1.0.20" log = "0.4.6" -log4rs = {version = "0.8.3", features = ["console_appender", "file_appender", "file", "yaml_format"]} +log4rs = {version = "1.0.0", features = ["console_appender", "file_appender", "yaml_format"]} [dependencies.tari_core] path = "../../base_layer/core" diff --git a/base_layer/wallet_ffi/ios.config b/base_layer/wallet_ffi/ios.config new file mode 100644 index 0000000000..5153be7ab5 --- /dev/null +++ b/base_layer/wallet_ffi/ios.config @@ -0,0 +1,10 @@ +BUILD_ANDROID=0 +BUILD_IOS=1 +SQLITE_SOURCE=https://www.sqlite.org/snapshot/sqlite-snapshot-201911192122.tar.gz +NDK_PATH= +PKG_PATH=/usr/local/opt/openssl/lib/pkgconfig +ANDROID_WALLET_PATH= +IOS_WALLET_PATH=$GITHUB_WORKSPACE +TARI_REPO_PATH=$GITHUB_WORKSPACE +# Run cargo clean before the build starts +CARGO_CLEAN=1 \ No newline at end of file diff --git a/base_layer/wallet_ffi/src/error.rs b/base_layer/wallet_ffi/src/error.rs index abf4ff2cc7..8c86a2b355 100644 --- a/base_layer/wallet_ffi/src/error.rs +++ b/base_layer/wallet_ffi/src/error.rs @@ -33,6 +33,7 @@ use tari_wallet::{ output_manager_service::error::{OutputManagerError, OutputManagerStorageError}, transaction_service::error::{TransactionServiceError, TransactionStorageError}, }; + use thiserror::Error; const LOG_TARGET: &str = "wallet_ffi::error"; @@ -47,6 +48,8 @@ pub enum InterfaceError { PositionInvalidError, #[error("An error has occurred when trying to create the tokio runtime: `{0}`")] TokioError(String), + #[error("An error has occurred when trying to select network: `{0}`")] + NetworkError(String), #[error("Emoji ID is invalid")] InvalidEmojiId, } @@ -79,6 +82,10 @@ impl From for LibWalletError { code: 4, message: format!("{:?}", v), }, + InterfaceError::NetworkError(_) => Self { + code: 5, + message: format!("{:?}", v), + }, InterfaceError::InvalidEmojiId => Self { code: 6, message: format!("{:?}", v), diff --git 
a/base_layer/wallet_ffi/src/lib.rs b/base_layer/wallet_ffi/src/lib.rs index 62ed8fd9e5..d8b01d9b69 100644 --- a/base_layer/wallet_ffi/src/lib.rs +++ b/base_layer/wallet_ffi/src/lib.rs @@ -42,17 +42,6 @@ //! This documentation will described the flows of the core tasks that the Wallet library supports and will then //! describe how to use the test functions to produce the behaviour of a second wallet without needing to set one up. //! -//! ## Generate Test Data -//! The `generate_wallet_test_data(...)` function will generate some test data in the wallet. The data generated will be -//! as follows: -//! -//! - Some Contacts -//! - Add outputs to the wallet that make up its Available Balance that can be spent -//! - Create transaction history -//! - Pending Inbound Transactions -//! - Pending Outbound Transactions -//! - Completed Transactions -//! //! ## Send Transaction //! To send a transaction your wallet must have available funds and you must had added the recipient's Public Key as a //! `Contact`. 
@@ -152,6 +141,7 @@ use std::{ ffi::{CStr, CString}, path::PathBuf, slice, + str::FromStr, sync::Arc, time::Duration, }; @@ -160,16 +150,25 @@ use tari_comms::{ peer_manager::{NodeIdentity, PeerFeatures}, socks, tor, + transports::MemoryTransport, types::CommsSecretKey, }; use tari_comms_dht::{DbConnectionUrl, DhtConfig}; -use tari_core::transactions::{tari_amount::MicroTari, transaction::OutputFeatures, types::CryptoFactories}; +use tari_core::transactions::{ + tari_amount::MicroTari, + transaction::OutputFeatures, + types::{ComSignature, CryptoFactories, PublicKey}, +}; use tari_crypto::{ - keys::{PublicKey, SecretKey}, - script::ExecutionStack, + inputs, + keys::{PublicKey as PublicKeyTrait, SecretKey}, + script, tari_utilities::ByteArray, }; -use tari_p2p::transport::{TorConfig, TransportType, TransportType::Tor}; +use tari_p2p::{ + transport::{TorConfig, TransportType, TransportType::Tor}, + Network, +}; use tari_shutdown::Shutdown; use tari_utilities::{hex, hex::Hex}; use tari_wallet::{ @@ -181,15 +180,6 @@ use tari_wallet::{ sqlite_db::WalletSqliteDatabase, sqlite_utilities::{initialize_sqlite_database_backends, partial_wallet_backup}, }, - testnet_utils::{ - broadcast_transaction, - complete_sent_transaction, - finalize_received_transaction, - generate_wallet_test_data, - get_next_memory_address, - mine_transaction, - receive_test_transaction, - }, transaction_service::{ config::TransactionServiceConfig, error::TransactionServiceError, @@ -204,18 +194,11 @@ use tari_wallet::{ }, }, }, - util::emoji::{emoji_set, EmojiId}, + types::ValidationRetryStrategy, + util::emoji::{emoji_set, EmojiId, EmojiIdError}, + utxo_scanner_service::utxo_scanning::{UtxoScannerService, RECOVERY_KEY}, Wallet, WalletConfig, -}; - -use tari_core::transactions::types::ComSignature; -use tari_crypto::script::TariScript; -use tari_p2p::Network; -use tari_wallet::{ - types::ValidationRetryStrategy, - util::emoji::EmojiIdError, - utxo_scanner_service::utxo_scanning::UtxoScannerService, 
WalletSqlite, }; use tokio::runtime::Runtime; @@ -2379,9 +2362,11 @@ pub unsafe extern "C" fn pending_inbound_transaction_destroy(transaction: *mut T /// leak #[no_mangle] pub unsafe extern "C" fn transport_memory_create() -> *mut TariTransportType { - let transport = TariTransportType::Memory { - listener_address: get_next_memory_address(), - }; + let port = MemoryTransport::acquire_next_memsocket_port(); + let listener_address: Multiaddr = format!("/memory/{}", port) + .parse() + .expect("Should be able to create memory address"); + let transport = TariTransportType::Memory { listener_address }; Box::into_raw(Box::new(transport)) } @@ -2564,6 +2549,7 @@ pub unsafe extern "C" fn transport_type_destroy(transport: *mut TariTransportTyp /// `database_path` - The database path char array pointer which. This is the folder path where the /// database files will be created and the application has write access to /// `discovery_timeout_in_secs`: specify how long the Discovery Timeout for the wallet is. +/// `network`: name of network to connect to. Valid values are: ridcully, stibbons, weatherwax, localnet, mainnet /// `error_out` - Pointer to an int which will be modified to an error code should one occur, may not be null. Functions /// as an out parameter. 
/// @@ -2581,6 +2567,7 @@ pub unsafe extern "C" fn comms_config_create( datastore_path: *const c_char, discovery_timeout_in_secs: c_ulonglong, saf_message_duration_in_secs: c_ulonglong, + network: *const c_char, error_out: *mut c_int, ) -> *mut TariCommsConfig { let mut error = 0; @@ -2613,49 +2600,76 @@ pub unsafe extern "C" fn comms_config_create( } let datastore_path = PathBuf::from(datastore_path_string); + if transport_type.is_null() { + error = LibWalletError::from(InterfaceError::NullError("transport_type".to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return ptr::null_mut(); + } + let dht_database_path = datastore_path.join("dht.db"); let public_address = public_address_str.parse::(); - match public_address { - Ok(public_address) => { - let ni = NodeIdentity::new( - CommsSecretKey::default(), - public_address, - PeerFeatures::COMMUNICATION_CLIENT, - ); - let config = TariCommsConfig { - network: Network::Weatherwax, - node_identity: Arc::new(ni), - transport_type: (*transport_type).clone(), - auxilary_tcp_listener_address: None, - datastore_path, - peer_database_name: database_name_string, - max_concurrent_inbound_tasks: 100, - outbound_buffer_size: 100, - dht: DhtConfig { - discovery_request_timeout: Duration::from_secs(discovery_timeout_in_secs), - database_url: DbConnectionUrl::File(dht_database_path), - auto_join: true, - saf_msg_validity: Duration::from_secs(saf_message_duration_in_secs), - ..Default::default() - }, - // TODO: This should be set to false for non-test wallets. See the `allow_test_addresses` field - // docstring for more info. 
- allow_test_addresses: true, - listener_liveness_allowlist_cidrs: Vec::new(), - listener_liveness_max_sessions: 0, - user_agent: format!("tari/wallet/{}", env!("CARGO_PKG_VERSION")), - dns_seeds_name_server: "1.1.1.1:53".parse().unwrap(), - peer_seeds: Default::default(), - dns_seeds: Default::default(), - dns_seeds_use_dnssec: true, - }; + let network_str; + if !network.is_null() { + network_str = CStr::from_ptr(network).to_str().unwrap().to_owned(); + } else { + error = LibWalletError::from(InterfaceError::NullError("network".to_string())).code; + ptr::swap(error_out, &mut error as *mut c_int); + return ptr::null_mut(); + } + + let selected_network = Network::from_str(&network_str); + + match selected_network { + Ok(selected_network) => { + match public_address { + Ok(public_address) => { + let ni = NodeIdentity::new( + CommsSecretKey::default(), + public_address, + PeerFeatures::COMMUNICATION_CLIENT, + ); - Box::into_raw(Box::new(config)) + let config = TariCommsConfig { + network: selected_network, + node_identity: Arc::new(ni), + transport_type: (*transport_type).clone(), + auxilary_tcp_listener_address: None, + datastore_path, + peer_database_name: database_name_string, + max_concurrent_inbound_tasks: 100, + outbound_buffer_size: 100, + dht: DhtConfig { + discovery_request_timeout: Duration::from_secs(discovery_timeout_in_secs), + database_url: DbConnectionUrl::File(dht_database_path), + auto_join: true, + saf_msg_validity: Duration::from_secs(saf_message_duration_in_secs), + ..Default::default() + }, + // TODO: This should be set to false for non-test wallets. See the `allow_test_addresses` field + // docstring for more info. 
+ allow_test_addresses: true, + listener_liveness_allowlist_cidrs: Vec::new(), + listener_liveness_max_sessions: 0, + user_agent: format!("tari/wallet/{}", env!("CARGO_PKG_VERSION")), + dns_seeds_name_server: "1.1.1.1:53".parse().unwrap(), + peer_seeds: Default::default(), + dns_seeds: Default::default(), + dns_seeds_use_dnssec: true, + }; + + Box::into_raw(Box::new(config)) + }, + Err(e) => { + error = LibWalletError::from(e).code; + ptr::swap(error_out, &mut error as *mut c_int); + ptr::null_mut() + }, + } }, - Err(e) => { - error = LibWalletError::from(e).code; + Err(_) => { + error = LibWalletError::from(InterfaceError::NetworkError(network_str)).code; ptr::swap(error_out, &mut error as *mut c_int); ptr::null_mut() }, @@ -2778,6 +2792,8 @@ unsafe fn init_logging(log_path: *const c_char, num_rolling_log_files: c_uint, s /// `callback_saf_message_received` - The callback function pointer that will be called when the Dht has determined that /// is has connected to enough of its neighbours to be confident that it has received any SAF messages that were waiting /// for it. +/// `recovery_in_progress` - Pointer to an bool which will be modified to indicate if there is an outstanding recovery +/// that should be completed or not to an error code should one occur, may not be null. Functions as an out parameter. /// `error_out` - Pointer to an int which will be modified /// to an error code should one occur, may not be null. Functions as an out parameter. 
/// ## Returns @@ -2809,6 +2825,7 @@ pub unsafe extern "C" fn wallet_create( callback_invalid_txo_validation_complete: unsafe extern "C" fn(u64, u8), callback_transaction_validation_complete: unsafe extern "C" fn(u64, u8), callback_saf_messages_received: unsafe extern "C" fn(), + recovery_in_progress: *mut bool, error_out: *mut c_int, ) -> *mut TariWallet { use tari_key_manager::mnemonic::Mnemonic; @@ -2908,6 +2925,13 @@ pub unsafe extern "C" fn wallet_create( None, ); + let mut recovery_lookup = match runtime.block_on(wallet_database.get_client_key_value(RECOVERY_KEY.to_owned())) { + Err(_) => false, + Ok(None) => false, + Ok(Some(_)) => true, + }; + ptr::swap(recovery_in_progress, &mut recovery_lookup as *mut bool); + w = runtime.block_on(Wallet::start( wallet_config, wallet_database, @@ -3111,267 +3135,6 @@ pub unsafe extern "C" fn wallet_verify_message_signature( result } -/// This function will generate some test data in the wallet. The data generated will be -/// as follows: -/// -/// - Some Contacts -/// - Add outputs to the wallet that make up its Available Balance that can be spent -/// - Create transaction history -/// - Pending Inbound Transactions -/// - Pending Outbound Transactions -/// - Completed Transactions -/// -/// ## Arguments -/// `wallet` - The TariWallet pointer -/// `error_out` - Pointer to an int which will be modified to an error code should one occur, may not be null. Functions -/// as an out parameter. 
-/// -/// ## Returns -/// `bool` - Returns if successful or not -/// -/// # Safety -/// None -#[no_mangle] -pub unsafe extern "C" fn wallet_test_generate_data( - wallet: *mut TariWallet, - datastore_path: *const c_char, - error_out: *mut c_int, -) -> bool { - let mut error = 0; - ptr::swap(error_out, &mut error as *mut c_int); - if wallet.is_null() { - error = LibWalletError::from(InterfaceError::NullError("wallet".to_string())).code; - ptr::swap(error_out, &mut error as *mut c_int); - return false; - } - let datastore_path_string; - if !datastore_path.is_null() { - datastore_path_string = CStr::from_ptr(datastore_path).to_str().unwrap().to_owned(); - } else { - error = LibWalletError::from(InterfaceError::NullError("datastore_path".to_string())).code; - ptr::swap(error_out, &mut error as *mut c_int); - return false; - } - - match (*wallet).runtime.block_on(generate_wallet_test_data( - &mut (*wallet).wallet, - datastore_path_string.as_str(), - (*wallet).wallet.transaction_backend.clone(), - )) { - Ok(_) => true, - Err(e) => { - error = LibWalletError::from(e).code; - ptr::swap(error_out, &mut error as *mut c_int); - false - }, - } -} - -/// This function simulates an external `TariWallet` sending a transaction to this `TariWallet` -/// which will become a `TariPendingInboundTransaction` -/// -/// ## Arguments -/// `wallet` - The TariWallet pointer -/// `error_out` - Pointer to an int which will be modified to an error code should one occur, may not be null. Functions -/// as an out parameter. 
-/// -/// ## Returns -/// `bool` - Returns if successful or not -/// -/// # Safety -/// None -#[no_mangle] -pub unsafe extern "C" fn wallet_test_receive_transaction(wallet: *mut TariWallet, error_out: *mut c_int) -> bool { - let mut error = 0; - ptr::swap(error_out, &mut error as *mut c_int); - if wallet.is_null() { - error = LibWalletError::from(InterfaceError::NullError("wallet".to_string())).code; - ptr::swap(error_out, &mut error as *mut c_int); - return false; - } - let handle = (*wallet).runtime.handle(); - match (*wallet) - .runtime - .block_on(receive_test_transaction(&mut (*wallet).wallet, handle)) - { - Ok(_) => true, - Err(e) => { - error = LibWalletError::from(e).code; - ptr::swap(error_out, &mut error as *mut c_int); - false - }, - } -} - -/// This function simulates a receiver accepting and replying to a `TariPendingOutboundTransaction`. -/// This results in that transaction being "completed" and it's status set to `Broadcast` which -/// indicated it is in a base_layer mempool. -/// -/// ## Arguments -/// `wallet` - The TariWallet pointer -/// `tx` - The TariPendingOutboundTransaction -/// `error_out` - Pointer to an int which will be modified to an error code should one occur, may not be null. Functions -/// as an out parameter. 
-/// -/// ## Returns -/// `bool` - Returns if successful or not -/// -/// # Safety -/// None -#[no_mangle] -pub unsafe extern "C" fn wallet_test_complete_sent_transaction( - wallet: *mut TariWallet, - tx: *mut TariPendingOutboundTransaction, - error_out: *mut c_int, -) -> bool { - let mut error = 0; - ptr::swap(error_out, &mut error as *mut c_int); - if wallet.is_null() { - error = LibWalletError::from(InterfaceError::NullError("wallet".to_string())).code; - ptr::swap(error_out, &mut error as *mut c_int); - return false; - } - if tx.is_null() { - error = LibWalletError::from(InterfaceError::NullError("tx".to_string())).code; - ptr::swap(error_out, &mut error as *mut c_int); - return false; - } - match (*wallet) - .runtime - .block_on(complete_sent_transaction(&mut (*wallet).wallet, (*tx).tx_id)) - { - Ok(_) => true, - Err(e) => { - error = LibWalletError::from(e).code; - ptr::swap(error_out, &mut error as *mut c_int); - false - }, - } -} - -/// This function will simulate the process when a completed transaction is broadcast to -/// the base layer mempool. The function will update the status of the completed transaction -/// -/// ## Arguments -/// `wallet` - The TariWallet pointer -/// `tx` - The pending inbound transaction to operate on -/// `error_out` - Pointer to an int which will be modified to an error code should one occur, may not be null. Functions -/// as an out parameter. 
-/// -/// ## Returns -/// `bool` - Returns if successful or not -/// -/// # Safety -/// None -#[no_mangle] -pub unsafe extern "C" fn wallet_test_finalize_received_transaction( - wallet: *mut TariWallet, - tx: *mut TariPendingInboundTransaction, - error_out: *mut c_int, -) -> bool { - let mut error = 0; - ptr::swap(error_out, &mut error as *mut c_int); - if wallet.is_null() { - error = LibWalletError::from(InterfaceError::NullError("wallet".to_string())).code; - ptr::swap(error_out, &mut error as *mut c_int); - return false; - } - - match (*wallet) - .runtime - .block_on(finalize_received_transaction(&mut (*wallet).wallet, (*tx).tx_id)) - { - Ok(_) => true, - Err(e) => { - error = LibWalletError::from(e).code; - ptr::swap(error_out, &mut error as *mut c_int); - false - }, - } -} - -/// This function will simulate the process when a completed transaction is broadcast to -/// the base layer mempool. The function will update the status of the completed transaction -/// -/// ## Arguments -/// `wallet` - The TariWallet pointer -/// `tx_id` - The transaction id to operate on -/// `error_out` - Pointer to an int which will be modified to an error code should one occur, may not be null. Functions -/// as an out parameter. 
-/// -/// ## Returns -/// `bool` - Returns if successful or not -/// -/// # Safety -/// None -#[no_mangle] -pub unsafe extern "C" fn wallet_test_broadcast_transaction( - wallet: *mut TariWallet, - tx_id: c_ulonglong, - error_out: *mut c_int, -) -> bool { - let mut error = 0; - ptr::swap(error_out, &mut error as *mut c_int); - if wallet.is_null() { - error = LibWalletError::from(InterfaceError::NullError("wallet".to_string())).code; - ptr::swap(error_out, &mut error as *mut c_int); - return false; - } - - match (*wallet) - .runtime - .block_on(broadcast_transaction(&mut (*wallet).wallet, tx_id)) - { - Ok(_) => true, - Err(e) => { - error = LibWalletError::from(e).code; - ptr::swap(error_out, &mut error as *mut c_int); - false - }, - } -} - -/// This function will simulate the process when a completed transaction is detected as mined on -/// the base layer. The function will update the status of the completed transaction AND complete -/// the transaction on the Output Manager Service which will update the status of the outputs -/// -/// ## Arguments -/// `wallet` - The TariWallet pointer -/// `tx_id` - The transaction id to operate on -/// `error_out` - Pointer to an int which will be modified to an error code should one occur, may not be null. Functions -/// as an out parameter. 
-/// -/// ## Returns -/// `bool` - Returns if successful or not -/// -/// # Safety -/// None -#[no_mangle] -pub unsafe extern "C" fn wallet_test_mine_transaction( - wallet: *mut TariWallet, - tx_id: c_ulonglong, - error_out: *mut c_int, -) -> bool { - let mut error = 0; - ptr::swap(error_out, &mut error as *mut c_int); - if wallet.is_null() { - error = LibWalletError::from(InterfaceError::NullError("wallet".to_string())).code; - ptr::swap(error_out, &mut error as *mut c_int); - return false; - } - match (*wallet) - .runtime - .block_on(mine_transaction(&mut (*wallet).wallet, tx_id)) - { - Ok(_) => true, - Err(e) => { - error = LibWalletError::from(e).code; - ptr::swap(error_out, &mut error as *mut c_int); - false - }, - } -} - /// Adds a base node peer to the TariWallet /// /// ## Arguments @@ -4523,19 +4286,17 @@ pub unsafe extern "C" fn wallet_import_utxo( CString::new("Imported UTXO").unwrap().to_str().unwrap().to_owned() }; + let public_script_key = PublicKey::from_secret_key(&(*spending_key)); match (*wallet).runtime.block_on((*wallet).wallet.import_utxo( MicroTari::from(amount), &(*spending_key).clone(), - TariScript::default(), - ExecutionStack::default(), + script!(Nop), + inputs!(public_script_key), &(*source_public_key).clone(), OutputFeatures::default(), message_string, - // TODO: Add the actual metadata signature here. ComSignature::default(), - // TODO:Add the actual script private key here. - &Default::default(), - // TODO:Add the actual script offset public keys here. 
+ &(*spending_key).clone(), &Default::default(), )) { Ok(tx_id) => tx_id, @@ -5552,14 +5313,11 @@ mod test { path::Path, str::{from_utf8, FromStr}, sync::Mutex, - thread, }; - use tari_core::transactions::{fee::Fee, tari_amount::uT, types::PrivateKey}; - use tari_key_manager::mnemonic::Mnemonic; use tari_test_utils::random; use tari_wallet::{ storage::sqlite_utilities::run_migration_and_create_sqlite_connection, - transaction_service::{config::TransactionServiceConfig, storage::models::TransactionStatus}, + transaction_service::storage::models::TransactionStatus, util::emoji, }; use tempfile::tempdir; @@ -5603,22 +5361,6 @@ mod test { callback_transaction_validation_complete: false, } } - - fn reset(&mut self) { - self.received_tx_callback_called = false; - self.received_tx_reply_callback_called = false; - self.received_finalized_tx_callback_called = false; - self.broadcast_tx_callback_called = false; - self.mined_tx_callback_called = false; - self.mined_tx_unconfirmed_callback_called = false; - self.direct_send_callback_called = false; - self.store_and_forward_send_callback_called = false; - self.tx_cancellation_callback_called = false; - self.callback_utxo_validation_complete = false; - self.callback_stxo_validation_complete = false; - self.callback_invalid_txo_validation_complete = false; - self.callback_transaction_validation_complete = false; - } } lazy_static! 
{ @@ -5739,104 +5481,7 @@ mod test { // assert!(true); //optimized out by compiler } - unsafe extern "C" fn received_tx_callback_bob(tx: *mut TariPendingInboundTransaction) { - assert!(!tx.is_null()); - assert_eq!( - type_of((*tx).clone()), - std::any::type_name::() - ); - pending_inbound_transaction_destroy(tx); - } - - unsafe extern "C" fn received_tx_reply_callback_bob(tx: *mut TariCompletedTransaction) { - assert!(!tx.is_null()); - assert_eq!( - type_of((*tx).clone()), - std::any::type_name::() - ); - assert_eq!((*tx).status, TransactionStatus::Completed); - completed_transaction_destroy(tx); - } - - unsafe extern "C" fn received_tx_finalized_callback_bob(tx: *mut TariCompletedTransaction) { - assert!(!tx.is_null()); - assert_eq!( - type_of((*tx).clone()), - std::any::type_name::() - ); - assert_eq!((*tx).status, TransactionStatus::Completed); - completed_transaction_destroy(tx); - } - - unsafe extern "C" fn broadcast_callback_bob(tx: *mut TariCompletedTransaction) { - assert!(!tx.is_null()); - assert_eq!( - type_of((*tx).clone()), - std::any::type_name::() - ); - assert_eq!((*tx).status, TransactionStatus::Broadcast); - completed_transaction_destroy(tx); - } - - unsafe extern "C" fn mined_callback_bob(tx: *mut TariCompletedTransaction) { - assert!(!tx.is_null()); - assert_eq!( - type_of((*tx).clone()), - std::any::type_name::() - ); - assert_eq!((*tx).status, TransactionStatus::MinedUnconfirmed); - completed_transaction_destroy(tx); - } - - unsafe extern "C" fn mined_unconfirmed_callback_bob(tx: *mut TariCompletedTransaction, _confirmations: u64) { - assert!(!tx.is_null()); - assert_eq!( - type_of((*tx).clone()), - std::any::type_name::() - ); - assert_eq!((*tx).status, TransactionStatus::MinedUnconfirmed); - let mut lock = CALLBACK_STATE_FFI.lock().unwrap(); - lock.mined_tx_unconfirmed_callback_called = true; - drop(lock); - completed_transaction_destroy(tx); - } - - unsafe extern "C" fn direct_send_callback_bob(_tx_id: c_ulonglong, _result: bool) { - // 
assert!(true); //optimized out by compiler - } - - unsafe extern "C" fn store_and_forward_send_callback_bob(_tx_id: c_ulonglong, _result: bool) { - // assert!(true); //optimized out by compiler - } - - unsafe extern "C" fn tx_cancellation_callback_bob(tx: *mut TariCompletedTransaction) { - assert!(!tx.is_null()); - assert_eq!( - type_of((*tx).clone()), - std::any::type_name::() - ); - completed_transaction_destroy(tx); - } - - unsafe extern "C" fn utxo_validation_complete_callback_bob(_tx_id: c_ulonglong, _result: u8) { - // assert!(true); //optimized out by compiler - } - - unsafe extern "C" fn stxo_validation_complete_callback_bob(_tx_id: c_ulonglong, _result: u8) { - // assert!(true); //optimized out by compiler - } - - unsafe extern "C" fn invalid_txo_validation_complete_callback_bob(_tx_id: c_ulonglong, _result: u8) { - // assert!(true); //optimized out by compiler - } - - unsafe extern "C" fn transaction_validation_complete_callback_bob(_tx_id: c_ulonglong, _result: u8) { - // assert!(true); //optimized out by compiler - } - - unsafe extern "C" fn saf_messages_received_callback_bob() { - // assert!(true); //optimized out by compiler - } + const NETWORK_STRING: &str = "weatherwax"; #[test] fn test_bytevector() { @@ -6097,575 +5742,13 @@ mod test { } } - #[test] - fn test_wallet_ffi() { - unsafe { - { - let mut lock = CALLBACK_STATE_FFI.lock().unwrap(); - lock.reset(); - } - let mut error = 0; - let error_ptr = &mut error as *mut c_int; - let secret_key_alice = private_key_generate(); - let public_key_alice = public_key_from_private_key(secret_key_alice, error_ptr); - let db_name_alice = CString::new(random::string(8).as_str()).unwrap(); - let db_name_alice_str: *const c_char = CString::into_raw(db_name_alice) as *const c_char; - let alice_temp_dir = tempdir().unwrap(); - let db_path_alice = CString::new(alice_temp_dir.path().to_str().unwrap()).unwrap(); - let db_path_alice_str: *const c_char = CString::into_raw(db_path_alice) as *const c_char; - let 
transport_type_alice = transport_memory_create(); - let address_alice = transport_memory_get_address(transport_type_alice, error_ptr); - let address_alice_str = CStr::from_ptr(address_alice).to_str().unwrap().to_owned(); - let address_alice_str: *const c_char = CString::new(address_alice_str).unwrap().into_raw() as *const c_char; - - let alice_log_path = - CString::new(format!("{}{}", alice_temp_dir.path().to_str().unwrap(), "/test.log")).unwrap(); - let alice_log_path_str: *const c_char = CString::into_raw(alice_log_path) as *const c_char; - - let alice_config = comms_config_create( - address_alice_str, - transport_type_alice, - db_name_alice_str, - db_path_alice_str, - 20, - 10800, - error_ptr, - ); - - let alice_wallet = wallet_create( - alice_config, - alice_log_path_str, - 2, - 10000, - ptr::null(), - ptr::null(), - received_tx_callback, - received_tx_reply_callback, - received_tx_finalized_callback, - broadcast_callback, - mined_callback, - mined_unconfirmed_callback, - direct_send_callback, - store_and_forward_send_callback, - tx_cancellation_callback, - utxo_validation_complete_callback, - stxo_validation_complete_callback, - invalid_txo_validation_complete_callback, - transaction_validation_complete_callback, - saf_messages_received_callback, - error_ptr, - ); - let secret_key_bob = private_key_generate(); - let public_key_bob = public_key_from_private_key(secret_key_bob, error_ptr); - let db_name_bob = CString::new(random::string(8).as_str()).unwrap(); - let db_name_bob_str: *const c_char = CString::into_raw(db_name_bob) as *const c_char; - let bob_temp_dir = tempdir().unwrap(); - let db_path_bob = CString::new(bob_temp_dir.path().to_str().unwrap()).unwrap(); - let db_path_bob_str: *const c_char = CString::into_raw(db_path_bob) as *const c_char; - let transport_type_bob = transport_memory_create(); - let address_bob = transport_memory_get_address(transport_type_bob, error_ptr); - let address_bob_str = 
CStr::from_ptr(address_bob).to_str().unwrap().to_owned(); - let address_bob_str: *const c_char = CString::new(address_bob_str).unwrap().into_raw() as *const c_char; - let bob_config = comms_config_create( - address_bob_str, - transport_type_bob, - db_name_bob_str, - db_path_bob_str, - 20, - 10800, - error_ptr, - ); - - let bob_log_path = - CString::new(format!("{}{}", bob_temp_dir.path().to_str().unwrap(), "/test.log")).unwrap(); - let bob_log_path_str: *const c_char = CString::into_raw(bob_log_path) as *const c_char; - - let bob_wallet = wallet_create( - bob_config, - bob_log_path_str, - 0, - 0, - ptr::null(), - ptr::null(), - received_tx_callback_bob, - received_tx_reply_callback_bob, - received_tx_finalized_callback_bob, - broadcast_callback_bob, - mined_callback_bob, - mined_unconfirmed_callback_bob, - direct_send_callback_bob, - store_and_forward_send_callback_bob, - tx_cancellation_callback_bob, - utxo_validation_complete_callback_bob, - stxo_validation_complete_callback_bob, - invalid_txo_validation_complete_callback_bob, - transaction_validation_complete_callback_bob, - saf_messages_received_callback_bob, - error_ptr, - ); - - let sig_msg = CString::new("Test Contact").unwrap(); - let sig_msg_str: *const c_char = CString::into_raw(sig_msg) as *const c_char; - let sig_msg_compare = CString::new("Test Contact").unwrap(); - let sig_msg_compare_str: *const c_char = CString::into_raw(sig_msg_compare) as *const c_char; - let sig_nonce_str: *mut c_char = wallet_sign_message(alice_wallet, sig_msg_str, error_ptr) as *mut c_char; - let alice_wallet_key = wallet_get_public_key(alice_wallet, error_ptr); - let verify_msg = wallet_verify_message_signature( - alice_wallet, - alice_wallet_key, - sig_nonce_str, - sig_msg_compare_str, - error_ptr, - ); - assert!(verify_msg); - - let test_contact_private_key = private_key_generate(); - let test_contact_public_key = public_key_from_private_key(test_contact_private_key, error_ptr); - let test_contact_str = CString::new("Test 
Contact").unwrap(); - let test_contact_alias: *const c_char = CString::into_raw(test_contact_str) as *const c_char; - let test_contact = contact_create(test_contact_alias, test_contact_public_key, error_ptr); - let contact_added = wallet_upsert_contact(alice_wallet, test_contact, error_ptr); - assert!(contact_added); - let contact_removed = wallet_remove_contact(alice_wallet, test_contact, error_ptr); - assert!(contact_removed); - contact_destroy(test_contact); - public_key_destroy(test_contact_public_key); - private_key_destroy(test_contact_private_key); - string_destroy(test_contact_alias as *mut c_char); - - // test number of confirmations calls - let num_confirmations_required = wallet_get_num_confirmations_required(alice_wallet, error_ptr); - assert_eq!( - num_confirmations_required, - TransactionServiceConfig::default().num_confirmations_required - ); - assert_eq!(error, 0); - for number in 1..10 { - // set - wallet_set_num_confirmations_required(alice_wallet, number, error_ptr); - assert_eq!(error, 0); - // get - let num_confirmations_required = wallet_get_num_confirmations_required(alice_wallet, error_ptr); - assert_eq!(num_confirmations_required, number); - assert_eq!(error, 0); - } - - // empty wallet - let fee = wallet_get_fee_estimate(alice_wallet, 100, 1, 1, 1, error_ptr); - assert_eq!(fee, 0); - assert_eq!(error, 101); - - let generated = wallet_test_generate_data(alice_wallet, db_path_alice_str, error_ptr); - assert!(generated); - - // minimum fee - let fee = wallet_get_fee_estimate(alice_wallet, 100, 1, 1, 1, error_ptr); - assert_eq!(fee, 100); - assert_eq!(error, 0); - - for outputs in 1..5 { - let fee = wallet_get_fee_estimate(alice_wallet, 100, 25, 1, outputs, error_ptr); - assert_eq!( - MicroTari::from(fee), - Fee::calculate(MicroTari::from(25), 1, 1, outputs as usize) - ); - assert_eq!(error, 0); - } - - // not enough funds - let fee = wallet_get_fee_estimate(alice_wallet, 1_000_000_000, 2_500, 1, 1, error_ptr); - assert_eq!(fee, 0); - 
assert_eq!(error, 101); - - assert!(!(wallet_get_completed_transactions(&mut (*alice_wallet), error_ptr)).is_null(),); - assert!(!(wallet_get_pending_inbound_transactions(&mut (*alice_wallet), error_ptr)).is_null(),); - assert!(!(wallet_get_pending_outbound_transactions(&mut (*alice_wallet), error_ptr)).is_null(),); - - let inbound_transactions: std::collections::HashMap< - u64, - tari_wallet::transaction_service::storage::models::InboundTransaction, - > = (*alice_wallet) - .runtime - .block_on( - (*alice_wallet) - .wallet - .transaction_service - .get_pending_inbound_transactions(), - ) - .unwrap(); - - assert_eq!(inbound_transactions.len(), 0); - - // `wallet_test_generate_data(...)` creates 5 completed inbound tx which should appear in this list - let ffi_inbound_txs = wallet_get_pending_inbound_transactions(&mut (*alice_wallet), error_ptr); - assert_eq!(pending_inbound_transactions_get_length(ffi_inbound_txs, error_ptr), 5); - - wallet_test_receive_transaction(alice_wallet, error_ptr); - - let inbound_transactions: std::collections::HashMap< - u64, - tari_wallet::transaction_service::storage::models::InboundTransaction, - > = (*alice_wallet) - .runtime - .block_on( - (*alice_wallet) - .wallet - .transaction_service - .get_pending_inbound_transactions(), - ) - .unwrap(); - - assert_eq!(inbound_transactions.len(), 1); - - let ffi_inbound_txs = wallet_get_pending_inbound_transactions(&mut (*alice_wallet), error_ptr); - assert_eq!(pending_inbound_transactions_get_length(ffi_inbound_txs, error_ptr), 6); - - let mut found_pending = false; - for i in 0..pending_inbound_transactions_get_length(ffi_inbound_txs, error_ptr) { - let pending_tx = pending_inbound_transactions_get_at(ffi_inbound_txs, i, error_ptr); - let status = pending_inbound_transaction_get_status(pending_tx, error_ptr); - if status == 4 { - found_pending = true; - } - } - assert!(found_pending, "At least 1 transaction should be in the Pending state"); - - // `wallet_test_generate_data(...)` creates 9 
completed outbound transactions that are not mined - let ffi_outbound_txs = wallet_get_pending_outbound_transactions(&mut (*alice_wallet), error_ptr); - assert_eq!(pending_outbound_transactions_get_length(ffi_outbound_txs, error_ptr), 9); - - let mut found_broadcast = false; - for i in 0..pending_outbound_transactions_get_length(ffi_outbound_txs, error_ptr) { - let pending_tx = pending_outbound_transactions_get_at(ffi_outbound_txs, i, error_ptr); - let status = pending_outbound_transaction_get_status(pending_tx, error_ptr); - if status == 1 { - found_broadcast = true; - } - } - assert!( - found_broadcast, - "At least 1 transaction should be in the Broadcast state" - ); - - let completed_transactions: std::collections::HashMap< - u64, - tari_wallet::transaction_service::storage::models::CompletedTransaction, - > = (*alice_wallet) - .runtime - .block_on((*alice_wallet).wallet.transaction_service.get_completed_transactions()) - .unwrap(); - - let num_completed_tx_pre = completed_transactions.len(); - - for (_k, v) in inbound_transactions { - let tx_ptr = Box::into_raw(Box::new(v)); - wallet_test_finalize_received_transaction(alice_wallet, tx_ptr, error_ptr); - } - - let completed_transactions: std::collections::HashMap< - u64, - tari_wallet::transaction_service::storage::models::CompletedTransaction, - > = (*alice_wallet) - .runtime - .block_on((*alice_wallet).wallet.transaction_service.get_completed_transactions()) - .unwrap(); - - assert_eq!(num_completed_tx_pre + 1, completed_transactions.len()); - - // At this stage there is only 1 Mined transaction created by the `wallet_test_generate_data(...)`function - let ffi_completed_txs = wallet_get_completed_transactions(&mut (*alice_wallet), error_ptr); - assert_eq!(completed_transactions_get_length(ffi_completed_txs, error_ptr), 1); - - for x in 0..completed_transactions_get_length(ffi_completed_txs, error_ptr) { - let id_completed = completed_transactions_get_at(&mut (*ffi_completed_txs), x, error_ptr); - let 
id_completed_get = - wallet_get_completed_transaction_by_id(&mut (*alice_wallet), (*id_completed).tx_id, error_ptr); - let confirmations = completed_transaction_get_confirmations(id_completed, error_ptr); - assert_eq!(confirmations, 0); - if (*id_completed).status == TransactionStatus::MinedUnconfirmed { - assert_eq!((*id_completed), (*id_completed_get)); - assert_eq!((*id_completed_get).status, TransactionStatus::MinedUnconfirmed); - } else { - assert_eq!(id_completed_get, ptr::null_mut()); - let pk_compare = wallet_get_public_key(&mut (*alice_wallet), error_ptr); - if (&*pk_compare).as_bytes() == (*id_completed).destination_public_key.as_bytes() { - let id_inbound_get = wallet_get_pending_inbound_transaction_by_id( - &mut (*alice_wallet), - (*id_completed_get).tx_id, - error_ptr, - ); - assert_ne!(id_inbound_get, ptr::null_mut()); - assert_ne!((*id_inbound_get).status, TransactionStatus::MinedUnconfirmed); - pending_inbound_transaction_destroy(&mut (*id_inbound_get)); - } else { - let id_outbound_get = wallet_get_pending_outbound_transaction_by_id( - &mut (*alice_wallet), - (*id_completed_get).tx_id, - error_ptr, - ); - assert_ne!(id_outbound_get, ptr::null_mut()); - assert_ne!((*id_outbound_get).status, TransactionStatus::MinedUnconfirmed); - pending_outbound_transaction_destroy(&mut (*id_outbound_get)); - } - public_key_destroy(&mut (*pk_compare)); - } - completed_transaction_destroy(&mut (*id_completed)); - completed_transaction_destroy(&mut (*id_completed_get)); - } - - // TODO: Test transaction collection and transaction methods - let completed_transactions = (*alice_wallet) - .runtime - .block_on((*alice_wallet).wallet.transaction_service.get_completed_transactions()) - .unwrap(); - - for (_k, v) in completed_transactions { - if v.status == TransactionStatus::Completed { - let tx_ptr = Box::into_raw(Box::new(v.clone())); - wallet_test_broadcast_transaction(alice_wallet, (*tx_ptr).tx_id, error_ptr); - wallet_test_mine_transaction(alice_wallet, 
(*tx_ptr).tx_id, error_ptr); - // test ffi calls for excess, public nonce, and signature - let kernels = v.transaction.get_body().kernels(); - if !kernels.is_empty() { - for k in kernels { - let x = completed_transaction_get_excess(tx_ptr, error_ptr); - assert_eq!(k.excess, *x); - excess_destroy(x); - let nonce = k.excess_sig.get_public_nonce().clone(); - let r = completed_transaction_get_public_nonce(tx_ptr, error_ptr); - assert_eq!(nonce, *r); - nonce_destroy(r); - let sig = k.excess_sig.get_signature().clone(); - let s = completed_transaction_get_signature(tx_ptr, error_ptr); - assert_eq!(sig, *s); - signature_destroy(s); - } - } else { - let x = completed_transaction_get_excess(tx_ptr, error_ptr); - assert!(x.is_null()); - excess_destroy(x); - let r = completed_transaction_get_public_nonce(tx_ptr, error_ptr); - assert!(r.is_null()); - nonce_destroy(r); - let s = completed_transaction_get_signature(tx_ptr, error_ptr); - assert!(s.is_null()); - signature_destroy(s); - } - } - } - - // Now all completed transactions are mined as should be returned - let ffi_completed_txs = wallet_get_completed_transactions(&mut (*alice_wallet), error_ptr); - assert_eq!(completed_transactions_get_length(ffi_completed_txs, error_ptr), 15); - - let contacts = wallet_get_contacts(alice_wallet, error_ptr); - assert_eq!(contacts_get_length(contacts, error_ptr), 4); - - let utxo_spending_key = private_key_generate(); - let utxo_value = 20000; - - let pre_balance = (*alice_wallet) - .runtime - .block_on((*alice_wallet).wallet.output_manager_service.get_balance()) - .unwrap(); - - // test "funds pending" when pending incoming would cover - let fee = wallet_get_fee_estimate(alice_wallet, pre_balance.available_balance.into(), 25, 1, 2, error_ptr); - assert_eq!(fee, 0); - assert_eq!(error, 115); - - let secret_key_base_node = private_key_generate(); - let public_key_base_node = public_key_from_private_key(secret_key_base_node, error_ptr); - let utxo_message_str = CString::new("UTXO 
Import").unwrap(); - let utxo_message: *const c_char = CString::into_raw(utxo_message_str) as *const c_char; - - let utxo_tx_id = wallet_import_utxo( - alice_wallet, - utxo_value, - utxo_spending_key, - public_key_base_node, - utxo_message, - error_ptr, - ); - - let post_balance = (*alice_wallet) - .runtime - .block_on((*alice_wallet).wallet.output_manager_service.get_balance()) - .unwrap(); - - assert_eq!( - pre_balance.available_balance + utxo_value * uT, - post_balance.available_balance - ); - - let import_transaction = (*alice_wallet) - .runtime - .block_on((*alice_wallet).wallet.transaction_service.get_completed_transactions()) - .unwrap() - .remove(&utxo_tx_id) - .expect("Tx should be in collection"); - - assert_eq!(import_transaction.amount, utxo_value * uT); - - assert_eq!(wallet_start_utxo_validation(alice_wallet, error_ptr), 0); - assert_eq!(wallet_start_stxo_validation(alice_wallet, error_ptr), 0); - assert_eq!(wallet_start_invalid_txo_validation(alice_wallet, error_ptr), 0); - assert_eq!(wallet_start_transaction_validation(alice_wallet, error_ptr), 0); - let mut peer_added = wallet_add_base_node_peer(alice_wallet, public_key_bob, address_bob_str, error_ptr); - assert!(peer_added,); - peer_added = wallet_add_base_node_peer(bob_wallet, public_key_alice, address_alice_str, error_ptr); - assert!(peer_added); - assert!(wallet_start_utxo_validation(alice_wallet, error_ptr) > 0); - assert!(wallet_start_stxo_validation(alice_wallet, error_ptr) > 0); - assert!(wallet_start_invalid_txo_validation(alice_wallet, error_ptr) > 0); - assert!(wallet_start_transaction_validation(alice_wallet, error_ptr) > 0); - - // Test pending tx cancellation - let ffi_cancelled_txs = wallet_get_cancelled_transactions(&mut (*alice_wallet), error_ptr); - assert_eq!( - completed_transactions_get_length(ffi_cancelled_txs, error_ptr), - 0, - "Should have no cancelled txs" - ); - - wallet_test_receive_transaction(&mut (*alice_wallet), error_ptr); - - let inbound_txs = (*alice_wallet) - 
.runtime - .block_on( - (*alice_wallet) - .wallet - .transaction_service - .get_pending_inbound_transactions(), - ) - .unwrap(); - - let mut inbound_tx_id = 0; - for (k, v) in inbound_txs { - // test ffi calls for excess, public nonce, and signature when given a pending tx - let tx_ptr = Box::into_raw(Box::new(CompletedTransaction::from(v))); - let x = completed_transaction_get_excess(tx_ptr, error_ptr); - assert!(x.is_null()); - excess_destroy(x); - let r = completed_transaction_get_public_nonce(tx_ptr, error_ptr); - assert!(r.is_null()); - nonce_destroy(r); - let s = completed_transaction_get_signature(tx_ptr, error_ptr); - assert!(s.is_null()); - signature_destroy(s); - - inbound_tx_id = k; - - let inbound_tx = wallet_get_cancelled_transaction_by_id(&mut (*alice_wallet), inbound_tx_id, error_ptr); - - assert_eq!(inbound_tx, ptr::null_mut()); - - (*alice_wallet) - .runtime - .block_on(async { (*alice_wallet).wallet.transaction_service.cancel_transaction(k).await }) - .unwrap(); - - let inbound_tx = wallet_get_cancelled_transaction_by_id(&mut (*alice_wallet), inbound_tx_id, error_ptr); - - assert_ne!(inbound_tx, ptr::null_mut()); - assert_eq!(completed_transaction_get_transaction_id(inbound_tx, error_ptr), k); - } - - let mut found_cancelled_tx = false; - let mut ffi_cancelled_txs = ptr::null_mut(); - for _ in 0..12 { - ffi_cancelled_txs = wallet_get_cancelled_transactions(&mut (*alice_wallet), error_ptr); - if completed_transactions_get_length(ffi_cancelled_txs, error_ptr) >= 1 { - found_cancelled_tx = true; - break; - } - thread::sleep(Duration::from_secs(5)); - } - assert!(found_cancelled_tx, "Should have found a cancelled tx"); - - let cancelled_tx = completed_transactions_get_at(ffi_cancelled_txs, 0, error_ptr); - let tx_id = completed_transaction_get_transaction_id(cancelled_tx, error_ptr); - let dest_pubkey = completed_transaction_get_destination_public_key(cancelled_tx, error_ptr); - let pub_key_ptr = Box::into_raw(Box::new( - 
(*alice_wallet).wallet.comms.node_identity().public_key().clone(), - )); - assert_eq!(tx_id, inbound_tx_id); - assert_eq!(*dest_pubkey, *pub_key_ptr); - public_key_destroy(pub_key_ptr); - - completed_transaction_destroy(cancelled_tx); - - let split_msg = CString::new("Test Coin Split").unwrap(); - let split_msg_str: *const c_char = CString::into_raw(split_msg) as *const c_char; - - let split_tx_id = wallet_coin_split(alice_wallet, 50000, 3, 20, split_msg_str, 0, error_ptr); - assert_eq!(error, 0); - let split_tx = (*alice_wallet).runtime.block_on( - (*alice_wallet) - .wallet - .transaction_service - .get_completed_transaction(split_tx_id), - ); - assert!(split_tx.is_ok()); - string_destroy(split_msg_str as *mut c_char); - - wallet_set_low_power_mode(alice_wallet, error_ptr); - assert_eq!((*error_ptr), 0); - wallet_set_normal_power_mode(alice_wallet, error_ptr); - assert_eq!((*error_ptr), 0); - - // Test seed words - let seed_words = wallet_get_seed_words(alice_wallet, error_ptr); - let seed_word_len = seed_words_get_length(seed_words, error_ptr); - - let mut seed_words_vec = Vec::new(); - for i in 0..seed_word_len { - let word = seed_words_get_at(seed_words, i as c_uint, error_ptr); - let word_string = CString::from_raw(word).to_str().unwrap().to_owned(); - seed_words_vec.push(word_string); - } - let _seed_word_private_key = PrivateKey::from_mnemonic(&seed_words_vec) - .expect("Seed words should be able to convert to private key"); - - let lock = CALLBACK_STATE_FFI.lock().unwrap(); - assert!(lock.received_tx_callback_called); - assert!(lock.received_tx_reply_callback_called); - assert!(lock.received_finalized_tx_callback_called); - assert!(lock.broadcast_tx_callback_called); - assert!(lock.mined_tx_callback_called); - drop(lock); - // Not testing for the discovery_process_completed callback as its tricky to evoke and it is unit tested - // elsewhere - - // free string memory - string_destroy(db_name_alice_str as *mut c_char); - string_destroy(db_path_alice_str as 
*mut c_char); - string_destroy(address_alice_str as *mut c_char); - string_destroy(db_name_bob_str as *mut c_char); - string_destroy(db_path_bob_str as *mut c_char); - string_destroy(address_bob_str as *mut c_char); - // free wallet memory - wallet_destroy(alice_wallet); - wallet_destroy(bob_wallet); - // free keys - private_key_destroy(secret_key_alice); - private_key_destroy(secret_key_bob); - public_key_destroy(public_key_alice); - public_key_destroy(public_key_bob); - // free config memory - comms_config_destroy(bob_config); - comms_config_destroy(alice_config); - transport_type_destroy(transport_type_alice); - transport_type_destroy(transport_type_bob); - seed_words_destroy(seed_words); - } - } - #[test] fn test_master_private_key_persistence() { unsafe { let mut error = 0; let error_ptr = &mut error as *mut c_int; + let mut recovery_in_progress = true; + let recovery_in_progress_ptr = &mut recovery_in_progress as *mut bool; let secret_key_alice = private_key_generate(); let public_key_alice = public_key_from_private_key(secret_key_alice, error_ptr); @@ -6684,6 +5767,9 @@ mod test { .join(db_name) .with_extension("sqlite3"); + let alice_network = CString::new(NETWORK_STRING).unwrap(); + let alice_network_str: *const c_char = CString::into_raw(alice_network) as *const c_char; + let alice_config = comms_config_create( address_alice_str, transport_type_alice, @@ -6691,6 +5777,7 @@ mod test { db_path_alice_str, 20, 10800, + alice_network_str, error_ptr, ); @@ -6725,9 +5812,10 @@ mod test { invalid_txo_validation_complete_callback, transaction_validation_complete_callback, saf_messages_received_callback, + recovery_in_progress_ptr, error_ptr, ); - + assert!(!(*recovery_in_progress_ptr), "no recovery in progress"); assert_eq!(*error_ptr, 0, "No error expected"); wallet_destroy(alice_wallet); @@ -6764,8 +5852,10 @@ mod test { invalid_txo_validation_complete_callback, transaction_validation_complete_callback, saf_messages_received_callback, + recovery_in_progress_ptr, 
error_ptr, ); + assert!(!(*recovery_in_progress_ptr), "no recovery in progress"); assert_eq!(*error_ptr, 0, "No error expected"); wallet_destroy(alice_wallet2); @@ -6801,6 +5891,7 @@ mod test { assert!(stored_key.is_none(), "key should be cleared"); drop(wallet_backend); + string_destroy(alice_network_str as *mut c_char); string_destroy(db_name_alice_str as *mut c_char); string_destroy(db_path_alice_str as *mut c_char); string_destroy(address_alice_str as *mut c_char); @@ -6818,6 +5909,8 @@ mod test { unsafe { let mut error = 0; let error_ptr = &mut error as *mut c_int; + let mut recovery_in_progress = true; + let recovery_in_progress_ptr = &mut recovery_in_progress as *mut bool; let secret_key_alice = private_key_generate(); let public_key_alice = public_key_from_private_key(secret_key_alice, error_ptr); @@ -6830,6 +5923,8 @@ mod test { let address_alice = transport_memory_get_address(transport_type_alice, error_ptr); let address_alice_str = CStr::from_ptr(address_alice).to_str().unwrap().to_owned(); let address_alice_str: *const c_char = CString::new(address_alice_str).unwrap().into_raw() as *const c_char; + let alice_network = CString::new(NETWORK_STRING).unwrap(); + let alice_network_str: *const c_char = CString::into_raw(alice_network) as *const c_char; let alice_config = comms_config_create( address_alice_str, @@ -6838,6 +5933,7 @@ mod test { db_path_alice_str, 20, 10800, + alice_network_str, error_ptr, ); @@ -6862,10 +5958,9 @@ mod test { invalid_txo_validation_complete_callback, transaction_validation_complete_callback, saf_messages_received_callback, + recovery_in_progress_ptr, error_ptr, ); - let generated = wallet_test_generate_data(alice_wallet, db_path_alice_str, error_ptr); - assert!(generated); let passphrase = "A pretty long passphrase that should test the hashing to a 32-bit key quite well".to_string(); @@ -6885,6 +5980,7 @@ mod test { db_path_alice_str, 20, 10800, + alice_network_str, error_ptr, ); @@ -6910,6 +6006,7 @@ mod test { 
invalid_txo_validation_complete_callback, transaction_validation_complete_callback, saf_messages_received_callback, + recovery_in_progress_ptr, error_ptr, ); @@ -6940,6 +6037,7 @@ mod test { invalid_txo_validation_complete_callback, transaction_validation_complete_callback, saf_messages_received_callback, + recovery_in_progress_ptr, error_ptr, ); assert_eq!(error, 428); @@ -6965,6 +6063,7 @@ mod test { invalid_txo_validation_complete_callback, transaction_validation_complete_callback, saf_messages_received_callback, + recovery_in_progress_ptr, error_ptr, ); @@ -6986,6 +6085,7 @@ mod test { db_path_alice_str, 20, 10800, + alice_network_str, error_ptr, ); @@ -7010,11 +6110,13 @@ mod test { invalid_txo_validation_complete_callback, transaction_validation_complete_callback, saf_messages_received_callback, + recovery_in_progress_ptr, error_ptr, ); + assert!(!(*recovery_in_progress_ptr), "no recovery in progress"); assert_eq!(error, 0); - + string_destroy(alice_network_str as *mut c_char); string_destroy(db_name_alice_str as *mut c_char); string_destroy(db_path_alice_str as *mut c_char); string_destroy(address_alice_str as *mut c_char); @@ -7035,6 +6137,8 @@ mod test { unsafe { let mut error = 0; let error_ptr = &mut error as *mut c_int; + let mut recovery_in_progress = true; + let recovery_in_progress_ptr = &mut recovery_in_progress as *mut bool; let secret_key_alice = private_key_generate(); let db_name_alice = CString::new(random::string(8).as_str()).unwrap(); @@ -7047,6 +6151,9 @@ mod test { let address_alice_str = CStr::from_ptr(address_alice).to_str().unwrap().to_owned(); let address_alice_str: *const c_char = CString::new(address_alice_str).unwrap().into_raw() as *const c_char; + let alice_network = CString::new(NETWORK_STRING).unwrap(); + let alice_network_str: *const c_char = CString::into_raw(alice_network) as *const c_char; + let alice_config = comms_config_create( address_alice_str, transport_type_alice, @@ -7054,6 +6161,7 @@ mod test { db_path_alice_str, 20, 
10800, + alice_network_str, error_ptr, ); @@ -7078,6 +6186,7 @@ mod test { invalid_txo_validation_complete_callback, transaction_validation_complete_callback, saf_messages_received_callback, + recovery_in_progress_ptr, error_ptr, ); @@ -7127,6 +6236,7 @@ mod test { assert_eq!(found_value, ptr::null_mut()); assert_eq!(*error_ptr, 424i32); + string_destroy(alice_network_str as *mut c_char); string_destroy(k_str as *mut c_char); string_destroy(db_name_alice_str as *mut c_char); string_destroy(db_path_alice_str as *mut c_char); @@ -7145,6 +6255,8 @@ mod test { unsafe { let mut error = 0; let error_ptr = &mut error as *mut c_int; + let mut recovery_in_progress = true; + let recovery_in_progress_ptr = &mut recovery_in_progress as *mut bool; let mnemonic = vec![ "clever", "jaguar", "bus", "engage", "oil", "august", "media", "high", "trick", "remove", "tiny", @@ -7190,6 +6302,9 @@ mod test { let address_str = CStr::from_ptr(address).to_str().unwrap().to_owned(); let address_str = CString::new(address_str).unwrap().into_raw() as *const c_char; + let network = CString::new(NETWORK_STRING).unwrap(); + let network_str: *const c_char = CString::into_raw(network) as *const c_char; + let config = comms_config_create( address_str, transport_type, @@ -7197,6 +6312,7 @@ mod test { db_path_str, 20, 10800, + network_str, error_ptr, ); @@ -7221,6 +6337,7 @@ mod test { invalid_txo_validation_complete_callback, transaction_validation_complete_callback, saf_messages_received_callback, + recovery_in_progress_ptr, error_ptr, ); @@ -7240,6 +6357,9 @@ mod test { let address_str = CStr::from_ptr(address).to_str().unwrap().to_owned(); let address_str = CString::new(address_str).unwrap().into_raw() as *const c_char; + let network = CString::new(NETWORK_STRING).unwrap(); + let network_str: *const c_char = CString::into_raw(network) as *const c_char; + let config = comms_config_create( address_str, transport_type, @@ -7247,6 +6367,7 @@ mod test { db_path_str, 20, 10800, + network_str, error_ptr, 
); @@ -7271,6 +6392,7 @@ mod test { invalid_txo_validation_complete_callback, transaction_validation_complete_callback, saf_messages_received_callback, + recovery_in_progress_ptr, error_ptr, ); assert_eq!(error, 0); diff --git a/base_layer/wallet_ffi/src/tasks.rs b/base_layer/wallet_ffi/src/tasks.rs index 0b69cd72ea..9c44c94106 100644 --- a/base_layer/wallet_ffi/src/tasks.rs +++ b/base_layer/wallet_ffi/src/tasks.rs @@ -95,10 +95,6 @@ pub async fn recovery_event_monitoring( (recovery_progress_callback)(RecoveryEvent::Progress as u8, current, total); } info!(target: LOG_TARGET, "Recovery progress: {}/{}", current, total); - if current == total { - info!(target: LOG_TARGET, "Recovery complete: {}/{}", current, total); - break; - } }, Ok(UtxoScannerEvent::Completed { number_scanned: num_scanned, @@ -111,17 +107,19 @@ pub async fn recovery_event_monitoring( "Recovery complete! Scanned = {} in {:.2?} ({} utxos/s), Recovered {} worth {}", num_scanned, elapsed, - num_scanned / elapsed.as_secs(), + num_scanned / (1 + elapsed.as_secs()), num_utxos, total_amount ); unsafe { (recovery_progress_callback)(RecoveryEvent::Completed as u8, num_scanned, u64::from(total_amount)); } + break; }, Ok(UtxoScannerEvent::ScanningRoundFailed { num_retries, retry_limit, + error, }) => { unsafe { (recovery_progress_callback)( @@ -132,7 +130,7 @@ pub async fn recovery_event_monitoring( } info!( target: LOG_TARGET, - "UTXO Scanning round failed on retry {} of {}", num_retries, retry_limit + "UTXO Scanning round failed on retry {} of {}: {}", num_retries, retry_limit, error ); }, Ok(UtxoScannerEvent::ScanningFailed) => { diff --git a/base_layer/wallet_ffi/wallet.h b/base_layer/wallet_ffi/wallet.h index ebadace6ce..bd17dd613e 100644 --- a/base_layer/wallet_ffi/wallet.h +++ b/base_layer/wallet_ffi/wallet.h @@ -38,8 +38,8 @@ extern "C" { #endif -#include #include +#include struct ByteVector; @@ -49,8 +49,6 @@ struct TariPrivateKey; struct TariWallet; -struct TariWalletConfig; - struct TariPublicKey; 
struct TariContacts; @@ -87,7 +85,7 @@ struct TariExcessSignature; struct TariTransportType *transport_memory_create(); // Creates a tcp transport type -struct TariTransportType *transport_tcp_create(const char *listener_address,int* error_out); +struct TariTransportType *transport_tcp_create(const char *listener_address, int *error_out); // Creates a tor transport type struct TariTransportType *transport_tor_create( @@ -96,10 +94,10 @@ struct TariTransportType *transport_tor_create( unsigned short tor_port, const char *socks_username, const char *socks_password, - int* error_out); + int *error_out); // Gets the address from a memory transport type -char *transport_memory_get_address(struct TariTransportType *transport,int* error_out); +char *transport_memory_get_address(struct TariTransportType *transport, int *error_out); // Frees memory for a transport type void transport_type_destroy(struct TariTransportType *transport); @@ -112,13 +110,13 @@ void string_destroy(char *s); /// -------------------------------- ByteVector ----------------------------------------------- /// // Creates a ByteVector -struct ByteVector *byte_vector_create(const unsigned char *byte_array, unsigned int element_count, int* error_out); +struct ByteVector *byte_vector_create(const unsigned char *byte_array, unsigned int element_count, int *error_out); // Gets a char from a ByteVector -unsigned char byte_vector_get_at(struct ByteVector *ptr, unsigned int i, int* error_out); +unsigned char byte_vector_get_at(struct ByteVector *ptr, unsigned int i, int *error_out); // Returns the number of elements in a ByteVector -unsigned int byte_vector_get_length(const struct ByteVector *vec, int* error_out); +unsigned int byte_vector_get_length(const struct ByteVector *vec, int *error_out); // Frees memory for a ByteVector pointer void byte_vector_destroy(struct ByteVector *bytes); @@ -126,39 +124,39 @@ void byte_vector_destroy(struct ByteVector *bytes); /// -------------------------------- TariPublicKey 
----------------------------------------------- /// // Creates a TariPublicKey from a ByteVector -struct TariPublicKey *public_key_create(struct ByteVector *bytes,int* error_out); +struct TariPublicKey *public_key_create(struct ByteVector *bytes, int *error_out); // Gets a ByteVector from a TariPublicKey -struct ByteVector *public_key_get_bytes(struct TariPublicKey *public_key,int* error_out); +struct ByteVector *public_key_get_bytes(struct TariPublicKey *public_key, int *error_out); // Creates a TariPublicKey from a TariPrivateKey -struct TariPublicKey *public_key_from_private_key(struct TariPrivateKey *secret_key,int* error_out); +struct TariPublicKey *public_key_from_private_key(struct TariPrivateKey *secret_key, int *error_out); // Creates a TariPublicKey from a const char* filled with hexadecimal characters -struct TariPublicKey *public_key_from_hex(const char *hex,int* error_out); +struct TariPublicKey *public_key_from_hex(const char *hex, int *error_out); // Frees memory for a TariPublicKey pointer void public_key_destroy(struct TariPublicKey *pk); //Converts a TariPublicKey to char array in emoji format -char *public_key_to_emoji_id(struct TariPublicKey *pk, int* error_out); +char *public_key_to_emoji_id(struct TariPublicKey *pk, int *error_out); // Converts a char array in emoji format to a public key -struct TariPublicKey *emoji_id_to_public_key(const char *emoji, int* error_out); +struct TariPublicKey *emoji_id_to_public_key(const char *emoji, int *error_out); /// -------------------------------- TariPrivateKey ----------------------------------------------- /// // Creates a TariPrivateKey from a ByteVector -struct TariPrivateKey *private_key_create(struct ByteVector *bytes,int* error_out); +struct TariPrivateKey *private_key_create(struct ByteVector *bytes, int *error_out); // Generates a TariPrivateKey -struct TariPrivateKey *private_key_generate(void); +struct TariPrivateKey *private_key_generate(); // Creates a ByteVector from a TariPrivateKey 
-struct ByteVector *private_key_get_bytes(struct TariPrivateKey *private_key,int* error_out); +struct ByteVector *private_key_get_bytes(struct TariPrivateKey *private_key, int *error_out); // Creates a TariPrivateKey from a const char* filled with hexadecimal characters -struct TariPrivateKey *private_key_from_hex(const char *hex,int* error_out); +struct TariPrivateKey *private_key_from_hex(const char *hex, int *error_out); // Frees memory for a TariPrivateKey void private_key_destroy(struct TariPrivateKey *pk); @@ -168,10 +166,10 @@ void private_key_destroy(struct TariPrivateKey *pk); struct TariSeedWords *seed_words_create(); // Get the number of seed words in the provided collection -unsigned int seed_words_get_length(struct TariSeedWords *seed_words, int* error_out); +unsigned int seed_words_get_length(struct TariSeedWords *seed_words, int *error_out); // Get a seed word from the provided collection at the specified position -char *seed_words_get_at(struct TariSeedWords *seed_words, unsigned int position, int* error_out); +char *seed_words_get_at(struct TariSeedWords *seed_words, unsigned int position, int *error_out); /// Add a word to the provided TariSeedWords instance /// @@ -190,7 +188,7 @@ char *seed_words_get_at(struct TariSeedWords *seed_words, unsigned int position, /// '3' -> InvalidSeedPhrase /// # Safety /// The ```string_destroy``` method must be called when finished with a string from rust to prevent a memory leak -unsigned char seed_words_push_word(struct TariSeedWords *seed_words, const char *word, int* error_out); +unsigned char seed_words_push_word(struct TariSeedWords *seed_words, const char *word, int *error_out); // Frees the memory for a TariSeedWords collection void seed_words_destroy(struct TariSeedWords *seed_words); @@ -198,13 +196,13 @@ void seed_words_destroy(struct TariSeedWords *seed_words); /// -------------------------------- Contact ------------------------------------------------------ /// // Creates a TariContact -struct 
TariContact *contact_create(const char *alias, struct TariPublicKey *public_key,int* error_out); +struct TariContact *contact_create(const char *alias, struct TariPublicKey *public_key, int *error_out); // Gets the alias of the TariContact -char *contact_get_alias(struct TariContact *contact,int* error_out); +char *contact_get_alias(struct TariContact *contact, int *error_out); /// Gets the TariPublicKey of the TariContact -struct TariPublicKey *contact_get_public_key(struct TariContact *contact, int* error_out); +struct TariPublicKey *contact_get_public_key(struct TariContact *contact, int *error_out); // Frees memory for a TariContact void contact_destroy(struct TariContact *contact); @@ -212,10 +210,10 @@ void contact_destroy(struct TariContact *contact); /// -------------------------------- Contacts ------------------------------------------------------ /// // Gets the number of elements of TariContacts -unsigned int contacts_get_length(struct TariContacts *contacts,int* error_out); +unsigned int contacts_get_length(struct TariContacts *contacts, int *error_out); // Gets a TariContact from TariContacts at position -struct TariContact *contacts_get_at(struct TariContacts *contacts, unsigned int position,int* error_out); +struct TariContact *contacts_get_at(struct TariContacts *contacts, unsigned int position, int *error_out); // Frees memory for TariContacts void contacts_destroy(struct TariContacts *contacts); @@ -223,19 +221,19 @@ void contacts_destroy(struct TariContacts *contacts); /// -------------------------------- CompletedTransaction ------------------------------------------------------ /// // Gets the destination TariPublicKey of a TariCompletedTransaction -struct TariPublicKey *completed_transaction_get_destination_public_key(struct TariCompletedTransaction *transaction,int* error_out); +struct TariPublicKey *completed_transaction_get_destination_public_key(struct TariCompletedTransaction *transaction, int *error_out); // Gets the source 
TariPublicKey of a TariCompletedTransaction -struct TariPublicKey *completed_transaction_get_source_public_key(struct TariCompletedTransaction *transaction,int* error_out); +struct TariPublicKey *completed_transaction_get_source_public_key(struct TariCompletedTransaction *transaction, int *error_out); // Gets the amount of a TariCompletedTransaction -unsigned long long completed_transaction_get_amount(struct TariCompletedTransaction *transaction,int* error_out); +unsigned long long completed_transaction_get_amount(struct TariCompletedTransaction *transaction, int *error_out); // Gets the fee of a TariCompletedTransaction -unsigned long long completed_transaction_get_fee(struct TariCompletedTransaction *transaction,int* error_out); +unsigned long long completed_transaction_get_fee(struct TariCompletedTransaction *transaction, int *error_out); // Gets the message of a TariCompletedTransaction -const char *completed_transaction_get_message(struct TariCompletedTransaction *transaction,int* error_out); +const char *completed_transaction_get_message(struct TariCompletedTransaction *transaction, int *error_out); // Gets the status of a TariCompletedTransaction // | Value | Interpretation | @@ -246,35 +244,35 @@ const char *completed_transaction_get_message(struct TariCompletedTransaction *t // | 2 | Mined | // | 3 | Imported | // | 4 | Pending | -int completed_transaction_get_status(struct TariCompletedTransaction *transaction,int* error_out); +int completed_transaction_get_status(struct TariCompletedTransaction *transaction, int *error_out); // Gets the TransactionID of a TariCompletedTransaction -unsigned long long completed_transaction_get_transaction_id(struct TariCompletedTransaction *transaction,int* error_out); +unsigned long long completed_transaction_get_transaction_id(struct TariCompletedTransaction *transaction, int *error_out); // Gets the timestamp of a TariCompletedTransaction -unsigned long long completed_transaction_get_timestamp(struct 
TariCompletedTransaction *transaction,int* error_out); +unsigned long long completed_transaction_get_timestamp(struct TariCompletedTransaction *transaction, int *error_out); // Check if a TariCompletedTransaction is Valid or not -bool completed_transaction_is_valid(struct TariCompletedTransaction *tx,int* error_out); +bool completed_transaction_is_valid(struct TariCompletedTransaction *tx, int *error_out); // Checks if a TariCompletedTransaction was originally a TariPendingOutboundTransaction, // i.e the transaction was originally sent from the wallet -bool completed_transaction_is_outbound(struct TariCompletedTransaction *tx,int* error_out); +bool completed_transaction_is_outbound(struct TariCompletedTransaction *tx, int *error_out); /// Gets the number of confirmations of a TariCompletedTransaction -unsigned long long completed_transaction_get_confirmations(struct TariCompletedTransaction *transaction,int* error_out); +unsigned long long completed_transaction_get_confirmations(struct TariCompletedTransaction *transaction, int *error_out); // Frees memory for a TariCompletedTransaction void completed_transaction_destroy(struct TariCompletedTransaction *transaction); // Gets the TariExcess of a TariCompletedTransaction -struct TariExcess *completed_transaction_get_excess(struct TariCompletedTransaction *transaction,int* error_out); +struct TariExcess *completed_transaction_get_excess(struct TariCompletedTransaction *transaction, int *error_out); // Gets the TariExcessPublicNonce of a TariCompletedTransaction -struct TariExcessPublicNonce *completed_transaction_get_public_nonce(struct TariCompletedTransaction *transaction,int* error_out); +struct TariExcessPublicNonce *completed_transaction_get_public_nonce(struct TariCompletedTransaction *transaction, int *error_out); // Gets the TariExcessSignature of a TariCompletedTransaction -struct TariExcessSignature *completed_transaction_get_signature(struct TariCompletedTransaction *transaction,int* error_out); +struct 
TariExcessSignature *completed_transaction_get_signature(struct TariCompletedTransaction *transaction, int *error_out); // Frees memory for a TariExcess void excess_destroy(struct TariExcess *excess); @@ -288,10 +286,10 @@ void signature_destroy(struct TariExcessSignature *signature); /// -------------------------------- CompletedTransactions ------------------------------------------------------ /// // Gets number of elements in TariCompletedTransactions -unsigned int completed_transactions_get_length(struct TariCompletedTransactions *transactions,int* error_out); +unsigned int completed_transactions_get_length(struct TariCompletedTransactions *transactions, int *error_out); // Gets a TariCompletedTransaction from a TariCompletedTransactions at position -struct TariCompletedTransaction *completed_transactions_get_at(struct TariCompletedTransactions *transactions, unsigned int position,int* error_out); +struct TariCompletedTransaction *completed_transactions_get_at(struct TariCompletedTransactions *transactions, unsigned int position, int *error_out); // Frees memory for a TariCompletedTransactions void completed_transactions_destroy(struct TariCompletedTransactions *transactions); @@ -299,22 +297,22 @@ void completed_transactions_destroy(struct TariCompletedTransactions *transactio /// -------------------------------- OutboundTransaction ------------------------------------------------------ /// // Gets the TransactionId of a TariPendingOutboundTransaction -unsigned long long pending_outbound_transaction_get_transaction_id(struct TariPendingOutboundTransaction *transaction,int* error_out); +unsigned long long pending_outbound_transaction_get_transaction_id(struct TariPendingOutboundTransaction *transaction, int *error_out); // Gets the destination TariPublicKey of a TariPendingOutboundTransaction -struct TariPublicKey *pending_outbound_transaction_get_destination_public_key(struct TariPendingOutboundTransaction *transaction,int* error_out); +struct TariPublicKey 
*pending_outbound_transaction_get_destination_public_key(struct TariPendingOutboundTransaction *transaction, int *error_out); // Gets the amount of a TariPendingOutboundTransaction -unsigned long long pending_outbound_transaction_get_amount(struct TariPendingOutboundTransaction *transaction,int* error_out); +unsigned long long pending_outbound_transaction_get_amount(struct TariPendingOutboundTransaction *transaction, int *error_out); // Gets the fee of a TariPendingOutboundTransaction -unsigned long long pending_outbound_transaction_get_fee(struct TariPendingOutboundTransaction *transaction,int* error_out); +unsigned long long pending_outbound_transaction_get_fee(struct TariPendingOutboundTransaction *transaction, int *error_out); // Gets the message of a TariPendingOutboundTransaction -const char *pending_outbound_transaction_get_message(struct TariPendingOutboundTransaction *transaction,int* error_out); +const char *pending_outbound_transaction_get_message(struct TariPendingOutboundTransaction *transaction, int *error_out); // Gets the timestamp of a TariPendingOutboundTransaction -unsigned long long pending_outbound_transaction_get_timestamp(struct TariPendingOutboundTransaction *transaction,int* error_out); +unsigned long long pending_outbound_transaction_get_timestamp(struct TariPendingOutboundTransaction *transaction, int *error_out); // Gets the status of a TariPendingOutboundTransaction // | Value | Interpretation | @@ -325,7 +323,7 @@ unsigned long long pending_outbound_transaction_get_timestamp(struct TariPending // | 2 | Mined | // | 3 | Imported | // | 4 | Pending | -int pending_outbound_transaction_get_status(struct TariPendingOutboundTransaction *transaction,int* error_out); +int pending_outbound_transaction_get_status(struct TariPendingOutboundTransaction *transaction, int *error_out); // Frees memory for a TariPendingOutboundTactions void pending_outbound_transaction_destroy(struct TariPendingOutboundTransaction *transaction); @@ -333,10 +331,10 @@ 
void pending_outbound_transaction_destroy(struct TariPendingOutboundTransaction /// -------------------------------- OutboundTransactions ------------------------------------------------------ /// // Gets the number of elements in a TariPendingOutboundTactions -unsigned int pending_outbound_transactions_get_length(struct TariPendingOutboundTransactions *transactions,int* error_out); +unsigned int pending_outbound_transactions_get_length(struct TariPendingOutboundTransactions *transactions, int *error_out); // Gets a TariPendingOutboundTransaction of a TariPendingOutboundTransactions at position -struct TariPendingOutboundTransaction *pending_outbound_transactions_get_at(struct TariPendingOutboundTransactions *transactions, unsigned int position,int* error_out); +struct TariPendingOutboundTransaction *pending_outbound_transactions_get_at(struct TariPendingOutboundTransactions *transactions, unsigned int position, int *error_out); // Frees memory of a TariPendingOutboundTransactions void pending_outbound_transactions_destroy(struct TariPendingOutboundTransactions *transactions); @@ -344,19 +342,19 @@ void pending_outbound_transactions_destroy(struct TariPendingOutboundTransaction /// -------------------------------- InboundTransaction ------------------------------------------------------ /// // Gets the TransactionId of a TariPendingInboundTransaction -unsigned long long pending_inbound_transaction_get_transaction_id(struct TariPendingInboundTransaction *transaction,int* error_out); +unsigned long long pending_inbound_transaction_get_transaction_id(struct TariPendingInboundTransaction *transaction, int *error_out); // Gets the source TariPublicKey of a TariPendingInboundTransaction -struct TariPublicKey *pending_inbound_transaction_get_source_public_key(struct TariPendingInboundTransaction *transaction,int* error_out); +struct TariPublicKey *pending_inbound_transaction_get_source_public_key(struct TariPendingInboundTransaction *transaction, int *error_out); // Gets 
the message of a TariPendingInboundTransaction -const char *pending_inbound_transaction_get_message(struct TariPendingInboundTransaction *transaction,int* error_out); +const char *pending_inbound_transaction_get_message(struct TariPendingInboundTransaction *transaction, int *error_out); // Gets the amount of a TariPendingInboundTransaction -unsigned long long pending_inbound_transaction_get_amount(struct TariPendingInboundTransaction *transaction,int* error_out); +unsigned long long pending_inbound_transaction_get_amount(struct TariPendingInboundTransaction *transaction, int *error_out); // Gets the timestamp of a TariPendingInboundTransaction -unsigned long long pending_inbound_transaction_get_timestamp(struct TariPendingInboundTransaction *transaction,int* error_out); +unsigned long long pending_inbound_transaction_get_timestamp(struct TariPendingInboundTransaction *transaction, int *error_out); // Gets the status of a TariPendingInboundTransaction // | Value | Interpretation | @@ -367,7 +365,7 @@ unsigned long long pending_inbound_transaction_get_timestamp(struct TariPendingI // | 2 | Mined | // | 3 | Imported | // | 4 | Pending | -int pending_inbound_transaction_get_status(struct TariPendingInboundTransaction *transaction,int* error_out); +int pending_inbound_transaction_get_status(struct TariPendingInboundTransaction *transaction, int *error_out); // Frees memory for a TariPendingInboundTransaction void pending_inbound_transaction_destroy(struct TariPendingInboundTransaction *transaction); @@ -375,23 +373,25 @@ void pending_inbound_transaction_destroy(struct TariPendingInboundTransaction *t /// -------------------------------- InboundTransactions ------------------------------------------------------ /// // Gets the number of elements in a TariPendingInboundTransactions -unsigned int pending_inbound_transactions_get_length(struct TariPendingInboundTransactions *transactions,int* error_out); +unsigned int pending_inbound_transactions_get_length(struct 
TariPendingInboundTransactions *transactions, int *error_out); // Gets a TariPendingInboundTransaction of a TariPendingInboundTransactions at position -struct TariPendingInboundTransaction *pending_inbound_transactions_get_at(struct TariPendingInboundTransactions *transactions, unsigned int position,int* error_out); +struct TariPendingInboundTransaction *pending_inbound_transactions_get_at(struct TariPendingInboundTransactions *transactions, unsigned int position, int *error_out); // Frees memory of a TariPendingInboundTransaction void pending_inbound_transactions_destroy(struct TariPendingInboundTransactions *transactions); /// -------------------------------- TariCommsConfig ----------------------------------------------- /// // Creates a TariCommsConfig +// Valid values for network are: ridcully, stibbons, weatherwax, localnet, mainnet struct TariCommsConfig *comms_config_create(const char *public_address, - struct TariTransportType *transport, - const char *database_name, - const char *datastore_path, - unsigned long long discovery_timeout_in_secs, - unsigned long long saf_message_duration_in_secs, - int* error_out); + struct TariTransportType *transport, + const char *database_name, + const char *datastore_path, + unsigned long long discovery_timeout_in_secs, + unsigned long long saf_message_duration_in_secs, + const char *network, + int *error_out); // Frees memory for a TariCommsConfig void comms_config_destroy(struct TariCommsConfig *wc); @@ -441,6 +441,8 @@ void comms_config_destroy(struct TariCommsConfig *wc); /// `callback_saf_message_received` - The callback function pointer that will be called when the Dht has determined that /// is has connected to enough of its neighbours to be confident that it has received any SAF messages that were waiting /// for it. 
+/// `recovery_in_progress` - Pointer to an bool which will be modified to indicate if there is an outstanding recovery +/// that should be completed or not to an error code should one occur, may not be null. Functions as an out parameter. /// `error_out` - Pointer to an int which will be modified /// to an error code should one occur, may not be null. Functions as an out parameter. /// ## Returns @@ -457,155 +459,137 @@ void comms_config_destroy(struct TariCommsConfig *wc); /// Failure, // 2 /// BaseNodeNotInSync, // 3 /// } -struct TariWallet *wallet_create(struct TariWalletConfig *config, - const char *log_path, - unsigned int num_rolling_log_files, - unsigned int size_per_log_file_bytes, - const char *passphrase, - struct TariSeedWords *seed_words, - void (*callback_received_transaction)(struct TariPendingInboundTransaction*), - void (*callback_received_transaction_reply)(struct TariCompletedTransaction*), - void (*callback_received_finalized_transaction)(struct TariCompletedTransaction*), - void (*callback_transaction_broadcast)(struct TariCompletedTransaction*), - void (*callback_transaction_mined)(struct TariCompletedTransaction*), - void (*callback_transaction_mined_unconfirmed)(struct TariCompletedTransaction*, unsigned long long), - void (*callback_direct_send_result)(unsigned long long, bool), - void (*callback_store_and_forward_send_result)(unsigned long long, bool), - void (*callback_transaction_cancellation)(struct TariCompletedTransaction*), - void (*callback_utxo_validation_complete)(unsigned long long, unsigned char), - void (*callback_stxo_validation_complete)(unsigned long long, unsigned char), - void (*callback_invalid_txo_validation_complete)(unsigned long long, unsigned char), - void (*callback_transaction_validation_complete)(unsigned long long, unsigned char), - void (*callback_saf_message_received)(), - int* error_out); +struct TariWallet *wallet_create(struct TariCommsConfig *config, + const char *log_path, + unsigned int 
num_rolling_log_files, + unsigned int size_per_log_file_bytes, + const char *passphrase, + struct TariSeedWords *seed_words, + void (*callback_received_transaction)(struct TariPendingInboundTransaction *), + void (*callback_received_transaction_reply)(struct TariCompletedTransaction *), + void (*callback_received_finalized_transaction)(struct TariCompletedTransaction *), + void (*callback_transaction_broadcast)(struct TariCompletedTransaction *), + void (*callback_transaction_mined)(struct TariCompletedTransaction *), + void (*callback_transaction_mined_unconfirmed)(struct TariCompletedTransaction *, unsigned long long), + void (*callback_direct_send_result)(unsigned long long, bool), + void (*callback_store_and_forward_send_result)(unsigned long long, bool), + void (*callback_transaction_cancellation)(struct TariCompletedTransaction *), + void (*callback_utxo_validation_complete)(unsigned long long, unsigned char), + void (*callback_stxo_validation_complete)(unsigned long long, unsigned char), + void (*callback_invalid_txo_validation_complete)(unsigned long long, unsigned char), + void (*callback_transaction_validation_complete)(unsigned long long, unsigned char), + void (*callback_saf_message_received)(), + bool *recovery_in_progress, + int *error_out); // Signs a message -char* wallet_sign_message(struct TariWallet *wallet, const char* msg, int* error_out); +char *wallet_sign_message(struct TariWallet *wallet, const char *msg, int *error_out); // Verifies signature for a signed message -bool wallet_verify_message_signature(struct TariWallet *wallet, struct TariPublicKey *public_key, const char* hex_sig_nonce, const char* msg, int* error_out); - -/// Generates test data -bool wallet_test_generate_data(struct TariWallet *wallet, const char *datastore_path,int* error_out); +bool wallet_verify_message_signature(struct TariWallet *wallet, struct TariPublicKey *public_key, const char *hex_sig_nonce, const char *msg, int *error_out); // Adds a base node peer to the 
TariWallet -bool wallet_add_base_node_peer(struct TariWallet *wallet, struct TariPublicKey *public_key, const char *address,int* error_out); +bool wallet_add_base_node_peer(struct TariWallet *wallet, struct TariPublicKey *public_key, const char *address, int *error_out); // Upserts a TariContact to the TariWallet, if the contact does not exist it is inserted and if it does the alias is updated -bool wallet_upsert_contact(struct TariWallet *wallet, struct TariContact *contact,int* error_out); +bool wallet_upsert_contact(struct TariWallet *wallet, struct TariContact *contact, int *error_out); // Removes a TariContact form the TariWallet -bool wallet_remove_contact(struct TariWallet *wallet, struct TariContact *contact,int* error_out); +bool wallet_remove_contact(struct TariWallet *wallet, struct TariContact *contact, int *error_out); // Gets the available balance from a TariWallet -unsigned long long wallet_get_available_balance(struct TariWallet *wallet,int* error_out); +unsigned long long wallet_get_available_balance(struct TariWallet *wallet, int *error_out); // Gets the incoming balance from a TariWallet -unsigned long long wallet_get_pending_incoming_balance(struct TariWallet *wallet,int* error_out); +unsigned long long wallet_get_pending_incoming_balance(struct TariWallet *wallet, int *error_out); // Gets the outgoing balance from a TariWallet -unsigned long long wallet_get_pending_outgoing_balance(struct TariWallet *wallet,int* error_out); +unsigned long long wallet_get_pending_outgoing_balance(struct TariWallet *wallet, int *error_out); // Get a fee estimate from a TariWallet for a given amount -unsigned long long wallet_get_fee_estimate(struct TariWallet *wallet, unsigned long long amount, unsigned long long fee_per_gram, unsigned long long num_kernels, unsigned long long num_outputs, int* error_out); +unsigned long long wallet_get_fee_estimate(struct TariWallet *wallet, unsigned long long amount, unsigned long long fee_per_gram, unsigned long long 
num_kernels, unsigned long long num_outputs, int *error_out); // Get the number of mining confirmations by the wallet transaction service -unsigned long long wallet_get_num_confirmations_required(struct TariWallet *wallet, int* error_out); +unsigned long long wallet_get_num_confirmations_required(struct TariWallet *wallet, int *error_out); // Set the number of mining confirmations by the wallet transaction service -void wallet_set_num_confirmations_required(struct TariWallet *wallet, unsigned long long num, int* error_out); - +void wallet_set_num_confirmations_required(struct TariWallet *wallet, unsigned long long num, int *error_out); // Sends a TariPendingOutboundTransaction -unsigned long long wallet_send_transaction(struct TariWallet *wallet, struct TariPublicKey *destination, unsigned long long amount, unsigned long long fee_per_gram,const char *message,int* error_out); +unsigned long long wallet_send_transaction(struct TariWallet *wallet, struct TariPublicKey *destination, unsigned long long amount, unsigned long long fee_per_gram, const char *message, int *error_out); // Get the TariContacts from a TariWallet -struct TariContacts *wallet_get_contacts(struct TariWallet *wallet,int* error_out); +struct TariContacts *wallet_get_contacts(struct TariWallet *wallet, int *error_out); // Get the TariCompletedTransactions from a TariWallet -struct TariCompletedTransactions *wallet_get_completed_transactions(struct TariWallet *wallet,int* error_out); +struct TariCompletedTransactions *wallet_get_completed_transactions(struct TariWallet *wallet, int *error_out); // Get the TariPendingOutboundTransactions from a TariWallet -struct TariPendingOutboundTransactions *wallet_get_pending_outbound_transactions(struct TariWallet *wallet,int* error_out); +struct TariPendingOutboundTransactions *wallet_get_pending_outbound_transactions(struct TariWallet *wallet, int *error_out); // Get the TariPublicKey from a TariCommsConfig -struct TariPublicKey *wallet_get_public_key(struct 
TariWallet *wallet,int* error_out); +struct TariPublicKey *wallet_get_public_key(struct TariWallet *wallet, int *error_out); // Get the TariPendingInboundTransactions from a TariWallet -struct TariPendingInboundTransactions *wallet_get_pending_inbound_transactions(struct TariWallet *wallet,int* error_out); +struct TariPendingInboundTransactions *wallet_get_pending_inbound_transactions(struct TariWallet *wallet, int *error_out); // Get all cancelled transactions from a TariWallet -struct TariCompletedTransactions *wallet_get_cancelled_transactions(struct TariWallet *wallet,int* error_out); +struct TariCompletedTransactions *wallet_get_cancelled_transactions(struct TariWallet *wallet, int *error_out); // Get the TariCompletedTransaction from a TariWallet by its TransactionId -struct TariCompletedTransaction *wallet_get_completed_transaction_by_id(struct TariWallet *wallet, unsigned long long transaction_id,int* error_out); +struct TariCompletedTransaction *wallet_get_completed_transaction_by_id(struct TariWallet *wallet, unsigned long long transaction_id, int *error_out); // Get the TariPendingOutboundTransaction from a TariWallet by its TransactionId -struct TariPendingOutboundTransaction *wallet_get_pending_outbound_transaction_by_id(struct TariWallet *wallet, unsigned long long transaction_id,int* error_out); +struct TariPendingOutboundTransaction *wallet_get_pending_outbound_transaction_by_id(struct TariWallet *wallet, unsigned long long transaction_id, int *error_out); // Get the TariPendingInboundTransaction from a TariWallet by its TransactionId -struct TariPendingInboundTransaction *wallet_get_pending_inbound_transaction_by_id(struct TariWallet *wallet, unsigned long long transaction_id,int* error_out); +struct TariPendingInboundTransaction *wallet_get_pending_inbound_transaction_by_id(struct TariWallet *wallet, unsigned long long transaction_id, int *error_out); // Get a Cancelled transaction from a TariWallet by its TransactionId. 
Pending Inbound or Outbound transaction will be converted to a CompletedTransaction -struct TariCompletedTransaction *wallet_get_cancelled_transaction_by_id(struct TariWallet *wallet, unsigned long long transaction_id, int* error_out); - -// Simulates completion of a TariPendingOutboundTransaction -bool wallet_test_complete_sent_transaction(struct TariWallet *wallet, struct TariPendingOutboundTransaction *tx,int* error_out); +struct TariCompletedTransaction *wallet_get_cancelled_transaction_by_id(struct TariWallet *wallet, unsigned long long transaction_id, int *error_out); // Import a UTXO into the wallet. This will add a spendable UTXO and create a faux completed transaction to record the // event. -unsigned long long wallet_import_utxo(struct TariWallet *wallet, unsigned long long amount, struct TariPrivateKey *spending_key, struct TariPublicKey *source_public_key, const char *message, int* error_out); +unsigned long long wallet_import_utxo(struct TariWallet *wallet, unsigned long long amount, struct TariPrivateKey *spending_key, struct TariPublicKey *source_public_key, const char *message, int *error_out); // This function will tell the wallet to query the set base node to confirm the status of unspent transaction outputs (UTXOs). -unsigned long long wallet_start_utxo_validation(struct TariWallet *wallet, int* error_out); +unsigned long long wallet_start_utxo_validation(struct TariWallet *wallet, int *error_out); // This function will tell the wallet to query the set base node to confirm the status of spent transaction outputs (STXOs). -unsigned long long wallet_start_stxo_validation(struct TariWallet *wallet, int* error_out); +unsigned long long wallet_start_stxo_validation(struct TariWallet *wallet, int *error_out); // This function will tell the wallet to query the set base node to confirm the status of invalid transaction outputs. 
-unsigned long long wallet_start_invalid_txo_validation(struct TariWallet *wallet, int* error_out); +unsigned long long wallet_start_invalid_txo_validation(struct TariWallet *wallet, int *error_out); //This function will tell the wallet to query the set base node to confirm the status of mined transactions. -unsigned long long wallet_start_transaction_validation(struct TariWallet *wallet, int* error_out); +unsigned long long wallet_start_transaction_validation(struct TariWallet *wallet, int *error_out); //This function will tell the wallet retart any broadcast protocols for completed transactions. Ideally this should be // called after a successfuly Transaction Validation is complete -bool wallet_restart_transaction_broadcast(struct TariWallet *wallet, int* error_out); +bool wallet_restart_transaction_broadcast(struct TariWallet *wallet, int *error_out); // Set the power mode of the wallet to Low Power mode which will reduce the amount of network operations the wallet performs to conserve power -void wallet_set_low_power_mode(struct TariWallet *wallet, int* error_out); +void wallet_set_low_power_mode(struct TariWallet *wallet, int *error_out); // Set the power mode of the wallet to Normal Power mode which will then use the standard level of network traffic -void wallet_set_normal_power_mode(struct TariWallet *wallet, int* error_out); - -// Simulates the completion of a broadcasted TariPendingInboundTransaction -bool wallet_test_broadcast_transaction(struct TariWallet *wallet, unsigned long long tx, int* error_out); - -// Simulates receiving the finalized version of a TariPendingInboundTransaction -bool wallet_test_finalize_received_transaction(struct TariWallet *wallet, struct TariPendingInboundTransaction *tx, int* error_out); - -// Simulates a TariCompletedTransaction that has been mined -bool wallet_test_mine_transaction(struct TariWallet *wallet, unsigned long long tx, int* error_out); - -// Simulates a TariPendingInboundtransaction being received -bool 
wallet_test_receive_transaction(struct TariWallet *wallet,int* error_out); +void wallet_set_normal_power_mode(struct TariWallet *wallet, int *error_out); /// Cancel a Pending Outbound Transaction -bool wallet_cancel_pending_transaction(struct TariWallet *wallet, unsigned long long transaction_id, int* error_out); +bool wallet_cancel_pending_transaction(struct TariWallet *wallet, unsigned long long transaction_id, int *error_out); /// Perform a coin split -unsigned long long wallet_coin_split(struct TariWallet *wallet, unsigned long long amount, unsigned long long count, unsigned long long fee, const char* msg, unsigned long long lock_height, int* error_out); +unsigned long long wallet_coin_split(struct TariWallet *wallet, unsigned long long amount, unsigned long long count, unsigned long long fee, const char *msg, unsigned long long lock_height, int *error_out); /// Get the seed words representing the seed private key of the provided TariWallet -struct TariSeedWords *wallet_get_seed_words(struct TariWallet *wallet, int* error_out); +struct TariSeedWords *wallet_get_seed_words(struct TariWallet *wallet, int *error_out); // Apply encryption to the databases used in this wallet using the provided passphrase. If the databases are already // encrypted this function will fail. -void wallet_apply_encryption(struct TariWallet *wallet, const char *passphrase, int* error_out); +void wallet_apply_encryption(struct TariWallet *wallet, const char *passphrase, int *error_out); // Remove encryption to the databases used in this wallet. If this wallet is currently encrypted this encryption will // be removed. 
If it is not encrypted then this function will still succeed to make the operation idempotent -void wallet_remove_encryption(struct TariWallet *wallet, int* error_out); +void wallet_remove_encryption(struct TariWallet *wallet, int *error_out); /// Set a Key Value in the Wallet storage used for Client Key Value store /// @@ -622,7 +606,7 @@ void wallet_remove_encryption(struct TariWallet *wallet, int* error_out); /// /// # Safety /// None -bool wallet_set_key_value(struct TariWallet *wallet, const char* key, const char* value, int* error_out); +bool wallet_set_key_value(struct TariWallet *wallet, const char *key, const char *value, int *error_out); /// get a stored Value that was previously stored in the Wallet storage used for Client Key Value store /// @@ -638,7 +622,7 @@ bool wallet_set_key_value(struct TariWallet *wallet, const char* key, const char /// /// # Safety /// The ```string_destroy``` method must be called when finished with a string from rust to prevent a memory leak -const char *wallet_get_value(struct TariWallet *wallet, const char* key, int* error_out); +const char *wallet_get_value(struct TariWallet *wallet, const char *key, int *error_out); /// Clears a Value for the provided Key Value in the Wallet storage used for Client Key Value store /// @@ -654,7 +638,7 @@ const char *wallet_get_value(struct TariWallet *wallet, const char* key, int* er /// /// # Safety /// None -bool wallet_clear_value(struct TariWallet *wallet, const char* key, int* error_out); +bool wallet_clear_value(struct TariWallet *wallet, const char *key, int *error_out); /// Check if a Wallet has the data of an In Progress Recovery in its database. /// @@ -669,7 +653,7 @@ bool wallet_clear_value(struct TariWallet *wallet, const char* key, int* error_o /// /// # Safety /// None -bool wallet_is_recovery_in_progress(struct TariWallet *wallet, int* error_out); +bool wallet_is_recovery_in_progress(struct TariWallet *wallet, int *error_out); /// Starts the Wallet recovery process. 
/// @@ -723,7 +707,7 @@ bool wallet_is_recovery_in_progress(struct TariWallet *wallet, int* error_out); /// /// # Safety /// None -bool wallet_start_recovery(struct TariWallet *wallet, struct TariPublicKey *base_node_public_key, void (*recovery_progress_callback)(unsigned char, unsigned long long, unsigned long long), int* error_out); +bool wallet_start_recovery(struct TariWallet *wallet, struct TariPublicKey *base_node_public_key, void (*recovery_progress_callback)(unsigned char, unsigned long long, unsigned long long), int *error_out); // Frees memory for a TariWallet void wallet_destroy(struct TariWallet *wallet); @@ -731,18 +715,18 @@ void wallet_destroy(struct TariWallet *wallet); // This function will produce a partial backup of the specified wallet database file (full file path must be provided. // This backup will be written to the provided file (full path must include the filename and extension) and will include // the full wallet db but will clear the sensitive Comms Private Key -void file_partial_backup(const char *original_file_path, const char *backup_file_path, int* error_out); +void file_partial_backup(const char *original_file_path, const char *backup_file_path, int *error_out); /// This function will log the provided string at debug level. 
To be used to have a client log messages to the LibWallet -void log_debug_message(const char* msg); +void log_debug_message(const char *msg); -struct EmojiSet *get_emoji_set(void); +struct EmojiSet *get_emoji_set(void); void emoji_set_destroy(struct EmojiSet *emoji_set); -struct ByteVector *emoji_set_get_at(struct EmojiSet *emoji_set, unsigned int position, int* error_out); +struct ByteVector *emoji_set_get_at(struct EmojiSet *emoji_set, unsigned int position, int *error_out); -unsigned int emoji_set_get_length(struct EmojiSet *emoji_set, int* error_out); +unsigned int emoji_set_get_length(struct EmojiSet *emoji_set, int *error_out); #ifdef __cplusplus } diff --git a/buildtools/docker_rig/README.md b/buildtools/docker_rig/README.md index 13937e8679..1e38bb60ed 100644 --- a/buildtools/docker_rig/README.md +++ b/buildtools/docker_rig/README.md @@ -10,7 +10,6 @@ Configure the containers to run Other config options incl tor password "Go!" - ## Layout +-----------------------+ @@ -44,4 +43,28 @@ Other config options incl tor password | | +-----------------------+ +#### Notes + +Building docker images: + +``` +cd buildtools/docker_rig +docker build -t quay.io/tarilabs/tor:latest -f tor.Dockerfile . +docker build -t quay.io/tarilabs/tari_base_node:latest -f base_node.Dockerfile ../../ +``` + +Base node/Wallet config for using the Tor docker container: + +```toml +tcp_listener_address = "/ip4/0.0.0.0/tcp/18189" +transport = "tor" +tor_control_address = "/dns4/tor/tcp/9051" +tor_control_auth = "password=asdf" # replace with your configured password +tor_onion_port = 18141 +tor_forward_address = "/ip4/0.0.0.0/tcp/18189" +tor_socks_address_override = "/dns4/tor/tcp/9050" +``` + +When attaching to a running container: +To detach the tty without exiting the shell/program, use the escape sequence ^P^Q (Ctrl+P followed by Ctrl+Q). 
diff --git a/buildtools/docker_rig/base_node.Dockerfile b/buildtools/docker_rig/base_node.Dockerfile index 8774f5a717..f0962d0ae6 100644 --- a/buildtools/docker_rig/base_node.Dockerfile +++ b/buildtools/docker_rig/base_node.Dockerfile @@ -1,11 +1,19 @@ -#FROM rust:1.42.0 as builder FROM quay.io/tarilabs/rust_tari-build-with-deps:nightly-2021-05-09 as builder -# Copy the dependency lists -#ADD Cargo.toml ./ -ADD . /tari WORKDIR /tari +# Adding only necessary things up front and copying the entrypoint script last +# to take advantage of layer caching in docker +ADD Cargo.lock . +ADD Cargo.toml . +ADD applications applications +ADD base_layer base_layer +ADD common common +ADD comms comms +ADD infrastructure infrastructure +ADD meta meta +ADD rust-toolchain . + # RUN rustup component add rustfmt --toolchain nightly-2020-08-13-x86_64-unknown-linux-gnu #ARG TBN_ARCH=native ARG TBN_ARCH=x86-64 @@ -13,35 +21,33 @@ ARG TBN_ARCH=x86-64 ARG TBN_FEATURES=safe ENV RUSTFLAGS="-C target_cpu=$TBN_ARCH" ENV ROARING_ARCH=$TBN_ARCH -# Work around for odd issue with broken Cargo.lock and builds -RUN cargo fetch && \ - cargo build --bin tari_base_node --release --features $TBN_FEATURES --locked -# Create a base minimal image for adding our executables to +RUN cargo build --bin tari_base_node --release --features $TBN_FEATURES --locked + +# Create a base minimal image for the executables FROM quay.io/bitnami/minideb:buster as base # Disable Prompt During Packages Installation ARG DEBIAN_FRONTEND=noninteractive RUN apt update && apt -y install \ - apt-transport-https \ - bash \ - ca-certificates \ - curl \ - gpg \ - iputils-ping \ - less \ - libreadline7 \ - libreadline-dev \ - libsqlite3-0 \ - openssl \ - telnet + apt-transport-https \ + bash \ + ca-certificates \ + curl \ + gpg \ + iputils-ping \ + less \ + libreadline7 \ + libreadline-dev \ + libsqlite3-0 \ + openssl \ + telnet # Now create a new image with only the essentials and throw everything else away FROM base ENV 
APP_NAME=base_node APP_EXEC=tari_base_node COPY --from=builder /tari/target/release/$APP_EXEC /usr/bin/ -COPY --from=builder /tari/buildtools/docker_rig/start_tari_app.sh /usr/bin/start_tari_app.sh - +COPY buildtools/docker_rig/start_base_node.sh /usr/bin/start_tari_app.sh ENTRYPOINT [ "start_tari_app.sh", "-c", "/var/tari/config/config.toml", "-b", "/var/tari/base_node" ] -CMD [ "-d" ] +# CMD [ "--non-interactive-mode" ] diff --git a/buildtools/docker_rig/console_wallet.Dockerfile b/buildtools/docker_rig/console_wallet.Dockerfile index 7d77fe19a6..50f12eb2c4 100644 --- a/buildtools/docker_rig/console_wallet.Dockerfile +++ b/buildtools/docker_rig/console_wallet.Dockerfile @@ -1,22 +1,30 @@ -#FROM rust:1.42.0 as builder FROM quay.io/tarilabs/rust_tari-build-with-deps:nightly-2021-05-09 as builder -# Copy the dependency lists -#ADD Cargo.toml ./ -ADD . /tari WORKDIR /tari +# Adding only necessary things up front and copying the entrypoint script last +# to take advantage of layer caching in docker +ADD Cargo.lock . +ADD Cargo.toml . +ADD applications applications +ADD base_layer base_layer +ADD common common +ADD comms comms +ADD infrastructure infrastructure +ADD meta meta +ADD rust-toolchain . 
+ # RUN rustup component add rustfmt --toolchain nightly-2020-08-13-x86_64-unknown-linux-gnu -#ARG WALLET_ARCH=native -ARG WALLET_ARCH=x86-64 -#ARG WALLET_FEATURES=avx2 -ENV RUSTFLAGS="-C target_cpu=$WALLET_ARCH" -ENV ROARING_ARCH=$WALLET_ARCH -# Work around for odd issue with broken Cargo.lock and builds -RUN cargo fetch && \ - cargo build --bin tari_console_wallet --locked --release - -# Create a base minimal image for adding our executables to +#ARG TBN_ARCH=native +ARG TBN_ARCH=x86-64 +#ARG TBN_FEATURES=avx2 +ARG TBN_FEATURES=safe +ENV RUSTFLAGS="-C target_cpu=$TBN_ARCH" +ENV ROARING_ARCH=$TBN_ARCH + +RUN cargo build --bin tari_console_wallet --release --features $TBN_FEATURES --locked + +# Create a base minimal image for the executables FROM quay.io/bitnami/minideb:buster as base # Disable Prompt During Packages Installation ARG DEBIAN_FRONTEND=noninteractive @@ -31,15 +39,15 @@ RUN apt update && apt -y install \ libreadline7 \ libreadline-dev \ libsqlite3-0 \ - openssl + openssl \ + telnet # Now create a new image with only the essentials and throw everything else away FROM base - ENV APP_NAME=wallet APP_EXEC=tari_console_wallet + COPY --from=builder /tari/target/release/$APP_EXEC /usr/bin/ -COPY --from=builder /tari/buildtools/docker_rig/start_tari_app.sh /usr/bin/start_tari_app.sh +COPY buildtools/docker_rig/start_wallet.sh /usr/bin/start_tari_app.sh -ENV SHELL=/bin/bash -ENV APP_NAME=wallet APP_EXEC=tari_console_wallet -ENTRYPOINT ["start_tari_app.sh", "-c", "/var/tari/config/config.toml", "-b", "/var/tari/wallet"] +ENTRYPOINT [ "start_tari_app.sh", "-c", "/var/tari/config/config.toml", "-b", "/var/tari/wallet" ] +# CMD [ "--non-interactive-mode" ] diff --git a/buildtools/docker_rig/docker-compose.yml b/buildtools/docker_rig/docker-compose.yml index adb9d861b3..ff149cce2c 100644 --- a/buildtools/docker_rig/docker-compose.yml +++ b/buildtools/docker_rig/docker-compose.yml @@ -9,24 +9,30 @@ services: - 9050:9050 - 9051:9051 wallet: - image: 
quay.io/tarilabs/tari_console_wallet + image: quay.io/tarilabs/tari_console_wallet:latest build: context: ./../.. dockerfile: buildtools/docker_rig/console_wallet.Dockerfile args: ARG WALLET_ARCH: x86-64 ports: - - 18189:18189 + - 18188:18188 environment: TARI_LOG_CONFIGURATION: "/var/tari/config/log4rs.yml" APP_NAME: wallet APP_EXEC: tari_console_wallet CREATE_CONFIG: 1 CREATE_ID: 1 - WAIT_FOR_TOR: 0 + WAIT_FOR_TOR: 60 TARI_NETWORK: weatherwax SHELL: "/bin/bash" TERM: "linux" + PASSWORD: "asdf" + TARI_WALLET__WEATHERWAX__TOR_CONTROL_AUTH: "password=asdf" + TARI_WALLET__WEATHERWAX__TOR_CONTROL_ADDRESS: "/dns4/tor/tcp/9051" + TARI_WALLET__WEATHERWAX__TOR_SOCKS_ADDRESS_OVERRIDE: "/dns4/tor/tcp/9050" + TARI_WALLET__WEATHERWAX__TOR_FORWARD_ADDRESS: "/ip4/0.0.0.0/tcp/18188" + TARI_WALLET__WEATHERWAX__TCP_LISTENER_ADDRESS: "/ip4/0.0.0.0/tcp/18188" command: [] depends_on: - tor @@ -34,8 +40,10 @@ services: - $HOME/.tari/config:/var/tari/config - $HOME/.tari/wallet:/var/tari/wallet - $HOME/.tari/wallet/log:/var/tari/log + stdin_open: true + tty: true base_node: - image: quay.io/tarilabs/tari_base_node + image: quay.io/tarilabs/tari_base_node:latest build: context: ./../.. 
dockerfile: buildtools/docker_rig/base_node.Dockerfile @@ -47,8 +55,13 @@ services: APP_EXEC: tari_base_node CREATE_CONFIG: 1 CREATE_ID: 1 - WAIT_FOR_TOR: 0 + WAIT_FOR_TOR: 60 TARI_NETWORK: weatherwax + TARI_BASE_NODE__WEATHERWAX__TOR_CONTROL_AUTH: "password=asdf" + TARI_BASE_NODE__WEATHERWAX__TOR_CONTROL_ADDRESS: "/dns4/tor/tcp/9051" + TARI_BASE_NODE__WEATHERWAX__TOR_SOCKS_ADDRESS_OVERRIDE: "/dns4/tor/tcp/9050" + TARI_BASE_NODE__WEATHERWAX__TOR_FORWARD_ADDRESS: "/ip4/0.0.0.0/tcp/18189" + TARI_BASE_NODE__WEATHERWAX__TCP_LISTENER_ADDRESS: "/ip4/0.0.0.0/tcp/18189" ports: - 18189:18189 command: [] @@ -58,6 +71,8 @@ services: - $HOME/.tari/config:/var/tari/config - $HOME/.tari/base_node:/var/tari/base_node - $HOME/.tari/base_node/log:/var/tari/log + stdin_open: true + tty: true # xmrig: # sha3-miner: # pool-worker: @@ -75,4 +90,4 @@ services: # driver_opts: # o: bind # type: none -# device: $HOME/.tari/data \ No newline at end of file +# device: $HOME/.tari/data diff --git a/buildtools/docker_rig/start_base_node.sh b/buildtools/docker_rig/start_base_node.sh index 362358901f..6d6235bd97 100755 --- a/buildtools/docker_rig/start_base_node.sh +++ b/buildtools/docker_rig/start_base_node.sh @@ -1,29 +1,56 @@ #!/bin/bash # -# Docker Start Script for base nodet docker image in a docker-compose context +# Docker Start Script for Tari applications +# The docker compose environment should set the following envars +# - APP_NAME - the name of the app to run. This var is used to set the location of log files, and app-specific config +# - APP_EXEC - the name of the application executable. Just the name is enough, since the Dockerfile will put it in /usr/bin +# - CREATE_CONFIG - set to 1 if we should write a default config file if one is missing. +# - CREATE_ID - set to 1 if we should create an id file for this application if one is missing. It will be called +# {network}_{app_name}_id.json +# - WAIT_FOR_TOR - set to the number of seconds to wait for Tor to start up; 0 disables the delay. 
+# - TARI_NETWORK - the Tari network to configure the docker rig for # -if [[ x$WAIT_FOR_TOR == x1 ]]; then - echo "Waiting for tor to start up" - sleep 30 -fi -TARI_BASE=/var/tari/base_node -CONFIG=/var/tari/config +APP_NAME=${APP_NAME:-base_node} +APP_EXEC=${APP_EXEC:-tari_base_node} +CREATE_CONFIG=${CREATE_CONFIG:-0} +CREATE_ID=${CREATE_ID:-0} +WAIT_FOR_TOR=${WAIT_FOR_TOR:-0} NETWORK=${TARI_NETWORK:-weatherwax} +TARI_BASE=/var/tari/$APP_NAME +CONFIG=/var/tari/config + +echo "Starting $APP_NAME with following docker environment:" +echo "executable: $APP_EXEC" +echo "network: $NETWORK" +echo "CREATE_CONFIG: $CREATE_CONFIG" +echo "CREATE_ID: $CREATE_ID" +echo "WAIT_FOR_TOR: $WAIT_FOR_TOR" +echo "base folder (in container): $TARI_BASE" +echo "config folder (in container): $CONFIG" -cd $TARI_BASE +if [[ $WAIT_FOR_TOR != 0 ]]; then + echo "Waiting $WAIT_FOR_TOR seconds for Tor to start up" + sleep "$WAIT_FOR_TOR" +fi + +cd "$TARI_BASE" || exit 1 + +ARGS=() +if [[ $CREATE_CONFIG == 1 && ! -f $CONFIG/config.toml ]]; then + echo "Creating config file." + ARGS+=("--init") +fi -if [[ ! -f $CONFIG/config.toml ]]; then - echo "I could not find a global Tari configuration file. I can create a default one for you, or you can set this up" - echo "yourself and place it in the global config path (usually ~/.tari/config/config.toml, but YMMV)" - # TODO what it says on the box - exit 1 +ID_FILENAME=${NETWORK}_${APP_NAME}_id.json +if [[ $CREATE_ID == 1 && ! -f $ID_FILENAME ]]; then + echo "Creating network identity file ($ID_FILENAME)." + ARGS+=("--create-id") fi -if [[ ! -f ${NETWORK}_base_node_id.json ]]; then - echo "I could not find a network identity file for this node (${NETWORK}_base_node_id.json)." - echo "So I'll create one for you real quick." - tari_base_node -c $CONFIG/config.toml -b $TARI_BASE --create_id +if [ -n "${ARGS[0]}" ]; then + echo "Initializing." 
+ $APP_EXEC -b "$TARI_BASE" -c "$CONFIG/config.toml" "${ARGS[@]}" || exit 1 fi -tari_base_node "$@" +$APP_EXEC "$@" diff --git a/buildtools/docker_rig/start_tari_app.sh b/buildtools/docker_rig/start_tari_app.sh deleted file mode 100755 index a3a22b0afc..0000000000 --- a/buildtools/docker_rig/start_tari_app.sh +++ /dev/null @@ -1,55 +0,0 @@ -#!/bin/bash -# -# Docker Start Script for Tari applications -# The docker compose environment should set the following envars -# - APP_NAME - the name of the app to run. This var is used to set the location of log files, and app-soecific config -# - APP_EXEC - the name of the application executable. Just the name is enough, since the Dockerfile will put it in /usr/bin -# - CREATE_CONFIG - set to 1 if we should write a default config file if one is missing. -# - CREATE_ID - set to 1 if we should create an id file for this application if one is missing. It will be called -# {network}_{app_name}_id.json -# - WAIT_FOR_TOR - set to 1 to place a 30 second delay at the beginning of this script. -# - TARI_NETWORK - the Tari network to configure the docker rig for -# - -APP_NAME=${APP_NAME:-base_node} -APP_EXEC=${APP_EXEC:-tari_base_node} -CREATE_CONFIG=${CREATE_CONFIG:-0} -CREATE_ID=${CREATE_ID:-0} -WAIT_FOR_TOR=${WAIT_FOR_TOR:-0} -NETWORK=${TARI_NETWORK:-weatherwax} -TARI_BASE=/var/tari/$APP_NAME -CONFIG=/var/tari/config - -echo "Starting $APP_NAME with following docker environment:" -echo "executable: $APP_EXEC" -echo "network: $NETWORK" -echo "CREATE_CONFIG: $CREATE_CONFIG" -echo "CREATE_ID: $CREATE_ID" -echo "WAIT_FOR_TOR: $WAIT_FOR_TOR" -echo "base folder (in container): $TARI_BASE" -echo "config folder (in container): $CONFIG" - -if [[ $WAIT_FOR_TOR == 1 ]]; then - echo "Waiting for tor to start up" - sleep 30 -fi - -cd $TARI_BASE - -if [[ $CREATE_CONFIG == 1 && ! -f $CONFIG/config.toml ]]; then - echo "I could not find a global Tari configuration file. 
I can create a default one for you, or you can set this up" - echo "yourself and place it in the global config path (usually ~/.tari/config/config.toml, but YMMV)" - # TODO what it says on the box - exit 1 -fi - -ID_FILENAME=${NETWORK}_${APP_NAME}_id.json - -if [[ $CREATE_ID && ! -f $ID_FILENAME ]]; then - - echo "I could not find a network identity file for this node ($ID_FILENAME)." - echo "So I'll create one for you real quick." - $APP_EXEC -c $CONFIG/config.toml -b $TARI_BASE --create_id -fi - -$APP_EXEC "$@" diff --git a/buildtools/docker_rig/start_wallet.sh b/buildtools/docker_rig/start_wallet.sh index fbf8ee9ecb..8cebe3949b 100755 --- a/buildtools/docker_rig/start_wallet.sh +++ b/buildtools/docker_rig/start_wallet.sh @@ -1,30 +1,46 @@ #!/bin/bash # -# Docker Start Script for console_wallet docker image in a docker-compose context +# Docker Start Script for Tari applications +# The docker compose environment should set the following envars +# - APP_NAME - the name of the app to run. This var is used to set the location of log files, and app-specific config +# - APP_EXEC - the name of the application executable. Just the name is enough, since the Dockerfile will put it in /usr/bin +# - CREATE_CONFIG - set to 1 if we should write a default config file if one is missing. +# - CREATE_ID - set to 1 if we should create an id file for this application if one is missing. It will be called +# {network}_{app_name}_id.json +# - WAIT_FOR_TOR - set to the number of seconds to wait for Tor to start up; 0 disables the delay. 
+# - TARI_NETWORK - the Tari network to configure the docker rig for # -if [[ x$WAIT_FOR_TOR == x1 ]]; then - echo "Waiting for tor to start up" - sleep 30 -fi -TARI_BASE=/var/tari/wallet -TARI_CONFIG=/var/tari/config +APP_NAME=${APP_NAME:-wallet} +APP_EXEC=${APP_EXEC:-tari_console_wallet} +CREATE_CONFIG=${CREATE_CONFIG:-0} +CREATE_ID=${CREATE_ID:-0} +WAIT_FOR_TOR=${WAIT_FOR_TOR:-0} NETWORK=${TARI_NETWORK:-weatherwax} +TARI_BASE=/var/tari/$APP_NAME +CONFIG=/var/tari/config -cd $TARI_BASE +echo "Starting $APP_NAME with following docker environment:" +echo "executable: $APP_EXEC" +echo "network: $NETWORK" +echo "CREATE_CONFIG: $CREATE_CONFIG" +echo "CREATE_ID: $CREATE_ID" +echo "WAIT_FOR_TOR: $WAIT_FOR_TOR" +echo "base folder (in container): $TARI_BASE" +echo "config folder (in container): $CONFIG" +# NOTE: the wallet password is intentionally not echoed; secrets must not be written to container logs -if [[ ! -f $CONFIG/config.toml ]]; then - echo "I could not find a global Tari configuration file. I can create a default one for you, or you can set this up" - echo "yourself and place it in the global config path (usually ~/.tari/config/config.toml, but YMMV)" - # TODO what it says on the box - exit 1 +if [[ $WAIT_FOR_TOR != 0 ]]; then + echo "Waiting $WAIT_FOR_TOR seconds for Tor to start up" + sleep "$WAIT_FOR_TOR" fi -if [[ ! -f ${NETWORK}_console_wallet_id.json ]]; then - echo "I could not find a network identity file for this node (${NETWORK}_console_wallet_id.json)." - echo "So I'll create one for you real quick." - tari_base_node -c $CONFIG/config.toml -b $TARI_BASE --create_id -fi +cd "$TARI_BASE" || exit 1 +if [[ $CREATE_CONFIG == 1 && ! 
-f $CONFIG/config.toml ]]; then + $APP_EXEC --init --password "$PASSWORD" "$@" +else + $APP_EXEC --password "$PASSWORD" "$@" +fi -tari_console_wallet "$@" +# $APP_EXEC "$INIT" --password "$PASSWORD" "$@" diff --git a/buildtools/docker_rig/torrc b/buildtools/docker_rig/torrc index 3061284987..74392ecd7e 100644 --- a/buildtools/docker_rig/torrc +++ b/buildtools/docker_rig/torrc @@ -4,4 +4,4 @@ ControlPort 0.0.0.0:9051 CookieAuthentication 0 ClientOnly 1 ClientUseIPv6 1 -HashedControlPassword 16:FFE2A07AEAF0AFFF606CA27EAD66655F30AE15992A05BBCBA1DBA2B0D7 +HashedControlPassword 16:8318D1DA9334F90C603A7A7A7CD330E98B7A73AD516E810A48C6F50C6A diff --git a/buildtools/get_openssl_win.ps1 b/buildtools/get_openssl_win.ps1 deleted file mode 100644 index 3e2fb61144..0000000000 --- a/buildtools/get_openssl_win.ps1 +++ /dev/null @@ -1,10 +0,0 @@ -echo "" -echo "" -echo "" -echo "" -echo "" -echo "OpenSSL download page: '$env:openssl_downloads'" -$url = $env:openssl_repo + ((Invoke-WebRequest "$env:openssl_downloads" -UseBasicParsing).Links.href | ` - Select-String -Pattern 'Win64' | Select-String -Pattern 'Light-1_1' | Select-String -Pattern 'exe') -echo "OpenSSL install file: '$url'" -Invoke-WebRequest "$url" -outfile "$env:TEMP\$env:openssl_install_file" diff --git a/buildtools/install_openssl.bat b/buildtools/install_openssl.bat deleted file mode 100644 index d035c3bd9c..0000000000 --- a/buildtools/install_openssl.bat +++ /dev/null @@ -1,33 +0,0 @@ -echo off - -rem Control variables -rem - OpenSSL v.1.1.1 latest {Note: `powershell` cannot `expand-archive` to `C:\Program Files (x86)`} -rem - Download `OpenSSL` at `https://slproweb.com/products/Win32OpenSSL.html` - -set openssl_install_file=openssl-win64.exe -set openssl_repo=https://slproweb.com -set openssl_downloads=%openssl_repo%/products/Win32OpenSSL.html - -echo Downloading and installing OpenSSL... -echo. 
- -rem Install dependencies -call :INSTALL_OPEN_SSL -goto END: - -:INSTALL_OPEN_SSL -rem Download install file -del /f "%TEMP%\%openssl_install_file%" 2>null -powershell "Set-ExecutionPolicy -ExecutionPolicy Bypass -Scope Process; .\get_openssl_win.ps1" -rem Install -"%TEMP%\%openssl_install_file%" -goto :eof - -:END -echo. -if not [%1]==[NO_PAUSE] ( - pause -) else ( - ping -n 5 localhost>nul -) -if [%errorlevel%]==[10101] exit diff --git a/buildtools/windows_inno_installer.iss b/buildtools/windows_inno_installer.iss index 3b0663126b..8121f7bd5d 100644 --- a/buildtools/windows_inno_installer.iss +++ b/buildtools/windows_inno_installer.iss @@ -118,8 +118,6 @@ Source: "xmr_logo.ico"; DestDir: "{userdocs}\..\temp\tari_icons"; Flags: ignorev Source: "install_sqlite.bat"; DestDir: "{app}\runtime"; Flags: ignoreversion Source: "install_tor_services.bat"; DestDir: "{app}\runtime"; Flags: ignoreversion Source: "install_vs2019_redist.bat"; DestDir: "{app}\runtime"; Flags: ignoreversion -Source: "install_openssl.bat"; DestDir: "{app}\runtime"; Flags: ignoreversion -Source: "get_openssl_win.ps1"; DestDir: "{app}\runtime"; Flags: ignoreversion Source: "install_xmrig.bat"; DestDir: "{app}\runtime"; Flags: ignoreversion Source: "get_xmrig_win.ps1"; DestDir: "{app}\runtime"; Flags: ignoreversion Source: "..\common\xmrig_config\config_example_stagenet.json"; DestDir: "{app}\config"; DestName: "xmrig_config_example_stagenet.json"; Flags: ignoreversion @@ -153,7 +151,6 @@ Name: "{userappdata}\Microsoft\Internet Explorer\Quick Launch\{#BaseNodeName}"; [Run] Filename: "{app}\runtime\install_sqlite.bat"; Parameters: "NO_PAUSE"; Flags: runascurrentuser postinstall; Description: "Install SQLite" Filename: "{app}\runtime\install_tor_services.bat"; Parameters: "NO_PAUSE"; Flags: runascurrentuser postinstall; Description: "Install Tor Services" -Filename: "{app}\runtime\install_openssl.bat"; Parameters: "NO_PAUSE"; Flags: runascurrentuser postinstall; Description: "Install OpenSSL" 
Filename: "{app}\runtime\install_xmrig.bat"; Parameters: "NO_PAUSE"; Flags: runascurrentuser postinstall; Description: "Install XMRig" Filename: "{app}\runtime\install_vs2019_redist.bat"; Parameters: "NO_PAUSE"; Flags: runascurrentuser postinstall; Description: "Install Redistributable for Visual Studio 2019" diff --git a/changelog.md b/changelog.md index 1bb7d0b354..6f0c643bd0 100644 --- a/changelog.md +++ b/changelog.md @@ -1,3 +1,104 @@ + +## 0.9.5 (2021-08-23) + + +#### Bug Fixes + +* show warnings on console (#3225) ([3291021c](https://github.com/tari-project/tari/commit/3291021c6e63778d4fa14ca6cb10c51681d8a5f5)) +* edge-case fixes for wallet peer switching in console wallet (#3226) ([f577df8e](https://github.com/tari-project/tari/commit/f577df8e9b34c6a823cc555b0fecfa2153ddd7e0)) +* chain error caused by zero-conf transactions and reorgs (#3223) ([f0404273](https://github.com/tari-project/tari/commit/f04042732a78bf3dc98d1aee7bf5b032e398010c)) +* bug in wallet base node peer switching (#3217) ([878c317b](https://github.com/tari-project/tari/commit/878c317be9226da342cef439af2bc0024d1eb77f)) +* division by zero ([8a988e1c](https://github.com/tari-project/tari/commit/8a988e1cd5bd4c49660819494949305963d08173)) +* improve p2p RPC robustness (#3208) ([211dcfdb](https://github.com/tari-project/tari/commit/211dcfdb70eb774f9f2c3cdd080d6db7a24cb46c)) +* **wallet:** add NodeId to console wallet Who Am I tab (#3213) ([706ff5e5](https://github.com/tari-project/tari/commit/706ff5e59185f8088add19ac8654f29cc4ab1145)) +* **wallet_ffi:** fix division by zero during recovery (#3214) ([abd3d849](https://github.com/tari-project/tari/commit/abd3d84965651285c72ecbcca1c401f3e54ad28c)) + +#### Features + +* add `ping()` to all comms RPC clients (#3227) ([b5b62238](https://github.com/tari-project/tari/commit/b5b62238cf7512abb38803c426369ebbcc8fe540)) + +#### Breaking Changes + +* base nodes should delete their database and resync + + + +## 0.9.4 (2021-08-17) + + +#### Features + +* add 
sync rpc client pool to wallet connectivity (#3199) ([305aeda1](https://github.com/tari-project/tari/commit/305aeda139cfc93d35f67926e1d52fae010961c4)) +* **wallet:** add network selection to wallet_ffi (#3178) ([f0f40b20](https://github.com/tari-project/tari/commit/f0f40b20bc2f60fecc26dd9b83bd5820f9212eab)) + +#### Bug Fixes + +* fix console wallet buffer size bug (#3200) ([b94667fd](https://github.com/tari-project/tari/commit/b94667fddda4299d1ee176b3120a991a5b6903db)) +* ensure peers are added to peer list before recovery starts (#3186) ([5f33414a](https://github.com/tari-project/tari/commit/5f33414a5d39be046f471d5b279da66ecf1e747c)) +* enforce unique commitments in utxo set (#3173) ([23a7d64c](https://github.com/tari-project/tari/commit/23a7d64c550d7689db451c1dcf9e22d723f19f75)) +* cleanup stratum config terminal output in tari_mining_node (#3181) ([6c38f226](https://github.com/tari-project/tari/commit/6c38f2266641f77b39eb1406ca7e26a21ff38151)) +* **wallet:** handle receiver cancelling an inbound transaction that is later received (#3177) ([c79e53cf](https://github.com/tari-project/tari/commit/c79e53cfc20ea404f0d1b160f2686f77d1c52698)) + + + +## 0.9.3 (2021-08-12) + + +#### Bug Fixes + +* set robust limits for busy a blockchain (#3150) ([c993780a](https://github.com/tari-project/tari/commit/c993780ad0237feba78857b6e67cfbe6e9f78b1d)) +* update handling of SAF message propagation and deletion (#3164) ([cedb4efc](https://github.com/tari-project/tari/commit/cedb4efcc1b9ef3b01e1425437f84dd62065ac90)) +* improve prune mode to remove panics (#3163) ([05f78132](https://github.com/tari-project/tari/commit/05f7813296797e2583dbb38742084bef91ebbdd4)) +* better method for getting an open port in cucumber tests ([2d9f3a60](https://github.com/tari-project/tari/commit/2d9f3a60342b6af251405ca471ed76e8f25f5b84)) +* fix utxo scan edge case when pc awakes from sleep (#3160) ([5bdc9f39](https://github.com/tari-project/tari/commit/5bdc9f398c9036542a6f9ea385587af237ea96e3)) +* ban peer 
when merkle roots mismatch ([39ddd337](https://github.com/tari-project/tari/commit/39ddd337cc870932328250417755f2fa6a8201c5)) +* fix search_kernel command (#3157) ([dc99898e](https://github.com/tari-project/tari/commit/dc99898e1faf87c5fa7a26313cdec1623b53d947)) +* introduce cache update cool down to console wallet (#3146) ([5de92526](https://github.com/tari-project/tari/commit/5de92526d3266ff3476088fe91a2779451bd6c39)) +* add timeout to protocol notifications + log improvements (#3143) ([77018464](https://github.com/tari-project/tari/commit/77018464f4304428f8d1b4f0f886825de66af28e)) +* fix GRPC GetTransactionInfo not found response (#3145) ([0e0bfe0f](https://github.com/tari-project/tari/commit/0e0bfe0f31b05d44540a3bfa90e28bfc07ec86a7)) +* fix cucumber transaction builder reliability (#3147) ([d4a7fdd3](https://github.com/tari-project/tari/commit/d4a7fdd3ed4b61b068f9541b24f5fb9ad5bf40b5)) +* **wallet:** + * fix resize panic (#3149) ([33af0847](https://github.com/tari-project/tari/commit/33af084720d752c5111fbef23ff854eaabe1a7d0)) + * in wallet block certain keys during popup (#3148) ([84542922](https://github.com/tari-project/tari/commit/84542922f98d46985047d590c237bb63bf35c03b)) + * correctly deal with new coinbase transactions for the same height (#3151) ([564ef5a2](https://github.com/tari-project/tari/commit/564ef5a26a3056ef855f7f132582beaf2ef0e15a)) + +#### Features + +* wallet connectivity service (#3159) ([54e8c8e4](https://github.com/tari-project/tari/commit/54e8c8e4020bbd38fd8e563465a4ce5d95408d7a)) +* add a shared p2p rpc client session pool to reduce rpc setup time (#3152) ([778f9512](https://github.com/tari-project/tari/commit/778f951282082e7774f649b043a4e9085fb05bdd)) +* miningcore transcoder (#3003) ([ee9a225c](https://github.com/tari-project/tari/commit/ee9a225c389b43267db34f97aff537b244533844)) +* **mining_node:** mining worker name for tari_mining_node (#3185) 
([48a62f98](https://github.com/tari-project/tari/commit/48a62f98db687183759551b8bcd6239021e3c0c3)) + + + + +## 0.9.2 (2021-07-29) + + +#### Bug Fixes + +* update LibWallet `wallet_import_utxo` method to include valid TariScript (#3139) ([cc6de2ab](https://github.com/tari-project/tari/commit/cc6de2ab7fde419b6bf5358aeed25ea343d0539e)) +* update LibWallet recovery task event handling (#3142) ([0861d726](https://github.com/tari-project/tari/commit/0861d726a1ec8811e8042018116e5a606326f306)) +* improve reliability of get block template protocol in mm proxy (#3141) ([6afde62f](https://github.com/tari-project/tari/commit/6afde62f94be350d58b45945017fef5bc6e16338)) +* replace usage of RangeProof MR with Witness MR (#3129) ([bbfc6878](https://github.com/tari-project/tari/commit/bbfc68783082e59de71ee4fa099f851a6d2f645f)) +* fix prune mode sync (#3138) ([d0d1d614](https://github.com/tari-project/tari/commit/d0d1d614798999e511b48a15aeca0a371612df1d)) +* update transaction and block validator to use full deleted map (#3137) ([4f1509e6](https://github.com/tari-project/tari/commit/4f1509e61b98152369b1eb4e722352119e21dce2)) +* bug that causes non p2p apps to panic on startup (#3131) ([389dd748](https://github.com/tari-project/tari/commit/389dd748371282a6965d7d3dd052f4dbb8962b73)) +* console wallet now recognises wallet.network comms settings (#3121) ([162e98bf](https://github.com/tari-project/tari/commit/162e98bfe21b229f2384404a93853e3eb9823f5b)) + +#### Features + +* add persistent dedup cache for message hashes (#3130) ([08f2675d](https://github.com/tari-project/tari/commit/08f2675d21ff1e7fc8ad98060b897d4c9254e96e)) +* **comms:** + * tcp-only p2p protocol listener (#3127) ([6fefd18a](https://github.com/tari-project/tari/commit/6fefd18a57c6c8efa13412291a132c7242e7b1ea)) +* **wallet:** add extra feedback to recovery monitoring callback in Wallet FFI (#3128) ([02836b09](https://github.com/tari-project/tari/commit/02836b099ebcf4261199dcf418cffb2c66bfff5d)) + +#### Breaking Changes + 
+* console wallet now recognises wallet.network comms settings (#3121) ([162e98bf](https://github.com/tari-project/tari/commit/162e98bfe21b229f2384404a93853e3eb9823f5b)) + + + ## 0.9.1 (2021-07-21) diff --git a/clients/wallet_grpc_client/index.js b/clients/wallet_grpc_client/index.js index ae42f5284b..d638ba1f5c 100644 --- a/clients/wallet_grpc_client/index.js +++ b/clients/wallet_grpc_client/index.js @@ -39,8 +39,13 @@ function Client(address) { "importUtxos", "listConnectedPeers", "getNetworkStatus", + "cancelTransaction", ]; + this.waitForReady = (...args) => { + this.inner.waitForReady(...args); + }; + functions.forEach((method) => { this[method] = (arg) => this.inner[method]().sendMessage(arg); }); diff --git a/clients/wallet_grpc_client/package-lock.json b/clients/wallet_grpc_client/package-lock.json index 18b6e5cc81..768729da2a 100644 --- a/clients/wallet_grpc_client/package-lock.json +++ b/clients/wallet_grpc_client/package-lock.json @@ -8,15 +8,15 @@ "name": "@tari/wallet-grpc-client", "version": "0.0.1", "dependencies": { - "@grpc/grpc-js": "^1.2.3", + "@grpc/grpc-js": "^1.3.6", "@grpc/proto-loader": "^0.5.5", "grpc-promise": "^1.4.0" } }, "node_modules/@grpc/grpc-js": { - "version": "1.3.5", - "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.3.5.tgz", - "integrity": "sha512-V29L2QNKkLWM3bcJfVFMSo+Z7kkO8A1s7MAfdzBXLYEC1PE5/M0n1iXBDiD5aUtyVLh5GILcbme2bGtIHl0FMQ==", + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.3.6.tgz", + "integrity": "sha512-v7+LQFbqZKmd/Tvf5/j1Xlbq6jXL/4d+gUtm2TNX4QiEC3ELWADmGr2dGlUyLl6aKTuYfsN72vAsO5zmavYkEg==", "dependencies": { "@types/node": ">=12.12.47" }, @@ -143,9 +143,9 @@ }, "dependencies": { "@grpc/grpc-js": { - "version": "1.3.5", - "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.3.5.tgz", - "integrity": "sha512-V29L2QNKkLWM3bcJfVFMSo+Z7kkO8A1s7MAfdzBXLYEC1PE5/M0n1iXBDiD5aUtyVLh5GILcbme2bGtIHl0FMQ==", + "version": "1.3.6", + "resolved": 
"https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.3.6.tgz", + "integrity": "sha512-v7+LQFbqZKmd/Tvf5/j1Xlbq6jXL/4d+gUtm2TNX4QiEC3ELWADmGr2dGlUyLl6aKTuYfsN72vAsO5zmavYkEg==", "requires": { "@types/node": ">=12.12.47" } diff --git a/clients/wallet_grpc_client/package.json b/clients/wallet_grpc_client/package.json index 524e7ab85d..25ed48203e 100644 --- a/clients/wallet_grpc_client/package.json +++ b/clients/wallet_grpc_client/package.json @@ -8,7 +8,7 @@ }, "author": "Tari development community", "dependencies": { - "@grpc/grpc-js": "^1.2.3", + "@grpc/grpc-js": "^1.3.6", "@grpc/proto-loader": "^0.5.5", "grpc-promise": "^1.4.0" } diff --git a/common/Cargo.toml b/common/Cargo.toml index 590babe6c0..aa9d646654 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -6,7 +6,7 @@ repository = "https://github.com/tari-project/tari" homepage = "https://tari.com" readme = "README.md" license = "BSD-3-Clause" -version = "0.9.1" +version = "0.9.5" edition = "2018" [features] @@ -21,11 +21,18 @@ serde_json = "1.0.51" dirs-next = "1.0.2" get_if_addrs = "0.5.3" log = "0.4.8" -log4rs = "0.8.3" +log4rs = { version = "1.0.0", default_features= false, features = ["config_parsing", "threshold_filter"]} multiaddr={package="parity-multiaddr", version = "0.11.0"} sha2 = "0.9.5" path-clean = "0.1.0" tari_storage = { version = "^0.9", path = "../infrastructure/storage"} +tracing = "0.1.26" +tracing-opentelemetry = "0.15.0" +tracing-subscriber = "0.2.20" + +# network tracing, rt-tokio for async batch export +opentelemetry = { version = "0.16", default-features = false, features = ["trace","rt-tokio"] } +opentelemetry-jaeger = { version="0.15", features=["rt-tokio"]} anyhow = { version = "1.0", optional = true } git2 = { version = "0.8", optional = true } diff --git a/common/config/presets/tari_config_example.toml b/common/config/presets/tari_config_example.toml index f16ce8ffcb..5daa90a2c9 100644 --- a/common/config/presets/tari_config_example.toml +++ 
b/common/config/presets/tari_config_example.toml @@ -43,16 +43,16 @@ network = "weatherwax" #liveness_allowlist_cidrs = ["127.0.0.1/32"] # The buffer size constants for the publish/subscribe connector channel, connecting comms messages to the domain layer: -# - Buffer size for the base node (min value = 30, default value = 100). -#buffer_size_base_node = 100 -# - Buffer size for the base node wallet (min value = 300, default value = 1000). -#buffer_size_base_node_wallet = 1000 +# - Buffer size for the base node (min value = 30, default value = 1500). +#buffer_size_base_node = 1500 +# - Buffer size for the console wallet (min value = 300, default value = 50000). +#buffer_size_console_wallet = 50000 # The rate limit constants for the publish/subscribe connector channel, i.e. maximum amount of inbound messages to # accept - any rate attemting to exceed this limit will be throttled. -# - Rate limit for the base node (min value = 5, default value = 20). -#buffer_rate_limit_base_node = 20 -# - Rate limit for the base node wallet (min value = 5, default value = 20). -#buffer_rate_limit_base_node_wallet = 20 +# - Rate limit for the base node (min value = 5, default value = 1000). +#buffer_rate_limit_base_node = 1000 +# - Rate limit for the console wallet (min value = 5, default value = 1000). +buffer_rate_limit_console_wallet = 1000 # The message deduplication persistent cache size - messages with these hashes in the cache will only be processed once. # The cache will also be trimmed down to size periodically (min value = 0, default value = 2500). dedup_cache_capacity = 25000 @@ -66,8 +66,9 @@ dedup_cache_capacity = 25000 # The timeout (s) for requesting other base node services (min value = 10 s, default value = 180 s). #service_request_timeout = 180 -# The maximum simultaneous comms RPC sessions allowed. Setting this to -1 will allow unlimited sessions. -# rpc_max_simultaneous_sessions = 1000 +# The maximum simultaneous comms RPC sessions allowed (default value = 1000). 
Setting this to -1 will allow unlimited +# sessions. +rpc_max_simultaneous_sessions = 10000 # Auto Update # @@ -116,20 +117,33 @@ console_wallet_db_file = "wallet/console-wallet.dat" # This is the timeout period that will be used to monitor TXO queries to the base node (default = 60). Larger values # are needed for wallets with many (>1000) TXOs to be validated. -base_node_query_timeout = 120 +base_node_query_timeout = 180 # The amount of seconds added to the current time (Utc) which will then be used to check if the message has # expired or not when processing the message (default = 10800). #saf_expiry_duration = 10800 -# This is the number of block confirmations required for a transaction to be considered completely mined and confirmed. (default = 3) +# This is the number of block confirmations required for a transaction to be considered completely mined and +# confirmed. (default = 3) #transaction_num_confirmations_required = 3 # This is the timeout period that will be used for base node broadcast monitoring tasks (default = 60) -#transaction_broadcast_monitoring_timeout = 60 +transaction_broadcast_monitoring_timeout = 180 # This is the timeout period that will be used for chain monitoring tasks (default = 60) #transaction_chain_monitoring_timeout = 60 # This is the timeout period that will be used for sending transactions directly (default = 20) -#transaction_direct_send_timeout = 20 +transaction_direct_send_timeout = 180 # This is the timeout period that will be used for sending transactions via broadcast mode (default = 60) -#transaction_broadcast_send_timeout = 60 +transaction_broadcast_send_timeout = 180 +# This is the size of the event channel used to communicate transaction status events to the wallet's UI. A busy console +# wallet doing thousands of bulk payments or used for stress testing needs a fairly big size (>10000) (default = 1000). 
+transaction_event_channel_size = 25000 +# This is the size of the event channel used to communicate base node events to the wallet. A busy console +# wallet doing thousands of bulk payments or used for stress testing needs a fairly big size (>3000) (default = 250). +base_node_event_channel_size = 3500 +# This is the size of the event channel used to communicate output manager events to the wallet. A busy console +# wallet doing thousands of bulk payments or used for stress testing needs a fairly big size (>3000) (default = 250). +output_manager_event_channel_size = 3500 +# This is the size of the event channel used to communicate base node update events to the wallet. A busy console +# wallet doing thousands of bulk payments or used for stress testing needs a fairly big size (>300) (default = 50). +base_node_update_publisher_channel_size = 500 # If a large amount of tiny valued uT UTXOs are used as inputs to a transaction, the fee may be larger than # the transaction amount. Set this value to `false` to allow spending of "dust" UTXOs for small valued # transactions (default = true). @@ -140,7 +154,7 @@ base_node_query_timeout = 120 #transaction_routing_mechanism = "DirectAndStoreAndForward" # UTXO scanning service interval (default = 12 hours, i.e. 60 * 60 * 12 seconds) -scan_for_utxo_interval = 60 +scan_for_utxo_interval = 180 # When running the console wallet in command mode, use these values to determine what "stage" and timeout to wait # for sent transactions. @@ -151,7 +165,7 @@ scan_for_utxo_interval = 60 # - "MinedUnconfirmed" - The transaction was successfully detected as mined but unconfirmed on the blockchain. # - "Mined" - The transaction was successfully detected as mined and confirmed on the blockchain. 
-# The default values are: +# The default values are: "Broadcast", 300 #command_send_wait_stage = "Broadcast" #command_send_wait_timeout = 300 @@ -161,9 +175,9 @@ scan_for_utxo_interval = 60 # Configuration for the wallet's base node service # The refresh interval, defaults to 10 seconds -# base_node_service_refresh_interval = 10 +base_node_service_refresh_interval = 30 # The maximum age of service requests in seconds, requests older than this are discarded -# base_node_service_request_max_age = 60 +base_node_service_request_max_age = 180 #[base_node.transport.tor] #control_address = "/ip4/127.0.0.1/tcp/9051" @@ -253,6 +267,10 @@ db_type = "lmdb" # is "0", which indicates an archival node without any pruning. #pruning_horizon = 0 +# The amount of messages that will be permitted in the flood ban timespan of 100s (Default weatherwax = 1000, +# default mainnet = 10000) +flood_ban_max_msg_count = 10000 + # The relative path to store persistent data data_dir = "weatherwax" @@ -263,6 +281,7 @@ data_dir = "weatherwax" # new nodes can use to introduce themselves to the network. # peer_seeds = ["public_key1::address1", "public_key2::address2",... 
] peer_seeds = [ + "98bc76afc1c35ad4651bdc9ef57bbe0655a2ea3cd86c0e19b5fd5890546eb040::/onion3/33izgtjkrlxhxybj6luqowkpiy2wvte43osejnbqyieqtdfhovzghxad:18141", #jozi "9a26e910288213d649b26f9a7a7ee51fe2b2a67ff7d42334523463bf4be94312::/onion3/56kq54ylttnbl5ikotqex3oqvtzlxdpn7zlx4v56rvzf4kq7eezlclid:18141", #london "6afd5b3c7772ad7d4bb26e0c19668fe04f2d68f99de9e132bee50a6c1846946d::/onion3/may4ajbmcn4dlnzf6fanvqlklxzqiw6qwu6ywqwkjc3bb354rc2i5wid:18141", #ncal "8e7beec9becdc44fe6015a00d97a77fa3dbafe65127dcc988df6326bd9fd040d::/onion3/3pise36l4imoopsbjic5rtw67adx7rms6w5pgjmccpdwiqx66j7oqcqd:18141", #nvir @@ -271,12 +290,12 @@ peer_seeds = [ "f2ce179fb733725961a5f7e1e45dacdd443dd43ba6237438d6abe344fb717058::/onion3/nvgdmjf4wucgatz7vemzvi2u4sw5o4gyzwuikagpepoj4w7mkii47zid:18141", #stockholm "909c0160f4d8e815aba5c2bbccfcceb448877e7b38759fb160f3e9494484d515::/onion3/qw5uxv533sqdn2qoncfyqo35dgecy4rt4x27rexi2her6q6pcpxbm4qd:18141", #sydney # backups - "0c3fe3c23866ed3827e1cd72aae0c9d364d860d597993104e90d9a9401e52f05::/onion3/2m2xnylrsqbaozsndkbmfisxxbwh2vgvs6oyfak2qah4snnxykrf7zad:18141", - "b6b2c8a997ba3500d44b64a3b47203d922d614c01ebd9cad5563dc59086f9938::/onion3/kxtcwaoan6wvdl4wltguo4omo2san2ge727cfoc65hrzuwixprvop5id:18141", - "4471cec2b77b1608ff26b8695281e0b4ab35bad7f882eeaab043398644094a6c::/onion3/fqmbme5qvagh2tv722nrnwwcd2ooa4aqr43mxgiozo2yq33w74bop5qd:18141", - "e427d6007cb46e8c311fc75d2daaa95898a6162ce6a29cb4ab257c2ef70bfd62::/onion3/glicssekriwwwvfm2o5sk2mari4czbd7eklv4lsdn4wmy4ljss2t7zqd:18141", - "021baaee838a6c7a21ae3ed74f1cbb27e46351b78f0e00810c1a344a9042754d::/onion3/p2z2ptouqgi3agmw62uhz2tambqfzfdtetrfw443of3jnkkiw44e2zqd:18141", - "002a5b9d129401ca250d120b126caf067f8eb7a616252e630b6974da48def71a::/onion3/cldbbow3yzgxkvz4g2apoclrkbvg3mjxdvonoaeej3tarhlzkluodeyd:18141", + # "0c3fe3c23866ed3827e1cd72aae0c9d364d860d597993104e90d9a9401e52f05::/onion3/2m2xnylrsqbaozsndkbmfisxxbwh2vgvs6oyfak2qah4snnxykrf7zad:18141", + # 
"b6b2c8a997ba3500d44b64a3b47203d922d614c01ebd9cad5563dc59086f9938::/onion3/kxtcwaoan6wvdl4wltguo4omo2san2ge727cfoc65hrzuwixprvop5id:18141", + # "4471cec2b77b1608ff26b8695281e0b4ab35bad7f882eeaab043398644094a6c::/onion3/fqmbme5qvagh2tv722nrnwwcd2ooa4aqr43mxgiozo2yq33w74bop5qd:18141", + # "e427d6007cb46e8c311fc75d2daaa95898a6162ce6a29cb4ab257c2ef70bfd62::/onion3/glicssekriwwwvfm2o5sk2mari4czbd7eklv4lsdn4wmy4ljss2t7zqd:18141", + # "021baaee838a6c7a21ae3ed74f1cbb27e46351b78f0e00810c1a344a9042754d::/onion3/p2z2ptouqgi3agmw62uhz2tambqfzfdtetrfw443of3jnkkiw44e2zqd:18141", + # "002a5b9d129401ca250d120b126caf067f8eb7a616252e630b6974da48def71a::/onion3/cldbbow3yzgxkvz4g2apoclrkbvg3mjxdvonoaeej3tarhlzkluodeyd:18141", ] # This allowlist provides a method to force syncing from any known nodes you may choose, for example if you have a @@ -464,9 +483,10 @@ console_wallet_tor_identity_file = "config/console_wallet_tor.json" [merge_mining_proxy.weatherwax] # URL to monerod -monerod_url = "http://monero-stagenet.exan.tech:38081" # stagenet -#monerod_url = "http://18.133.59.45:28081" # testnet -#monerod_url = "http://18.132.124.81:18081" # mainnet +monerod_url = "http://monero-stagenet.exan.tech:38081" # stagenet +#monerod_url = "http://18.133.59.45:28081" # testnet +#monerod_url = "http://18.132.124.81:18081" # mainnet +#monerod_url = "http://monero.exan.tech:18081" # mainnet alternative # Address of the tari_merge_mining_proxy application proxy_host_address = "127.0.0.1:7878" @@ -491,6 +511,11 @@ monerod_password = "" # accepted. (Default value = true; will wait for base node initial sync). 
#wait_for_initial_sync_at_startup = true +[stratum_transcoder] + +# Address of the tari_stratum_transcoder application +transcoder_host_address = "127.0.0.1:7879" + [mining_node] # Number of mining threads # Default: number of logical CPU cores @@ -514,3 +539,8 @@ monerod_password = "" # to true # Default: 30 seconds #validate_tip_timeout_sec=30 + +# Stratum Mode configuration +# mining_pool_address = "miningcore.tarilabs.com:3052" +# mining_wallet_address = "YOUR_WALLET_PUBLIC_KEY" +# mining_worker_name = "worker1" diff --git a/common/logging/log4rs_sample_base_node.yml b/common/logging/log4rs_sample_base_node.yml index 6343e77445..17781c077b 100644 --- a/common/logging/log4rs_sample_base_node.yml +++ b/common/logging/log4rs_sample_base_node.yml @@ -14,8 +14,13 @@ appenders: # An appender named "stdout" that writes to stdout stdout: kind: console + encoder: - pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] {h({l}):5} {m}{n}" + pattern: "{d(%H:%M)} {h({l}):5} {m}{n}" + filters: + - + kind: threshold + level: warn # An appender named "network" that writes to a file with a custom pattern encoder network: @@ -32,7 +37,7 @@ appenders: count: 5 pattern: "log/base-node/network.{}.log" encoder: - pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] {l:5} {m}{n}" + pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] [Thread:{I}] {l:5} {m}{n}" # An appender named "base_layer" that writes to a file with a custom pattern encoder base_layer: @@ -49,7 +54,7 @@ appenders: count: 5 pattern: "log/base-node/base_layer.{}.log" encoder: - pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] {l:5} {m}{n}" + pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] [Thread:{I}] {l:5} {m}{n}" # An appender named "other" that writes to a file with a custom pattern encoder other: @@ -66,13 +71,13 @@ appenders: count: 5 pattern: "log/base-node/other.{}.log" encoder: - pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] {l:5} {m}{n}" + pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] [Thread:{I}] {l:5} {m}{n}" # Set the default logging level to "info" root: - 
level: info + level: warn appenders: - - base_layer + - stdout loggers: # Route log events common to every application to all appenders @@ -89,70 +94,61 @@ loggers: level: info appenders: - base_layer - additive: false tari: level: info appenders: - base_layer - additive: false # Route log events sent to the "wallet" logger to the "base_layer" appender wallet: level: info appenders: - base_layer - additive: false # Route log events sent to the "comms" logger to the "network" appender comms: level: info appenders: - network - additive: false # Route log events sent to the "p2p" logger to the "network" appender p2p: level: info appenders: - network - additive: false - # Route log events sent to the "yamux" logger to the "network" appender + + # Route log events sent to the "yamux" logger to the "network" appender yamux: level: info appenders: - network - additive: false # Route log events sent to the "mio" logger to the "network" appender mio: level: error appenders: - network - additive: false # Route log events sent to the "rustyline" logger to the "other" appender rustyline: level: error appenders: - other additive: false + # Route log events sent to the "tokio_util" logger to the "other" appender tokio_util: level: error appenders: - other - additive: false # Route PGP log events pgp: - level: warn - appenders: - - other - additive: false + level: warn + appenders: + - other # Route log events sent to the "tari_mm_proxy" logger to the "base_layer" appender tari_mm_proxy: level: info appenders: - base_layer - additive: false # Route log events sent to the "stress_test" logger to the "base_layer" appender stress_test: level: info appenders: - base_layer - additive: false diff --git a/common/logging/log4rs_sample_mining_node.yml b/common/logging/log4rs_sample_mining_node.yml index 24db5595bf..f0c8a965b8 100644 --- a/common/logging/log4rs_sample_mining_node.yml +++ b/common/logging/log4rs_sample_mining_node.yml @@ -14,7 +14,10 @@ appenders: kind: console encoder: 
pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] {h({l}):5} {m}{n}" - + filters: + - + kind: threshold + level: warn # An appender named "base_layer" that writes to a file with a custom pattern encoder mining_node: kind: rolling_file @@ -30,25 +33,13 @@ appenders: count: 5 pattern: "log/mining-node/mining_node.{}.log" encoder: - pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] {l:5} {m}{n}" + pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] [Thread:{I}] {l:5} {m}{n}" # Set the default logging level to "warn" and attach the "stdout" appender to the root root: - level: warn + level: info appenders: - stdout + - mining_node -loggers: - # mining_node - tari::application: - level: info - appenders: - - mining_node - additive: false - tari_mining_node: - level: info - appenders: - - mining_node - - stdout - additive: false diff --git a/common/logging/log4rs_sample_seed_node.yml b/common/logging/log4rs_sample_seed_node.yml index a91dd7501b..554cf0c59a 100644 --- a/common/logging/log4rs_sample_seed_node.yml +++ b/common/logging/log4rs_sample_seed_node.yml @@ -19,7 +19,7 @@ appenders: count: 5 pattern: "log/base-node/extra/network.{}.log" encoder: - pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] {l:5} {m}{n}" + pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] [Thread:{I}] {l:5} {m}{n}" status: kind: rolling_file @@ -35,7 +35,7 @@ appenders: count: 5 pattern: "log/base-node/rolled/status.{}.log" encoder: - pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] {l:5} {m}{n}" + pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] [Thread:{I}] {l:5} {m}{n}" base_layer: kind: rolling_file @@ -53,7 +53,7 @@ appenders: count: 5 pattern: "log/base-node/rolled/base_layer.{}.log" encoder: - pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] {l:5} {m}{n}" + pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] [Thread:{I}] {l:5} {m}{n}" base_layer_extra: kind: rolling_file @@ -71,7 +71,7 @@ appenders: count: 5 pattern: "log/base-node/extra/base_layer.{}.log" encoder: - pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] {l:5} {m}{n}" + pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} 
[{t}] [Thread:{I}] {l:5} {m}{n}" other: kind: rolling_file @@ -87,7 +87,7 @@ appenders: count: 5 pattern: "log/base-node/extra/other.{}.log" encoder: - pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] {l:5} {m}{n}" + pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] [Thread:{I}] {l:5} {m}{n}" root: appenders: diff --git a/common/logging/log4rs_sample_transcoder.yml b/common/logging/log4rs_sample_transcoder.yml new file mode 100644 index 0000000000..29649ffc88 --- /dev/null +++ b/common/logging/log4rs_sample_transcoder.yml @@ -0,0 +1,38 @@ +# A sample log configuration file for running in release mode. By default, this configuration splits up log messages to +# three destinations: +# * Console: For log messages with level INFO and higher +# * log/transcoder/transcoder.log: All transcoder logs will be written to this file +# +# See https://docs.rs/log4rs/0.8.3/log4rs/encode/pattern/index.html for deciphering the log pattern. The log format +# used in this sample configuration prints messages as: +# timestamp [target] LEVEL message +refresh_rate: 30 seconds +appenders: + # An appender named "transcoder" that writes to a file with a custom pattern encoder + transcoder: + kind: rolling_file + path: "log/transcoder/transcoder.log" + policy: + kind: compound + trigger: + kind: size + limit: 200mb + roller: + kind: fixed_window + base: 1 + count: 50 + pattern: "log/transcoder/transcoder.{}.log" + encoder: + pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] {l:5} {m}{n}" + +# root (to transcoder) +root: + level: debug + appenders: + - transcoder + +loggers: + h2: + level: info + hyper: + level: info diff --git a/common/logging/log4rs_sample_wallet.yml b/common/logging/log4rs_sample_wallet.yml index 95d50f7a87..631d479043 100644 --- a/common/logging/log4rs_sample_wallet.yml +++ b/common/logging/log4rs_sample_wallet.yml @@ -11,12 +11,17 @@ # timestamp [target] LEVEL message refresh_rate: 30 seconds appenders: - # An appender named "stdout" that writes to stdout +# An appender named "stdout" that 
writes to file. stdout: - kind: console + kind: file + path: "log/wallet/stdout.log" + append: false encoder: pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] {h({l}):5} {m}{n}" - + filters: + - kind: threshold + level: warn + # An appender named "network" that writes to a file with a custom pattern encoder network: kind: rolling_file @@ -32,7 +37,7 @@ appenders: count: 5 pattern: "log/wallet/network.{}.log" encoder: - pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] {l:5} {m}{n}" + pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] [Thread:{I}] {l:5} {m}{n}" # An appender named "base_layer" that writes to a file with a custom pattern encoder base_layer: @@ -49,9 +54,9 @@ appenders: count: 5 pattern: "log/wallet/base_layer.{}.log" encoder: - pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] {l:5} {m}{n}" + pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] [Thread:{I}] {l:5} {m}{n}" - # An appender named "base_layer" that writes to a file with a custom pattern encoder + # An appender named "base_layer" that writes to a file with a custom pattern encoder other: kind: rolling_file path: "log/wallet/other.log" @@ -66,67 +71,66 @@ appenders: count: 5 pattern: "log/wallet/other.{}.log" encoder: - pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] {l:5} {m}{n}" + pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{t}] [Thread:{I}] {l:5} {m}{n}" -# Set the default logging level to "warn" and attach the "stdout" appender to the root +# root (to base_layer) root: - level: warn + level: info appenders: + - base_layer - stdout loggers: - # Route log events common to every application to all appenders - tari::application: + # base_layer + wallet: level: info appenders: - base_layer - - network - - other additive: false - # Route log events sent to the "core" logger to the "base_layer" appender - c: + # other + h2: level: info appenders: - - base_layer + - other additive: false - # Route log events sent to the "wallet" logger to the "base_layer" appender - wallet: + hyper: level: info appenders: - - base_layer + - other additive: false - # 
Route log events sent to the "comms" logger to the "network" appender + tokio_util: + level: error + appenders: + - other + additive: false + # network comms: level: info appenders: - network additive: false - p2p: - level: info + comms::noise: + level: error appenders: - network additive: false - # Route log events sent to the "p2p" logger to the "network" appender - yamux: + p2p: level: info appenders: - network - additive: false - # Route log events sent to the "mio" logger to the "other" appender + # Route log events sent to the "mio" logger to the "other" appender mio: level: error appenders: - network additive: false - # Route log events sent to the "rustyline" logger to the "other" appender - rustyline: + yamux: level: error appenders: - - other + - network additive: false - # Route log events sent to the "tokio_util" logger to the "other" appender - tokio_util: + tracing: level: error appenders: - - other + - network additive: false diff --git a/common/src/configuration/bootstrap.rs b/common/src/configuration/bootstrap.rs index 144fd5b00e..6e0a668541 100644 --- a/common/src/configuration/bootstrap.rs +++ b/common/src/configuration/bootstrap.rs @@ -59,6 +59,7 @@ use crate::{ DEFAULT_CONFIG, DEFAULT_MERGE_MINING_PROXY_LOG_CONFIG, DEFAULT_MINING_NODE_LOG_CONFIG, + DEFAULT_STRATUM_TRANSCODER_LOG_CONFIG, DEFAULT_WALLET_LOG_CONFIG, }; use std::{ @@ -145,6 +146,8 @@ pub struct ConfigBootstrap { pub miner_min_diff: Option, #[structopt(long, alias = "max-difficulty")] pub miner_max_diff: Option, + #[structopt(long, alias = "tracing")] + pub tracing_enabled: bool, } fn normalize_path(path: PathBuf) -> PathBuf { @@ -179,6 +182,7 @@ impl Default for ConfigBootstrap { miner_max_blocks: None, miner_min_diff: None, miner_max_diff: None, + tracing_enabled: false, } } } @@ -230,6 +234,12 @@ impl ConfigBootstrap { Some(&self.base_path), )) }, + ApplicationType::StratumTranscoder => { + self.log_config = normalize_path(dir_utils::default_path( + 
DEFAULT_STRATUM_TRANSCODER_LOG_CONFIG, + Some(&self.base_path), + )) + }, ApplicationType::MiningNode => { self.log_config = normalize_path(dir_utils::default_path( DEFAULT_MINING_NODE_LOG_CONFIG, @@ -279,6 +289,10 @@ impl ConfigBootstrap { &self.log_config, logging::install_default_merge_mining_proxy_logfile_config, ), + ApplicationType::StratumTranscoder => install_configuration( + &self.log_config, + logging::install_default_stratum_transcoder_logfile_config, + ), ApplicationType::MiningNode => { install_configuration(&self.log_config, logging::install_default_mining_node_logfile_config) }, @@ -304,7 +318,7 @@ impl ConfigBootstrap { } } -fn prompt(question: &str) -> bool { +pub fn prompt(question: &str) -> bool { println!("{}", question); let mut input = "".to_string(); io::stdin().read_line(&mut input).unwrap(); @@ -329,6 +343,7 @@ pub enum ApplicationType { ConsoleWallet, MergeMiningProxy, MiningNode, + StratumTranscoder, } impl ApplicationType { @@ -339,6 +354,7 @@ impl ApplicationType { ConsoleWallet => "Tari Console Wallet", MergeMiningProxy => "Tari Merge Mining Proxy", MiningNode => "Tari Mining Node", + StratumTranscoder => "Tari Stratum Transcoder", } } @@ -349,6 +365,7 @@ impl ApplicationType { ConsoleWallet => "wallet", MergeMiningProxy => "merge_mining_proxy", MiningNode => "miner", + StratumTranscoder => "stratum-transcoder", } } } @@ -363,6 +380,7 @@ impl FromStr for ApplicationType { "console-wallet" | "console_wallet" => Ok(ConsoleWallet), "mm-proxy" | "mm_proxy" => Ok(MergeMiningProxy), "miner" => Ok(MiningNode), + "stratum-proxy" => Ok(StratumTranscoder), _ => Err(ConfigError::new("Invalid ApplicationType", None)), } } diff --git a/common/src/configuration/global.rs b/common/src/configuration/global.rs index 3dcfdf3a0c..880e111cd1 100644 --- a/common/src/configuration/global.rs +++ b/common/src/configuration/global.rs @@ -82,7 +82,6 @@ pub struct GlobalConfig { pub dns_seeds_name_server: SocketAddr, pub dns_seeds_use_dnssec: bool, pub 
peer_db_path: PathBuf, - pub enable_wallet: bool, pub num_mining_threads: usize, pub base_node_tor_identity_file: PathBuf, pub wallet_db_file: PathBuf, @@ -92,9 +91,9 @@ pub struct GlobalConfig { pub wallet_peer_db_path: PathBuf, pub console_wallet_peer_db_path: PathBuf, pub buffer_size_base_node: usize, - pub buffer_size_base_node_wallet: usize, + pub buffer_size_console_wallet: usize, pub buffer_rate_limit_base_node: usize, - pub buffer_rate_limit_base_node_wallet: usize, + pub buffer_rate_limit_console_wallet: usize, pub dedup_cache_capacity: usize, pub fetch_blocks_timeout: Duration, pub fetch_utxos_timeout: Duration, @@ -108,6 +107,10 @@ pub struct GlobalConfig { pub transaction_broadcast_send_timeout: Duration, pub transaction_routing_mechanism: String, pub transaction_num_confirmations_required: u64, + pub transaction_event_channel_size: usize, + pub base_node_event_channel_size: usize, + pub output_manager_event_channel_size: usize, + pub base_node_update_publisher_channel_size: usize, pub console_wallet_password: Option, pub wallet_command_send_wait_stage: String, pub wallet_command_send_wait_timeout: u64, @@ -120,6 +123,7 @@ pub struct GlobalConfig { pub monerod_password: String, pub monerod_use_auth: bool, pub proxy_host_address: SocketAddr, + pub transcoder_host_address: SocketAddr, pub proxy_submit_to_origin: bool, pub force_sync_peers: Vec, pub wait_for_initial_sync_at_startup: bool, @@ -130,6 +134,9 @@ pub struct GlobalConfig { pub flood_ban_max_msg_count: usize, pub mine_on_tip_only: bool, pub validate_tip_timeout_sec: u64, + pub mining_pool_address: String, + pub mining_wallet_address: String, + pub mining_worker_name: String, } impl GlobalConfig { @@ -387,12 +394,6 @@ fn convert_node_config( let wallet_peer_db_path = data_dir.join("wallet_peer_db"); let console_wallet_peer_db_path = data_dir.join("console_wallet_peer_db"); - // set base node wallet - let key = config_string("base_node", &net_str, "enable_wallet"); - let enable_wallet = cfg - 
.get_bool(&key) - .map_err(|e| ConfigurationError::new(&key, &e.to_string()))?; - let key = config_string("base_node", &net_str, "flood_ban_max_msg_count"); let flood_ban_max_msg_count = cfg .get_int(&key) @@ -400,11 +401,13 @@ fn convert_node_config( // block sync let key = config_string("base_node", &net_str, "force_sync_peers"); - let force_sync_peers = optional( - cfg.get_array(&key) - .map(|values| values.into_iter().map(|v| v.into_str().unwrap()).collect()), - )? - .unwrap_or_default(); + let force_sync_peers = match cfg.get_array(&key) { + Ok(peers) => peers.into_iter().map(|v| v.into_str().unwrap()).collect(), + Err(..) => match cfg.get_str(&key) { + Ok(s) => s.split(',').map(|v| v.to_string()).collect(), + Err(..) => vec![], + }, + }; // Liveness auto ping interval let key = config_string("base_node", &net_str, "auto_ping_interval"); @@ -473,6 +476,18 @@ fn convert_node_config( let key = "wallet.transaction_num_confirmations_required"; let transaction_num_confirmations_required = optional(cfg.get_int(&key))?.unwrap_or(3) as u64; + let key = "wallet.transaction_event_channel_size"; + let transaction_event_channel_size = optional(cfg.get_int(&key))?.unwrap_or(1000) as usize; + + let key = "wallet.base_node_event_channel_size"; + let base_node_event_channel_size = optional(cfg.get_int(&key))?.unwrap_or(250) as usize; + + let key = "wallet.output_manager_event_channel_size"; + let output_manager_event_channel_size = optional(cfg.get_int(&key))?.unwrap_or(250) as usize; + + let key = "wallet.base_node_update_publisher_channel_size"; + let base_node_update_publisher_channel_size = optional(cfg.get_int(&key))?.unwrap_or(50) as usize; + let key = "wallet.prevent_fee_gt_amount"; let prevent_fee_gt_amount = cfg .get_bool(&key) @@ -505,18 +520,16 @@ fn convert_node_config( let console_wallet_notify_file = optional(cfg.get_str(key))?.map(PathBuf::from); let key = "wallet.base_node_service_refresh_interval"; - let wallet_base_node_service_refresh_interval = match 
cfg.get_int(key) { - Ok(seconds) => seconds as u64, - Err(ConfigError::NotFound(_)) => 10, - Err(e) => return Err(ConfigurationError::new(&key, &e.to_string())), - }; + let wallet_base_node_service_refresh_interval = cfg + .get_int(key) + .map(|seconds| seconds as u64) + .map_err(|e| ConfigurationError::new(&key, &e.to_string()))?; let key = "wallet.base_node_service_request_max_age"; - let wallet_base_node_service_request_max_age = match cfg.get_int(key) { - Ok(seconds) => seconds as u64, - Err(ConfigError::NotFound(_)) => 60, - Err(e) => return Err(ConfigurationError::new(&key, &e.to_string())), - }; + let wallet_base_node_service_request_max_age = cfg + .get_int(key) + .map(|seconds| seconds as u64) + .map_err(|e| ConfigurationError::new(&key, &e.to_string()))?; let key = "common.liveness_max_sessions"; let liveness_max_sessions = cfg @@ -549,8 +562,8 @@ fn convert_node_config( .get_int(&key) .map_err(|e| ConfigurationError::new(&key, &e.to_string()))? as usize; - let key = "common.buffer_size_base_node_wallet"; - let buffer_size_base_node_wallet = cfg + let key = "common.buffer_size_console_wallet"; + let buffer_size_console_wallet = cfg .get_int(&key) .map_err(|e| ConfigurationError::new(&key, &e.to_string()))? as usize; @@ -559,8 +572,8 @@ fn convert_node_config( .get_int(&key) .map_err(|e| ConfigurationError::new(&key, &e.to_string()))? as usize; - let key = "common.buffer_rate_limit_base_node_wallet"; - let buffer_rate_limit_base_node_wallet = + let key = "common.buffer_rate_limit_console_wallet"; + let buffer_rate_limit_console_wallet = cfg.get_int(&key) .map_err(|e| ConfigurationError::new(&key, &e.to_string()))? 
as usize; @@ -616,6 +629,15 @@ fn convert_node_config( .map_err(|e| ConfigurationError::new(&key, &e.to_string())) })?; + let key = config_string("stratum_transcoder", &net_str, "transcoder_host_address"); + let transcoder_host_address = cfg + .get_str(&key) + .map_err(|e| ConfigurationError::new(&key, &e.to_string())) + .and_then(|addr| { + addr.parse::() + .map_err(|e| ConfigurationError::new(&key, &e.to_string())) + })?; + let key = config_string("merge_mining_proxy", &net_str, "wait_for_initial_sync_at_startup"); let wait_for_initial_sync_at_startup = cfg .get_bool(&key) @@ -658,6 +680,18 @@ fn convert_node_config( let key = "common.auto_update.hashes_sig_url"; let autoupdate_hashes_sig_url = cfg.get_str(&key)?; + let key = "mining_node.mining_pool_address"; + let mining_pool_address = cfg.get_str(&key).unwrap_or_else(|_| "".to_string()); + let key = "mining_node.mining_wallet_address"; + let mining_wallet_address = cfg.get_str(&key).unwrap_or_else(|_| "".to_string()); + let key = "mining_node.mining_worker_name"; + let mining_worker_name = cfg + .get_str(&key) + .unwrap_or_else(|_| "".to_string()) + .chars() + .filter(|c| c.is_alphanumeric()) + .collect::(); + Ok(GlobalConfig { autoupdate_check_interval, autoupdate_dns_hosts, @@ -689,7 +723,6 @@ fn convert_node_config( dns_seeds_name_server, dns_seeds_use_dnssec, peer_db_path, - enable_wallet, num_mining_threads, base_node_tor_identity_file, console_wallet_identity_file, @@ -699,9 +732,9 @@ fn convert_node_config( wallet_peer_db_path, console_wallet_peer_db_path, buffer_size_base_node, - buffer_size_base_node_wallet, + buffer_size_console_wallet, buffer_rate_limit_base_node, - buffer_rate_limit_base_node_wallet, + buffer_rate_limit_console_wallet, dedup_cache_capacity, fetch_blocks_timeout, fetch_utxos_timeout, @@ -715,6 +748,10 @@ fn convert_node_config( transaction_broadcast_send_timeout, transaction_routing_mechanism, transaction_num_confirmations_required, + transaction_event_channel_size, + 
base_node_event_channel_size, + output_manager_event_channel_size, + base_node_update_publisher_channel_size, console_wallet_password, wallet_command_send_wait_stage, wallet_command_send_wait_timeout, @@ -723,6 +760,7 @@ fn convert_node_config( wallet_base_node_service_request_max_age, prevent_fee_gt_amount, proxy_host_address, + transcoder_host_address, proxy_submit_to_origin, monerod_url, monerod_username, @@ -737,6 +775,9 @@ fn convert_node_config( flood_ban_max_msg_count, mine_on_tip_only, validate_tip_timeout_sec, + mining_pool_address, + mining_wallet_address, + mining_worker_name, }) } diff --git a/common/src/configuration/utils.rs b/common/src/configuration/utils.rs index 5d65103d9e..1f291cf0ce 100644 --- a/common/src/configuration/utils.rs +++ b/common/src/configuration/utils.rs @@ -61,9 +61,9 @@ pub fn default_config(bootstrap: &ConfigBootstrap) -> Config { cfg.set_default("common.liveness_max_sessions", 0).unwrap(); cfg.set_default("common.denylist_ban_period", 1440).unwrap(); cfg.set_default("common.buffer_size_base_node", 1_500).unwrap(); - cfg.set_default("common.buffer_size_base_node_wallet", 50_000).unwrap(); + cfg.set_default("common.buffer_size_console_wallet", 50_000).unwrap(); cfg.set_default("common.buffer_rate_limit_base_node", 1_000).unwrap(); - cfg.set_default("common.buffer_rate_limit_base_node_wallet", 1_000) + cfg.set_default("common.buffer_rate_limit_console_wallet", 1_000) .unwrap(); cfg.set_default("common.dedup_cache_capacity", 2_500).unwrap(); cfg.set_default("common.fetch_blocks_timeout", 150).unwrap(); @@ -98,7 +98,8 @@ pub fn default_config(bootstrap: &ConfigBootstrap) -> Config { ) .unwrap(); cfg.set_default("wallet.base_node_query_timeout", 60).unwrap(); - // 60 sec * 60 minutes * 12 hours. 
+ cfg.set_default("wallet.base_node_service_refresh_interval", 5).unwrap(); + cfg.set_default("wallet.base_node_service_request_max_age", 60).unwrap(); cfg.set_default("wallet.scan_for_utxo_interval", 60 * 60 * 12).unwrap(); cfg.set_default("wallet.transaction_broadcast_monitoring_timeout", 60) .unwrap(); @@ -172,7 +173,6 @@ pub fn default_config(bootstrap: &ConfigBootstrap) -> Config { .unwrap(); cfg.set_default("base_node.mainnet.grpc_console_wallet_address", "127.0.0.1:18143") .unwrap(); - cfg.set_default("base_node.mainnet.enable_wallet", true).unwrap(); cfg.set_default("base_node.mainnet.flood_ban_max_msg_count", 10000) .unwrap(); @@ -228,7 +228,6 @@ pub fn default_config(bootstrap: &ConfigBootstrap) -> Config { .unwrap(); cfg.set_default("base_node.weatherwax.grpc_console_wallet_address", "127.0.0.1:18143") .unwrap(); - cfg.set_default("base_node.weatherwax.enable_wallet", true).unwrap(); cfg.set_default("base_node.weatherwax.dns_seeds_name_server", "1.1.1.1:53") .unwrap(); @@ -242,10 +241,21 @@ pub fn default_config(bootstrap: &ConfigBootstrap) -> Config { set_transport_defaults(&mut cfg).unwrap(); set_merge_mining_defaults(&mut cfg); set_mining_node_defaults(&mut cfg); + set_stratum_transcoder_defaults(&mut cfg); cfg } +fn set_stratum_transcoder_defaults(cfg: &mut Config) { + cfg.set_default("stratum_transcoder.mainnet.transcoder_host_address", "127.0.0.1:7879") + .unwrap(); + cfg.set_default( + "stratum_transcoder.weatherwax.transcoder_host_address", + "127.0.0.1:7879", + ) + .unwrap(); +} + fn set_merge_mining_defaults(cfg: &mut Config) { cfg.set_default( "merge_mining_proxy.mainnet.monerod_url", @@ -262,7 +272,6 @@ fn set_merge_mining_defaults(cfg: &mut Config) { .unwrap(); cfg.set_default("merge_mining_proxy.mainnet.wait_for_initial_sync_at_startup", true) .unwrap(); - cfg.set_default( "merge_mining_proxy.weatherwax.monerod_url", "http://monero-stagenet.exan.tech:38081", diff --git a/common/src/lib.rs b/common/src/lib.rs index bc65a132a0..6f5c98a2e4 
100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -98,6 +98,7 @@ pub const DEFAULT_CONFIG: &str = "config/config.toml"; pub const DEFAULT_BASE_NODE_LOG_CONFIG: &str = "config/log4rs_base_node.yml"; pub const DEFAULT_WALLET_LOG_CONFIG: &str = "config/log4rs_console_wallet.yml"; pub const DEFAULT_MERGE_MINING_PROXY_LOG_CONFIG: &str = "config/log4rs_merge_mining_proxy.yml"; +pub const DEFAULT_STRATUM_TRANSCODER_LOG_CONFIG: &str = "config/log4rs_miningcore_transcoder.yml"; pub const DEFAULT_MINING_NODE_LOG_CONFIG: &str = "config/log4rs_mining_node.yml"; pub(crate) const LOG_TARGET: &str = "common::config"; diff --git a/common/src/logging.rs b/common/src/logging.rs index 1cf3621325..d9764381c8 100644 --- a/common/src/logging.rs +++ b/common/src/logging.rs @@ -21,6 +21,8 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // +// use log::LevelFilter; +// use simplelog::*; use std::{fs, fs::File, io::Write, path::Path}; /// Set up application-level logging using the Log4rs configuration file specified in @@ -29,10 +31,61 @@ pub fn initialize_logging(config_file: &Path) -> bool { "Initializing logging according to {:?}", config_file.to_str().unwrap_or("[??]") ); + if let Err(e) = log4rs::init_file(config_file, Default::default()) { println!("We couldn't load a logging configuration file. 
{}", e.to_string()); return false; } + + // simplelog config - perhaps for future use + // let config = ConfigBuilder::new() + // .set_thread_level(LevelFilter::Error) + // .set_time_to_local(true) + // .set_time_format_str("%H:%M") + // .build(); + // + // let network_config = ConfigBuilder::new() + // .set_thread_level(LevelFilter::Error) + // .set_time_to_local(true) + // .build(); + // + // let log_level = env::var("RUST_LOG").unwrap_or("Info".to_string()); + // CombinedLogger::init(vec![ + // TermLogger::new( + // LevelFilter::Warn, + // ConfigBuilder::new() + // .set_thread_level(LevelFilter::Error) + // .set_time_to_local(true) + // .set_time_format_str("%H:%M") + // .build(), + // TerminalMode::Mixed, + // ColorChoice::Auto, + // ), + // WriteLogger::new( + // LevelFilter::from_str(log_level.as_str()).unwrap_or(LevelFilter::Info), + // ConfigBuilder::new() + // .set_thread_level(LevelFilter::Error) + // .add_filter_ignore_str("comms") + // .add_filter_ignore_str("p2p") + // .add_filter_ignore_str("yamux") + // .add_filter_ignore_str("mio") + // .build(), + // File::create("log/log.log").unwrap(), + // ), + // WriteLogger::new( + // LevelFilter::from_str(log_level.as_str()).unwrap_or(LevelFilter::Info), + // ConfigBuilder::new() + // .set_thread_level(LevelFilter::Error) + // .add_filter_allow_str("comms") + // .add_filter_allow_str("p2p") + // .add_filter_allow_str("yamux") + // .add_filter_allow_str("mio") + // .build(), + // File::create("log/network.log").unwrap(), + // ), + // ]) + // .unwrap(); + true } @@ -69,6 +122,17 @@ pub fn install_default_merge_mining_proxy_logfile_config(path: &Path) -> Result< file.write_all(source.as_ref()) } +/// Installs a new default logfile configuration, copied from `log4rs_sample_transcoder.yml` to the given path. 
+pub fn install_default_stratum_transcoder_logfile_config(path: &Path) -> Result<(), std::io::Error> { + let source = include_str!("../logging/log4rs_sample_transcoder.yml"); + if let Some(d) = path.parent() { + fs::create_dir_all(d)? + }; + // Note: `fs::write(path, source)` did not work as expected, as the file name was not changed + let mut file = File::create(path)?; + file.write_all(source.as_ref()) +} + /// Installs a new default logfile configuration, copied from `log4rs_sample_wallet.yml` to the given path. pub fn install_default_mining_node_logfile_config(path: &Path) -> Result<(), std::io::Error> { let source = include_str!("../logging/log4rs_sample_mining_node.yml"); diff --git a/comms/Cargo.toml b/comms/Cargo.toml index 915884ff8d..71b8eec12f 100644 --- a/comms/Cargo.toml +++ b/comms/Cargo.toml @@ -6,7 +6,7 @@ repository = "https://github.com/tari-project/tari" homepage = "https://tari.com" readme = "README.md" license = "BSD-3-Clause" -version = "0.9.1" +version = "0.9.5" edition = "2018" [dependencies] @@ -38,16 +38,23 @@ serde_derive = "1.0.119" snow = {version="=0.8.0", features=["default-resolver"]} thiserror = "1.0.20" tokio = {version="~0.2.19", features=["blocking", "time", "tcp", "dns", "sync", "stream", "signal"]} -tokio-util = {version="0.2.0", features=["codec"]} +tokio-util = {version="0.3.1", features=["codec"]} tower= "0.3.1" +tracing = "0.1.26" +tracing-futures = "0.2.5" yamux = "=0.9.0" +# network tracing, rt-tokio for async batch export +opentelemetry = { version = "0.16", default-features = false, features = ["trace","rt-tokio"] } +opentelemetry-jaeger = { version="0.15", features=["rt-tokio"]} + # RPC dependencies tower-make = {version="0.3.0", optional=true} anyhow = "1.0.32" [dev-dependencies] tari_test_utils = {version="^0.9", path="../infrastructure/test_utils"} +tari_comms_rpc_macros = {version="*", path="./rpc_macros"} env_logger = "0.7.0" serde_json = "1.0.39" diff --git a/comms/dht/Cargo.toml b/comms/dht/Cargo.toml index 
cf0f91d0f7..c75e423543 100644 --- a/comms/dht/Cargo.toml +++ b/comms/dht/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tari_comms_dht" -version = "0.9.1" +version = "0.9.5" authors = ["The Tari Development Community"] description = "Tari comms DHT module" repository = "https://github.com/tari-project/tari" diff --git a/comms/dht/src/actor.rs b/comms/dht/src/actor.rs index e67ce3ae38..c2c2d4e52a 100644 --- a/comms/dht/src/actor.rs +++ b/comms/dht/src/actor.rs @@ -28,7 +28,7 @@ //! [DhtRequest]: ./enum.DhtRequest.html use crate::{ - broadcast_strategy::BroadcastStrategy, + broadcast_strategy::{BroadcastClosestRequest, BroadcastStrategy}, dedup::DedupCacheDatabase, discovery::DhtDiscoveryError, outbound::{DhtOutboundError, OutboundMessageRequester, SendMessageParams}, @@ -416,43 +416,19 @@ impl DhtActor { .await?; Ok(peers.into_iter().map(|p| p.peer_node_id().clone()).collect()) }, - Closest(closest_request) => { - let connections = connectivity - .select_connections(ConnectivitySelection::closest_to( - closest_request.node_id.clone(), - config.broadcast_factor, - closest_request.excluded_peers.clone(), - )) - .await?; - - let mut candidates = connections - .iter() - .map(|conn| conn.peer_node_id()) - .cloned() - .collect::>(); - - if !closest_request.connected_only { - let excluded = closest_request - .excluded_peers - .iter() - .chain(candidates.iter()) - .cloned() - .collect::>(); - // If we don't have enough connections, let's select some more disconnected peers (at least 2) - let n = cmp::max(config.broadcast_factor.saturating_sub(candidates.len()), 2); - let additional = Self::select_closest_peers_for_propagation( - &peer_manager, - &closest_request.node_id, - n, - &excluded, - PeerFeatures::MESSAGE_PROPAGATION, - ) - .await?; - - candidates.extend(additional); + ClosestNodes(closest_request) => { + Self::select_closest_node_connected(closest_request, config, connectivity, peer_manager).await + }, + DirectOrClosestNodes(closest_request) => { + // First check if a 
direct connection exists + if connectivity + .get_connection(closest_request.node_id.clone()) + .await? + .is_some() + { + return Ok(vec![closest_request.node_id.clone()]); } - - Ok(candidates) + Self::select_closest_node_connected(closest_request, config, connectivity, peer_manager).await }, Random(n, excluded) => { // Send to a random set of peers of size n that are Communication Nodes @@ -659,6 +635,50 @@ impl DhtActor { Ok(peers.into_iter().map(|p| p.node_id).collect()) } + + async fn select_closest_node_connected( + closest_request: Box, + config: DhtConfig, + mut connectivity: ConnectivityRequester, + peer_manager: Arc, + ) -> Result, DhtActorError> { + let connections = connectivity + .select_connections(ConnectivitySelection::closest_to( + closest_request.node_id.clone(), + config.broadcast_factor, + closest_request.excluded_peers.clone(), + )) + .await?; + + let mut candidates = connections + .iter() + .map(|conn| conn.peer_node_id()) + .cloned() + .collect::>(); + + if !closest_request.connected_only { + let excluded = closest_request + .excluded_peers + .iter() + .chain(candidates.iter()) + .cloned() + .collect::>(); + // If we don't have enough connections, let's select some more disconnected peers (at least 2) + let n = cmp::max(config.broadcast_factor.saturating_sub(candidates.len()), 2); + let additional = Self::select_closest_peers_for_propagation( + &peer_manager, + &closest_request.node_id, + n, + &excluded, + PeerFeatures::MESSAGE_PROPAGATION, + ) + .await?; + + candidates.extend(additional); + } + + Ok(candidates) + } } #[cfg(test)] @@ -855,7 +875,7 @@ mod test { mock.spawn(); let (conn_in, _, conn_out, _) = - create_peer_connection_mock_pair(1, client_node_identity.to_peer(), node_identity.to_peer()).await; + create_peer_connection_mock_pair(client_node_identity.to_peer(), node_identity.to_peer()).await; connectivity_manager_mock_state.add_active_connection(conn_in).await; peer_manager.add_peer(make_node_identity().to_peer()).await.unwrap(); @@ 
-888,6 +908,7 @@ mod test { connectivity_manager_mock_state .set_selected_connections(vec![conn_out.clone()]) .await; + let peers = requester .select_peers(BroadcastStrategy::Broadcast(Vec::new())) .await @@ -915,7 +936,29 @@ mod test { connected_only: false, }); let peers = requester - .select_peers(BroadcastStrategy::Closest(send_request)) + .select_peers(BroadcastStrategy::ClosestNodes(send_request)) + .await + .unwrap(); + assert_eq!(peers.len(), 2); + + let send_request = Box::new(BroadcastClosestRequest { + node_id: node_identity.node_id().clone(), + excluded_peers: vec![], + connected_only: false, + }); + let peers = requester + .select_peers(BroadcastStrategy::DirectOrClosestNodes(send_request)) + .await + .unwrap(); + assert_eq!(peers.len(), 1); + + let send_request = Box::new(BroadcastClosestRequest { + node_id: client_node_identity.node_id().clone(), + excluded_peers: vec![], + connected_only: false, + }); + let peers = requester + .select_peers(BroadcastStrategy::DirectOrClosestNodes(send_request)) .await .unwrap(); assert_eq!(peers.len(), 2); diff --git a/comms/dht/src/broadcast_strategy.rs b/comms/dht/src/broadcast_strategy.rs index 3e1b356067..9077cc3a58 100644 --- a/comms/dht/src/broadcast_strategy.rs +++ b/comms/dht/src/broadcast_strategy.rs @@ -57,7 +57,9 @@ pub enum BroadcastStrategy { /// Send to a random set of peers of size n that are Communication Nodes, excluding the given node IDs Random(usize, Vec), /// Send to all n nearest Communication Nodes according to the given BroadcastClosestRequest - Closest(Box), + ClosestNodes(Box), + /// Send directly to destination if connected but otherwise send to all n nearest Communication Nodes + DirectOrClosestNodes(Box), Broadcast(Vec), /// Propagate to a set of closest neighbours and random peers Propagate(NodeDestination, Vec), @@ -70,7 +72,8 @@ impl fmt::Display for BroadcastStrategy { DirectPublicKey(pk) => write!(f, "DirectPublicKey({})", pk), DirectNodeId(node_id) => write!(f, "DirectNodeId({})", 
node_id), Flood(excluded) => write!(f, "Flood({} excluded)", excluded.len()), - Closest(request) => write!(f, "Closest({})", request), + ClosestNodes(request) => write!(f, "ClosestNodes({})", request), + DirectOrClosestNodes(request) => write!(f, "DirectOrClosestNodes({})", request), Random(n, excluded) => write!(f, "Random({}, {} excluded)", n, excluded.len()), Broadcast(excluded) => write!(f, "Broadcast({} excluded)", excluded.len()), Propagate(destination, excluded) => write!(f, "Propagate({}, {} excluded)", destination, excluded.len(),), @@ -79,13 +82,18 @@ impl fmt::Display for BroadcastStrategy { } impl BroadcastStrategy { - /// Returns true if this strategy will send multiple messages, otherwise false - pub fn is_multi_message(&self) -> bool { + /// Returns true if this strategy will send multiple indirect messages, otherwise false + pub fn is_multi_message(&self, chosen_peers: &[NodeId]) -> bool { use BroadcastStrategy::*; - matches!( - self, - Closest(_) | Flood(_) | Broadcast(_) | Random(_, _) | Propagate(_, _) - ) + + match self { + DirectOrClosestNodes(strategy) => { + // Testing if there is a single chosen peer and it is the target NodeId + chosen_peers.len() == 1 && chosen_peers.first() == Some(&strategy.node_id) + }, + ClosestNodes(_) | Broadcast(_) | Propagate(_, _) | Flood(_) | Random(_, _) => true, + _ => false, + } } pub fn is_direct(&self) -> bool { @@ -129,7 +137,7 @@ mod test { assert!(!BroadcastStrategy::Broadcast(Default::default()).is_direct()); assert!(!BroadcastStrategy::Propagate(Default::default(), Default::default()).is_direct(),); assert!(!BroadcastStrategy::Flood(Default::default()).is_direct()); - assert!(!BroadcastStrategy::Closest(Box::new(BroadcastClosestRequest { + assert!(!BroadcastStrategy::ClosestNodes(Box::new(BroadcastClosestRequest { node_id: NodeId::default(), excluded_peers: Default::default(), connected_only: false @@ -152,7 +160,7 @@ mod test { assert!(BroadcastStrategy::Flood(Default::default()) .direct_public_key() 
.is_none()); - assert!(BroadcastStrategy::Closest(Box::new(BroadcastClosestRequest { + assert!(BroadcastStrategy::ClosestNodes(Box::new(BroadcastClosestRequest { node_id: NodeId::default(), excluded_peers: Default::default(), connected_only: false @@ -174,7 +182,7 @@ mod test { .direct_node_id() .is_none()); assert!(BroadcastStrategy::Flood(Default::default()).direct_node_id().is_none()); - assert!(BroadcastStrategy::Closest(Box::new(BroadcastClosestRequest { + assert!(BroadcastStrategy::ClosestNodes(Box::new(BroadcastClosestRequest { node_id: NodeId::default(), excluded_peers: Default::default(), connected_only: false diff --git a/comms/dht/src/config.rs b/comms/dht/src/config.rs index 90fc9b8b72..0612445dca 100644 --- a/comms/dht/src/config.rs +++ b/comms/dht/src/config.rs @@ -66,11 +66,6 @@ pub struct DhtConfig { pub saf_max_message_size: usize, /// When true, store and forward messages are requested from peers on connect (Default: true) pub saf_auto_request: bool, - /// The minimum period used to request SAF messages from a peer. When requesting SAF messages, - /// it will request messages since the DHT last went offline, but this may be a small amount of - /// time, so `minimum_request_period` can be used so that messages aren't missed. 
- /// Default: 3 days - pub saf_minimum_request_period: Duration, /// The max capacity of the message hash cache /// Default: 2,500 pub dedup_cache_capacity: usize, @@ -154,7 +149,6 @@ impl Default for DhtConfig { saf_high_priority_msg_storage_ttl: Duration::from_secs(3 * 24 * 60 * 60), // 3 days saf_auto_request: true, saf_max_message_size: 512 * 1024, - saf_minimum_request_period: Duration::from_secs(3 * 24 * 60 * 60), // 3 days dedup_cache_capacity: 2_500, dedup_cache_trim_interval: Duration::from_secs(5 * 60), database_url: DbConnectionUrl::Memory, diff --git a/comms/dht/src/outbound/broadcast.rs b/comms/dht/src/outbound/broadcast.rs index a3b122f8ab..0aa9fab611 100644 --- a/comms/dht/src/outbound/broadcast.rs +++ b/comms/dht/src/outbound/broadcast.rs @@ -268,7 +268,7 @@ where S: Service is_discovery_enabled, ); - let is_broadcast = broadcast_strategy.is_multi_message(); + let is_broadcast = broadcast_strategy.is_multi_message(&peers); // Discovery is required if: // - Discovery is enabled for this request diff --git a/comms/dht/src/outbound/message_params.rs b/comms/dht/src/outbound/message_params.rs index ffc463771a..0ad00bbc4e 100644 --- a/comms/dht/src/outbound/message_params.rs +++ b/comms/dht/src/outbound/message_params.rs @@ -116,7 +116,7 @@ impl SendMessageParams { /// `node_id` - Select the closest known peers to this `NodeId` /// `excluded_peers` - vector of `NodeId`s to exclude from broadcast. pub fn closest(&mut self, node_id: NodeId, excluded_peers: Vec) -> &mut Self { - self.params_mut().broadcast_strategy = BroadcastStrategy::Closest(Box::new(BroadcastClosestRequest { + self.params_mut().broadcast_strategy = BroadcastStrategy::ClosestNodes(Box::new(BroadcastClosestRequest { excluded_peers, node_id, connected_only: false, @@ -124,10 +124,10 @@ impl SendMessageParams { self } - /// Set broadcast_strategy to Closest.`excluded_peers` are excluded. Only peers that are currently connected will be - /// included. 
+ /// Set broadcast_strategy to ClosestNodes.`excluded_peers` are excluded. Only peers that are currently connected + /// will be included. pub fn closest_connected(&mut self, node_id: NodeId, excluded_peers: Vec) -> &mut Self { - self.params_mut().broadcast_strategy = BroadcastStrategy::Closest(Box::new(BroadcastClosestRequest { + self.params_mut().broadcast_strategy = BroadcastStrategy::ClosestNodes(Box::new(BroadcastClosestRequest { excluded_peers, node_id, connected_only: true, @@ -135,6 +135,18 @@ impl SendMessageParams { self } + /// Set broadcast_strategy to DirectOrClosestNodes.`excluded_peers` are excluded. Only peers that are currently + /// connected will be included. + pub fn direct_or_closest_connected(&mut self, node_id: NodeId, excluded_peers: Vec) -> &mut Self { + self.params_mut().broadcast_strategy = + BroadcastStrategy::DirectOrClosestNodes(Box::new(BroadcastClosestRequest { + excluded_peers, + node_id, + connected_only: true, + })); + self + } + /// Set broadcast_strategy to Neighbours. `excluded_peers` are excluded. Only Peers that have /// `PeerFeatures::MESSAGE_PROPAGATION` are included. 
pub fn broadcast(&mut self, excluded_peers: Vec) -> &mut Self { diff --git a/comms/dht/src/outbound/mock.rs b/comms/dht/src/outbound/mock.rs index 6cf4b83e40..f5c3f30665 100644 --- a/comms/dht/src/outbound/mock.rs +++ b/comms/dht/src/outbound/mock.rs @@ -205,7 +205,7 @@ impl OutboundServiceMock { }, }; }, - BroadcastStrategy::Closest(_) => { + BroadcastStrategy::ClosestNodes(_) => { if behaviour.broadcast == ResponseType::Queued { let (response, mut inner_reply_tx) = self.add_call((*params).clone(), body); reply_tx.send(response).expect("Reply channel cancelled"); diff --git a/comms/dht/src/storage/dht_setting_entry.rs b/comms/dht/src/storage/dht_setting_entry.rs index 73cb39fe69..dd1e06597f 100644 --- a/comms/dht/src/storage/dht_setting_entry.rs +++ b/comms/dht/src/storage/dht_setting_entry.rs @@ -27,6 +27,8 @@ use std::fmt; pub enum DhtMetadataKey { /// Timestamp each time the DHT is shut down OfflineTimestamp, + /// Timestamp of the most recent SAF message received + LastSafMessageReceived, } impl fmt::Display for DhtMetadataKey { diff --git a/comms/dht/src/store_forward/database/mod.rs b/comms/dht/src/store_forward/database/mod.rs index ec6b19a42e..173d00e0ef 100644 --- a/comms/dht/src/store_forward/database/mod.rs +++ b/comms/dht/src/store_forward/database/mod.rs @@ -217,6 +217,17 @@ impl StoreAndForwardDatabase { .await } + pub(crate) async fn delete_messages_older_than(&self, since: NaiveDateTime) -> Result { + self.connection + .with_connection_async(move |conn| { + diesel::delete(stored_messages::table) + .filter(stored_messages::stored_at.lt(since)) + .execute(conn) + .map_err(Into::into) + }) + .await + } + pub(crate) async fn truncate_messages(&self, max_size: usize) -> Result { self.connection .with_connection_async(move |conn| { diff --git a/comms/dht/src/store_forward/forward.rs b/comms/dht/src/store_forward/forward.rs index 607dfe0fd1..95ce5e2500 100644 --- a/comms/dht/src/store_forward/forward.rs +++ b/comms/dht/src/store_forward/forward.rs @@ 
-219,7 +219,7 @@ where S: Service target: LOG_TARGET, "Forwarding SAF message directly to node: {}, Tag#{}", node_id, dht_header.message_tag ); - send_params.closest_connected(node_id.clone(), excluded_peers); + send_params.direct_or_closest_connected(node_id.clone(), excluded_peers); }, _ => { debug!( diff --git a/comms/dht/src/store_forward/message.rs b/comms/dht/src/store_forward/message.rs index d29481f3f2..85ba721934 100644 --- a/comms/dht/src/store_forward/message.rs +++ b/comms/dht/src/store_forward/message.rs @@ -52,12 +52,17 @@ impl StoredMessagesRequest { #[cfg(test)] impl StoredMessage { - pub fn new(version: u32, dht_header: crate::envelope::DhtMessageHeader, body: Vec) -> Self { + pub fn new( + version: u32, + dht_header: crate::envelope::DhtMessageHeader, + body: Vec, + stored_at: DateTime, + ) -> Self { Self { version, dht_header: Some(dht_header.into()), body, - stored_at: Some(datetime_to_timestamp(Utc::now())), + stored_at: Some(datetime_to_timestamp(stored_at)), } } } diff --git a/comms/dht/src/store_forward/saf_handler/task.rs b/comms/dht/src/store_forward/saf_handler/task.rs index e32e3f60a1..f3ba852118 100644 --- a/comms/dht/src/store_forward/saf_handler/task.rs +++ b/comms/dht/src/store_forward/saf_handler/task.rs @@ -36,8 +36,10 @@ use crate::{ StoredMessagesResponse, }, }, + storage::DhtMetadataKey, store_forward::{error::StoreAndForwardError, service::FetchStoredMessageQuery, StoreAndForwardRequester}, }; +use chrono::{DateTime, NaiveDateTime, Utc}; use digest::Digest; use futures::{channel::mpsc, future, stream, SinkExt, StreamExt}; use log::*; @@ -172,15 +174,19 @@ where S: Service // Compile a set of stored messages for the requesting peer let mut query = FetchStoredMessageQuery::new(source_pubkey, source_node_id.clone()); - if let Some(since) = retrieve_msgs.since.map(timestamp_to_datetime) { - debug!( - target: LOG_TARGET, - "Peer '{}' requested all messages since '{}'", - source_node_id.short_str(), - since - ); - query.since(since); 
- } + let since: Option> = match retrieve_msgs.since.map(timestamp_to_datetime) { + Some(since) => { + debug!( + target: LOG_TARGET, + "Peer '{}' requested all messages since '{}'", + source_node_id.short_str(), + since + ); + query.with_messages_since(since); + Some(since) + }, + None => None, + }; let response_types = vec![SafResponseType::ForMe]; @@ -188,7 +194,6 @@ where S: Service query.with_response_type(resp_type); let messages = self.saf_requester.fetch_messages(query.clone()).await?; - let message_ids = messages.iter().map(|msg| msg.id).collect::>(); let stored_messages = StoredMessagesResponse { messages: try_convert_all(messages)?, request_id: retrieve_msgs.request_id, @@ -201,6 +206,7 @@ where S: Service stored_messages.messages().len(), resp_type ); + match self .outbound_service .send_message_no_header( @@ -215,13 +221,15 @@ where S: Service .await { Ok(_) => { - debug!( - target: LOG_TARGET, - "Removing {} stored message(s) for peer '{}'", - message_ids.len(), - message.source_peer.node_id.short_str() - ); - self.saf_requester.remove_messages(message_ids).await?; + if let Some(threshold) = since { + debug!( + target: LOG_TARGET, + "Removing stored message(s) from before {} for peer '{}'", + threshold, + message.source_peer.node_id.short_str() + ); + self.saf_requester.remove_messages_older_than(threshold).await?; + } }, Err(err) => { error!( @@ -366,6 +374,14 @@ where S: Service return Err(StoreAndForwardError::DhtHeaderNotProvided); } + let stored_at = match message.stored_at { + None => chrono::MIN_DATETIME, + Some(t) => DateTime::from_utc( + NaiveDateTime::from_timestamp(t.seconds, t.nanos.try_into().unwrap_or(0)), + Utc, + ), + }; + let dht_header: DhtMessageHeader = message .dht_header .expect("previously checked") @@ -410,6 +426,27 @@ where S: Service DhtInboundMessage::new(MessageTag::new(), dht_header, Arc::clone(&source_peer), message.body); inbound_msg.is_saf_message = true; + let last_saf_received = self + .dht_requester + 
.get_metadata::>(DhtMetadataKey::LastSafMessageReceived) + .await + .ok() + .flatten() + .unwrap_or(chrono::MIN_DATETIME); + + if stored_at > last_saf_received { + if let Err(err) = self + .dht_requester + .set_metadata(DhtMetadataKey::LastSafMessageReceived, stored_at) + .await + { + warn!( + target: LOG_TARGET, + "Failed to set last SAF message received timestamp: {:?}", err + ); + } + } + Ok(DecryptedDhtMessage::succeeded( decrypted_body, authenticated_pk, @@ -515,6 +552,7 @@ mod test { use super::*; use crate::{ envelope::DhtMessageFlags, + outbound::mock::create_outbound_service_mock, proto::envelope::DhtHeader, store_forward::{message::StoredMessagePriority, StoredMessage}, test_utils::{ @@ -528,7 +566,7 @@ mod test { service_spy, }, }; - use chrono::Utc; + use chrono::{Duration as OldDuration, Utc}; use futures::channel::mpsc; use prost::Message; use std::time::Duration; @@ -536,12 +574,17 @@ mod test { use tari_crypto::tari_utilities::hex; use tari_test_utils::collect_stream; use tari_utilities::hex::Hex; - use tokio::runtime::Handle; + use tokio::{runtime::Handle, task, time::delay_for}; // TODO: unit tests for static functions (check_signature, etc) - fn make_stored_message(node_identity: &NodeIdentity, dht_header: DhtMessageHeader) -> StoredMessage { - let body = b"A".to_vec(); + fn make_stored_message( + message: String, + node_identity: &NodeIdentity, + dht_header: DhtMessageHeader, + stored_at: NaiveDateTime, + ) -> StoredMessage { + let body = message.as_bytes().to_vec(); let body_hash = hex::to_hex(&Challenge::new().chain(body.clone()).finalize()); StoredMessage { id: 1, @@ -554,19 +597,20 @@ mod test { body, is_encrypted: false, priority: StoredMessagePriority::High as i32, - stored_at: Utc::now().naive_utc(), + stored_at, body_hash, } } - #[tokio_macros::test_basic] + #[tokio_macros::test] async fn request_stored_messages() { - let rt_handle = Handle::current(); let spy = service_spy(); let (requester, mock_state) = 
create_store_and_forward_mock(); let peer_manager = build_peer_manager(); - let (oms_tx, mut oms_rx) = mpsc::channel(1); + let (outbound_requester, outbound_mock) = create_outbound_service_mock(10); + let oms_mock_state = outbound_mock.get_state(); + task::spawn(outbound_mock.run()); let node_identity = make_node_identity(); @@ -606,29 +650,59 @@ mod test { requester.clone(), dht_requester.clone(), peer_manager.clone(), - OutboundMessageRequester::new(oms_tx.clone()), + outbound_requester.clone(), node_identity.clone(), message.clone(), saf_response_signal_sender.clone(), ); - rt_handle.spawn(task.run()); + task::spawn(task.run()); - let (_, body) = unwrap_oms_send_msg!(oms_rx.next().await.unwrap()); - let body = body.to_vec(); + for _ in 0..6 { + if oms_mock_state.call_count() >= 1 { + break; + } + delay_for(Duration::from_secs(5)).await; + } + assert_eq!(oms_mock_state.call_count(), 1); + + let call = oms_mock_state.pop_call().unwrap(); + let body = call.1.to_vec(); let body = EnvelopeBody::decode(body.as_slice()).unwrap(); let msg = body.decode_part::(0).unwrap().unwrap(); assert_eq!(msg.messages().len(), 0); assert!(!spy.is_called()); - assert_eq!(mock_state.call_count(), 1); + // assert_eq!(mock_state.call_count(), 2); let calls = mock_state.take_calls().await; - assert!(calls[0].contains("FetchMessages")); - assert!(calls[0].contains(node_identity.public_key().to_hex().as_str())); - assert!(calls[0].contains(format!("{:?}", since).as_str())); + let fetch_call = calls.iter().find(|c| c.contains("FetchMessages")).unwrap(); + assert!(fetch_call.contains(node_identity.public_key().to_hex().as_str())); + assert!(fetch_call.contains(format!("{:?}", since).as_str())); + let msg1_time = Utc::now() + .checked_sub_signed(OldDuration::from_std(Duration::from_secs(120)).unwrap()) + .unwrap(); + let msg1 = "one".to_string(); mock_state - .add_message(make_stored_message(&node_identity, dht_header)) + .add_message(make_stored_message( + msg1.clone(), + &node_identity, + 
dht_header.clone(), + msg1_time.naive_utc(), + )) + .await; + + let msg2_time = Utc::now() + .checked_sub_signed(OldDuration::from_std(Duration::from_secs(30)).unwrap()) + .unwrap(); + let msg2 = "two".to_string(); + mock_state + .add_message(make_stored_message( + msg2.clone(), + &node_identity, + dht_header, + msg2_time.naive_utc(), + )) .await; // Now lets test its response where there are messages to return. @@ -638,27 +712,42 @@ mod test { requester, dht_requester, peer_manager, - OutboundMessageRequester::new(oms_tx), + outbound_requester.clone(), node_identity.clone(), message, saf_response_signal_sender, ); - rt_handle.spawn(task.run()); + task::spawn(task.run()); - let (_, body) = unwrap_oms_send_msg!(oms_rx.next().await.unwrap()); - let body = body.to_vec(); + for _ in 0..6 { + if oms_mock_state.call_count() >= 1 { + break; + } + delay_for(Duration::from_secs(5)).await; + } + assert_eq!(oms_mock_state.call_count(), 1); + let call = oms_mock_state.pop_call().unwrap(); + + let body = call.1.to_vec(); let body = EnvelopeBody::decode(body.as_slice()).unwrap(); let msg = body.decode_part::(0).unwrap().unwrap(); + assert_eq!(msg.messages().len(), 1); - assert_eq!(msg.messages()[0].body, b"A"); + assert_eq!(msg.messages()[0].body, "two".as_bytes()); assert!(!spy.is_called()); assert_eq!(mock_state.call_count(), 2); let calls = mock_state.take_calls().await; - assert!(calls[0].contains("FetchMessages")); - assert!(calls[0].contains(node_identity.public_key().to_hex().as_str())); - assert!(calls[0].contains(format!("{:?}", since).as_str())); + + let fetch_call = calls.iter().find(|c| c.contains("FetchMessages")).unwrap(); + assert!(fetch_call.contains(node_identity.public_key().to_hex().as_str())); + assert!(fetch_call.contains(format!("{:?}", since).as_str())); + + let stored_messages = mock_state.get_messages().await; + + assert!(!stored_messages.iter().any(|s| s.body == msg1.as_bytes())); + assert!(stored_messages.iter().any(|s| s.body == msg2.as_bytes())); } 
#[tokio_macros::test_basic] @@ -689,13 +778,23 @@ mod test { .await .unwrap(); - let msg1 = ProtoStoredMessage::new(0, inbound_msg_a.dht_header.clone(), inbound_msg_a.body); - let msg2 = ProtoStoredMessage::new(0, inbound_msg_b.dht_header, inbound_msg_b.body); + let msg1_time = Utc::now() + .checked_sub_signed(OldDuration::from_std(Duration::from_secs(60)).unwrap()) + .unwrap(); + let msg1 = ProtoStoredMessage::new(0, inbound_msg_a.dht_header.clone(), inbound_msg_a.body, msg1_time); + let msg2_time = Utc::now() + .checked_sub_signed(OldDuration::from_std(Duration::from_secs(30)).unwrap()) + .unwrap(); + let msg2 = ProtoStoredMessage::new(0, inbound_msg_b.dht_header, inbound_msg_b.body, msg2_time); + // Cleartext message let clear_msg = wrap_in_envelope_body!(b"Clear".to_vec()).to_encoded_bytes(); let clear_header = make_dht_inbound_message(&node_identity, clear_msg.clone(), DhtMessageFlags::empty(), false).dht_header; - let msg_clear = ProtoStoredMessage::new(0, clear_header, clear_msg); + let msg_clear_time = Utc::now() + .checked_sub_signed(OldDuration::from_std(Duration::from_secs(120)).unwrap()) + .unwrap(); + let msg_clear = ProtoStoredMessage::new(0, clear_header, clear_msg, msg_clear_time); let mut message = DecryptedDhtMessage::succeeded( wrap_in_envelope_body!(StoredMessagesResponse { messages: vec![msg1.clone(), msg2, msg_clear], @@ -712,15 +811,21 @@ mod test { ); message.dht_header.message_type = DhtMessageType::SafStoredMessages; - let (dht_requester, mock) = create_dht_actor_mock(1); + let (mut dht_requester, mock) = create_dht_actor_mock(1); rt_handle.spawn(mock.run()); let (saf_response_signal_sender, mut saf_response_signal_receiver) = mpsc::channel(20); + assert!(dht_requester + .get_metadata::>(DhtMetadataKey::LastSafMessageReceived) + .await + .unwrap() + .is_none()); + let task = MessageHandlerTask::new( Default::default(), spy.to_service::(), requester, - dht_requester, + dht_requester.clone(), peer_manager, 
OutboundMessageRequester::new(oms_tx), node_identity, @@ -746,5 +851,13 @@ mod test { timeout = Duration::from_secs(20) ); assert_eq!(signals.len(), 1); + + let last_saf_received = dht_requester + .get_metadata::>(DhtMetadataKey::LastSafMessageReceived) + .await + .unwrap() + .unwrap(); + + assert_eq!(last_saf_received, msg2_time); } } diff --git a/comms/dht/src/store_forward/service.rs b/comms/dht/src/store_forward/service.rs index c96d4311cb..5d06d85d56 100644 --- a/comms/dht/src/store_forward/service.rs +++ b/comms/dht/src/store_forward/service.rs @@ -43,7 +43,7 @@ use futures::{ StreamExt, }; use log::*; -use std::{cmp, convert::TryFrom, sync::Arc, time::Duration}; +use std::{convert::TryFrom, sync::Arc, time::Duration}; use tari_comms::{ connectivity::{ConnectivityEvent, ConnectivityEventRx, ConnectivityRequester}, peer_manager::{NodeId, PeerFeatures}, @@ -76,7 +76,7 @@ impl FetchStoredMessageQuery { } } - pub fn since(&mut self, since: DateTime) -> &mut Self { + pub fn with_messages_since(&mut self, since: DateTime) -> &mut Self { self.since = Some(since); self } @@ -85,6 +85,10 @@ impl FetchStoredMessageQuery { self.response_type = response_type; self } + + pub fn since(&self) -> Option> { + self.since + } } #[derive(Debug)] @@ -92,6 +96,7 @@ pub enum StoreAndForwardRequest { FetchMessages(FetchStoredMessageQuery, oneshot::Sender>>), InsertMessage(NewStoredMessage, oneshot::Sender>), RemoveMessages(Vec), + RemoveMessagesOlderThan(DateTime), SendStoreForwardRequestToPeer(Box), SendStoreForwardRequestNeighbours, } @@ -132,6 +137,14 @@ impl StoreAndForwardRequester { Ok(()) } + pub async fn remove_messages_older_than(&mut self, threshold: DateTime) -> SafResult<()> { + self.sender + .send(StoreAndForwardRequest::RemoveMessagesOlderThan(threshold)) + .await + .map_err(|_| StoreAndForwardError::RequesterChannelClosed)?; + Ok(()) + } + pub async fn request_saf_messages_from_peer(&mut self, node_id: NodeId) -> SafResult<()> { self.sender 
.send(StoreAndForwardRequest::SendStoreForwardRequestToPeer(Box::new(node_id))) @@ -297,6 +310,12 @@ impl StoreAndForwardService { ); } }, + RemoveMessagesOlderThan(threshold) => { + match self.database.delete_messages_older_than(threshold.naive_utc()).await { + Ok(_) => trace!(target: LOG_TARGET, "Removed messages older than {}", threshold), + Err(err) => error!(target: LOG_TARGET, "RemoveMessage failed because '{:?}'", err), + } + }, } } @@ -382,9 +401,9 @@ impl StoreAndForwardService { async fn get_saf_request(&mut self) -> SafResult { let request = self .dht_requester - .get_metadata(DhtMetadataKey::OfflineTimestamp) + .get_metadata(DhtMetadataKey::LastSafMessageReceived) .await? - .map(|t| StoredMessagesRequest::since(cmp::min(t, since_utc(self.config.saf_minimum_request_period)))) + .map(StoredMessagesRequest::since) .unwrap_or_else(StoredMessagesRequest::new); Ok(request) @@ -490,7 +509,3 @@ fn since(period: Duration) -> NaiveDateTime { .checked_sub_signed(period) .expect("period overflowed when used with checked_sub_signed") } - -fn since_utc(period: Duration) -> DateTime { - DateTime::::from_utc(since(period), Utc) -} diff --git a/comms/dht/src/store_forward/store.rs b/comms/dht/src/store_forward/store.rs index 32144df2a7..4393f36518 100644 --- a/comms/dht/src/store_forward/store.rs +++ b/comms/dht/src/store_forward/store.rs @@ -226,7 +226,7 @@ where S: Service + Se } if message.dht_header.message_type.is_saf_message() { - log_not_eligible("it is a SAF message"); + log_not_eligible("it is a SAF protocol message"); return Ok(None); } diff --git a/comms/dht/src/test_utils/store_and_forward_mock.rs b/comms/dht/src/test_utils/store_and_forward_mock.rs index 6a623a5764..0dd464c43a 100644 --- a/comms/dht/src/test_utils/store_and_forward_mock.rs +++ b/comms/dht/src/test_utils/store_and_forward_mock.rs @@ -83,7 +83,9 @@ impl StoreAndForwardMockState { } pub async fn take_calls(&self) -> Vec { - self.calls.write().await.drain(..).collect() + let calls = 
self.calls.write().await.drain(..).collect(); + self.call_count.store(0, Ordering::SeqCst); + calls } } @@ -115,9 +117,16 @@ impl StoreAndForwardMock { trace!(target: LOG_TARGET, "StoreAndForwardMock received request {:?}", req); self.state.add_call(&req).await; match req { - FetchMessages(_, reply_tx) => { + FetchMessages(request, reply_tx) => { + let since = request.since().unwrap(); + let msgs = self.state.stored_messages.read().await; - let _ = reply_tx.send(Ok(msgs.clone())); + + let _ = reply_tx.send(Ok(msgs + .clone() + .drain(..) + .filter(|m| m.stored_at >= since.naive_utc()) + .collect())); }, InsertMessage(msg, reply_tx) => { self.state.stored_messages.write().await.push(StoredMessage { @@ -143,6 +152,13 @@ impl StoreAndForwardMock { }, SendStoreForwardRequestToPeer(_) => {}, SendStoreForwardRequestNeighbours => {}, + RemoveMessagesOlderThan(threshold) => { + self.state + .stored_messages + .write() + .await + .retain(|msg| msg.stored_at >= threshold.naive_utc()); + }, } } } diff --git a/comms/rpc_macros/Cargo.toml b/comms/rpc_macros/Cargo.toml index f875bac4d4..3680ed81f9 100644 --- a/comms/rpc_macros/Cargo.toml +++ b/comms/rpc_macros/Cargo.toml @@ -6,7 +6,7 @@ repository = "https://github.com/tari-project/tari" homepage = "https://tari.com" readme = "README.md" license = "BSD-3-Clause" -version = "0.9.1" +version = "0.9.5" edition = "2018" [lib] diff --git a/comms/rpc_macros/src/generator.rs b/comms/rpc_macros/src/generator.rs index 0253b6a55f..f3e8cbffd1 100644 --- a/comms/rpc_macros/src/generator.rs +++ b/comms/rpc_macros/src/generator.rs @@ -196,7 +196,8 @@ impl RpcCodeGenerator { let client_struct_body = quote! 
{ pub async fn connect(framed: #dep_mod::CanonicalFraming) -> Result where TSubstream: #dep_mod::AsyncRead + #dep_mod::AsyncWrite + Unpin + Send + 'static { - let inner = #dep_mod::RpcClient::connect(Default::default(), framed).await?; + use #dep_mod::NamedProtocolService; + let inner = #dep_mod::RpcClient::connect(Default::default(), framed, Self::PROTOCOL_NAME.into()).await?; Ok(Self { inner }) } @@ -210,6 +211,10 @@ impl RpcCodeGenerator { self.inner.get_last_request_latency().await } + pub async fn ping(&mut self) -> Result { + self.inner.ping().await + } + pub fn close(&mut self) { self.inner.close(); } @@ -234,6 +239,12 @@ impl RpcCodeGenerator { Self { inner } } } + + impl #dep_mod::RpcPoolClient for #client_struct { + fn is_connected(&self) -> bool { + self.inner.is_connected() + } + } } } } diff --git a/comms/src/bounded_executor.rs b/comms/src/bounded_executor.rs index 239b938b2e..ee65e68476 100644 --- a/comms/src/bounded_executor.rs +++ b/comms/src/bounded_executor.rs @@ -28,6 +28,7 @@ use tokio::{ sync::{OwnedSemaphorePermit, Semaphore}, task::JoinHandle, }; +use tracing::{span, Instrument, Level}; /// Error emitted from [`try_spawn`](self::BoundedExecutor::try_spawn) when there are no tasks available #[derive(Debug)] @@ -40,6 +41,7 @@ pub struct TrySpawnError; pub struct BoundedExecutor { inner: runtime::Handle, semaphore: Arc, + max_available: usize, } impl BoundedExecutor { @@ -47,6 +49,7 @@ impl BoundedExecutor { Self { inner: executor, semaphore: Arc::new(Semaphore::new(num_permits)), + max_available: num_permits, } } @@ -70,12 +73,18 @@ impl BoundedExecutor { self.num_available() > 0 } - /// Returns the number tasks that can be spawned on this executor without blocking. + /// Returns the remaining number of tasks that can be spawned on this executor without waiting. 
#[inline] pub fn num_available(&self) -> usize { self.semaphore.available_permits() } + /// Returns the maximum number of concurrent tasks that can be spawned on this executor without waiting. + #[inline] + pub fn max_available(&self) -> usize { + self.max_available + } + pub fn try_spawn(&self, future: F) -> Result, TrySpawnError> where F: Future + Send + 'static, @@ -135,7 +144,8 @@ impl BoundedExecutor { F: Future + Send + 'static, F::Output: Send + 'static, { - let permit = self.semaphore.clone().acquire_owned().await; + let span = span!(Level::TRACE, "bounded_executor::waiting_time"); + let permit = self.semaphore.clone().acquire_owned().instrument(span).await; self.do_spawn(permit, future) } @@ -145,7 +155,8 @@ impl BoundedExecutor { F::Output: Send + 'static, { self.inner.spawn(async move { - let ret = future.await; + let span = span!(Level::TRACE, "bounded_executor::do_work"); + let ret = future.instrument(span).await; // Task is finished, release the permit drop(permit); ret diff --git a/comms/src/builder/error.rs b/comms/src/builder/error.rs index b213fd09f0..a5df9ed2a9 100644 --- a/comms/src/builder/error.rs +++ b/comms/src/builder/error.rs @@ -36,7 +36,7 @@ pub enum CommsBuilderError { ConnectionManagerError(#[from] ConnectionManagerError), #[error("Node identity not set. Call `with_node_identity(node_identity)` on [CommsBuilder]")] NodeIdentityNotSet, - #[error("Shutdown signa not set. Call `with_shutdown_signal(shutdown_signal)` on [CommsBuilder]")] + #[error("Shutdown signal not set. Call `with_shutdown_signal(shutdown_signal)` on [CommsBuilder]")] ShutdownSignalNotSet, #[error("The PeerStorage was not provided to the CommsBuilder. 
Use `with_peer_storage` to set it.")] PeerStorageNotProvided, diff --git a/comms/src/connection_manager/dialer.rs b/comms/src/connection_manager/dialer.rs index 26371c009b..538ffe89bd 100644 --- a/comms/src/connection_manager/dialer.rs +++ b/comms/src/connection_manager/dialer.rs @@ -55,6 +55,7 @@ use log::*; use std::{collections::HashMap, sync::Arc, time::Duration}; use tari_shutdown::{Shutdown, ShutdownSignal}; use tokio::{task::JoinHandle, time}; +use tracing::{self, span, Instrument, Level}; const LOG_TARGET: &str = "comms::connection_manager::dialer"; @@ -254,6 +255,7 @@ where }); } + #[tracing::instrument(skip(self, pending_dials, reply_tx))] fn handle_dial_peer_request( &mut self, pending_dials: &mut DialFuturesUnordered, @@ -281,6 +283,7 @@ where let noise_config = self.noise_config.clone(); let config = self.config.clone(); + let span = span!(Level::TRACE, "handle_dial_peer_request_inner1"); let dial_fut = async move { let (dial_state, dial_result) = Self::dial_peer_with_retry(dial_state, noise_config, transport, backoff, &config).await; @@ -314,7 +317,8 @@ where }, Err(err) => (dial_state, Err(err)), } - }; + } + .instrument(span); pending_dials.push(dial_fut.boxed()); } @@ -335,6 +339,7 @@ where } #[allow(clippy::too_many_arguments)] + #[tracing::instrument(skip(peer_manager, socket, conn_man_notifier, config, cancel_signal), err)] async fn perform_socket_upgrade_procedure( peer_manager: Arc, node_identity: Arc, @@ -419,6 +424,7 @@ where ) } + #[tracing::instrument(skip(dial_state, noise_config, transport, backoff, config))] async fn dial_peer_with_retry( dial_state: DialState, noise_config: NoiseConfig, @@ -514,7 +520,7 @@ where .map_err(|_| ConnectionManagerError::WireFormatSendFailed)?; let noise_socket = time::timeout( - Duration::from_secs(30), + Duration::from_secs(40), noise_config.upgrade_socket(socket, ConnectionDirection::Outbound), ) .await diff --git a/comms/src/connection_manager/error.rs b/comms/src/connection_manager/error.rs index 
ebbf605b8d..f7bbbaa564 100644 --- a/comms/src/connection_manager/error.rs +++ b/comms/src/connection_manager/error.rs @@ -27,6 +27,7 @@ use crate::{ }; use futures::channel::mpsc; use thiserror::Error; +use tokio::{time, time::Elapsed}; #[derive(Debug, Error, Clone)] pub enum ConnectionManagerError { @@ -110,4 +111,12 @@ pub enum PeerConnectionError { InternalRequestSendFailed(#[from] mpsc::SendError), #[error("Protocol error: {0}")] ProtocolError(#[from] ProtocolError), + #[error("Protocol negotiation timeout")] + ProtocolNegotiationTimeout, +} + +impl From for PeerConnectionError { + fn from(_: Elapsed) -> Self { + PeerConnectionError::ProtocolNegotiationTimeout + } } diff --git a/comms/src/connection_manager/listener.rs b/comms/src/connection_manager/listener.rs index e192e9a66d..60ae3c2d12 100644 --- a/comms/src/connection_manager/listener.rs +++ b/comms/src/connection_manager/listener.rs @@ -30,7 +30,11 @@ use super::{ }; use crate::{ bounded_executor::BoundedExecutor, - connection_manager::{liveness::LivenessSession, types::OneshotTrigger, wire_mode::WireMode}, + connection_manager::{ + liveness::LivenessSession, + types::OneshotTrigger, + wire_mode::{WireMode, LIVENESS_WIRE_MODE}, + }, multiaddr::Multiaddr, multiplexing::Yamux, noise::NoiseConfig, @@ -38,6 +42,7 @@ use crate::{ protocol::ProtocolId, runtime, transports::Transport, + types::CommsPublicKey, utils::multiaddr::multiaddr_to_socketaddr, PeerManager, }; @@ -56,14 +61,17 @@ use log::*; use std::{ convert::TryInto, future::Future, + io::{Error, ErrorKind}, sync::{ atomic::{AtomicUsize, Ordering}, Arc, }, time::Duration, }; +use tari_crypto::tari_utilities::hex::Hex; use tari_shutdown::ShutdownSignal; use tokio::time; +use tracing::{span, Instrument, Level}; const LOG_TARGET: &str = "comms::connection_manager::listener"; @@ -168,22 +176,34 @@ where } } - async fn read_wire_format(socket: &mut TTransport::Output, time_to_first_byte: Duration) -> Option { + async fn read_wire_format( + socket: &mut 
TTransport::Output, + time_to_first_byte: Duration, + ) -> Result { let mut buf = [0u8; 1]; - match time::timeout(time_to_first_byte, socket.read_exact(&mut buf)) - .await - .ok()? - { - Ok(_) => match buf[0].try_into().ok() { - Some(wf) => Some(wf), - None => { - warn!(target: LOG_TARGET, "Invalid wire format byte '{}'", buf[0]); - None + match time::timeout(time_to_first_byte, socket.read_exact(&mut buf)).await { + Ok(result) => match result { + Ok(_) => match buf[0].try_into().ok() { + Some(wf) => Ok(wf), + None => { + warn!(target: LOG_TARGET, "Invalid wire format byte '{}'", buf[0]); + Err(ErrorKind::InvalidData.into()) + }, + }, + Err(err) => { + warn!( + target: LOG_TARGET, + "Failed to read wire format byte due to error: {}", err + ); + Err(err) }, }, - Err(err) => { - warn!(target: LOG_TARGET, "Failed to read first byte: {}", err); - None + Err(elapsed) => { + warn!( + target: LOG_TARGET, + "Failed to read wire format byte within timeout of {:#?}. {}", time_to_first_byte, elapsed + ); + Err(elapsed.into()) }, } } @@ -225,14 +245,15 @@ where let liveness_session_count = self.liveness_session_count.clone(); let shutdown_signal = self.shutdown_signal.clone(); + let span = span!(Level::TRACE, "connection_mann::listener::inbound_task",); let inbound_fut = async move { match Self::read_wire_format(&mut socket, config.time_to_first_byte).await { - Some(WireMode::Comms(byte)) if byte == config.network_info.network_byte => { + Ok(WireMode::Comms(byte)) if byte == config.network_info.network_byte => { let this_node_id_str = node_identity.node_id().short_str(); let result = Self::perform_socket_upgrade_procedure( node_identity, peer_manager, - noise_config, + noise_config.clone(), conn_man_notifier.clone(), socket, peer_addr, @@ -268,16 +289,21 @@ where }, } }, - Some(WireMode::Comms(byte)) => { + Ok(WireMode::Comms(byte)) => { + // TODO: This call is expensive and only added for the benefit of improved logging and may lead to + // TODO: DoS attacks. 
Remove later when not needed anymore or make it optional with a config file + // TODO: setting. + let public_key = Self::remote_public_key_from_socket(socket, noise_config).await; warn!( target: LOG_TARGET, - "Peer at address '{}' sent invalid wire format byte. Expected {:x?} got: {:x?} ", + "Peer at address '{}' ({}) sent invalid wire format byte. Expected {:x?} got: {:x?} ", peer_addr, + public_key, config.network_info.network_byte, byte, ); }, - Some(WireMode::Liveness) => { + Ok(WireMode::Liveness) => { if liveness_session_count.load(Ordering::SeqCst) > 0 && Self::is_address_in_liveness_cidr_range(&peer_addr, &config.liveness_cidr_allowlist) { @@ -295,20 +321,52 @@ where let _ = socket.close().await; } }, - None => { + Err(err) => { warn!( target: LOG_TARGET, - "Peer at address '{}' failed to send valid wire format", peer_addr + "Peer at address '{}' failed to send its wire format. Expected network byte {:x?} or liveness \ + byte {:x?} not received. Error: {}", + peer_addr, + config.network_info.network_byte, + LIVENESS_WIRE_MODE, + err ); }, } - }; + } + .instrument(span); // This will block (asynchronously) if we have reached the maximum simultaneous connections, creating // back-pressure on nodes connecting to this node self.bounded_executor.spawn(inbound_fut).await; } + async fn remote_public_key_from_socket(socket: TTransport::Output, noise_config: NoiseConfig) -> String { + let public_key: Option = match time::timeout( + Duration::from_secs(30), + noise_config.upgrade_socket(socket, ConnectionDirection::Inbound), + ) + .await + .map_err(|_| ConnectionManagerError::NoiseProtocolTimeout) + { + Ok(Ok(noise_socket)) => { + match noise_socket + .get_remote_public_key() + .ok_or(ConnectionManagerError::InvalidStaticPublicKey) + { + Ok(pk) => Some(pk), + _ => None, + } + }, + _ => None, + }; + + match public_key { + None => "public key not known".to_string(), + Some(pk) => pk.to_hex(), + } + } + #[allow(clippy::too_many_arguments)] async fn 
perform_socket_upgrade_procedure( node_identity: Arc, diff --git a/comms/src/connection_manager/manager.rs b/comms/src/connection_manager/manager.rs index a1a9bffe9d..928b53611f 100644 --- a/comms/src/connection_manager/manager.rs +++ b/comms/src/connection_manager/manager.rs @@ -50,6 +50,7 @@ use std::{fmt, sync::Arc}; use tari_shutdown::{Shutdown, ShutdownSignal}; use time::Duration; use tokio::{sync::broadcast, task, time}; +use tracing::{span, Instrument, Level}; const LOG_TARGET: &str = "comms::connection_manager::manager"; @@ -94,14 +95,14 @@ pub struct ConnectionManagerConfig { /// The number of dial attempts to make before giving up. Default: 3 pub max_dial_attempts: usize, /// The maximum number of connection tasks that will be spawned at the same time. Once this limit is reached, peers - /// attempting to connect will have to wait for another connection attempt to complete. Default: 20 + /// attempting to connect will have to wait for another connection attempt to complete. Default: 100 pub max_simultaneous_inbound_connects: usize, /// Set to true to allow peers to send loopback, local-link and other addresses normally not considered valid for /// peer-to-peer comms. Default: false pub allow_test_addresses: bool, /// Version information for this node pub network_info: NodeNetworkInfo, - /// The maximum time to wait for the first byte before closing the connection. Default: 7s + /// The maximum time to wait for the first byte before closing the connection. Default: 45s pub time_to_first_byte: Duration, /// The number of liveness check sessions to allow. 
Default: 0 pub liveness_max_sessions: usize, @@ -121,8 +122,8 @@ impl Default for ConnectionManagerConfig { .expect("DEFAULT_LISTENER_ADDRESS is malformed"), #[cfg(test)] listener_address: "/memory/0".parse().unwrap(), - max_dial_attempts: 3, - max_simultaneous_inbound_connects: 20, + max_dial_attempts: 1, + max_simultaneous_inbound_connects: 100, network_info: Default::default(), #[cfg(not(test))] allow_test_addresses: false, @@ -130,7 +131,7 @@ impl Default for ConnectionManagerConfig { #[cfg(test)] allow_test_addresses: true, liveness_max_sessions: 0, - time_to_first_byte: Duration::from_secs(7), + time_to_first_byte: Duration::from_secs(45), liveness_cidr_allowlist: vec![cidr::AnyIpCidr::V4("127.0.0.1/32".parse().unwrap())], auxilary_tcp_listener_address: None, } @@ -258,6 +259,8 @@ where } pub async fn run(mut self) { + let span = span!(Level::DEBUG, "comms::connection_manager::run"); + let _enter = span.enter(); let mut shutdown = self .shutdown_signal .take() @@ -350,7 +353,16 @@ where use ConnectionManagerRequest::*; trace!(target: LOG_TARGET, "Connection manager got request: {:?}", request); match request { - DialPeer(node_id, reply) => self.dial_peer(node_id, reply).await, + DialPeer { + node_id, + reply_tx, + tracing_id: _tracing, + } => { + let span = span!(Level::TRACE, "connection_manager::handle_request"); + // This causes a panic for some reason? 
+ // span.follows_from(tracing_id); + self.dial_peer(node_id, reply_tx).instrument(span).await + }, CancelDial(node_id) => { if let Err(err) = self.dialer_tx.send(DialerRequest::CancelPendingDial(node_id)).await { error!( @@ -392,15 +404,25 @@ where node_id.short_str(), proto_str ); - if let Err(err) = self + let notify_fut = self .protocols - .notify(&protocol, ProtocolEvent::NewInboundSubstream(*node_id, stream)) - .await - { - error!( - target: LOG_TARGET, - "Error sending NewSubstream notification for protocol '{}' because '{:?}'", proto_str, err - ); + .notify(&protocol, ProtocolEvent::NewInboundSubstream(*node_id, stream)); + match time::timeout(Duration::from_secs(10), notify_fut).await { + Ok(Err(err)) => { + error!( + target: LOG_TARGET, + "Error sending NewSubstream notification for protocol '{}' because '{:?}'", proto_str, err + ); + }, + Err(err) => { + error!( + target: LOG_TARGET, + "Error sending NewSubstream notification for protocol '{}' because {}", proto_str, err + ); + }, + _ => { + debug!(target: LOG_TARGET, "Protocol notification for '{}' sent", proto_str); + }, } }, @@ -422,6 +444,7 @@ where let _ = self.connection_manager_events_tx.send(Arc::new(event)); } + #[tracing::instrument(skip(self, reply))] async fn dial_peer( &mut self, node_id: NodeId, diff --git a/comms/src/connection_manager/peer_connection.rs b/comms/src/connection_manager/peer_connection.rs index 3ede6ef5fc..6f0d90da5d 100644 --- a/comms/src/connection_manager/peer_connection.rs +++ b/comms/src/connection_manager/peer_connection.rs @@ -21,7 +21,15 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#[cfg(feature = "rpc")] -use crate::protocol::rpc::{NamedProtocolService, RpcClient, RpcClientBuilder, RpcError, RPC_MAX_FRAME_SIZE}; +use crate::protocol::rpc::{ + NamedProtocolService, + RpcClient, + RpcClientBuilder, + RpcClientPool, + RpcError, + RpcPoolClient, + RPC_MAX_FRAME_SIZE, +}; use super::{ error::{ConnectionManagerError, PeerConnectionError}, @@ -49,7 +57,8 @@ use std::{ sync::atomic::{AtomicUsize, Ordering}, time::{Duration, Instant}, }; -use tari_shutdown::Shutdown; +use tokio::time; +use tracing::{self, span, Instrument, Level, Span}; const LOG_TARGET: &str = "comms::connection_manager::peer_connection"; @@ -103,10 +112,11 @@ pub fn create( #[derive(Debug)] pub enum PeerConnectionRequest { /// Open a new substream and negotiate the given protocol - OpenSubstream( - ProtocolId, - oneshot::Sender, PeerConnectionError>>, - ), + OpenSubstream { + protocol_id: ProtocolId, + reply_tx: oneshot::Sender, PeerConnectionError>>, + tracing_id: Option, + }, /// Disconnect all substreams and close the transport connection Disconnect(bool, oneshot::Sender>), } @@ -180,19 +190,25 @@ impl PeerConnection { self.substream_counter.get() } + #[tracing::instrument("peer_connection::open_substream", skip(self), err)] pub async fn open_substream( &mut self, protocol_id: &ProtocolId, ) -> Result, PeerConnectionError> { let (reply_tx, reply_rx) = oneshot::channel(); self.request_tx - .send(PeerConnectionRequest::OpenSubstream(protocol_id.clone(), reply_tx)) + .send(PeerConnectionRequest::OpenSubstream { + protocol_id: protocol_id.clone(), + reply_tx, + tracing_id: Span::current().id(), + }) .await?; reply_rx .await .map_err(|_| PeerConnectionError::InternalReplyCancelled)? 
} + #[tracing::instrument("peer_connection::open_framed_substream", skip(self), err)] pub async fn open_framed_substream( &mut self, protocol_id: &ProtocolId, @@ -203,23 +219,39 @@ impl PeerConnection { } #[cfg(feature = "rpc")] + #[tracing::instrument("peer_connection::connect_rpc", skip(self), fields(peer_node_id = self.peer_node_id.to_string().as_str()), err)] pub async fn connect_rpc(&mut self) -> Result where T: From + NamedProtocolService { self.connect_rpc_using_builder(Default::default()).await } #[cfg(feature = "rpc")] + #[tracing::instrument("peer_connection::connect_rpc_with_builder", skip(self, builder), err)] pub async fn connect_rpc_using_builder(&mut self, builder: RpcClientBuilder) -> Result where T: From + NamedProtocolService { - let protocol = T::PROTOCOL_NAME; + let protocol = ProtocolId::from_static(T::PROTOCOL_NAME); debug!( target: LOG_TARGET, "Attempting to establish RPC protocol `{}` to peer `{}`", - String::from_utf8_lossy(protocol), + String::from_utf8_lossy(&protocol), self.peer_node_id ); - let framed = self.open_framed_substream(&protocol.into(), RPC_MAX_FRAME_SIZE).await?; - builder.connect(framed).await + let framed = self.open_framed_substream(&protocol, RPC_MAX_FRAME_SIZE).await?; + builder.with_protocol_id(protocol).connect(framed).await + } + + /// Creates a new RpcClientPool that can be shared between tasks. The client pool will lazily establish up to + /// `max_sessions` sessions and provides client session that is least used. + #[cfg(feature = "rpc")] + pub fn create_rpc_client_pool( + &self, + max_sessions: usize, + client_config: RpcClientBuilder, + ) -> RpcClientPool + where + T: RpcPoolClient + From + NamedProtocolService + Clone, + { + RpcClientPool::new(self.clone(), max_sessions, client_config) } /// Immediately disconnects the peer connection. 
This can only fail if the peer connection worker @@ -272,12 +304,10 @@ struct PeerConnectionActor { request_rx: Fuse>, direction: ConnectionDirection, incoming_substreams: Fuse, - substream_shutdown: Option, control: Control, event_notifier: mpsc::Sender, our_supported_protocols: Vec, their_supported_protocols: Vec, - shutdown: bool, } impl PeerConnectionActor { @@ -298,10 +328,8 @@ impl PeerConnectionActor { direction, control: connection.get_yamux_control(), incoming_substreams: connection.incoming().fuse(), - substream_shutdown: None, request_rx: request_rx.fuse(), event_notifier, - shutdown: false, our_supported_protocols, their_supported_protocols, } @@ -328,22 +356,26 @@ impl PeerConnectionActor { None => { debug!(target: LOG_TARGET, "[{}] Peer '{}' closed the connection", self, self.peer_node_id.short_str()); let _ = self.disconnect(false).await; + break; }, } } } - - if self.shutdown { - break; - } } + self.request_rx.get_mut().close(); } async fn handle_request(&mut self, request: PeerConnectionRequest) { use PeerConnectionRequest::*; match request { - OpenSubstream(proto, reply_tx) => { - let result = self.open_negotiated_protocol_stream(proto).await; + OpenSubstream { + protocol_id, + reply_tx, + tracing_id, + } => { + let span = span!(Level::TRACE, "handle_request"); + span.follows_from(tracing_id); + let result = self.open_negotiated_protocol_stream(protocol_id).instrument(span).await; log_if_error_fmt!( target: LOG_TARGET, reply_tx.send(result), @@ -364,6 +396,7 @@ impl PeerConnectionActor { } } + #[tracing::instrument(skip(self, stream), err, fields(comms.direction="inbound"))] async fn handle_incoming_substream(&mut self, mut stream: Substream) -> Result<(), PeerConnectionError> { let selected_protocol = ProtocolNegotiation::new(&mut stream) .negotiate_protocol_inbound(&self.our_supported_protocols) @@ -379,10 +412,12 @@ impl PeerConnectionActor { Ok(()) } + #[tracing::instrument(skip(self), err)] async fn open_negotiated_protocol_stream( &mut self, 
protocol: ProtocolId, ) -> Result, PeerConnectionError> { + const PROTOCOL_NEGOTIATION_TIMEOUT: Duration = Duration::from_secs(10); debug!( target: LOG_TARGET, "[{}] Negotiating protocol '{}' on new substream for peer '{}'", @@ -395,9 +430,12 @@ impl PeerConnectionActor { let mut negotiation = ProtocolNegotiation::new(&mut stream); let selected_protocol = if self.their_supported_protocols.contains(&protocol) { - negotiation.negotiate_protocol_outbound_optimistic(&protocol).await? + let fut = negotiation.negotiate_protocol_outbound_optimistic(&protocol); + time::timeout(PROTOCOL_NEGOTIATION_TIMEOUT, fut).await?? } else { - negotiation.negotiate_protocol_outbound(&[protocol]).await? + let selected_protocols = [protocol]; + let fut = negotiation.negotiate_protocol_outbound(&selected_protocols); + time::timeout(PROTOCOL_NEGOTIATION_TIMEOUT, fut).await?? }; Ok(NegotiatedSubstream::new(selected_protocol, stream)) @@ -417,7 +455,13 @@ impl PeerConnectionActor { /// /// silent - true to suppress the PeerDisconnected event, false to publish the event async fn disconnect(&mut self, silent: bool) -> Result<(), PeerConnectionError> { - let mut error = None; + if !silent { + self.notify_event(ConnectionManagerEvent::PeerDisconnected(Box::new( + self.peer_node_id.clone(), + ))) + .await; + } + if let Err(err) = self.control.close().await { warn!( target: LOG_TARGET, @@ -426,28 +470,16 @@ impl PeerConnectionActor { self.peer_node_id.short_str(), err ); - error = Some(err); + return Err(err.into()); } + debug!( target: LOG_TARGET, "(Peer = {}) Connection closed", self.peer_node_id.short_str() ); - self.shutdown = true; - // Shut down the incoming substream task - if let Some(shutdown) = self.substream_shutdown.as_mut() { - let _ = shutdown.trigger(); - } - - if !silent { - self.notify_event(ConnectionManagerEvent::PeerDisconnected(Box::new( - self.peer_node_id.clone(), - ))) - .await; - } - - error.map(Into::into).map(Err).unwrap_or(Ok(())) + Ok(()) } } diff --git 
a/comms/src/connection_manager/requester.rs b/comms/src/connection_manager/requester.rs index 3b86a88bc2..1f3f5cc887 100644 --- a/comms/src/connection_manager/requester.rs +++ b/comms/src/connection_manager/requester.rs @@ -36,7 +36,11 @@ use tokio::sync::broadcast; #[derive(Debug)] pub enum ConnectionManagerRequest { /// Dial a given peer by node id. - DialPeer(NodeId, oneshot::Sender>), + DialPeer { + node_id: NodeId, + reply_tx: oneshot::Sender>, + tracing_id: Option, + }, /// Cancels a pending dial if one exists CancelDial(NodeId), /// Register a oneshot to get triggered when the node is listening, or has failed to listen @@ -74,9 +78,10 @@ impl ConnectionManagerRequester { } /// Attempt to connect to a remote peer + #[tracing::instrument(skip(self), err)] pub async fn dial_peer(&mut self, node_id: NodeId) -> Result { let (reply_tx, reply_rx) = oneshot::channel(); - self.send_dial_peer(node_id, reply_tx).await?; + self.send_dial_peer(node_id, Some(reply_tx)).await?; reply_rx .await .map_err(|_| ConnectionManagerError::ActorRequestCanceled)? @@ -92,22 +97,36 @@ impl ConnectionManagerRequester { } /// Send instruction to ConnectionManager to dial a peer and return the result on the given oneshot + #[tracing::instrument(skip(self, reply_tx), err)] pub(crate) async fn send_dial_peer( &mut self, node_id: NodeId, - reply_tx: oneshot::Sender>, + reply_tx: Option>>, ) -> Result<(), ConnectionManagerError> { + let tracing_id; + let reply_tx = if let Some(r) = reply_tx { + tracing_id = tracing::Span::current().id(); + r + } else { + let (tx, _) = oneshot::channel(); + tracing_id = None; + tx + }; self.sender - .send(ConnectionManagerRequest::DialPeer(node_id, reply_tx)) + .send(ConnectionManagerRequest::DialPeer { + node_id, + reply_tx, + tracing_id, + }) .await .map_err(|_| ConnectionManagerError::SendToActorFailed)?; Ok(()) } /// Send instruction to ConnectionManager to dial a peer without waiting for a result. 
+ #[tracing::instrument(skip(self), err)] pub(crate) async fn send_dial_peer_no_reply(&mut self, node_id: NodeId) -> Result<(), ConnectionManagerError> { - let (reply_tx, _) = oneshot::channel(); - self.send_dial_peer(node_id, reply_tx).await?; + self.send_dial_peer(node_id, None).await?; Ok(()) } diff --git a/comms/src/connection_manager/wire_mode.rs b/comms/src/connection_manager/wire_mode.rs index e2421c078b..d8b5cac5d0 100644 --- a/comms/src/connection_manager/wire_mode.rs +++ b/comms/src/connection_manager/wire_mode.rs @@ -22,7 +22,7 @@ use std::convert::TryFrom; -const LIVENESS_WIRE_MODE: u8 = 0x46; // E +pub(crate) const LIVENESS_WIRE_MODE: u8 = 0x46; // E pub enum WireMode { Comms(u8), diff --git a/comms/src/connectivity/manager.rs b/comms/src/connectivity/manager.rs index 308727a44a..35f37627c4 100644 --- a/comms/src/connectivity/manager.rs +++ b/comms/src/connectivity/manager.rs @@ -53,6 +53,7 @@ use std::{ }; use tari_shutdown::ShutdownSignal; use tokio::{sync::broadcast, task::JoinHandle, time}; +use tracing::{span, Instrument, Level}; const LOG_TARGET: &str = "comms::connectivity::manager"; @@ -124,6 +125,12 @@ impl ConnectivityStatus { } } +impl Default for ConnectivityStatus { + fn default() -> Self { + ConnectivityStatus::Initializing + } +} + impl fmt::Display for ConnectivityStatus { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{:?}", self) @@ -150,6 +157,7 @@ impl ConnectivityManagerActor { task::spawn(Self::run(self)) } + #[tracing::instrument(name = "connectivity_manager_actor::run", skip(self))] pub async fn run(mut self) { info!(target: LOG_TARGET, "ConnectivityManager started"); let mut shutdown_signal = self @@ -210,28 +218,41 @@ impl ConnectivityManagerActor { GetConnectivityStatus(reply) => { let _ = reply.send(self.status); }, - DialPeer(node_id, reply) => match self.pool.get(&node_id) { - Some(state) if state.is_connected() => { - debug!( - target: LOG_TARGET, - "Found existing connection for peer `{}`", - 
node_id.short_str() - ); - let _ = reply.send(Ok(state.connection().cloned().expect("Already checked"))); - }, - _ => { - debug!( - target: LOG_TARGET, - "No existing connection found for peer `{}`. Dialling...", - node_id.short_str() - ); - if let Err(err) = self.connection_manager.send_dial_peer(node_id, reply).await { - error!( - target: LOG_TARGET, - "Failed to send dial request to connection manager: {:?}", err - ); + DialPeer { + node_id, + reply_tx, + tracing_id, + } => { + let span = span!(Level::TRACE, "handle_request"); + // let _e = span.enter(); + span.follows_from(tracing_id); + async move { + match self.pool.get(&node_id) { + Some(state) if state.is_connected() => { + debug!( + target: LOG_TARGET, + "Found existing connection for peer `{}`", + node_id.short_str() + ); + let _ = reply_tx.send(Ok(state.connection().cloned().expect("Already checked"))); + }, + _ => { + debug!( + target: LOG_TARGET, + "No existing connection found for peer `{}`. Dialing...", + node_id.short_str() + ); + if let Err(err) = self.connection_manager.send_dial_peer(node_id, Some(reply_tx)).await { + error!( + target: LOG_TARGET, + "Failed to send dial request to connection manager: {:?}", err + ); + } + }, } - }, + } + .instrument(span) + .await }, AddManagedPeers(node_ids) => { self.add_managed_peers(node_ids).await; @@ -429,6 +450,7 @@ impl ConnectivityManagerActor { Ok(conns.into_iter().cloned().collect()) } + #[tracing::instrument(skip(self))] async fn add_managed_peers(&mut self, node_ids: Vec) { let pool = &mut self.pool; let mut should_update_connectivity = false; @@ -522,7 +544,7 @@ impl ConnectivityManagerActor { num_failed ); if self.peer_manager.set_offline(node_id, true).await? 
{ - warn!( + debug!( target: LOG_TARGET, "Peer `{}` was marked as offline but was already offline.", node_id ); diff --git a/comms/src/connectivity/requester.rs b/comms/src/connectivity/requester.rs index b496f91ddc..740c8a6c81 100644 --- a/comms/src/connectivity/requester.rs +++ b/comms/src/connectivity/requester.rs @@ -44,6 +44,7 @@ use std::{ }; use tokio::{sync::broadcast, time}; const LOG_TARGET: &str = "comms::connectivity::requester"; +use tracing; pub type ConnectivityEventRx = broadcast::Receiver>; pub type ConnectivityEventTx = broadcast::Sender>; @@ -90,7 +91,11 @@ impl fmt::Display for ConnectivityEvent { #[derive(Debug)] pub enum ConnectivityRequest { WaitStarted(oneshot::Sender<()>), - DialPeer(NodeId, oneshot::Sender>), + DialPeer { + node_id: NodeId, + reply_tx: oneshot::Sender>, + tracing_id: Option, + }, GetConnectivityStatus(oneshot::Sender), AddManagedPeers(Vec), RemovePeer(NodeId), @@ -123,12 +128,17 @@ impl ConnectivityRequester { self.event_tx.clone() } + #[tracing::instrument(skip(self), err)] pub async fn dial_peer(&mut self, peer: NodeId) -> Result { let mut num_cancels = 0; loop { let (reply_tx, reply_rx) = oneshot::channel(); self.sender - .send(ConnectivityRequest::DialPeer(peer.clone(), reply_tx)) + .send(ConnectivityRequest::DialPeer { + node_id: peer.clone(), + reply_tx, + tracing_id: tracing::Span::current().id(), + }) .await .map_err(|_| ConnectivityError::ActorDisconnected)?; @@ -137,7 +147,7 @@ impl ConnectivityRequester { Err(err @ ConnectionManagerError::DialCancelled) => { num_cancels += 1; // Due to simultaneous dialing, it's possible for the dial to be cancelled. However, typically if - // dial is called right after, the resolved connection will be returned. + // dial is called again right after, the resolved connection will be returned. 
if num_cancels == 1 { continue; } diff --git a/comms/src/connectivity/test.rs b/comms/src/connectivity/test.rs index 09861490ed..a4fec1e896 100644 --- a/comms/src/connectivity/test.rs +++ b/comms/src/connectivity/test.rs @@ -110,7 +110,7 @@ async fn connecting_peers() { peers .iter() .cloned() - .map(|peer| create_peer_connection_mock_pair(1, peer, node_identity.to_peer())), + .map(|peer| create_peer_connection_mock_pair(peer, node_identity.to_peer())), ) .await .into_iter() @@ -144,7 +144,7 @@ async fn add_many_managed_peers() { let connections = future::join_all( (0..5) .map(|i| peers[i].clone()) - .map(|peer| create_peer_connection_mock_pair(1, node_identity.to_peer(), peer)), + .map(|peer| create_peer_connection_mock_pair(node_identity.to_peer(), peer)), ) .await .into_iter() @@ -218,7 +218,7 @@ async fn online_then_offline() { let client_connections = future::join_all( clients .iter() - .map(|peer| create_peer_connection_mock_pair(1, node_identity.to_peer(), peer.to_peer())), + .map(|peer| create_peer_connection_mock_pair(node_identity.to_peer(), peer.to_peer())), ) .await .into_iter() @@ -228,7 +228,7 @@ async fn online_then_offline() { let connections = future::join_all( (0..5) .map(|i| peers[i].clone()) - .map(|peer| create_peer_connection_mock_pair(1, node_identity.to_peer(), peer)), + .map(|peer| create_peer_connection_mock_pair(node_identity.to_peer(), peer)), ) .await .into_iter() @@ -308,7 +308,7 @@ async fn ban_peer() { let (mut connectivity, mut event_stream, node_identity, peer_manager, cm_mock_state, _shutdown) = setup_connectivity_manager(Default::default()); let peer = add_test_peers(&peer_manager, 1).await.pop().unwrap(); - let (conn, _, _, _) = create_peer_connection_mock_pair(1, node_identity.to_peer(), peer.clone()).await; + let (conn, _, _, _) = create_peer_connection_mock_pair(node_identity.to_peer(), peer.clone()).await; let mut events = collect_stream!(event_stream, take = 1, timeout = Duration::from_secs(10)); 
unpack_enum!(ConnectivityEvent::ConnectivityStateInitialized = &*events.remove(0).unwrap()); @@ -358,7 +358,7 @@ async fn peer_selection() { peers .iter() .cloned() - .map(|peer| create_peer_connection_mock_pair(1, peer, node_identity.to_peer())), + .map(|peer| create_peer_connection_mock_pair(peer, node_identity.to_peer())), ) .await .into_iter() diff --git a/comms/src/multiplexing/yamux.rs b/comms/src/multiplexing/yamux.rs index 1ec8a8de10..1ba104ce04 100644 --- a/comms/src/multiplexing/yamux.rs +++ b/comms/src/multiplexing/yamux.rs @@ -23,8 +23,6 @@ use crate::{connection_manager::ConnectionDirection, runtime}; use futures::{ channel::mpsc, - future, - future::Either, io::{AsyncRead, AsyncWrite}, stream::FusedStream, task::Context, @@ -32,9 +30,9 @@ use futures::{ Stream, StreamExt, }; -use log::*; use std::{future::Future, io, pin::Pin, sync::Arc, task::Poll}; use tari_shutdown::{Shutdown, ShutdownSignal}; +use tracing::{self, debug, error, event, Level}; use yamux::Mode; type IncomingRx = mpsc::Receiver; @@ -64,10 +62,7 @@ impl Yamux { }; let mut config = yamux::Config::default(); - // Use OnRead mode instead of OnReceive mode to provide back pressure to the sending side. - // Caveat: the OnRead mode has the risk of deadlock, where both sides send data larger than - // receive window and don't read before finishing writes. - // This should never happen as the window size should be large enough for all protocol messages. + config.set_window_update_mode(yamux::WindowUpdateMode::OnRead); // Because OnRead mode increases the RTT of window update, bigger buffer size and receive // window size perform better. 
@@ -99,7 +94,7 @@ impl Yamux { let (incoming_tx, incoming_rx) = mpsc::channel(10); let stream = yamux::into_stream(connection).boxed(); let incoming = IncomingWorker::new(stream, incoming_tx, shutdown.to_signal()); - runtime::current().spawn(incoming.run()); + runtime::task::spawn(incoming.run()); IncomingSubstreams::new(incoming_rx, counter, shutdown) } @@ -245,7 +240,7 @@ impl AsyncWrite for Substream { struct IncomingWorker { inner: S, sender: mpsc::Sender, - shutdown_signal: Option, + shutdown_signal: ShutdownSignal, } impl IncomingWorker @@ -255,55 +250,42 @@ where S: Stream> + Unpin Self { inner: stream, sender, - shutdown_signal: Some(shutdown_signal), + shutdown_signal, } } + #[tracing::instrument(name = "yamux::incoming_worker::run", skip(self))] pub async fn run(mut self) { - let mut signal = self.shutdown_signal.take(); - loop { - let either = future::select(self.inner.next(), signal.take().expect("cannot fail")).await; - match either { - Either::Left((Some(Err(err)), _)) => { - debug!( - target: LOG_TARGET, - "Incoming peer substream task received an error because '{}'", err - ); - break; - }, - // Received a substream result - Either::Left((Some(Ok(stream)), sig)) => { - signal = Some(sig); - if let Err(err) = self.sender.send(stream).await { - if err.is_disconnected() { - debug!( - target: LOG_TARGET, - "Incoming peer substream task is shutting down because the internal stream sender \ - channel was closed" - ); - break; - } + let mut mux_stream = self.inner.take_until(&mut self.shutdown_signal); + while let Some(result) = mux_stream.next().await { + match result { + Ok(stream) => { + event!(Level::TRACE, "yamux::stream received {}", stream); + if self.sender.send(stream).await.is_err() { + debug!( + target: LOG_TARGET, + "Incoming peer substream task is shutting down because the internal stream sender channel \ + was closed" + ); + break; } }, - // The substream closed - Either::Left((None, _)) => { - debug!( - target: LOG_TARGET, - "Incoming peer 
substream task is shutting down because the stream ended" + Err(err) => { + event!( + Level::ERROR, + "Incoming peer substream task received an error because '{}'", + err ); - break; - }, - // The shutdown signal was received - Either::Right((_, _)) => { - debug!( + error!( target: LOG_TARGET, - "Incoming peer substream task is shutting down because the shutdown signal was received" + "Incoming peer substream task received an error because '{}'", err ); break; }, } } + debug!(target: LOG_TARGET, "Incoming peer substream task is shutting down"); self.sender.close_channel(); } } diff --git a/comms/src/noise/config.rs b/comms/src/noise/config.rs index 7776ade335..30cab07c48 100644 --- a/comms/src/noise/config.rs +++ b/comms/src/noise/config.rs @@ -60,6 +60,7 @@ impl NoiseConfig { /// Upgrades the given socket to using the noise protocol. The upgraded socket and the peer's static key /// is returned. + #[tracing::instrument(name = "noise::upgrade_socket", skip(self, socket), err)] pub async fn upgrade_socket( &self, socket: TSocket, diff --git a/comms/src/noise/socket.rs b/comms/src/noise/socket.rs index 149328cf60..eaf02a60b0 100644 --- a/comms/src/noise/socket.rs +++ b/comms/src/noise/socket.rs @@ -513,7 +513,7 @@ pub struct Handshake { impl Handshake { pub fn new(socket: TSocket, state: HandshakeState) -> Self { Self { - socket: NoiseSocket::new(socket, Box::new(state).into()), + socket: NoiseSocket::new(socket, state.into()), } } } @@ -585,8 +585,8 @@ where TSocket: AsyncRead + AsyncWrite + Unpin #[derive(Debug)] enum NoiseState { - HandshakeState(Box), - TransportState(Box), + HandshakeState(HandshakeState), + TransportState(TransportState), } macro_rules! 
proxy_state_method { @@ -619,20 +619,20 @@ impl NoiseState { pub fn into_transport_mode(self) -> Result { match self { - NoiseState::HandshakeState(state) => Ok(NoiseState::TransportState(Box::new(state.into_transport_mode()?))), + NoiseState::HandshakeState(state) => Ok(NoiseState::TransportState(state.into_transport_mode()?)), _ => Err(snow::Error::State(StateProblem::HandshakeAlreadyFinished)), } } } -impl From> for NoiseState { - fn from(state: Box) -> Self { +impl From for NoiseState { + fn from(state: HandshakeState) -> Self { NoiseState::HandshakeState(state) } } -impl From> for NoiseState { - fn from(state: Box) -> Self { +impl From for NoiseState { + fn from(state: TransportState) -> Self { NoiseState::TransportState(state) } } @@ -662,8 +662,8 @@ mod test { let (dialer_socket, listener_socket) = MemorySocket::new_pair(); let (dialer, listener) = ( - NoiseSocket::new(dialer_socket, Box::new(dialer_session).into()), - NoiseSocket::new(listener_socket, Box::new(listener_session).into()), + NoiseSocket::new(dialer_socket, dialer_session.into()), + NoiseSocket::new(listener_socket, listener_session.into()), ); Ok(( diff --git a/comms/src/pipeline/inbound.rs b/comms/src/pipeline/inbound.rs index c2035cf9f0..0b2116bc37 100644 --- a/comms/src/pipeline/inbound.rs +++ b/comms/src/pipeline/inbound.rs @@ -71,6 +71,18 @@ where return; } let service = self.service.clone(); + + let num_available = self.executor.num_available(); + let max_available = self.executor.max_available(); + // Only emit this message if there is any concurrent usage + if num_available < max_available { + debug!( + target: LOG_TARGET, + "Inbound pipeline usage: {}/{}", + max_available - num_available, + max_available + ); + } // Call the service in it's own spawned task self.executor .spawn(async move { @@ -80,6 +92,7 @@ where }) .await; } + info!(target: LOG_TARGET, "Inbound pipeline terminated: the stream completed"); } } diff --git a/comms/src/protocol/identity.rs 
b/comms/src/protocol/identity.rs index df2900e1af..2c4eba1db5 100644 --- a/comms/src/protocol/identity.rs +++ b/comms/src/protocol/identity.rs @@ -34,10 +34,12 @@ use std::{io, time::Duration}; use thiserror::Error; use tokio::time; use tokio_util::codec::{Framed, LengthDelimitedCodec}; +use tracing; pub static IDENTITY_PROTOCOL: ProtocolId = ProtocolId::from_static(b"t/identity/1.0"); const LOG_TARGET: &str = "comms::protocol::identity"; +#[tracing::instrument(skip(socket, our_supported_protocols), err)] pub async fn identity_exchange<'p, TSocket, P>( node_identity: &NodeIdentity, direction: ConnectionDirection, diff --git a/comms/src/protocol/messaging/extension.rs b/comms/src/protocol/messaging/extension.rs index 58b8a67248..241a152a5b 100644 --- a/comms/src/protocol/messaging/extension.rs +++ b/comms/src/protocol/messaging/extension.rs @@ -26,7 +26,7 @@ use crate::{ message::InboundMessage, pipeline, protocol::{ - messaging::{consts, protocol::MESSAGING_PROTOCOL, MessagingEventSender}, + messaging::{protocol::MESSAGING_PROTOCOL, MessagingEventSender}, ProtocolExtension, ProtocolExtensionContext, ProtocolExtensionError, @@ -38,6 +38,18 @@ use futures::channel::mpsc; use std::fmt; use tower::Service; +/// Buffer size for inbound messages from _all_ peers. This should be large enough to buffer quite a few incoming +/// messages before creating backpressure on peers speaking the messaging protocol. +pub const INBOUND_MESSAGE_BUFFER_SIZE: usize = 100; +/// Buffer size notifications that a peer wants to speak /tari/messaging. This buffer is used for all peers, but a low +/// value is ok because this events happen once (or less) per connecting peer. For e.g. a value of 10 would allow 10 +/// peers to concurrently request to speak /tari/messaging. +pub const MESSAGING_PROTOCOL_EVENTS_BUFFER_SIZE: usize = 30; + +/// Buffer size for requests to the messaging protocol. All outbound messages will be sent along this channel. 
Some +/// buffering may be required if the node needs to send many messages out at the same time. +pub const MESSAGING_REQUEST_BUFFER_SIZE: usize = 50; + pub struct MessagingProtocolExtension { event_tx: MessagingEventSender, pipeline: pipeline::Config, @@ -60,11 +72,11 @@ where TOutReq: Send + 'static, { fn install(self: Box, context: &mut ProtocolExtensionContext) -> Result<(), ProtocolExtensionError> { - let (proto_tx, proto_rx) = mpsc::channel(consts::MESSAGING_PROTOCOL_EVENTS_BUFFER_SIZE); + let (proto_tx, proto_rx) = mpsc::channel(MESSAGING_PROTOCOL_EVENTS_BUFFER_SIZE); context.add_protocol(&[MESSAGING_PROTOCOL.clone()], proto_tx); - let (messaging_request_tx, messaging_request_rx) = mpsc::channel(consts::MESSAGING_REQUEST_BUFFER_SIZE); - let (inbound_message_tx, inbound_message_rx) = mpsc::channel(consts::INBOUND_MESSAGE_BUFFER_SIZE); + let (messaging_request_tx, messaging_request_rx) = mpsc::channel(MESSAGING_REQUEST_BUFFER_SIZE); + let (inbound_message_tx, inbound_message_rx) = mpsc::channel(INBOUND_MESSAGE_BUFFER_SIZE); let messaging = MessagingProtocol::new( Default::default(), diff --git a/comms/src/protocol/messaging/mod.rs b/comms/src/protocol/messaging/mod.rs index 732bc73c0c..88fca6af05 100644 --- a/comms/src/protocol/messaging/mod.rs +++ b/comms/src/protocol/messaging/mod.rs @@ -26,8 +26,6 @@ pub use config::MessagingConfig; mod extension; pub use extension::MessagingProtocolExtension; -mod consts; - mod error; mod inbound; mod outbound; diff --git a/comms/src/protocol/messaging/outbound.rs b/comms/src/protocol/messaging/outbound.rs index 377f7a2d8a..9d47895338 100644 --- a/comms/src/protocol/messaging/outbound.rs +++ b/comms/src/protocol/messaging/outbound.rs @@ -36,6 +36,7 @@ use std::{ time::{Duration, Instant}, }; use tokio::stream as tokio_stream; +use tracing::{event, span, Instrument, Level}; const LOG_TARGET: &str = "comms::protocol::messaging::outbound"; /// The number of times to retry sending a failed message before publishing a 
SendMessageFailed event. @@ -69,49 +70,69 @@ impl OutboundMessaging { } pub async fn run(self) { - debug!( - target: LOG_TARGET, - "Attempting to dial peer '{}' if required", - self.peer_node_id.short_str() + let span = span!( + Level::DEBUG, + "comms::messaging::outbound", + node_id = self.peer_node_id.to_string().as_str() ); - let peer_node_id = self.peer_node_id.clone(); - let mut messaging_events_tx = self.messaging_events_tx.clone(); - match self.run_inner().await { - Ok(_) => { - debug!( - target: LOG_TARGET, - "Outbound messaging for peer '{}' has stopped because the stream was closed", - peer_node_id.short_str() - ); - }, - Err(MessagingProtocolError::Inactivity) => { - debug!( - target: LOG_TARGET, - "Outbound messaging for peer '{}' has stopped because it was inactive", - peer_node_id.short_str() - ); - }, - Err(err) => { - debug!(target: LOG_TARGET, "Outbound messaging substream failed: {}", err); - }, - } + async move { + debug!( + target: LOG_TARGET, + "Attempting to dial peer '{}' if required", + self.peer_node_id.short_str() + ); + let peer_node_id = self.peer_node_id.clone(); + let mut messaging_events_tx = self.messaging_events_tx.clone(); + match self.run_inner().await { + Ok(_) => { + event!( + Level::DEBUG, + "Outbound messaging for peer has stopped because the stream was closed" + ); - let _ = messaging_events_tx - .send(MessagingEvent::OutboundProtocolExited(peer_node_id)) - .await; + debug!( + target: LOG_TARGET, + "Outbound messaging for peer '{}' has stopped because the stream was closed", + peer_node_id.short_str() + ); + }, + Err(MessagingProtocolError::Inactivity) => { + event!( + Level::ERROR, + "Outbound messaging for peer has stopped because it was inactive" + ); + debug!( + target: LOG_TARGET, + "Outbound messaging for peer '{}' has stopped because it was inactive", + peer_node_id.short_str() + ); + }, + Err(err) => { + event!(Level::ERROR, "Outbound messaging substream failed:{}", err); + debug!(target: LOG_TARGET, "Outbound 
messaging substream failed: {}", err); + }, + } + + let _ = messaging_events_tx + .send(MessagingEvent::OutboundProtocolExited(peer_node_id)) + .await; + } + .instrument(span) + .await } async fn run_inner(mut self) -> Result<(), MessagingProtocolError> { let mut attempts = 0; + let substream = loop { match self.try_establish().await { - Ok(substream) => break substream, + Ok(substream) => { + event!(Level::DEBUG, "Substream established"); + break substream; + }, Err(err) => { - assert!( - attempts <= MAX_SEND_RETRIES, - "Attempt count was greater than the maximum" - ); - if attempts == MAX_SEND_RETRIES { + event!(Level::ERROR, "Error establishing messaging protocol"); + if attempts >= MAX_SEND_RETRIES { debug!( target: LOG_TARGET, "Error establishing messaging protocol: {}. Aborting because maximum retries reached.", err @@ -133,77 +154,110 @@ impl OutboundMessaging { } async fn try_dial_peer(&mut self) -> Result { - loop { - match self.connectivity.dial_peer(self.peer_node_id.clone()).await { - Ok(conn) => break Ok(conn), - Err(ConnectivityError::DialCancelled) => { - debug!( - target: LOG_TARGET, - "Dial was cancelled for peer '{}'. This is probably because of connection tie-breaking. \ - Retrying...", - self.peer_node_id.short_str(), - ); - continue; - }, - Err(err) => { - debug!( - target: LOG_TARGET, - "MessagingProtocol failed to dial peer '{}' because '{:?}'", - self.peer_node_id.short_str(), - err - ); + let span = span!( + Level::DEBUG, + "dial_peer", + node_id = self.peer_node_id.to_string().as_str() + ); + async move { + loop { + match self.connectivity.dial_peer(self.peer_node_id.clone()).await { + Ok(conn) => break Ok(conn), + Err(ConnectivityError::DialCancelled) => { + debug!( + target: LOG_TARGET, + "Dial was cancelled for peer '{}'. This is probably because of connection tie-breaking. 
\ + Retrying...", + self.peer_node_id.short_str(), + ); + continue; + }, + Err(err) => { + debug!( + target: LOG_TARGET, + "MessagingProtocol failed to dial peer '{}' because '{:?}'", + self.peer_node_id.short_str(), + err + ); - break Err(MessagingProtocolError::PeerDialFailed); - }, + break Err(MessagingProtocolError::PeerDialFailed); + }, + } } } + .instrument(span) + .await } async fn try_establish(&mut self) -> Result, MessagingProtocolError> { - debug!( - target: LOG_TARGET, - "Attempting to establish messaging protocol connection to peer `{}`", - self.peer_node_id.short_str() + let span = span!( + Level::DEBUG, + "establish_connection", + node_id = self.peer_node_id.to_string().as_str() ); - let start = Instant::now(); - let conn = self.try_dial_peer().await?; - debug!( - target: LOG_TARGET, - "Connection succeeded for peer `{}` in {:.0?}", - self.peer_node_id.short_str(), - start.elapsed() - ); - let substream = self.try_open_substream(conn).await?; - debug!( - target: LOG_TARGET, - "Substream established for peer `{}`", - self.peer_node_id.short_str(), - ); - Ok(substream) + async move { + debug!( + target: LOG_TARGET, + "Attempting to establish messaging protocol connection to peer `{}`", + self.peer_node_id.short_str() + ); + let start = Instant::now(); + let conn = self.try_dial_peer().await?; + debug!( + target: LOG_TARGET, + "Connection succeeded for peer `{}` in {:.0?}", + self.peer_node_id.short_str(), + start.elapsed() + ); + let substream = self.try_open_substream(conn).await?; + debug!( + target: LOG_TARGET, + "Substream established for peer `{}`", + self.peer_node_id.short_str(), + ); + Ok(substream) + } + .instrument(span) + .await } async fn try_open_substream( &mut self, mut conn: PeerConnection, ) -> Result, MessagingProtocolError> { - match conn.open_substream(&MESSAGING_PROTOCOL).await { - Ok(substream) => Ok(substream), - Err(err) => { - debug!( - target: LOG_TARGET, - "MessagingProtocol failed to open a substream to peer '{}' because 
'{}'", - self.peer_node_id.short_str(), - err - ); - Err(err.into()) - }, + let span = span!( + Level::DEBUG, + "open_substream", + node_id = self.peer_node_id.to_string().as_str() + ); + async move { + match conn.open_substream(&MESSAGING_PROTOCOL).await { + Ok(substream) => Ok(substream), + Err(err) => { + debug!( + target: LOG_TARGET, + "MessagingProtocol failed to open a substream to peer '{}' because '{}'", + self.peer_node_id.short_str(), + err + ); + Err(err.into()) + }, + } } + .instrument(span) + .await } async fn start_forwarding_messages( self, substream: NegotiatedSubstream, ) -> Result<(), MessagingProtocolError> { + let span = span!( + Level::DEBUG, + "start_forwarding_messages", + node_id = self.peer_node_id.to_string().as_str() + ); + let _enter = span.enter(); debug!( target: LOG_TARGET, "Starting direct message forwarding for peer `{}`", @@ -236,7 +290,7 @@ impl OutboundMessaging { stream .map(|msg| { msg.map(|mut out_msg| { - trace!(target: LOG_TARGET, "Message buffered for sending {}", out_msg); + event!(Level::DEBUG, "Message buffered for sending {}", out_msg); out_msg.reply_success(); out_msg.body }) diff --git a/comms/src/protocol/messaging/protocol.rs b/comms/src/protocol/messaging/protocol.rs index 6e53e80e6c..1f6fe029ab 100644 --- a/comms/src/protocol/messaging/protocol.rs +++ b/comms/src/protocol/messaging/protocol.rs @@ -247,6 +247,7 @@ impl MessagingProtocol { Ok(()) } + // #[tracing::instrument(skip(self, out_msg), err)] async fn send_message(&mut self, out_msg: OutboundMessage) -> Result<(), MessagingProtocolError> { let peer_node_id = out_msg.peer_node_id.clone(); let sender = loop { diff --git a/comms/src/protocol/messaging/test.rs b/comms/src/protocol/messaging/test.rs index 7c61f50abc..e954af31f2 100644 --- a/comms/src/protocol/messaging/test.rs +++ b/comms/src/protocol/messaging/test.rs @@ -173,7 +173,7 @@ async fn send_message_request() { let peer_node_identity = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let (conn1, 
peer_conn_mock1, _, peer_conn_mock2) = - create_peer_connection_mock_pair(1, node_identity.to_peer(), peer_node_identity.to_peer()).await; + create_peer_connection_mock_pair(node_identity.to_peer(), peer_node_identity.to_peer()).await; // Add mock peer connection to connection manager mock for node 2 conn_man_mock.add_active_connection(conn1).await; @@ -221,7 +221,7 @@ async fn send_message_substream_bulk_failure() { let peer_node_identity = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let (conn1, _, _, peer_conn_mock2) = - create_peer_connection_mock_pair(1, node_identity.to_peer(), peer_node_identity.to_peer()).await; + create_peer_connection_mock_pair(node_identity.to_peer(), peer_node_identity.to_peer()).await; let peer_node_id = peer_node_identity.node_id(); // Add mock peer connection to connection manager mock for node 2 @@ -276,7 +276,7 @@ async fn many_concurrent_send_message_requests() { let node_identity2 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let (conn1, peer_conn_mock1, _, peer_conn_mock2) = - create_peer_connection_mock_pair(1, node_identity1.to_peer(), node_identity2.to_peer()).await; + create_peer_connection_mock_pair(node_identity1.to_peer(), node_identity2.to_peer()).await; let node_id2 = node_identity2.node_id(); // Add mock peer connection to connection manager mock for node 2 diff --git a/comms/src/protocol/rpc/client.rs b/comms/src/protocol/rpc/client.rs index 89653679f7..2806366f9f 100644 --- a/comms/src/protocol/rpc/client.rs +++ b/comms/src/protocol/rpc/client.rs @@ -25,14 +25,17 @@ use crate::{ framing::CanonicalFraming, message::MessageExt, proto, - protocol::rpc::{ - body::ClientStreaming, - message::BaseRequest, - Handshake, - NamedProtocolService, - Response, - RpcError, - RpcStatus, + protocol::{ + rpc::{ + body::ClientStreaming, + message::{BaseRequest, RpcMessageFlags}, + Handshake, + NamedProtocolService, + Response, + RpcError, + RpcStatus, + }, + ProtocolId, }, runtime::task, }; @@ -50,6 +53,8 @@ use 
futures::{ use log::*; use prost::Message; use std::{ + borrow::Cow, + convert::TryFrom, fmt, future::Future, marker::PhantomData, @@ -57,6 +62,7 @@ use std::{ }; use tokio::time; use tower::{Service, ServiceExt}; +use tracing::{event, span, Instrument, Level}; const LOG_TARGET: &str = "comms::rpc::client"; @@ -70,14 +76,23 @@ impl RpcClient { pub async fn connect( config: RpcClientConfig, framed: CanonicalFraming, + protocol_name: ProtocolId, ) -> Result where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, { let (request_tx, request_rx) = mpsc::channel(1); - let connector = ClientConnector { inner: request_tx }; + let connector = ClientConnector::new(request_tx); let (ready_tx, ready_rx) = oneshot::channel(); - task::spawn(RpcClientWorker::new(config, request_rx, framed, ready_tx).run()); + let tracing_id = tracing::Span::current().id(); + task::spawn({ + let span = span!(Level::TRACE, "start_rpc_worker"); + span.follows_from(tracing_id); + + RpcClientWorker::new(config, request_rx, framed, ready_tx, protocol_name) + .run() + .instrument(span) + }); ready_rx .await .expect("ready_rx oneshot is never dropped without a reply")?; @@ -121,11 +136,20 @@ impl RpcClient { self.connector.close() } + pub fn is_connected(&self) -> bool { + self.connector.is_connected() + } + /// Return the latency of the last request pub fn get_last_request_latency(&mut self) -> impl Future, RpcError>> + '_ { self.connector.get_last_request_latency() } + /// Sends a ping and returns the latency + pub fn ping(&mut self) -> impl Future> + '_ { + self.connector.send_ping() + } + async fn call_inner( &mut self, request: BaseRequest, @@ -145,6 +169,7 @@ impl fmt::Debug for RpcClient { #[derive(Debug, Clone)] pub struct RpcClientBuilder { config: RpcClientConfig, + protocol_id: Option, _client: PhantomData, } @@ -152,6 +177,7 @@ impl Default for RpcClientBuilder { fn default() -> Self { Self { config: Default::default(), + protocol_id: None, _client: PhantomData, } } @@ -193,10 
+219,21 @@ where TClient: From + NamedProtocolService self } + pub(crate) fn with_protocol_id(mut self, protocol_id: ProtocolId) -> Self { + self.protocol_id = Some(protocol_id); + self + } + /// Negotiates and establishes a session to the peer's RPC service pub async fn connect(self, framed: CanonicalFraming) -> Result where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static { - RpcClient::connect(self.config, framed).await.map(Into::into) + RpcClient::connect( + self.config, + framed, + self.protocol_id.as_ref().cloned().unwrap_or_default(), + ) + .await + .map(Into::into) } } @@ -222,9 +259,9 @@ impl RpcClientConfig { impl Default for RpcClientConfig { fn default() -> Self { Self { - deadline: Some(Duration::from_secs(30)), - deadline_grace_period: Duration::from_secs(10), - handshake_timeout: Duration::from_secs(30), + deadline: Some(Duration::from_secs(120)), + deadline_grace_period: Duration::from_secs(60), + handshake_timeout: Duration::from_secs(90), } } } @@ -235,6 +272,10 @@ pub struct ClientConnector { } impl ClientConnector { + pub(self) fn new(sender: mpsc::Sender) -> Self { + Self { inner: sender } + } + pub fn close(&mut self) { self.inner.close_channel(); } @@ -248,6 +289,21 @@ impl ClientConnector { reply_rx.await.map_err(|_| RpcError::RequestCancelled) } + + pub async fn send_ping(&mut self) -> Result { + let (reply, reply_rx) = oneshot::channel(); + self.inner + .send(ClientRequest::SendPing(reply)) + .await + .map_err(|_| RpcError::ClientClosed)?; + + let latency = reply_rx.await.map_err(|_| RpcError::RequestCancelled)??; + Ok(latency) + } + + pub fn is_connected(&self) -> bool { + !self.inner.is_closed() + } } impl fmt::Debug for ClientConnector { @@ -285,10 +341,11 @@ pub struct RpcClientWorker { request_rx: mpsc::Receiver, framed: CanonicalFraming, // Request ids are limited to u16::MAX because varint encoding is used over the wire and the magnitude of the value - // sent determines the byte size. 
A u16 will be more than enough for the purpose (currently just logging) - request_id: u16, + // sent determines the byte size. A u16 will be more than enough for the purpose + next_request_id: u16, ready_tx: Option>>, - latency: Option, + last_request_latency: Option, + protocol_id: ProtocolId, } impl RpcClientWorker @@ -299,19 +356,30 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send request_rx: mpsc::Receiver, framed: CanonicalFraming, ready_tx: oneshot::Sender>, + protocol_id: ProtocolId, ) -> Self { Self { config, request_rx, framed, - request_id: 0, + next_request_id: 0, ready_tx: Some(ready_tx), - latency: None, + last_request_latency: None, + protocol_id, } } + fn protocol_name(&self) -> Cow<'_, str> { + String::from_utf8_lossy(&self.protocol_id) + } + + #[tracing::instrument(name = "rpc_client_worker run", skip(self), fields(next_request_id= self.next_request_id))] async fn run(mut self) { - debug!(target: LOG_TARGET, "Performing client handshake"); + debug!( + target: LOG_TARGET, + "Performing client handshake for '{}'", + self.protocol_name() + ); let start = Instant::now(); let mut handshake = Handshake::new(&mut self.framed).with_timeout(self.config.handshake_timeout()); match handshake.perform_client_handshake().await { @@ -319,9 +387,11 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send let latency = start.elapsed(); debug!( target: LOG_TARGET, - "RPC Session negotiation completed. Latency: {:.0?}", latency + "RPC Session ({}) negotiation completed. Latency: {:.0?}", + self.protocol_name(), + latency ); - self.latency = Some(latency); + self.last_request_latency = Some(latency); if let Some(r) = self.ready_tx.take() { let _ = r.send(Ok(())); } @@ -340,12 +410,18 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send match req { SendRequest { request, reply } => { if let Err(err) = self.do_request_response(request, reply).await { - debug!(target: LOG_TARGET, "Unexpected error: {}. 
Worker is terminating.", err); + error!(target: LOG_TARGET, "Unexpected error: {}. Worker is terminating.", err); break; } }, GetLastRequestLatency(reply) => { - let _ = reply.send(self.latency); + let _ = reply.send(self.last_request_latency); + }, + SendPing(reply) => { + if let Err(err) = self.do_ping_pong(reply).await { + error!(target: LOG_TARGET, "Unexpected error: {}. Worker is terminating.", err); + break; + } }, } } @@ -353,9 +429,60 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send debug!(target: LOG_TARGET, "IO Error when closing substream: {}", err); } - debug!(target: LOG_TARGET, "RpcClientWorker terminated."); + debug!( + target: LOG_TARGET, + "RpcClientWorker ({}) terminated.", + self.protocol_name() + ); } + async fn do_ping_pong(&mut self, reply: oneshot::Sender>) -> Result<(), RpcError> { + let ack = proto::rpc::RpcRequest { + flags: RpcMessageFlags::ACK.bits() as u32, + deadline: self.config.deadline.map(|t| t.as_secs()).unwrap_or(0), + ..Default::default() + }; + + let start = Instant::now(); + self.framed.send(ack.to_encoded_bytes().into()).await?; + + debug!( + target: LOG_TARGET, + "Ping (protocol {}) sent in {:.2?}", + self.protocol_name(), + start.elapsed() + ); + let resp = match self.read_reply().await { + Ok(resp) => resp, + Err(RpcError::ReplyTimeout) => { + debug!(target: LOG_TARGET, "Ping timed out after {:.0?}", start.elapsed()); + let _ = reply.send(Err(RpcStatus::timed_out("Response timed out"))); + return Ok(()); + }, + Err(err) => return Err(err), + }; + + let status = RpcStatus::from(&resp); + if !status.is_ok() { + let _ = reply.send(Err(status.clone())); + return Err(status.into()); + } + + let resp_flags = RpcMessageFlags::from_bits_truncate(resp.flags as u8); + if !resp_flags.contains(RpcMessageFlags::ACK) { + warn!(target: LOG_TARGET, "Invalid ping response {:?}", resp); + let _ = reply.send(Err(RpcStatus::protocol_error(format!( + "Received invalid ping response on protocol '{}'", + self.protocol_name() + )))); + 
return Err(RpcError::InvalidPingResponse); + } + + let _ = reply.send(Ok(start.elapsed())); + Ok(()) + } + + #[tracing::instrument(name = "rpc_do_request_response", skip(self, reply), err)] async fn do_request_response( &mut self, request: BaseRequest, @@ -376,42 +503,32 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send let start = Instant::now(); self.framed.send(req.to_encoded_bytes().into()).await?; - let (mut response_tx, response_rx) = mpsc::channel(1); + let (mut response_tx, response_rx) = mpsc::channel(10); if reply.send(response_rx).is_err() { - debug!(target: LOG_TARGET, "Client request was cancelled."); + event!(Level::WARN, "Client request was cancelled"); + warn!(target: LOG_TARGET, "Client request was cancelled."); response_tx.close_channel(); + // TODO: Should this not exit here? } loop { - // Wait until the timeout, allowing an extra grace period to account for latency - let next_msg_fut = match self.config.timeout_with_grace_period() { - Some(timeout) => Either::Left(time::timeout(timeout, self.framed.next())), - None => Either::Right(self.framed.next().map(Ok)), - }; - - let resp = match next_msg_fut.await { - Ok(Some(Ok(resp))) => { + let resp = match self.read_reply().await { + Ok(resp) => { let latency = start.elapsed(); + event!(Level::TRACE, "Message received"); trace!( target: LOG_TARGET, - "Received response ({} byte(s)) from request #{} (method={}) in {:.0?}", - resp.len(), + "Received response ({} byte(s)) from request #{} (protocol = {}, method={}) in {:.0?}", + resp.message.len(), request_id, + self.protocol_name(), method, latency ); - self.latency = Some(latency); - proto::rpc::RpcResponse::decode(resp)? 
- }, - Ok(Some(Err(err))) => { - return Err(err.into()); + self.last_request_latency = Some(latency); + resp }, - Ok(None) => { - return Err(RpcError::ServerClosedRequest); - }, - - // Timeout - Err(_) => { + Err(RpcError::ReplyTimeout) => { debug!( target: LOG_TARGET, "Request {} (method={}) timed out after {:.0?}", @@ -419,19 +536,31 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send method, start.elapsed() ); + event!(Level::ERROR, "Response timed out"); let _ = response_tx.send(Err(RpcStatus::timed_out("Response timed out"))).await; response_tx.close_channel(); break; }, + Err(err) => { + event!(Level::ERROR, "Errored:{}", err); + return Err(err); + }, }; - match Self::convert_to_result(resp) { - Ok(resp) => { + match Self::convert_to_result(resp, request_id) { + Ok(Ok(resp)) => { // The consumer may drop the receiver before all responses are received. // We just ignore that as we still want obey the protocol and receive messages until the FIN flag or // the connection is dropped let is_finished = resp.is_finished(); - if !response_tx.is_closed() { + if response_tx.is_closed() { + warn!( + target: LOG_TARGET, + "Response receiver was dropped before the response/stream could complete for protocol {}, \ + the stream will continue until completed", + self.protocol_name() + ); + } else { let _ = response_tx.send(Ok(resp)).await; } if is_finished { @@ -439,7 +568,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send break; } }, - Err(err) => { + Ok(Err(err)) => { debug!(target: LOG_TARGET, "Remote service returned error: {}", err); if !response_tx.is_closed() { let _ = response_tx.send(Err(err)).await; @@ -447,23 +576,65 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send response_tx.close_channel(); break; }, + Err(err @ RpcError::ResponseIdDidNotMatchRequest { .. }) | + Err(err @ RpcError::UnexpectedAckResponse) => { + warn!(target: LOG_TARGET, "{}", err); + // Ignore the response, this can happen when there is excessive latency. 
The server sends back a + // reply before the deadline but it is only received after the client has timed + // out + continue; + }, + Err(err) => return Err(err), } } Ok(()) } + async fn read_reply(&mut self) -> Result { + // Wait until the timeout, allowing an extra grace period to account for latency + let next_msg_fut = match self.config.timeout_with_grace_period() { + Some(timeout) => Either::Left(time::timeout(timeout, self.framed.next())), + None => Either::Right(self.framed.next().map(Ok)), + }; + + match next_msg_fut.await { + Ok(Some(Ok(resp))) => Ok(proto::rpc::RpcResponse::decode(resp)?), + Ok(Some(Err(err))) => Err(err.into()), + Ok(None) => Err(RpcError::ServerClosedRequest), + Err(_) => Err(RpcError::ReplyTimeout), + } + } + fn next_request_id(&mut self) -> u16 { - let next_id = self.request_id; + let next_id = self.next_request_id; // request_id is allowed to wrap around back to 0 - self.request_id = self.request_id.checked_add(1).unwrap_or(0); + self.next_request_id = self.next_request_id.checked_add(1).unwrap_or(0); next_id } - fn convert_to_result(resp: proto::rpc::RpcResponse) -> Result, RpcStatus> { + fn convert_to_result( + resp: proto::rpc::RpcResponse, + request_id: u16, + ) -> Result, RpcStatus>, RpcError> { + let resp_id = u16::try_from(resp.request_id) + .map_err(|_| RpcStatus::protocol_error(format!("invalid request_id: must be less than {}", u16::MAX)))?; + + let flags = RpcMessageFlags::from_bits_truncate(resp.flags as u8); + if flags.contains(RpcMessageFlags::ACK) { + return Err(RpcError::UnexpectedAckResponse); + } + + if resp_id != request_id { + return Err(RpcError::ResponseIdDidNotMatchRequest { + expected: request_id, + actual: resp.request_id as u16, + }); + } + let status = RpcStatus::from(&resp); if !status.is_ok() { - return Err(status); + return Ok(Err(status)); } let resp = Response { @@ -471,7 +642,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send message: resp.message.into(), }; - Ok(resp) + Ok(Ok(resp)) } } @@ 
-481,4 +652,5 @@ pub enum ClientRequest { reply: oneshot::Sender, RpcStatus>>>, }, GetLastRequestLatency(oneshot::Sender>), + SendPing(oneshot::Sender>), } diff --git a/comms/src/protocol/rpc/client_pool.rs b/comms/src/protocol/rpc/client_pool.rs new file mode 100644 index 0000000000..6829b41265 --- /dev/null +++ b/comms/src/protocol/rpc/client_pool.rs @@ -0,0 +1,268 @@ +// Copyright 2021, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +use crate::{ + peer_manager::NodeId, + protocol::rpc::{ + error::HandshakeRejectReason, + NamedProtocolService, + RpcClient, + RpcClientBuilder, + RpcError, + RpcHandshakeError, + }, + PeerConnection, +}; +use log::*; +use std::{ + ops::{Deref, DerefMut}, + sync::Arc, +}; +use tokio::sync::Mutex; + +const LOG_TARGET: &str = "comms::protocol::rpc::client_pool"; + +#[derive(Clone)] +pub struct RpcClientPool { + pool: Arc>>, +} + +impl RpcClientPool +where T: RpcPoolClient + From + NamedProtocolService + Clone +{ + /// Create a new RpcClientPool. Panics if passed a pool_size of 0. + pub(crate) fn new(peer_connection: PeerConnection, pool_size: usize, client_config: RpcClientBuilder) -> Self { + let pool = LazyPool::new(peer_connection, pool_size, client_config); + Self { + pool: Arc::new(Mutex::new(pool)), + } + } + + pub async fn get(&self) -> Result, RpcClientPoolError> { + let mut pool = self.pool.lock().await; + pool.get_least_used_or_connect().await + } +} + +#[derive(Clone)] +pub(super) struct LazyPool { + connection: PeerConnection, + clients: Vec>, + client_config: RpcClientBuilder, +} + +impl LazyPool +where T: RpcPoolClient + From + NamedProtocolService + Clone +{ + pub fn new(connection: PeerConnection, capacity: usize, client_config: RpcClientBuilder) -> Self { + assert!(capacity > 0, "Pool capacity of 0 is invalid"); + Self { + connection, + clients: Vec::with_capacity(capacity), + client_config, + } + } + + pub async fn get_least_used_or_connect(&mut self) -> Result, RpcClientPoolError> { + loop { + self.check_peer_connection()?; + + let client = match self.get_next_lease() { + Some(c) => c, + None => match self.add_new_client_session().await { + Ok(c) => c, + // This is an edge case where the remote node does not have any further sessions available. This is + // gracefully handled by returning one of the existing used sessions. 
+ Err(RpcClientPoolError::NoMoreRemoteRpcSessions) => self + .get_least_used() + .ok_or(RpcClientPoolError::NoMoreRemoteRpcSessions)?, + Err(err) => { + return Err(err); + }, + }, + }; + + if !client.is_connected() { + self.prune(); + continue; + } + + // Clone is what actually takes the lease out (increments the Arc) + return Ok(client.clone()); + } + } + + pub(super) fn refresh_num_active_connections(&mut self) -> usize { + self.prune(); + self.clients.len() + } + + fn check_peer_connection(&self) -> Result<(), RpcClientPoolError> { + if self.connection.is_connected() { + Ok(()) + } else { + Err(RpcClientPoolError::PeerConnectionDropped { + peer: self.connection.peer_node_id().clone(), + }) + } + } + + /// Return the next client that is not in use. If all clients are in use and there are still more slots open in the + /// pool, None is returned. Otherwise, we return a client with the least uses. + /// If the pool is full, this function is _guaranteed_ to return Some(&T), however it is up to the caller to + /// check that the session is still connected. + fn get_next_lease(&self) -> Option<&RpcClientLease> { + let client = self.get_least_used()?; + // If the pool is full, we choose the client with the smallest lease_count (i.e. the one that is being used + // the least or not at all). + if self.is_full() { + return Some(client); + } + + // Otherwise, if the least used connection is still in use and since there is capacity for more connections, + // return None. This indicates that the best option is to create a new connection. 
+ if client.lease_count() > 0 { + return None; + } + + Some(client) + } + + fn get_least_used(&self) -> Option<&RpcClientLease> { + let mut min_count = usize::MAX; + let mut selected_client = None; + for client in &self.clients { + let lease_count = client.lease_count(); + if lease_count == 0 { + return Some(client); + } + + if min_count > lease_count { + selected_client = Some(client); + min_count = lease_count; + } + } + + selected_client + } + + pub fn is_full(&self) -> bool { + self.clients.len() == self.clients.capacity() + } + + async fn add_new_client_session(&mut self) -> Result<&RpcClientLease, RpcClientPoolError> { + debug_assert!(!self.is_full(), "add_new_client called when pool is full"); + let client = self + .connection + .connect_rpc_using_builder(self.client_config.clone()) + .await?; + let client = RpcClientLease::new(client); + self.clients.push(client); + Ok(self.clients.last().unwrap()) + } + + fn prune(&mut self) { + let initial_len = self.clients.len(); + let cap = self.clients.capacity(); + self.clients = self.clients.drain(..).fold(Vec::with_capacity(cap), |mut vec, c| { + if c.is_connected() { + vec.push(c); + } + vec + }); + assert_eq!(self.clients.capacity(), cap); + debug!( + target: LOG_TARGET, + "Pruned {} client(s) (total connections: {})", + initial_len - self.clients.len(), + self.clients.len() + ) + } +} + +/// A lease of a client RPC session. This is a thin wrapper that provides an atomic reference counted lease around an +/// RPC client session. This wrapper dereferences into the client it holds, meaning that it can be used in the same way +/// as the inner client itself. 
+#[derive(Debug, Clone)] +pub struct RpcClientLease { + inner: T, + rc: Arc<()>, +} + +impl RpcClientLease { + pub fn new(inner: T) -> Self { + Self { + inner, + rc: Arc::new(()), + } + } + + /// Returns the number of active leases for this client + pub(super) fn lease_count(&self) -> usize { + Arc::strong_count(&self.rc) - 1 + } +} + +impl Deref for RpcClientLease { + type Target = T; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl DerefMut for RpcClientLease { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} + +impl RpcPoolClient for RpcClientLease { + fn is_connected(&self) -> bool { + self.inner.is_connected() + } +} + +#[derive(Debug, thiserror::Error)] +pub enum RpcClientPoolError { + #[error("Peer connection to peer '{peer}' dropped")] + PeerConnectionDropped { peer: NodeId }, + #[error("No peer RPC sessions are available")] + NoMoreRemoteRpcSessions, + #[error("Failed to create client connection: {0}")] + FailedToConnect(RpcError), +} + +impl From for RpcClientPoolError { + fn from(err: RpcError) -> Self { + match err { + RpcError::HandshakeError(RpcHandshakeError::Rejected(HandshakeRejectReason::NoSessionsAvailable)) => { + RpcClientPoolError::NoMoreRemoteRpcSessions + }, + err => RpcClientPoolError::FailedToConnect(err), + } + } +} + +pub trait RpcPoolClient { + fn is_connected(&self) -> bool; +} diff --git a/comms/src/protocol/rpc/context.rs b/comms/src/protocol/rpc/context.rs index 1de5e6a023..f6e9d988f6 100644 --- a/comms/src/protocol/rpc/context.rs +++ b/comms/src/protocol/rpc/context.rs @@ -77,19 +77,28 @@ impl RpcCommsProvider for RpcCommsBackend { } pub struct RequestContext { + request_id: u32, backend: Box, node_id: NodeId, } impl RequestContext { - pub(super) fn new(node_id: NodeId, backend: Box) -> Self { - Self { backend, node_id } + pub(super) fn new(request_id: u32, node_id: NodeId, backend: Box) -> Self { + Self { + request_id, + backend, + node_id, + } } pub fn peer_node_id(&self) -> &NodeId { 
&self.node_id } + pub fn request_id(&self) -> u32 { + self.request_id + } + pub(crate) async fn fetch_peer(&self) -> Result { self.backend.fetch_peer(&self.node_id).await } diff --git a/comms/src/protocol/rpc/error.rs b/comms/src/protocol/rpc/error.rs index 0565e93575..64f811f9d8 100644 --- a/comms/src/protocol/rpc/error.rs +++ b/comms/src/protocol/rpc/error.rs @@ -45,6 +45,8 @@ pub enum RpcError { ServerClosedRequest, #[error("Request cancelled")] RequestCancelled, + #[error("Response did not match the request ID (expected {expected} actual {actual})")] + ResponseIdDidNotMatchRequest { expected: u16, actual: u16 }, #[error("Client internal error: {0}")] ClientInternalError(String), #[error("Handshake error: {0}")] @@ -57,6 +59,12 @@ pub enum RpcError { PeerManagerError(#[from] PeerManagerError), #[error("Connectivity error: {0}")] ConnectivityError(#[from] ConnectivityError), + #[error("Reply Timeout")] + ReplyTimeout, + #[error("Received an invalid ping response")] + InvalidPingResponse, + #[error("Unexpected ACK response. 
This is likely because of a previous ACK timeout")] + UnexpectedAckResponse, #[error(transparent)] UnknownError(#[from] anyhow::Error), } diff --git a/comms/src/protocol/rpc/handshake.rs b/comms/src/protocol/rpc/handshake.rs index 0bc75c0fa4..3abd62cef6 100644 --- a/comms/src/protocol/rpc/handshake.rs +++ b/comms/src/protocol/rpc/handshake.rs @@ -23,10 +23,10 @@ use crate::{framing::CanonicalFraming, message::MessageExt, proto, protocol::rpc::error::HandshakeRejectReason}; use bytes::BytesMut; use futures::{AsyncRead, AsyncWrite, SinkExt, StreamExt}; -use log::*; use prost::{DecodeError, Message}; use std::{io, time::Duration}; use tokio::time; +use tracing::{debug, error, event, span, warn, Instrument, Level}; const LOG_TARGET: &str = "comms::rpc::handshake"; @@ -66,42 +66,60 @@ where T: AsyncRead + AsyncWrite + Unpin Self { framed, timeout: None } } - /// Set the length of time that a client/server should wait for the other side to response before timing out. + /// Set the length of time that a client/server should wait for the other side to respond before timing out. 
pub fn with_timeout(mut self, timeout: Duration) -> Self { self.timeout = Some(timeout); self } /// Server-side handshake protocol + #[tracing::instrument(name = "rpc::server::perform_server_handshake", skip(self), err, fields(comms.direction="inbound"))] pub async fn perform_server_handshake(&mut self) -> Result { match self.recv_next_frame().await { Ok(Some(Ok(msg))) => { + event!(Level::DEBUG, "Handshake bytes received"); let msg = proto::rpc::RpcSession::decode(&mut msg.freeze())?; let version = SUPPORTED_RPC_VERSIONS .iter() .find(|v| msg.supported_versions.contains(v)); if let Some(version) = version { + event!(Level::INFO, version = version, "Server accepted version"); debug!(target: LOG_TARGET, "Server accepted version {}", version); let reply = proto::rpc::RpcSessionReply { session_result: Some(proto::rpc::rpc_session_reply::SessionResult::AcceptedVersion(*version)), ..Default::default() }; - self.framed.send(reply.to_encoded_bytes().into()).await?; + let span = span!(Level::INFO, "rpc::server::handshake::send_accept_version_reply"); + self.framed + .send(reply.to_encoded_bytes().into()) + .instrument(span) + .await?; return Ok(*version); } + let span = span!(Level::INFO, "rpc::server::handshake::send_rejection"); self.reject_with_reason(HandshakeRejectReason::UnsupportedVersion) + .instrument(span) .await?; Err(RpcHandshakeError::ClientNoSupportedVersion) }, - Ok(Some(Err(err))) => Err(err.into()), - Ok(None) => Err(RpcHandshakeError::ClientClosed), - Err(_elapsed) => Err(RpcHandshakeError::TimedOut), + Ok(Some(Err(err))) => { + event!(Level::ERROR, "Error: {}", err); + Err(err.into()) + }, + Ok(None) => { + event!(Level::ERROR, "Client closed request"); + Err(RpcHandshakeError::ClientClosed) + }, + Err(_elapsed) => { + event!(Level::ERROR, "Timed out"); + Err(RpcHandshakeError::TimedOut) + }, } } pub async fn reject_with_reason(&mut self, reject_reason: HandshakeRejectReason) -> Result<(), RpcHandshakeError> { - debug!(target: LOG_TARGET, "Rejecting 
handshake because {}", reject_reason); + warn!(target: LOG_TARGET, "Rejecting handshake because {}", reject_reason); let reply = proto::rpc::RpcSessionReply { session_result: Some(proto::rpc::rpc_session_reply::SessionResult::Rejected(true)), reject_reason: reject_reason.as_i32(), @@ -112,6 +130,7 @@ where T: AsyncRead + AsyncWrite + Unpin } /// Client-side handshake protocol + #[tracing::instrument(name = "rpc::client::perform_client_handshake", skip(self), err, fields(comms.direction="outbound"))] pub async fn perform_client_handshake(&mut self) -> Result<(), RpcHandshakeError> { let msg = proto::rpc::RpcSession { supported_versions: SUPPORTED_RPC_VERSIONS.to_vec(), @@ -120,7 +139,7 @@ where T: AsyncRead + AsyncWrite + Unpin // anything. Rather than returning an IO error, let's ignore the send error and see if we can receive anything, // or return an IO error similarly to what send would have done. if let Err(err) = self.framed.send(msg.to_encoded_bytes().into()).await { - debug!( + warn!( target: LOG_TARGET, "IO error when sending new session handshake to peer: {}", err ); @@ -129,15 +148,26 @@ where T: AsyncRead + AsyncWrite + Unpin Ok(Some(Ok(msg))) => { let msg = proto::rpc::RpcSessionReply::decode(&mut msg.freeze())?; let version = msg.result()?; + event!(Level::INFO, "Server accepted version: {}", version); debug!(target: LOG_TARGET, "Server accepted version {}", version); Ok(()) }, - Ok(Some(Err(err))) => Err(err.into()), - Ok(None) => Err(RpcHandshakeError::ServerClosedRequest), - Err(_) => Err(RpcHandshakeError::TimedOut), + Ok(Some(Err(err))) => { + event!(Level::ERROR, "Error: {}", err); + Err(err.into()) + }, + Ok(None) => { + event!(Level::ERROR, "Server closed request"); + Err(RpcHandshakeError::ServerClosedRequest) + }, + Err(_) => { + event!(Level::ERROR, "Timed out"); + Err(RpcHandshakeError::TimedOut) + }, } } + #[tracing::instrument(name = "rpc::receive_handshake_reply", skip(self), err)] async fn recv_next_frame(&mut self) -> Result>, 
time::Elapsed> { match self.timeout { Some(timeout) => time::timeout(timeout, self.framed.next()).await, diff --git a/comms/src/protocol/rpc/message.rs b/comms/src/protocol/rpc/message.rs index c1455fb3c9..dedd7e04fb 100644 --- a/comms/src/protocol/rpc/message.rs +++ b/comms/src/protocol/rpc/message.rs @@ -197,13 +197,20 @@ impl Into for RpcMethod { bitflags! { pub struct RpcMessageFlags: u8 { + /// Message stream has completed const FIN = 0x01; + /// Typically sent with empty contents and used to confirm a substream is alive. + const ACK = 0x02; } } impl RpcMessageFlags { pub fn is_fin(&self) -> bool { self.contains(Self::FIN) } + + pub fn is_ack(&self) -> bool { + self.contains(Self::ACK) + } } impl Default for RpcMessageFlags { @@ -242,6 +249,10 @@ impl proto::rpc::RpcResponse { pub fn flags(&self) -> RpcMessageFlags { RpcMessageFlags::from_bits_truncate(self.flags as u8) } + + pub fn is_fin(&self) -> bool { + self.flags as u8 & RpcMessageFlags::FIN.bits() != 0 + } } impl fmt::Display for proto::rpc::RpcResponse { diff --git a/comms/src/protocol/rpc/mod.rs b/comms/src/protocol/rpc/mod.rs index 0f606ac04b..d4e91fa8e4 100644 --- a/comms/src/protocol/rpc/mod.rs +++ b/comms/src/protocol/rpc/mod.rs @@ -48,6 +48,9 @@ pub use error::RpcError; mod handshake; pub use handshake::{Handshake, RpcHandshakeError}; +mod client_pool; +pub use client_pool::{RpcClientLease, RpcClientPool, RpcClientPoolError, RpcPoolClient}; + mod status; pub use status::{RpcStatus, RpcStatusCode}; @@ -62,6 +65,7 @@ pub mod __macro_reexports { framing::CanonicalFraming, protocol::{ rpc::{ + client_pool::RpcPoolClient, message::{Request, Response}, server::{NamedProtocolService, RpcServerError}, Body, diff --git a/comms/src/protocol/rpc/server/mock.rs b/comms/src/protocol/rpc/server/mock.rs index d1472e786f..19741a0a1a 100644 --- a/comms/src/protocol/rpc/server/mock.rs +++ b/comms/src/protocol/rpc/server/mock.rs @@ -28,6 +28,7 @@ use crate::{ context::{RequestContext, RpcCommsBackend, 
RpcCommsProvider}, server::{handle::RpcServerRequest, PeerRpcServer, RpcServerError}, Body, + NamedProtocolService, Request, Response, RpcError, @@ -48,9 +49,17 @@ use crate::{ }; use async_trait::async_trait; use bytes::Bytes; -use futures::{channel::mpsc, stream, SinkExt}; -use std::sync::Arc; -use tokio::{sync::RwLock, task}; +use futures::{channel::mpsc, future::BoxFuture, stream, SinkExt}; +use std::{ + collections::HashMap, + future, + sync::Arc, + task::{Context, Poll}, +}; +use tokio::{ + sync::{Mutex, RwLock}, + task, +}; use tower::Service; use tower_make::MakeService; @@ -71,7 +80,7 @@ impl RpcRequestMock { } pub fn request_with_context(&self, node_id: NodeId, msg: T) -> Request { - let context = RequestContext::new(node_id, Box::new(self.comms_provider.clone())); + let context = RequestContext::new(0, node_id, Box::new(self.comms_provider.clone())); Request::with_context(context, 0.into(), msg) } @@ -223,7 +232,7 @@ where /// Create a PeerConnection that can open a substream to this mock server. 
pub async fn create_connection(&self, peer: Peer, protocol_id: ProtocolId) -> PeerConnection { let peer_node_id = peer.node_id.clone(); - let (_, our_conn_mock, peer_conn, _) = create_peer_connection_mock_pair(1, peer, self.our_node.to_peer()).await; + let (_, our_conn_mock, peer_conn, _) = create_peer_connection_mock_pair(peer, self.our_node.to_peer()).await; let mut protocol_tx = self.protocol_tx.clone(); task::spawn(async move { @@ -244,3 +253,69 @@ where task::spawn(inner.serve()) } } + +impl MockRpcServer { + pub async fn create_mockimpl_connection(&self, peer: Peer) -> PeerConnection { + // MockRpcImpl accepts any protocol + self.create_connection(peer, ProtocolId::new()).await + } +} + +#[derive(Clone, Default)] +pub struct MockRpcImpl { + state: Arc>, +} + +#[derive(Default)] +struct State { + accepted_calls: HashMap>, +} + +impl MockRpcImpl { + pub fn new() -> Self { + Default::default() + } +} + +impl Service> for MockRpcImpl { + type Error = RpcStatus; + type Future = BoxFuture<'static, Result, RpcStatus>>; + type Response = Response; + + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: Request) -> Self::Future { + let state = self.state.clone(); + Box::pin(async move { + let method_id = req.method().id(); + match state.lock().await.accepted_calls.get(&method_id) { + Some(resp) => Ok(resp.clone().map(Body::single)), + None => Err(RpcStatus::unsupported_method(format!( + "Method identifier `{}` is not recognised or supported", + method_id + ))), + } + }) + } +} + +impl NamedProtocolService for MockRpcImpl { + const PROTOCOL_NAME: &'static [u8] = b"mock-service"; +} + +/// A service maker for GreetingServer +impl Service for MockRpcImpl { + type Error = RpcServerError; + type Future = future::Ready>; + type Response = Self; + + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, _: ProtocolId) -> Self::Future { + 
future::ready(Ok(self.clone())) + } +} diff --git a/comms/src/protocol/rpc/server/mod.rs b/comms/src/protocol/rpc/server/mod.rs index d00c50c75d..b2b7dbf76f 100644 --- a/comms/src/protocol/rpc/server/mod.rs +++ b/comms/src/protocol/rpc/server/mod.rs @@ -53,17 +53,17 @@ use crate::{ protocol::{ProtocolEvent, ProtocolId, ProtocolNotification, ProtocolNotificationRx}, Bytes, }; -use futures::{channel::mpsc, AsyncRead, AsyncWrite, Sink, SinkExt, StreamExt}; -use log::*; +use futures::{channel::mpsc, AsyncRead, AsyncWrite, SinkExt, StreamExt}; use prost::Message; use std::{ - io, + borrow::Cow, + future::Future, time::{Duration, Instant}, }; -use tari_shutdown::{OptionalShutdownSignal, ShutdownSignal}; use tokio::time; use tower::Service; use tower_make::MakeService; +use tracing::{debug, error, instrument, span, trace, warn, Instrument, Level}; const LOG_TARGET: &str = "comms::rpc"; @@ -150,7 +150,6 @@ pub struct RpcServerBuilder { maximum_simultaneous_sessions: Option, minimum_client_deadline: Duration, handshake_timeout: Duration, - shutdown_signal: OptionalShutdownSignal, } impl RpcServerBuilder { @@ -173,11 +172,6 @@ impl RpcServerBuilder { self } - pub fn with_shutdown_signal(mut self, shutdown_signal: ShutdownSignal) -> Self { - self.shutdown_signal = Some(shutdown_signal).into(); - self - } - pub fn finish(self) -> RpcServer { let (request_tx, request_rx) = mpsc::channel(10); RpcServer { @@ -194,7 +188,6 @@ impl Default for RpcServerBuilder { maximum_simultaneous_sessions: Some(1000), minimum_client_deadline: Duration::from_secs(1), handshake_timeout: Duration::from_secs(15), - shutdown_signal: Default::default(), } } } @@ -248,8 +241,7 @@ where let mut protocol_notifs = self .protocol_notifications .take() - .expect("PeerRpcServer initialized without protocol_notifications") - .take_until(self.config.shutdown_signal.clone()); + .expect("PeerRpcServer initialized without protocol_notifications"); let mut requests = self .request_rx @@ -274,8 +266,7 @@ where 
debug!( target: LOG_TARGET, - "Peer RPC server is shut down because the shutdown signal was triggered or the protocol notification \ - stream ended" + "Peer RPC server is shut down because the protocol notification stream ended" ); Ok(()) @@ -295,6 +286,7 @@ where } } + #[tracing::instrument(name = "rpc::server::new_client_connection", skip(self, notification), err)] async fn handle_protocol_notification( &mut self, notification: ProtocolNotification, @@ -321,6 +313,7 @@ where Ok(()) } + #[tracing::instrument(name = "rpc::server::try_initiate_service", skip(self, framed), err)] async fn try_initiate_service( &mut self, protocol: ProtocolId, @@ -366,11 +359,11 @@ where let service = ActivePeerRpcService { config: self.config.clone(), + protocol, node_id: node_id.clone(), - framed: Some(framed), + framed, service, comms_provider: self.comms_provider.clone(), - shutdown_signal: self.config.shutdown_signal.clone(), }; self.executor @@ -383,11 +376,11 @@ where struct ActivePeerRpcService { config: RpcServerBuilder, + protocol: ProtocolId, node_id: NodeId, service: TSvc, - framed: Option>, + framed: CanonicalFraming, comms_provider: TCommsProvider, - shutdown_signal: OptionalShutdownSignal, } impl ActivePeerRpcService @@ -397,39 +390,56 @@ where TCommsProvider: RpcCommsProvider + Send + Clone + 'static, { async fn start(mut self) { - debug!(target: LOG_TARGET, "(Peer = `{}`) Rpc server started.", self.node_id); + debug!( + target: LOG_TARGET, + "(Peer = `{}`) Rpc server ({}) started.", + self.node_id, + self.protocol_name() + ); if let Err(err) = self.run().await { error!( target: LOG_TARGET, - "(Peer = `{}`) Rpc server exited with an error: {}", self.node_id, err + "(Peer = `{}`) Rpc server ({}) exited with an error: {}", + self.node_id, + self.protocol_name(), + err ); } - debug!(target: LOG_TARGET, "(Peer = {}) Rpc service shutdown", self.node_id); + debug!( + target: LOG_TARGET, + "(Peer = {}) Rpc service ({}) shutdown", + self.node_id, + self.protocol_name() + ); } 
- async fn run(&mut self) -> Result<(), RpcServerError> { - let (mut sink, stream) = self.framed.take().unwrap().split(); - let mut stream = stream.fuse().take_until(self.shutdown_signal.clone()); + fn protocol_name(&self) -> Cow<'_, str> { + String::from_utf8_lossy(&self.protocol) + } - while let Some(result) = stream.next().await { + async fn run(&mut self) -> Result<(), RpcServerError> { + while let Some(result) = self.framed.next().await { let start = Instant::now(); - if let Err(err) = self.handle(&mut sink, result?.freeze()).await { - sink.close().await?; + if let Err(err) = self.handle(result?.freeze()).await { + self.framed.close().await?; return Err(err); } - debug!(target: LOG_TARGET, "RPC request completed in {:.0?}", start.elapsed()); + let elapsed = start.elapsed(); + debug!( + target: LOG_TARGET, + "RPC ({}) request completed in {:.0?}{}", + self.protocol_name(), + elapsed, + if elapsed.as_secs() > 5 { " (LONG REQUEST)" } else { "" } + ); } - sink.close().await?; + self.framed.close().await?; Ok(()) } - fn create_request_context(&self) -> RequestContext { - RequestContext::new(self.node_id.clone(), Box::new(self.comms_provider.clone())) - } - - async fn handle(&mut self, sink: &mut W, mut request: Bytes) -> Result<(), RpcServerError> - where W: Sink + Unpin { + #[instrument(name = "rpc::server::handle_req", skip(self), err)] + async fn handle(&mut self, mut request: Bytes) -> Result<(), RpcServerError> { let decoded_msg = proto::rpc::RpcRequest::decode(&mut request)?; let request_id = decoded_msg.request_id; @@ -453,7 +463,25 @@ where flags: RpcMessageFlags::FIN.bits().into(), message: status.details_bytes(), }; - sink.send(bad_request.to_encoded_bytes().into()).await?; + self.framed.send(bad_request.to_encoded_bytes().into()).await?; + return Ok(()); + } + + let msg_flags = RpcMessageFlags::from_bits_truncate(decoded_msg.flags as u8); + if msg_flags.contains(RpcMessageFlags::ACK) { + debug!( + target: LOG_TARGET, + "[Peer=`{}` {}] sending ACK 
response.", + self.node_id, + self.protocol_name() + ); + let ack = proto::rpc::RpcResponse { + request_id, + status: RpcStatus::ok().as_code(), + flags: RpcMessageFlags::ACK.bits().into(), + ..Default::default() + }; + self.framed.send(ack.to_encoded_bytes().into()).await?; return Ok(()); } @@ -462,9 +490,14 @@ where "[Peer=`{}`] Got request {}", self.node_id, decoded_msg ); - let req = Request::with_context(self.create_request_context(), method, decoded_msg.message.into()); + let req = Request::with_context( + self.create_request_context(request_id), + method, + decoded_msg.message.into(), + ); - let service_result = time::timeout(deadline, self.service.call(req)).await; + let service_call = log_timing(request_id, "service call", self.service.call(req)); + let service_result = time::timeout(deadline, service_call).await; let service_result = match service_result { Ok(v) => v, Err(_) => { @@ -478,9 +511,42 @@ where match service_result { Ok(body) => { + // This is the most basic way we can push responses back to the peer. 
Keeping this here for reference + // and possible future evaluation + // + // body.into_message() + // .map(|msg| match msg { + // Ok(msg) => { + // trace!(target: LOG_TARGET, "Sending body len = {}", msg.len()); + // let mut flags = RpcMessageFlags::empty(); + // if msg.is_finished() { + // flags |= RpcMessageFlags::FIN; + // } + // proto::rpc::RpcResponse { + // request_id, + // status: RpcStatus::ok().as_code(), + // flags: flags.bits().into(), + // message: msg.into(), + // } + // }, + // Err(err) => { + // debug!(target: LOG_TARGET, "Body contained an error: {}", err); + // proto::rpc::RpcResponse { + // request_id, + // status: err.as_code(), + // flags: RpcMessageFlags::FIN.bits().into(), + // message: err.details().as_bytes().to_vec(), + // } + // }, + // }) + // .map(|resp| Ok(resp.to_encoded_bytes().into())) + // .forward(PreventClose::new(sink)) + // .await?; + let mut message = body.into_message(); loop { - match time::timeout(deadline, message.next()).await { + let msg_read = log_timing(request_id, "message read", message.next()); + match time::timeout(deadline, msg_read).await { Ok(Some(msg)) => { let resp = match msg { Ok(msg) => { @@ -507,7 +573,10 @@ where }, }; - if !send_response_checked(sink, request_id, resp).await? { + let is_valid = + log_timing(request_id, "transmit", self.send_response(request_id, resp)).await?; + + if !is_valid { break; } }, @@ -521,7 +590,7 @@ where break; }, } - } + } // end loop }, Err(err) => { debug!(target: LOG_TARGET, "Service returned an error: {}", err); @@ -532,50 +601,64 @@ where message: err.details_bytes(), }; - sink.send(resp.to_encoded_bytes().into()).await?; + self.framed.send(resp.to_encoded_bytes().into()).await?; }, } Ok(()) } -} -/// Sends an RpcResponse on the given Sink. 
If the size of the message exceeds the RPC_MAX_FRAME_SIZE, an error is -/// returned to the client and false is returned from this function, otherwise the message is sent and true is returned -#[inline] -async fn send_response_checked( - sink: &mut S, - request_id: u32, - resp: proto::rpc::RpcResponse, -) -> Result -where - S: Sink + Unpin, -{ - match resp.to_encoded_bytes() { - buf if buf.len() > RPC_MAX_FRAME_SIZE => { - let msg = format!( - "This node tried to return a message that exceeds the maximum frame size. Max = {:.4} MiB, Got = \ - {:.4} MiB", - RPC_MAX_FRAME_SIZE as f32 / (1024.0 * 1024.0), - buf.len() as f32 / (1024.0 * 1024.0) - ); - warn!(target: LOG_TARGET, "{}", msg); - sink.send( - proto::rpc::RpcResponse { - request_id, - status: RpcStatusCode::MalformedResponse as u32, - flags: RpcMessageFlags::FIN.bits().into(), - message: msg.as_bytes().to_vec(), - } - .to_encoded_bytes() - .into(), - ) - .await?; - Ok(false) - }, - buf => { - sink.send(buf.into()).await?; - Ok(true) - }, + /// Sends an RpcResponse on the given Sink. If the size of the message exceeds the RPC_MAX_FRAME_SIZE, an error is + /// returned to the client and false is returned from this function, otherwise the message is sent and true is + /// returned + async fn send_response(&mut self, request_id: u32, resp: proto::rpc::RpcResponse) -> Result { + match resp.to_encoded_bytes() { + buf if buf.len() > RPC_MAX_FRAME_SIZE => { + let msg = format!( + "This node tried to return a message that exceeds the maximum frame size. 
Max = {:.4} MiB, Got = \ + {:.4} MiB", + RPC_MAX_FRAME_SIZE as f32 / (1024.0 * 1024.0), + buf.len() as f32 / (1024.0 * 1024.0) + ); + warn!(target: LOG_TARGET, "{}", msg); + self.framed + .send( + proto::rpc::RpcResponse { + request_id, + status: RpcStatusCode::MalformedResponse as u32, + flags: RpcMessageFlags::FIN.bits().into(), + message: msg.as_bytes().to_vec(), + } + .to_encoded_bytes() + .into(), + ) + .await?; + Ok(false) + }, + buf => { + self.framed.send(buf.into()).await?; + Ok(true) + }, + } + } + + fn create_request_context(&self, request_id: u32) -> RequestContext { + RequestContext::new(request_id, self.node_id.clone(), Box::new(self.comms_provider.clone())) } } + +async fn log_timing>(request_id: u32, tag: &str, fut: F) -> R { + let t = Instant::now(); + let span = span!(Level::TRACE, "rpc::internal::timing::{}::{}", request_id, tag); + let ret = fut.instrument(span).await; + let elapsed = t.elapsed(); + trace!( + target: LOG_TARGET, + "RPC TIMING(REQ_ID={}): '{}' took {:.2}s{}", + request_id, + tag, + elapsed.as_secs_f32(), + if elapsed.as_secs() >= 5 { " (SLOW)" } else { "" } + ); + ret +} diff --git a/comms/src/protocol/rpc/server/router.rs b/comms/src/protocol/rpc/server/router.rs index 1e15434b69..9d03c6535d 100644 --- a/comms/src/protocol/rpc/server/router.rs +++ b/comms/src/protocol/rpc/server/router.rs @@ -208,7 +208,7 @@ where >>::Future: Send + 'static, { fn install(self: Box, context: &mut ProtocolExtensionContext) -> Result<(), ProtocolExtensionError> { - let (proto_notif_tx, proto_notif_rx) = mpsc::channel(10); + let (proto_notif_tx, proto_notif_rx) = mpsc::channel(20); context.add_protocol(&self.protocol_names, proto_notif_tx); let rpc_context = RpcCommsBackend::new(context.peer_manager(), context.connectivity()); task::spawn(self.serve(proto_notif_rx, rpc_context)); diff --git a/comms/src/protocol/rpc/status.rs b/comms/src/protocol/rpc/status.rs index ffa4fa839f..e0ddf7fe22 100644 --- a/comms/src/protocol/rpc/status.rs +++ 
b/comms/src/protocol/rpc/status.rs @@ -36,21 +36,21 @@ pub struct RpcStatus { impl RpcStatus { pub fn ok() -> Self { - RpcStatus { + Self { code: RpcStatusCode::Ok, details: Default::default(), } } pub fn unsupported_method(details: T) -> Self { - RpcStatus { + Self { code: RpcStatusCode::UnsupportedMethod, details: details.to_string(), } } pub fn not_implemented(details: T) -> Self { - RpcStatus { + Self { code: RpcStatusCode::NotImplemented, details: details.to_string(), } @@ -99,6 +99,13 @@ impl RpcStatus { } } + pub(super) fn protocol_error(details: T) -> Self { + Self { + code: RpcStatusCode::ProtocolError, + details: details.to_string(), + } + } + pub fn as_code(&self) -> u32 { self.code as u32 } @@ -177,6 +184,8 @@ pub enum RpcStatusCode { General = 6, /// Entity not found NotFound = 7, + /// RPC protocol error + ProtocolError = 8, // The following status represents anything that is not recognised (i.e not one of the above codes). /// Unrecognised RPC status code InvalidRpcStatusCode, diff --git a/comms/src/protocol/rpc/test/client_pool.rs b/comms/src/protocol/rpc/test/client_pool.rs new file mode 100644 index 0000000000..e1eb957d5f --- /dev/null +++ b/comms/src/protocol/rpc/test/client_pool.rs @@ -0,0 +1,173 @@ +// Copyright 2021, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use crate::{ + connection_manager::PeerConnection, + protocol::{ + rpc::{ + test::{ + greeting_service::{GreetingClient, GreetingServer, GreetingService}, + mock::create_mocked_rpc_context, + }, + NamedProtocolService, + RpcServer, + }, + ProtocolEvent, + ProtocolId, + ProtocolNotification, + }, + runtime, + runtime::task, + test_utils::mocks::{new_peer_connection_mock_pair, PeerConnectionMockState}, +}; +use futures::{channel::mpsc, SinkExt}; +use tari_shutdown::Shutdown; +use tari_test_utils::{async_assert_eventually, unpack_enum}; + +async fn setup(num_concurrent_sessions: usize) -> (PeerConnection, PeerConnectionMockState, Shutdown) { + let (conn1, conn1_state, conn2, conn2_state) = new_peer_connection_mock_pair().await; + let (mut notif_tx, notif_rx) = mpsc::channel(1); + let shutdown = Shutdown::new(); + let (context, _) = create_mocked_rpc_context(); + + task::spawn( + RpcServer::builder() + .with_maximum_simultaneous_sessions(num_concurrent_sessions) + .finish() + .add_service(GreetingServer::new(GreetingService::default())) + .serve(notif_rx, context), + ); + + task::spawn(async move { + while let Some(stream) = conn2_state.next_incoming_substream().await { + notif_tx + 
.send(ProtocolNotification::new( + ProtocolId::from_static(&GreetingClient::PROTOCOL_NAME), + ProtocolEvent::NewInboundSubstream(conn2.peer_node_id().clone(), stream), + )) + .await + .unwrap(); + } + }); + + (conn1, conn1_state, shutdown) +} + +mod lazy_pool { + use super::*; + use crate::protocol::rpc::client_pool::{LazyPool, RpcClientPoolError}; + + #[runtime::test] + async fn it_connects_lazily() { + let (conn, mock_state, _shutdown) = setup(2).await; + let mut pool = LazyPool::::new(conn, 2, Default::default()); + assert_eq!(mock_state.num_open_substreams(), 0); + let _conn1 = pool.get_least_used_or_connect().await.unwrap(); + assert_eq!(mock_state.num_open_substreams(), 1); + let _conn2 = pool.get_least_used_or_connect().await.unwrap(); + assert_eq!(mock_state.num_open_substreams(), 2); + } + + #[runtime::test] + async fn it_reuses_unused_connections() { + let (conn, mock_state, _shutdown) = setup(2).await; + let mut pool = LazyPool::::new(conn, 2, Default::default()); + let _ = pool.get_least_used_or_connect().await.unwrap(); + assert_eq!(pool.refresh_num_active_connections(), 1); + async_assert_eventually!(mock_state.num_open_substreams(), expect = 1); + let _ = pool.get_least_used_or_connect().await.unwrap(); + assert_eq!(pool.refresh_num_active_connections(), 1); + async_assert_eventually!(mock_state.num_open_substreams(), expect = 1); + } + + #[runtime::test] + async fn it_reuses_least_used_connections() { + let (conn, mock_state, _shutdown) = setup(2).await; + let mut pool = LazyPool::::new(conn, 2, Default::default()); + let conn1 = pool.get_least_used_or_connect().await.unwrap(); + assert_eq!(mock_state.num_open_substreams(), 1); + let conn2 = pool.get_least_used_or_connect().await.unwrap(); + assert_eq!(mock_state.num_open_substreams(), 2); + let conn3 = pool.get_least_used_or_connect().await.unwrap(); + assert_eq!(conn3.lease_count(), 2); + assert!((conn1.lease_count() == 1) ^ (conn2.lease_count() == 1)); + 
assert_eq!(mock_state.num_open_substreams(), 2); + let conn4 = pool.get_least_used_or_connect().await.unwrap(); + assert_eq!(conn4.lease_count(), 2); + assert_eq!(mock_state.num_open_substreams(), 2); + + assert_eq!(conn1.lease_count(), 2); + assert_eq!(conn2.lease_count(), 2); + assert_eq!(conn3.lease_count(), 2); + } + + #[runtime::test] + async fn it_reuses_used_connections_if_necessary() { + let (conn, mock_state, _shutdown) = setup(2).await; + let mut pool = LazyPool::::new(conn, 1, Default::default()); + let conn1 = pool.get_least_used_or_connect().await.unwrap(); + assert_eq!(mock_state.num_open_substreams(), 1); + let conn2 = pool.get_least_used_or_connect().await.unwrap(); + assert_eq!(mock_state.num_open_substreams(), 1); + drop(conn1); + drop(conn2); + } + + #[runtime::test] + async fn it_gracefully_handles_insufficient_server_sessions() { + let (conn, mock_state, _shutdown) = setup(1).await; + let mut pool = LazyPool::::new(conn, 2, Default::default()); + let conn1 = pool.get_least_used_or_connect().await.unwrap(); + assert_eq!(mock_state.num_open_substreams(), 1); + let conn2 = pool.get_least_used_or_connect().await.unwrap(); + assert_eq!(mock_state.num_open_substreams(), 1); + assert_eq!(conn1.lease_count(), 2); + assert_eq!(conn2.lease_count(), 2); + } + + #[runtime::test] + async fn it_prunes_disconnected_sessions() { + let (conn, mock_state, _shutdown) = setup(2).await; + let mut pool = LazyPool::::new(conn, 2, Default::default()); + let mut conn1 = pool.get_least_used_or_connect().await.unwrap(); + assert_eq!(mock_state.num_open_substreams(), 1); + let _conn2 = pool.get_least_used_or_connect().await.unwrap(); + assert_eq!(mock_state.num_open_substreams(), 2); + conn1.close(); + drop(conn1); + async_assert_eventually!(mock_state.num_open_substreams(), expect = 1); + assert_eq!(pool.refresh_num_active_connections(), 1); + let _conn3 = pool.get_least_used_or_connect().await.unwrap(); + assert_eq!(pool.refresh_num_active_connections(), 2); + 
assert_eq!(mock_state.num_open_substreams(), 2); + } + + #[runtime::test] + async fn it_fails_when_peer_connected_disconnects() { + let (mut peer_conn, _, _shutdown) = setup(2).await; + let mut pool = LazyPool::::new(peer_conn.clone(), 2, Default::default()); + let mut _conn1 = pool.get_least_used_or_connect().await.unwrap(); + peer_conn.disconnect().await.unwrap(); + let err = pool.get_least_used_or_connect().await.unwrap_err(); + unpack_enum!(RpcClientPoolError::PeerConnectionDropped { .. } = err); + } +} diff --git a/comms/src/protocol/rpc/test/greeting_service.rs b/comms/src/protocol/rpc/test/greeting_service.rs new file mode 100644 index 0000000000..0e190473dd --- /dev/null +++ b/comms/src/protocol/rpc/test/greeting_service.rs @@ -0,0 +1,394 @@ +// Copyright 2021, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use crate::{ + async_trait, + protocol::{ + rpc::{NamedProtocolService, Request, Response, RpcError, RpcServerError, RpcStatus, Streaming}, + ProtocolId, + }, +}; +use core::iter; +use futures::{channel::mpsc, stream, SinkExt, StreamExt}; +use std::{sync::Arc, time::Duration}; +use tari_crypto::tari_utilities::hex::Hex; +use tokio::{sync::RwLock, task, time}; + +#[async_trait] +// #[tari_rpc(protocol_name = "/tari/greeting/1.0", server_struct = GreetingServer, client_struct = GreetingClient)] +pub trait GreetingRpc: Send + Sync + 'static { + // #[rpc(method = 1)] + async fn say_hello(&self, request: Request) -> Result, RpcStatus>; + // #[rpc(method = 2)] + async fn return_error(&self, request: Request<()>) -> Result, RpcStatus>; + // #[rpc(method = 3)] + async fn get_greetings(&self, request: Request) -> Result, RpcStatus>; + // #[rpc(method = 4)] + async fn streaming_error(&self, request: Request) -> Result, RpcStatus>; + // #[rpc(method = 5)] + async fn streaming_error2(&self, _: Request<()>) -> Result, RpcStatus>; + // #[rpc(method = 6)] + async fn get_public_key_hex(&self, _: Request<()>) -> Result; + // #[rpc(method = 7)] + async fn reply_with_msg_of_size(&self, request: Request) -> Result, RpcStatus>; +} + +pub struct GreetingService { + greetings: Vec, +} + +impl GreetingService { + pub const DEFAULT_GREETINGS: &'static [&'static str] = + &["Sawubona", "Jambo", "Bonjour", "Hello", "Molo", "Olá", "سلام", "你好"]; + + pub fn 
new(greetings: &[&str]) -> Self { + Self { + greetings: greetings.iter().map(ToString::to_string).collect(), + } + } +} + +impl Default for GreetingService { + fn default() -> Self { + Self::new(Self::DEFAULT_GREETINGS) + } +} + +#[async_trait] +impl GreetingRpc for GreetingService { + async fn say_hello(&self, request: Request) -> Result, RpcStatus> { + let msg = request.message(); + let greeting = self + .greetings + .get(msg.language as usize) + .ok_or_else(|| RpcStatus::bad_request(format!("{} is not a valid language identifier", msg.language)))?; + + let greeting = format!("{} {}", greeting, msg.name); + Ok(Response::new(SayHelloResponse { greeting })) + } + + async fn return_error(&self, _: Request<()>) -> Result, RpcStatus> { + Err(RpcStatus::not_implemented("I haven't gotten to this yet :(")) + } + + async fn get_greetings(&self, request: Request) -> Result, RpcStatus> { + let (mut tx, rx) = mpsc::channel(1); + let num = *request.message(); + let greetings = self.greetings[..num as usize].to_vec(); + task::spawn(async move { + let iter = greetings.into_iter().map(Ok); + let mut stream = stream::iter(iter) + // "Extra" Result::Ok is to satisfy send_all + .map(Ok); + match tx.send_all(&mut stream).await { + Ok(_) => {}, + Err(_err) => { + // Log error + }, + } + }); + + Ok(Streaming::new(rx)) + } + + async fn streaming_error(&self, request: Request) -> Result, RpcStatus> { + Err(RpcStatus::bad_request(format!( + "What does '{}' mean?", + request.message() + ))) + } + + async fn streaming_error2(&self, _: Request<()>) -> Result, RpcStatus> { + let (mut tx, rx) = mpsc::channel(2); + tx.send(Ok("This is ok".to_string())).await.unwrap(); + tx.send(Err(RpcStatus::bad_request("This is a problem"))).await.unwrap(); + + Ok(Streaming::new(rx)) + } + + async fn get_public_key_hex(&self, req: Request<()>) -> Result { + let context = req.context(); + let peer = context.fetch_peer().await?; + Ok(peer.public_key.to_hex()) + } + + async fn reply_with_msg_of_size(&self, 
request: Request) -> Result, RpcStatus> { + let size = request.into_message() as usize; + Ok(iter::repeat(0).take(size).collect()) + } +} + +pub struct SlowGreetingService { + delay: Arc>, +} + +impl SlowGreetingService { + pub fn new(delay: Arc>) -> Self { + Self { delay } + } +} + +#[async_trait] +impl GreetingRpc for SlowGreetingService { + async fn say_hello(&self, _: Request) -> Result, RpcStatus> { + let delay = *self.delay.read().await; + time::delay_for(delay).await; + Ok(Response::new(SayHelloResponse { + greeting: "took a while to load".to_string(), + })) + } + + async fn return_error(&self, _: Request<()>) -> Result, RpcStatus> { + unimplemented!() + } + + async fn get_greetings(&self, _: Request) -> Result, RpcStatus> { + unimplemented!() + } + + async fn streaming_error(&self, _: Request) -> Result, RpcStatus> { + unimplemented!() + } + + async fn streaming_error2(&self, _: Request<()>) -> Result, RpcStatus> { + unimplemented!() + } + + async fn get_public_key_hex(&self, _: Request<()>) -> Result { + unimplemented!() + } + + async fn reply_with_msg_of_size(&self, _: Request) -> Result, RpcStatus> { + unimplemented!() + } +} + +#[derive(prost::Message)] +pub struct SayHelloRequest { + #[prost(string, tag = "1")] + pub name: String, + #[prost(uint32, tag = "2")] + pub language: u32, +} + +#[derive(prost::Message)] +pub struct SayHelloResponse { + #[prost(string, tag = "1")] + pub greeting: String, +} + +// This is approximately what is generated from the #[tari_rpc(...)] macro. 
+mod __rpc_deps { + pub use crate::protocol::rpc::__macro_reexports::*; +} + +pub struct GreetingServer { + inner: Arc, +} + +impl GreetingServer { + pub fn new(service: T) -> Self { + Self { + inner: Arc::new(service), + } + } +} + +impl __rpc_deps::Service> for GreetingServer { + type Error = RpcStatus; + type Future = __rpc_deps::BoxFuture<'static, Result, RpcStatus>>; + type Response = Response<__rpc_deps::Body>; + + fn poll_ready(&mut self, _: &mut std::task::Context<'_>) -> std::task::Poll> { + std::task::Poll::Ready(Ok(())) + } + + fn call(&mut self, req: Request<__rpc_deps::Bytes>) -> Self::Future { + use __rpc_deps::IntoBody; + let inner = self.inner.clone(); + match req.method().id() { + // say_hello + 1 => { + let fut = async move { + let resp = inner.say_hello(req.decode()?).await?; + Ok(resp.map(IntoBody::into_body)) + }; + Box::pin(fut) + }, + // return_error + 2 => { + let fut = async move { + let resp = inner.return_error(req.decode()?).await?; + Ok(resp.map(IntoBody::into_body)) + }; + Box::pin(fut) + }, + // get_greetings + 3 => { + let fut = async move { + let resp = inner.get_greetings(req.decode()?).await?; + Ok(Response::new(resp.into_body())) + }; + Box::pin(fut) + }, + // streaming_error + 4 => { + let fut = async move { + let resp = inner.streaming_error(req.decode()?).await?; + Ok(Response::new(resp.into_body())) + }; + Box::pin(fut) + }, + // streaming_error2 + 5 => { + let fut = async move { + let resp = inner.streaming_error2(req.decode()?).await?; + Ok(Response::new(resp.into_body())) + }; + Box::pin(fut) + }, + // get_public_key_hex + 6 => { + let fut = async move { + let resp = inner.get_public_key_hex(req.decode()?).await?; + Ok(Response::new(resp.into_body())) + }; + Box::pin(fut) + }, + // reply_with_msg_of_size + 7 => { + let fut = async move { + let resp = inner.reply_with_msg_of_size(req.decode()?).await?; + Ok(Response::new(resp.into_body())) + }; + Box::pin(fut) + }, + + id => 
Box::pin(__rpc_deps::future::ready(Err(RpcStatus::unsupported_method(format!( + "Method identifier `{}` is not recognised or supported", + id + ))))), + } + } +} + +impl Clone for GreetingServer { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + } + } +} + +impl __rpc_deps::NamedProtocolService for GreetingServer { + const PROTOCOL_NAME: &'static [u8] = b"/test/greeting/1.0"; +} + +/// A service maker for GreetingServer +impl __rpc_deps::Service for GreetingServer +where T: GreetingRpc +{ + type Error = RpcServerError; + type Future = __rpc_deps::future::Ready>; + type Response = Self; + + fn poll_ready(&mut self, _: &mut std::task::Context<'_>) -> std::task::Poll> { + std::task::Poll::Ready(Ok(())) + } + + fn call(&mut self, _: ProtocolId) -> Self::Future { + __rpc_deps::future::ready(Ok(self.clone())) + } +} + +#[derive(Debug, Clone)] +pub struct GreetingClient { + inner: __rpc_deps::RpcClient, +} + +impl __rpc_deps::NamedProtocolService for GreetingClient { + const PROTOCOL_NAME: &'static [u8] = b"/test/greeting/1.0"; +} + +impl GreetingClient { + pub async fn connect(framed: __rpc_deps::CanonicalFraming) -> Result + where TSubstream: __rpc_deps::AsyncRead + __rpc_deps::AsyncWrite + Unpin + Send + 'static { + let inner = __rpc_deps::RpcClient::connect(Default::default(), framed, Self::PROTOCOL_NAME.into()).await?; + Ok(Self { inner }) + } + + pub fn builder() -> __rpc_deps::RpcClientBuilder { + __rpc_deps::RpcClientBuilder::new() + } + + pub async fn say_hello(&mut self, request: SayHelloRequest) -> Result { + self.inner.request_response(request, 1).await + } + + pub async fn return_error(&mut self) -> Result<(), RpcError> { + self.inner.request_response((), 2).await + } + + pub async fn get_greetings(&mut self, request: u32) -> Result<__rpc_deps::ClientStreaming, RpcError> { + self.inner.server_streaming(request, 3).await + } + + pub async fn streaming_error(&mut self, request: String) -> Result<__rpc_deps::ClientStreaming, RpcError> { + 
self.inner.server_streaming(request, 4).await + } + + pub async fn streaming_error2(&mut self) -> Result<__rpc_deps::ClientStreaming, RpcError> { + self.inner.server_streaming((), 5).await + } + + pub async fn get_public_key_hex(&mut self) -> Result { + self.inner.request_response((), 6).await + } + + pub async fn reply_with_msg_of_size(&mut self, request: u64) -> Result { + self.inner.request_response(request, 7).await + } + + pub async fn get_last_request_latency(&mut self) -> Result, RpcError> { + self.inner.get_last_request_latency().await + } + + pub async fn ping(&mut self) -> Result { + self.inner.ping().await + } + + pub fn close(&mut self) { + self.inner.close(); + } +} + +impl From<__rpc_deps::RpcClient> for GreetingClient { + fn from(inner: __rpc_deps::RpcClient) -> Self { + Self { inner } + } +} + +impl __rpc_deps::RpcPoolClient for GreetingClient { + fn is_connected(&self) -> bool { + self.inner.is_connected() + } +} diff --git a/comms/src/protocol/rpc/test/handshake.rs b/comms/src/protocol/rpc/test/handshake.rs index 29a053d0f9..cdd79746f2 100644 --- a/comms/src/protocol/rpc/test/handshake.rs +++ b/comms/src/protocol/rpc/test/handshake.rs @@ -29,9 +29,9 @@ use crate::{ Handshake, }, runtime, + runtime::task, }; use tari_test_utils::unpack_enum; -use tokio::task; #[runtime::test_basic] async fn it_performs_the_handshake() { diff --git a/comms/src/protocol/rpc/test.rs b/comms/src/protocol/rpc/test/mod.rs similarity index 95% rename from comms/src/protocol/rpc/test.rs rename to comms/src/protocol/rpc/test/mod.rs index efd3990150..873db4c044 100644 --- a/comms/src/protocol/rpc/test.rs +++ b/comms/src/protocol/rpc/test/mod.rs @@ -1,4 +1,4 @@ -// Copyright 2020, The Tari Project +// Copyright 2021, The Tari Project // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the // following conditions are met: @@ -20,7 +20,9 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 
OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +mod client_pool; mod comms_integration; +mod greeting_service; mod handshake; mod mock; mod smoke; diff --git a/comms/src/protocol/rpc/test/smoke.rs b/comms/src/protocol/rpc/test/smoke.rs index fb8c4c9d65..a762ac4c9c 100644 --- a/comms/src/protocol/rpc/test/smoke.rs +++ b/comms/src/protocol/rpc/test/smoke.rs @@ -25,17 +25,22 @@ use crate::{ memsocket::MemorySocket, protocol::{ rpc::{ - body::Streaming, context::RpcCommsBackend, error::HandshakeRejectReason, handshake::RpcHandshakeError, - message::Request, - server::RpcServerError, - test::mock::create_mocked_rpc_context, - Response, + test::{ + greeting_service::{ + GreetingClient, + GreetingRpc, + GreetingServer, + GreetingService, + SayHelloRequest, + SlowGreetingService, + }, + mock::create_mocked_rpc_context, + }, RpcError, RpcServer, - RpcStatus, RpcStatusCode, RPC_MAX_FRAME_SIZE, }, @@ -44,71 +49,55 @@ use crate::{ ProtocolNotification, }, runtime, - runtime::task, test_utils::node_identity::build_node_identity, NodeIdentity, }; -use async_trait::async_trait; -use futures::{channel::mpsc, stream, SinkExt, StreamExt}; -use std::{iter, sync::Arc, time::Duration}; +use futures::{channel::mpsc, future, future::Either, SinkExt, StreamExt}; +use std::{sync::Arc, time::Duration}; use tari_crypto::tari_utilities::hex::Hex; use tari_shutdown::Shutdown; use tari_test_utils::unpack_enum; -use tokio::{sync::RwLock, time}; - -#[async_trait] -// #[tari_rpc(protocol_name = "/tari/greeting/1.0", server_struct = GreetingServer, client_struct = GreetingClient)] -pub trait GreetingRpc: Send + Sync + 'static { - // #[rpc(method = 1)] - async fn say_hello(&self, request: Request) -> Result, RpcStatus>; - // #[rpc(method = 2)] - async fn return_error(&self, request: Request<()>) -> Result, RpcStatus>; - // #[rpc(method = 3)] - async fn get_greetings(&self, request: Request) -> Result, RpcStatus>; - // #[rpc(method 
= 4)] - async fn streaming_error(&self, request: Request) -> Result, RpcStatus>; - // #[rpc(method = 5)] - async fn streaming_error2(&self, _: Request<()>) -> Result, RpcStatus>; - // #[rpc(method = 6)] - async fn get_public_key_hex(&self, _: Request<()>) -> Result; - // #[rpc(method = 7)] - async fn reply_with_msg_of_size(&self, request: Request) -> Result, RpcStatus>; -} +use tokio::{sync::RwLock, task}; -async fn setup_service( - service: T, +pub(super) async fn setup_service( + service_impl: T, num_concurrent_sessions: usize, ) -> ( mpsc::Sender>, - task::JoinHandle>, + task::JoinHandle<()>, RpcCommsBackend, Shutdown, ) { let (notif_tx, notif_rx) = mpsc::channel(1); let shutdown = Shutdown::new(); let (context, _) = create_mocked_rpc_context(); - let server_hnd = task::spawn( - RpcServer::builder() - .with_maximum_simultaneous_sessions(num_concurrent_sessions) - .with_minimum_client_deadline(Duration::from_secs(0)) - .with_shutdown_signal(shutdown.to_signal()) - .finish() - .add_service(GreetingServer::new(service)) - .serve(notif_rx, context.clone()), - ); + let server_hnd = task::spawn({ + let context = context.clone(); + let shutdown_signal = shutdown.to_signal(); + async move { + let fut = RpcServer::builder() + .with_maximum_simultaneous_sessions(num_concurrent_sessions) + .with_minimum_client_deadline(Duration::from_secs(0)) + .finish() + .add_service(GreetingServer::new(service_impl)) + .serve(notif_rx, context); + + futures::pin_mut!(fut); + + match future::select(shutdown_signal, fut).await { + Either::Left((r, _)) => r.unwrap(), + Either::Right((r, _)) => r.unwrap(), + } + } + }); (notif_tx, server_hnd, context, shutdown) } -async fn setup( - service: T, +pub(super) async fn setup( + service_impl: T, num_concurrent_sessions: usize, -) -> ( - MemorySocket, - task::JoinHandle>, - Arc, - Shutdown, -) { - let (mut notif_tx, server_hnd, context, shutdown) = setup_service(service, num_concurrent_sessions).await; +) -> (MemorySocket, task::JoinHandle<()>, 
Arc, Shutdown) { + let (mut notif_tx, server_hnd, context, shutdown) = setup_service(service_impl, num_concurrent_sessions).await; let (inbound, outbound) = MemorySocket::new_pair(); let node_identity = build_node_identity(Default::default()); @@ -126,10 +115,8 @@ async fn setup( } #[runtime::test_basic] -async fn request_reponse_errors_and_streaming() // a.k.a smoke test -{ - let greetings = &["Sawubona", "Jambo", "Bonjour", "Hello", "Molo", "Olá"]; - let (socket, server_hnd, node_identity, mut shutdown) = setup(GreetingService::new(greetings), 1).await; +async fn request_response_errors_and_streaming() { + let (socket, server_hnd, node_identity, mut shutdown) = setup(GreetingService::default(), 1).await; let framed = framing::canonical(socket, 1024); let mut client = GreetingClient::builder() @@ -197,7 +184,47 @@ async fn request_reponse_errors_and_streaming() // a.k.a smoke test unpack_enum!(RpcError::ClientClosed = err); shutdown.trigger().unwrap(); - server_hnd.await.unwrap().unwrap(); + server_hnd.await.unwrap(); +} + +#[runtime::test_basic] +async fn concurrent_requests() { + let (socket, _, _, _shutdown) = setup(GreetingService::default(), 1).await; + + let framed = framing::canonical(socket, 1024); + let mut client = GreetingClient::builder() + .with_deadline(Duration::from_secs(5)) + .connect(framed) + .await + .unwrap(); + + let mut cloned_client = client.clone(); + let spawned1 = task::spawn(async move { + let resp = cloned_client + .say_hello(SayHelloRequest { + name: "Madeupington".to_string(), + language: 2, + }) + .await + .unwrap(); + resp + }); + let mut cloned_client = client.clone(); + let spawned2 = task::spawn(async move { + let resp = cloned_client.get_greetings(5).await.unwrap().collect::>().await; + resp.into_iter().map(Result::unwrap).collect::>() + }); + let resp = client + .say_hello(SayHelloRequest { + name: "Yathvan".to_string(), + language: 1, + }) + .await + .unwrap(); + assert_eq!(resp.greeting, "Jambo Yathvan"); + + 
assert_eq!(spawned1.await.unwrap().greeting, "Bonjour Madeupington"); + assert_eq!(spawned2.await.unwrap(), GreetingService::DEFAULT_GREETINGS[..5]); } #[runtime::test_basic] @@ -222,14 +249,16 @@ async fn response_too_big() { } #[runtime::test_basic] -async fn server_shutdown_after_connect() { - let (socket, _, _, mut shutdown) = setup(GreetingService::new(&[]), 1).await; - let framed = framing::canonical(socket, 1024); - let mut client = GreetingClient::connect(framed).await.unwrap(); - shutdown.trigger().unwrap(); +async fn ping_latency() { + let (socket, _, _, _shutdown) = setup(GreetingService::new(&[]), 1).await; - let err = client.say_hello(Default::default()).await.unwrap_err(); - unpack_enum!(RpcError::RequestCancelled = err); + let framed = framing::canonical(socket, RPC_MAX_FRAME_SIZE); + let mut client = GreetingClient::builder().connect(framed).await.unwrap(); + + let latency = client.ping().await.unwrap(); + // This is plenty (typically would be < 1ms over MemorySocket), however CI can be very slow, so to prevent flakiness + // we leave a wide berth + assert!(latency.as_secs() < 5); } #[runtime::test_basic] @@ -305,327 +334,3 @@ async fn rejected_no_sessions_available() { RpcError::HandshakeError(RpcHandshakeError::Rejected(HandshakeRejectReason::NoSessionsAvailable)) )); } - -//---------------------------------- Greeting Service --------------------------------------------// - -pub struct GreetingService { - greetings: Vec, -} - -impl GreetingService { - pub fn new(greetings: &[&str]) -> Self { - Self { - greetings: greetings.iter().map(ToString::to_string).collect(), - } - } -} - -#[async_trait] -impl GreetingRpc for GreetingService { - async fn say_hello(&self, request: Request) -> Result, RpcStatus> { - let msg = request.message(); - let greeting = self - .greetings - .get(msg.language as usize) - .ok_or_else(|| RpcStatus::bad_request(format!("{} is not a valid language identifier", msg.language)))?; - - let greeting = format!("{} {}", greeting, 
msg.name); - Ok(Response::new(SayHelloResponse { greeting })) - } - - async fn return_error(&self, _: Request<()>) -> Result, RpcStatus> { - Err(RpcStatus::not_implemented("I haven't gotten to this yet :(")) - } - - async fn get_greetings(&self, request: Request) -> Result, RpcStatus> { - let (mut tx, rx) = mpsc::channel(1); - let num = *request.message(); - let greetings = self.greetings[..num as usize].to_vec(); - task::spawn(async move { - let iter = greetings.into_iter().map(Ok); - let mut stream = stream::iter(iter) - // "Extra" Result::Ok is to satisfy send_all - .map(Ok); - match tx.send_all(&mut stream).await { - Ok(_) => {}, - Err(_err) => { - // Log error - }, - } - }); - - Ok(Streaming::new(rx)) - } - - async fn streaming_error(&self, request: Request) -> Result, RpcStatus> { - Err(RpcStatus::bad_request(format!( - "What does '{}' mean?", - request.message() - ))) - } - - async fn streaming_error2(&self, _: Request<()>) -> Result, RpcStatus> { - let (mut tx, rx) = mpsc::channel(2); - tx.send(Ok("This is ok".to_string())).await.unwrap(); - tx.send(Err(RpcStatus::bad_request("This is a problem"))).await.unwrap(); - - Ok(Streaming::new(rx)) - } - - async fn get_public_key_hex(&self, req: Request<()>) -> Result { - let context = req.context(); - let peer = context.fetch_peer().await?; - Ok(peer.public_key.to_hex()) - } - - async fn reply_with_msg_of_size(&self, request: Request) -> Result, RpcStatus> { - let size = request.into_message() as usize; - Ok(iter::repeat(0).take(size).collect()) - } -} - -pub struct SlowGreetingService { - delay: Arc>, -} - -impl SlowGreetingService { - pub fn new(delay: Arc>) -> Self { - Self { delay } - } -} - -#[async_trait] -impl GreetingRpc for SlowGreetingService { - async fn say_hello(&self, _: Request) -> Result, RpcStatus> { - let delay = *self.delay.read().await; - time::delay_for(delay).await; - Ok(Response::new(SayHelloResponse { - greeting: "took a while to load".to_string(), - })) - } - - async fn return_error(&self, 
_: Request<()>) -> Result, RpcStatus> { - unimplemented!() - } - - async fn get_greetings(&self, _: Request) -> Result, RpcStatus> { - unimplemented!() - } - - async fn streaming_error(&self, _: Request) -> Result, RpcStatus> { - unimplemented!() - } - - async fn streaming_error2(&self, _: Request<()>) -> Result, RpcStatus> { - unimplemented!() - } - - async fn get_public_key_hex(&self, _: Request<()>) -> Result { - unimplemented!() - } - - async fn reply_with_msg_of_size(&self, _: Request) -> Result, RpcStatus> { - unimplemented!() - } -} - -#[derive(prost::Message)] -pub struct SayHelloRequest { - #[prost(string, tag = "1")] - name: String, - #[prost(uint32, tag = "2")] - language: u32, -} - -#[derive(prost::Message)] -pub struct SayHelloResponse { - #[prost(string, tag = "1")] - greeting: String, -} - -// This is approximately what is generated from the #[tari_rpc(...)] macro. -mod __rpc_deps { - pub use crate::protocol::rpc::__macro_reexports::*; -} - -pub struct GreetingServer { - inner: Arc, -} - -impl GreetingServer { - pub fn new(service: T) -> Self { - Self { - inner: Arc::new(service), - } - } -} - -impl __rpc_deps::Service> for GreetingServer { - type Error = RpcStatus; - type Future = __rpc_deps::BoxFuture<'static, Result, RpcStatus>>; - type Response = Response<__rpc_deps::Body>; - - fn poll_ready(&mut self, _: &mut std::task::Context<'_>) -> std::task::Poll> { - std::task::Poll::Ready(Ok(())) - } - - fn call(&mut self, req: Request<__rpc_deps::Bytes>) -> Self::Future { - use __rpc_deps::IntoBody; - let inner = self.inner.clone(); - match req.method().id() { - // say_hello - 1 => { - let fut = async move { - let resp = inner.say_hello(req.decode()?).await?; - Ok(resp.map(IntoBody::into_body)) - }; - Box::pin(fut) - }, - // return_error - 2 => { - let fut = async move { - let resp = inner.return_error(req.decode()?).await?; - Ok(resp.map(IntoBody::into_body)) - }; - Box::pin(fut) - }, - // get_greetings - 3 => { - let fut = async move { - let resp = 
inner.get_greetings(req.decode()?).await?; - Ok(Response::new(resp.into_body())) - }; - Box::pin(fut) - }, - // streaming_error - 4 => { - let fut = async move { - let resp = inner.streaming_error(req.decode()?).await?; - Ok(Response::new(resp.into_body())) - }; - Box::pin(fut) - }, - // streaming_error2 - 5 => { - let fut = async move { - let resp = inner.streaming_error2(req.decode()?).await?; - Ok(Response::new(resp.into_body())) - }; - Box::pin(fut) - }, - // get_public_key_hex - 6 => { - let fut = async move { - let resp = inner.get_public_key_hex(req.decode()?).await?; - Ok(Response::new(resp.into_body())) - }; - Box::pin(fut) - }, - // reply_with_msg_of_size - 7 => { - let fut = async move { - let resp = inner.reply_with_msg_of_size(req.decode()?).await?; - Ok(Response::new(resp.into_body())) - }; - Box::pin(fut) - }, - - id => Box::pin(__rpc_deps::future::ready(Err(RpcStatus::unsupported_method(format!( - "Method identifier `{}` is not recognised or supported", - id - ))))), - } - } -} - -impl Clone for GreetingServer { - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - } - } -} - -impl __rpc_deps::NamedProtocolService for GreetingServer { - const PROTOCOL_NAME: &'static [u8] = b"/test/greeting/1.0"; -} - -/// A service maker for GreetingServer -impl __rpc_deps::Service for GreetingServer -where T: GreetingRpc -{ - type Error = RpcServerError; - type Future = __rpc_deps::future::Ready>; - type Response = Self; - - fn poll_ready(&mut self, _: &mut std::task::Context<'_>) -> std::task::Poll> { - std::task::Poll::Ready(Ok(())) - } - - fn call(&mut self, _: ProtocolId) -> Self::Future { - __rpc_deps::future::ready(Ok(self.clone())) - } -} - -#[derive(Debug, Clone)] -pub struct GreetingClient { - inner: __rpc_deps::RpcClient, -} - -impl __rpc_deps::NamedProtocolService for GreetingClient { - const PROTOCOL_NAME: &'static [u8] = b"/test/greeting/1.0"; -} - -impl GreetingClient { - pub async fn connect(framed: __rpc_deps::CanonicalFraming) -> 
Result - where TSubstream: __rpc_deps::AsyncRead + __rpc_deps::AsyncWrite + Unpin + Send + 'static { - let inner = __rpc_deps::RpcClient::connect(Default::default(), framed).await?; - Ok(Self { inner }) - } - - pub fn builder() -> __rpc_deps::RpcClientBuilder { - __rpc_deps::RpcClientBuilder::new() - } - - pub async fn say_hello(&mut self, request: SayHelloRequest) -> Result { - self.inner.request_response(request, 1).await - } - - pub async fn return_error(&mut self) -> Result<(), RpcError> { - self.inner.request_response((), 2).await - } - - pub async fn get_greetings(&mut self, request: u32) -> Result<__rpc_deps::ClientStreaming, RpcError> { - self.inner.server_streaming(request, 3).await - } - - pub async fn streaming_error(&mut self, request: String) -> Result<__rpc_deps::ClientStreaming, RpcError> { - self.inner.server_streaming(request, 4).await - } - - pub async fn streaming_error2(&mut self) -> Result<__rpc_deps::ClientStreaming, RpcError> { - self.inner.server_streaming((), 5).await - } - - pub async fn get_public_key_hex(&mut self) -> Result { - self.inner.request_response((), 6).await - } - - pub async fn reply_with_msg_of_size(&mut self, request: u64) -> Result { - self.inner.request_response(request, 7).await - } - - pub async fn get_last_request_latency(&mut self) -> Result, RpcError> { - self.inner.get_last_request_latency().await - } - - pub fn close(&mut self) { - self.inner.close(); - } -} - -impl From<__rpc_deps::RpcClient> for GreetingClient { - fn from(inner: __rpc_deps::RpcClient) -> Self { - Self { inner } - } -} diff --git a/comms/src/socks/client.rs b/comms/src/socks/client.rs index b9d7dc255d..f155f2f420 100644 --- a/comms/src/socks/client.rs +++ b/comms/src/socks/client.rs @@ -104,6 +104,7 @@ where TSocket: AsyncRead + AsyncWrite + Unpin /// Connects to a address through a SOCKS5 proxy and returns the 'upgraded' socket. This consumes the /// `Socks5Client` as once connected, the socks protocol does not recognise any further commands. 
+ #[tracing::instrument(name = "socks::connect", skip(self), err)] pub async fn connect(mut self, address: &Multiaddr) -> Result<(TSocket, Multiaddr)> { let address = self.execute_command(Command::Connect, address).await?; Ok((self.protocol.socket, address)) @@ -111,6 +112,7 @@ where TSocket: AsyncRead + AsyncWrite + Unpin /// Requests the tor proxy to resolve a DNS address is resolved into an IP address. /// This operation only works with the tor SOCKS proxy. + #[tracing::instrument(name = "socks:tor_resolve", skip(self), err)] pub async fn tor_resolve(&mut self, address: &Multiaddr) -> Result { // Tor resolve does not return the port back let (dns, rest) = multiaddr_split_first(&address); @@ -124,6 +126,7 @@ where TSocket: AsyncRead + AsyncWrite + Unpin /// Requests the tor proxy to reverse resolve an IP address into a DNS address if it is able. /// This operation only works with the tor SOCKS proxy. + #[tracing::instrument(name = "socks::tor_resolve_ptr", skip(self), err)] pub async fn tor_resolve_ptr(&mut self, address: &Multiaddr) -> Result { self.execute_command(Command::TorResolvePtr, address).await } diff --git a/comms/src/test_utils/mocks/connection_manager.rs b/comms/src/test_utils/mocks/connection_manager.rs index 1637074ff7..cc489af60e 100644 --- a/comms/src/test_utils/mocks/connection_manager.rs +++ b/comms/src/test_utils/mocks/connection_manager.rs @@ -131,7 +131,11 @@ impl ConnectionManagerMock { self.state.inc_call_count(); self.state.add_call(format!("{:?}", req)).await; match req { - DialPeer(node_id, reply_tx) => { + DialPeer { + node_id, + reply_tx, + tracing_id: _, + } => { // Send Ok(conn) if we have an active connection, otherwise Err(DialConnectFailedAllAddresses) let _ = reply_tx.send( self.state diff --git a/comms/src/test_utils/mocks/connectivity_manager.rs b/comms/src/test_utils/mocks/connectivity_manager.rs index 56ff7156af..122a60127b 100644 --- a/comms/src/test_utils/mocks/connectivity_manager.rs +++ 
b/comms/src/test_utils/mocks/connectivity_manager.rs @@ -26,9 +26,14 @@ use crate::{ peer_manager::NodeId, runtime::task, }; -use futures::{channel::mpsc, lock::Mutex, stream::Fuse, StreamExt}; -use std::{collections::HashMap, sync::Arc}; -use tokio::sync::broadcast; +use futures::{ + channel::{mpsc, oneshot}, + lock::Mutex, + stream::Fuse, + StreamExt, +}; +use std::{collections::HashMap, sync::Arc, time::Duration}; +use tokio::{sync::broadcast, time}; pub fn create_connectivity_mock() -> (ConnectivityRequester, ConnectivityManagerMock) { let (tx, rx) = mpsc::channel(10); @@ -41,67 +46,126 @@ pub fn create_connectivity_mock() -> (ConnectivityRequester, ConnectivityManager #[derive(Debug, Clone)] pub struct ConnectivityManagerMockState { - calls: Arc>>, - active_conns: Arc>>, - selected_connections: Arc>>, - managed_peers: Arc>>, + inner: Arc>, event_tx: broadcast::Sender>, - connectivity_status: Arc>, +} + +#[derive(Debug, Default)] +struct State { + calls: Vec, + active_conns: HashMap, + pending_conns: HashMap>>>, + selected_connections: Vec, + managed_peers: Vec, + connectivity_status: ConnectivityStatus, } impl ConnectivityManagerMockState { pub fn new(event_tx: broadcast::Sender>) -> Self { Self { - calls: Arc::new(Mutex::new(Vec::new())), event_tx, - selected_connections: Arc::new(Mutex::new(Vec::new())), - managed_peers: Arc::new(Mutex::new(Vec::new())), - active_conns: Arc::new(Mutex::new(HashMap::new())), - connectivity_status: Arc::new(Mutex::new(ConnectivityStatus::Initializing)), + inner: Default::default(), } } async fn add_call(&self, call_str: String) { - self.calls.lock().await.push(call_str); + self.with_state(|state| state.calls.push(call_str)).await } pub async fn take_calls(&self) -> Vec { - self.calls.lock().await.drain(..).collect() + self.with_state(|state| state.calls.drain(..).collect()).await } pub async fn count_calls_containing(&self, pat: &str) -> usize { - self.calls.lock().await.iter().filter(|s| s.contains(pat)).count() + 
self.with_state(|state| state.calls.iter().filter(|s| s.contains(pat)).count()) + .await } pub async fn get_selected_connections(&self) -> Vec { - self.selected_connections.lock().await.clone() + self.with_state(|state| state.selected_connections.clone()).await } pub async fn set_selected_connections(&self, conns: Vec) { - *self.selected_connections.lock().await = conns; + self.with_state(|state| { + state.selected_connections = conns; + }) + .await } pub async fn set_connectivity_status(&self, status: ConnectivityStatus) { - *self.connectivity_status.lock().await = status; + self.with_state(|state| { + state.connectivity_status = status; + }) + .await } pub async fn get_managed_peers(&self) -> Vec { - self.managed_peers.lock().await.clone() + self.with_state(|state| state.managed_peers.clone()).await } #[allow(dead_code)] pub async fn call_count(&self) -> usize { - self.calls.lock().await.len() + self.with_state(|state| state.calls.len()).await + } + + pub async fn expect_dial_peer(&self, peer: &NodeId) { + let is_found = self + .with_state(|state| { + let peer_str = peer.to_string(); + state + .calls + .iter() + .any(|s| s.contains("DialPeer") && s.contains(&peer_str)) + }) + .await; + + assert!(is_found, "expected call to dial peer {} but no dial was found", peer); + } + + pub async fn await_call_count(&self, count: usize) { + let mut attempts = 0; + while self.call_count().await < count { + attempts += 1; + assert!( + attempts <= 10, + "expected call count to equal {} within 1 second but it was {}", + count, + self.call_count().await + ); + time::delay_for(Duration::from_millis(100)).await; + } } pub async fn add_active_connection(&self, conn: PeerConnection) { - self.active_conns.lock().await.insert(conn.peer_node_id().clone(), conn); + self.with_state(|state| { + let peer = conn.peer_node_id(); + state.active_conns.insert(peer.clone(), conn.clone()); + if let Some(replies) = state.pending_conns.remove(&peer) { + replies.into_iter().for_each(|reply| { + 
reply.send(Ok(conn.clone())).unwrap(); + }); + } + }) + .await + } + + pub async fn set_pending_connection(&self, peer: &NodeId) { + self.with_state(|state| { + state.pending_conns.entry(peer.clone()).or_default(); + }) + .await } #[allow(dead_code)] pub fn publish_event(&self, event: ConnectivityEvent) { self.event_tx.send(Arc::new(event)).unwrap(); } + + pub(self) async fn with_state(&self, f: F) -> R + where F: FnOnce(&mut State) -> R { + let mut lock = self.inner.lock().await; + (f)(&mut *lock) + } } pub struct ConnectivityManagerMock { @@ -124,8 +188,10 @@ impl ConnectivityManagerMock { self.state.clone() } - pub fn spawn(self) { + pub fn spawn(self) -> ConnectivityManagerMockState { + let state = self.get_shared_state(); task::spawn(Self::run(self)); + state } pub async fn run(mut self) { @@ -138,53 +204,77 @@ impl ConnectivityManagerMock { use ConnectivityRequest::*; self.state.add_call(format!("{:?}", req)).await; match req { - DialPeer(node_id, reply) => { + DialPeer { + node_id, + reply_tx, + tracing_id: _, + } => { // Send Ok(conn) if we have an active connection, otherwise Err(DialConnectFailedAllAddresses) - let _ = reply.send( - self.state - .active_conns - .lock() - .await - .get(&node_id) - .cloned() - .ok_or(ConnectionManagerError::DialConnectFailedAllAddresses) - .map_err(Into::into), - ); + self.state + .with_state(|state| match state.pending_conns.get_mut(&node_id) { + Some(replies) => { + replies.push(reply_tx); + }, + None => { + let _ = reply_tx.send( + state + .active_conns + .get(&node_id) + .cloned() + .ok_or(ConnectionManagerError::DialConnectFailedAllAddresses) + .map_err(Into::into), + ); + }, + }) + .await; }, GetConnectivityStatus(reply) => { - reply.send(*self.state.connectivity_status.lock().await).unwrap(); + self.state + .with_state(|state| { + reply.send(state.connectivity_status).unwrap(); + }) + .await; }, AddManagedPeers(peers) => { // TODO: we should not have to implement behaviour of the actor in the mock // but should 
rather have a _good_ way to check the call to the mock // was made with the correct arguments. - let mut lock = self.state.managed_peers.lock().await; - for peer in peers { - if !lock.contains(&peer) { - lock.push(peer.clone()); - } - } + self.state + .with_state(|state| { + let managed_peers = &mut state.managed_peers; + for peer in peers { + if !managed_peers.contains(&peer) { + managed_peers.push(peer.clone()); + } + } + }) + .await }, RemovePeer(node_id) => { - let mut lock = self.state.managed_peers.lock().await; - if let Some(pos) = lock.iter().position(|n| n == &node_id) { - lock.remove(pos); - } + self.state + .with_state(|state| { + if let Some(pos) = state.managed_peers.iter().position(|n| n == &node_id) { + state.managed_peers.remove(pos); + } + }) + .await; }, SelectConnections(_, reply) => { reply.send(Ok(self.state.get_selected_connections().await)).unwrap(); }, GetConnection(node_id, reply) => { - reply - .send(self.state.active_conns.lock().await.get(&node_id).cloned()) - .unwrap(); + self.state + .with_state(|state| { + reply.send(state.active_conns.get(&node_id).cloned()).unwrap(); + }) + .await }, GetAllConnectionStates(_) => unimplemented!(), BanPeer(_, _, _) => {}, GetActiveConnections(reply) => { - reply - .send(self.state.active_conns.lock().await.values().cloned().collect()) - .unwrap(); + self.state + .with_state(|state| reply.send(state.active_conns.values().cloned().collect()).unwrap()) + .await; }, WaitStarted(reply) => reply.send(()).unwrap(), } diff --git a/comms/src/test_utils/mocks/mod.rs b/comms/src/test_utils/mocks/mod.rs index 5c3c888fa7..589fd4bf18 100644 --- a/comms/src/test_utils/mocks/mod.rs +++ b/comms/src/test_utils/mocks/mod.rs @@ -30,6 +30,7 @@ mod peer_connection; pub use peer_connection::{ create_dummy_peer_connection, create_peer_connection_mock_pair, + new_peer_connection_mock_pair, PeerConnectionMock, PeerConnectionMockState, }; diff --git a/comms/src/test_utils/mocks/peer_connection.rs 
b/comms/src/test_utils/mocks/peer_connection.rs index 6940a0c5e4..13b0c77bbf 100644 --- a/comms/src/test_utils/mocks/peer_connection.rs +++ b/comms/src/test_utils/mocks/peer_connection.rs @@ -32,9 +32,9 @@ use crate::{ multiplexing, multiplexing::{IncomingSubstreams, Substream, SubstreamCounter, Yamux}, peer_manager::{NodeId, Peer, PeerFeatures}, - test_utils::transport, + test_utils::{node_identity::build_node_identity, transport}, }; -use futures::{channel::mpsc, lock::Mutex, stream::Fuse, StreamExt}; +use futures::{channel::mpsc, lock::Mutex, StreamExt}; use std::sync::{ atomic::{AtomicUsize, Ordering}, Arc, @@ -58,7 +58,6 @@ pub fn create_dummy_peer_connection(node_id: NodeId) -> (PeerConnection, mpsc::R } pub async fn create_peer_connection_mock_pair( - buf_size: usize, peer1: Peer, peer2: Peer, ) -> ( @@ -68,15 +67,15 @@ pub async fn create_peer_connection_mock_pair( PeerConnectionMockState, ) { let rt_handle = Handle::current(); - let (tx1, rx1) = mpsc::channel(buf_size); - let (tx2, rx2) = mpsc::channel(buf_size); + let (tx1, rx1) = mpsc::channel(1); + let (tx2, rx2) = mpsc::channel(1); let (listen_addr, muxer_in, muxer_out) = transport::build_multiplexed_connections().await; // Start both mocks on current handle - let mock = PeerConnectionMock::new(rx1.fuse(), muxer_in); + let mock = PeerConnectionMock::new(rx1, muxer_in); let mock_state_in = mock.get_shared_state(); rt_handle.spawn(mock.run()); - let mock = PeerConnectionMock::new(rx2.fuse(), muxer_out); + let mock = PeerConnectionMock::new(rx2, muxer_out); let mock_state_out = mock.get_shared_state(); rt_handle.spawn(mock.run()); @@ -104,6 +103,17 @@ pub async fn create_peer_connection_mock_pair( ) } +pub async fn new_peer_connection_mock_pair() -> ( + PeerConnection, + PeerConnectionMockState, + PeerConnection, + PeerConnectionMockState, +) { + let peer1 = build_node_identity(PeerFeatures::COMMUNICATION_NODE).to_peer(); + let peer2 = build_node_identity(PeerFeatures::COMMUNICATION_NODE).to_peer(); + 
create_peer_connection_mock_pair(peer1, peer2).await +} + #[derive(Debug, Clone)] pub struct PeerConnectionMockState { call_count: Arc, @@ -140,6 +150,10 @@ impl PeerConnectionMockState { self.substream_counter.clone() } + pub fn num_open_substreams(&self) -> usize { + self.substream_counter.get() + } + pub async fn next_incoming_substream(&self) -> Option { self.mux_incoming.lock().await.next().await } @@ -150,12 +164,12 @@ impl PeerConnectionMockState { } pub struct PeerConnectionMock { - receiver: Fuse>, + receiver: mpsc::Receiver, state: PeerConnectionMockState, } impl PeerConnectionMock { - pub fn new(receiver: Fuse>, muxer: Yamux) -> Self { + pub fn new(receiver: mpsc::Receiver, muxer: Yamux) -> Self { Self { receiver, state: PeerConnectionMockState::new(muxer), @@ -176,9 +190,16 @@ impl PeerConnectionMock { use PeerConnectionRequest::*; self.state.inc_call_count(); match req { - OpenSubstream(protocol, reply_tx) => match self.state.open_substream().await { + OpenSubstream { + protocol_id, + reply_tx, + tracing_id: _, + } => match self.state.open_substream().await { Ok(stream) => { - let negotiated_substream = NegotiatedSubstream { protocol, stream }; + let negotiated_substream = NegotiatedSubstream { + protocol: protocol_id, + stream, + }; reply_tx.send(Ok(negotiated_substream)).unwrap(); }, Err(err) => { @@ -186,6 +207,7 @@ impl PeerConnectionMock { }, }, Disconnect(_, reply_tx) => { + self.receiver.close(); reply_tx.send(self.state.disconnect().await).unwrap(); }, } diff --git a/comms/tests/greeting_service.rs b/comms/tests/greeting_service.rs new file mode 100644 index 0000000000..c13ae842e4 --- /dev/null +++ b/comms/tests/greeting_service.rs @@ -0,0 +1,166 @@ +// Copyright 2021, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+#![allow(dead_code)] +#![cfg(feature = "rpc")] + +use core::iter; +use futures::{channel::mpsc, stream, SinkExt, StreamExt}; +use std::{cmp, time::Duration}; +use tari_comms::{ + async_trait, + protocol::rpc::{Request, Response, RpcStatus, Streaming}, +}; +use tari_comms_rpc_macros::tari_rpc; +use tokio::{task, time}; + +#[tari_rpc(protocol_name = b"t/greeting/1", server_struct = GreetingServer, client_struct = GreetingClient)] +pub trait GreetingRpc: Send + Sync + 'static { + #[rpc(method = 1)] + async fn say_hello(&self, request: Request) -> Result, RpcStatus>; + #[rpc(method = 2)] + async fn get_greetings(&self, request: Request) -> Result, RpcStatus>; + #[rpc(method = 3)] + async fn reply_with_msg_of_size(&self, request: Request) -> Result>, RpcStatus>; + #[rpc(method = 4)] + async fn stream_large_items( + &self, + request: Request, + ) -> Result>, RpcStatus>; + #[rpc(method = 5)] + async fn slow_response(&self, request: Request) -> Result, RpcStatus>; +} + +pub struct GreetingService { + greetings: Vec, +} + +impl GreetingService { + pub const DEFAULT_GREETINGS: &'static [&'static str] = + &["Sawubona", "Jambo", "Bonjour", "Hello", "Molo", "Olá", "سلام", "你好"]; + + pub fn new(greetings: &[&str]) -> Self { + Self { + greetings: greetings.iter().map(ToString::to_string).collect(), + } + } +} + +impl Default for GreetingService { + fn default() -> Self { + Self::new(Self::DEFAULT_GREETINGS) + } +} + +#[async_trait] +impl GreetingRpc for GreetingService { + async fn say_hello(&self, request: Request) -> Result, RpcStatus> { + let msg = request.message(); + let greeting = self + .greetings + .get(msg.language as usize) + .ok_or_else(|| RpcStatus::bad_request(format!("{} is not a valid language identifier", msg.language)))?; + + let greeting = format!("{} {}", greeting, msg.name); + Ok(Response::new(SayHelloResponse { greeting })) + } + + async fn get_greetings(&self, request: Request) -> Result, RpcStatus> { + let num = *request.message(); + let (mut tx, rx) = 
mpsc::channel(num as usize); + let greetings = self.greetings[..cmp::min(num as usize + 1, self.greetings.len())].to_vec(); + task::spawn(async move { + let iter = greetings.into_iter().map(Ok); + let mut stream = stream::iter(iter) + // "Extra" Result::Ok is to satisfy send_all + .map(Ok); + tx.send_all(&mut stream).await.unwrap(); + }); + + Ok(Streaming::new(rx)) + } + + async fn reply_with_msg_of_size(&self, request: Request) -> Result>, RpcStatus> { + let size = request.into_message() as usize; + Ok(Response::new(iter::repeat(0).take(size).collect())) + } + + async fn stream_large_items( + &self, + request: Request, + ) -> Result>, RpcStatus> { + let req_id = request.context().request_id(); + let StreamLargeItemsRequest { + id, + item_size, + num_items, + } = request.into_message(); + let (mut tx, rx) = mpsc::channel(10); + let t = std::time::Instant::now(); + task::spawn(async move { + let item = iter::repeat(0u8).take(item_size as usize).collect::>(); + for (i, item) in iter::repeat_with(|| Ok(item.clone())) + .take(num_items as usize) + .enumerate() + { + tx.send(item).await.unwrap(); + log::info!( + "[{}] reqid: {} t={:.2?} sent {}/{}", + id, + req_id, + t.elapsed(), + i + 1, + num_items + ); + } + }); + Ok(Streaming::new(rx)) + } + + async fn slow_response(&self, request: Request) -> Result, RpcStatus> { + time::delay_for(Duration::from_secs(request.into_message())).await; + Ok(Response::new(())) + } +} + +#[derive(prost::Message)] +pub struct SayHelloRequest { + #[prost(string, tag = "1")] + pub name: String, + #[prost(uint32, tag = "2")] + pub language: u32, +} + +#[derive(prost::Message)] +pub struct SayHelloResponse { + #[prost(string, tag = "1")] + pub greeting: String, +} + +#[derive(prost::Message)] +pub struct StreamLargeItemsRequest { + #[prost(uint64, tag = "1")] + pub id: u64, + #[prost(uint64, tag = "2")] + pub num_items: u64, + #[prost(uint64, tag = "3")] + pub item_size: u64, +} diff --git a/comms/tests/helpers.rs b/comms/tests/helpers.rs new 
file mode 100644 index 0000000000..e67f6dd6a4 --- /dev/null +++ b/comms/tests/helpers.rs @@ -0,0 +1,62 @@ +// Copyright 2021, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +use rand::rngs::OsRng; +use std::sync::Arc; +use tari_comms::{peer_manager::PeerFeatures, types::CommsDatabase, CommsBuilder, NodeIdentity, UnspawnedCommsNode}; +use tari_shutdown::ShutdownSignal; +use tari_storage::{ + lmdb_store::{LMDBBuilder, LMDBConfig}, + LMDBWrapper, +}; +use tari_test_utils::{paths::create_temporary_data_path, random}; + +pub fn create_peer_storage() -> CommsDatabase { + let database_name = random::string(8); + let datastore = LMDBBuilder::new() + .set_path(create_temporary_data_path()) + .set_env_config(LMDBConfig::default()) + .set_max_number_of_databases(1) + .add_database(&database_name, lmdb_zero::db::CREATE) + .build() + .unwrap(); + + let peer_database = datastore.get_handle(&database_name).unwrap(); + LMDBWrapper::new(Arc::new(peer_database)) +} + +pub fn create_comms(signal: ShutdownSignal) -> UnspawnedCommsNode { + let node_identity = Arc::new(NodeIdentity::random( + &mut OsRng, + "/ip4/127.0.0.1/tcp/0".parse().unwrap(), + PeerFeatures::COMMUNICATION_NODE, + )); + + CommsBuilder::new() + .allow_test_addresses() + .with_listener_address("/ip4/127.0.0.1/tcp/0".parse().unwrap()) + .with_node_identity(node_identity) + .with_peer_storage(create_peer_storage(), None) + .with_shutdown_signal(signal) + .build() + .unwrap() +} diff --git a/comms/tests/rpc_stress.rs b/comms/tests/rpc_stress.rs new file mode 100644 index 0000000000..0376a7dddc --- /dev/null +++ b/comms/tests/rpc_stress.rs @@ -0,0 +1,282 @@ +// Copyright 2021, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+#![cfg(feature = "rpc")] + +// Run as normal, --nocapture for some extra output +// cargo test --package tari_comms --test rpc_stress run --all-features --release -- --exact --nocapture + +mod greeting_service; +use greeting_service::{GreetingClient, GreetingServer, GreetingService, StreamLargeItemsRequest}; + +mod helpers; +use helpers::create_comms; + +use futures::{future, StreamExt}; +use std::{future::Future, time::Duration}; +use tari_comms::{ + protocol::rpc::{RpcClientBuilder, RpcServer}, + transports::TcpTransport, + CommsNode, +}; +use tari_shutdown::{Shutdown, ShutdownSignal}; +use tokio::{task, time::Instant}; + +pub async fn spawn_node(signal: ShutdownSignal) -> CommsNode { + let rpc_server = RpcServer::builder() + .with_unlimited_simultaneous_sessions() + .finish() + .add_service(GreetingServer::new(GreetingService::default())); + + let comms = create_comms(signal) + .add_rpc_server(rpc_server) + .spawn_with_transport(TcpTransport::new()) + .await + .unwrap(); + + comms + .node_identity() + .set_public_address(comms.listening_address().clone()); + comms +} + +async fn run_stress_test(test_params: Params) { + let Params { + num_tasks, + num_concurrent_sessions, + deadline, + payload_size, + num_items, + } = test_params; + + let time = Instant::now(); + assert!( + num_tasks >= num_concurrent_sessions, + "concurrent tasks must be more than concurrent sessions, otherwise the (lazy) pool wont make the given number \ + of sessions" + ); + println!( + "RPC stress test will transfer a total of {} MiB of data", + (num_tasks * payload_size * num_items) / (1024 * 1024) + ); + log::info!( + "RPC stress test will transfer a total of {} MiB of data", + (num_tasks * payload_size * num_items) / (1024 * 1024) + ); + + let shutdown = Shutdown::new(); + let node1 = spawn_node(shutdown.to_signal()).await; + let node2 = spawn_node(shutdown.to_signal()).await; + + node1 + .peer_manager() + .add_peer(node2.node_identity().to_peer()) + .await + .unwrap(); + + let conn1_2 = 
node1 + .connectivity() + .dial_peer(node2.node_identity().node_id().clone()) + .await + .unwrap(); + + let client_pool = conn1_2.create_rpc_client_pool::( + num_concurrent_sessions, + RpcClientBuilder::new().with_deadline(deadline), + ); + + let mut tasks = Vec::with_capacity(num_tasks); + for i in 0..num_tasks { + let pool = client_pool.clone(); + tasks.push(task::spawn(async move { + let mut client = pool.get().await.unwrap(); + // let mut stream = client + // .get_greetings(GreetingService::DEFAULT_GREETINGS.len() as u32) + // .await + // .unwrap(); + // let mut count = 0; + // while let Some(Ok(_)) = stream.next().await { + // count += 1; + // } + // assert_eq!(count, GreetingService::DEFAULT_GREETINGS.len()); + + // let err = client.slow_response(5).await.unwrap_err(); + // unpack_enum!(RpcError::RequestFailed(err) = err); + // assert_eq!(err.status_code(), RpcStatusCode::Timeout); + + // let msg = client.reply_with_msg_of_size(1024).await.unwrap(); + // assert_eq!(msg.len(), 1024); + + let time = std::time::Instant::now(); + log::info!("[{}] start {:.2?}", i, time.elapsed(),); + let mut stream = client + .stream_large_items(StreamLargeItemsRequest { + id: i as u64, + num_items: num_items as u64, + item_size: payload_size as u64, + }) + .await + .unwrap(); + + log::info!("[{}] got stream {:.2?}", i, time.elapsed()); + let mut count = 0; + while let Some(r) = stream.next().await { + count += 1; + log::info!( + "[{}] (count = {}) consuming stream {:.2?} : {}", + i, + count, + time.elapsed(), + r.as_ref().err().map(ToString::to_string).unwrap_or_else(String::new) + ); + + let _ = r.unwrap(); + } + assert_eq!(count, num_items); + })); + } + + future::join_all(tasks).await.into_iter().for_each(Result::unwrap); + log::info!("Stress test took {:.2?}", time.elapsed()); +} + +struct Params { + pub num_tasks: usize, + pub num_concurrent_sessions: usize, + pub deadline: Duration, + pub payload_size: usize, + pub num_items: usize, +} + +async fn quick() { + 
run_stress_test(Params { + num_tasks: 10, + num_concurrent_sessions: 10, + deadline: Duration::from_secs(5), + payload_size: 1024, + num_items: 10, + }) + .await; +} + +async fn basic() { + run_stress_test(Params { + num_tasks: 10, + num_concurrent_sessions: 5, + deadline: Duration::from_secs(15), + payload_size: 1024 * 1024, + num_items: 4, + }) + .await; +} + +#[allow(dead_code)] +async fn many_small_messages() { + run_stress_test(Params { + num_tasks: 10, + num_concurrent_sessions: 10, + deadline: Duration::from_secs(5), + payload_size: 1024, + num_items: 10 * 1024, + }) + .await; +} + +async fn few_large_messages() { + run_stress_test(Params { + num_tasks: 10, + num_concurrent_sessions: 10, + deadline: Duration::from_secs(5), + payload_size: 1024 * 1024, + num_items: 10, + }) + .await; +} + +#[allow(dead_code)] +async fn payload_limit() { + run_stress_test(Params { + num_tasks: 50, + num_concurrent_sessions: 10, + deadline: Duration::from_secs(20), + payload_size: 4 * 1024 * 1024 - 100, + num_items: 2, + }) + .await; +} + +#[allow(dead_code)] +async fn high_contention() { + run_stress_test(Params { + num_tasks: 1000, + num_concurrent_sessions: 10, + deadline: Duration::from_secs(15), + payload_size: 1024 * 1024, + num_items: 4, + }) + .await; +} + +#[allow(dead_code)] +async fn high_concurrency() { + run_stress_test(Params { + num_tasks: 1000, + num_concurrent_sessions: 1000, + deadline: Duration::from_secs(15), + payload_size: 1024 * 1024, + num_items: 4, + }) + .await; +} + +#[allow(dead_code)] +async fn high_contention_high_concurrency() { + run_stress_test(Params { + num_tasks: 2000, + num_concurrent_sessions: 1000, + deadline: Duration::from_secs(15), + payload_size: 1024 * 1024, + num_items: 4, + }) + .await; +} + +#[tokio_macros::test] +async fn run() { + // let _ = env_logger::try_init(); + log_timing("quick", quick()).await; + log_timing("basic", basic()).await; + log_timing("many_small_messages", many_small_messages()).await; + 
log_timing("few_large_messages", few_large_messages()).await; + // log_timing("payload_limit", payload_limit()).await; + // log_timing("high_contention", high_contention()).await; + // log_timing("high_concurrency", high_concurrency()).await; + // log_timing("high_contention_high_concurrency", high_contention_high_concurrency()).await; +} + +async fn log_timing>(name: &str, fut: F) -> R { + let t = Instant::now(); + println!("'{}' is running...", name); + let ret = fut.await; + let elapsed = t.elapsed(); + println!("'{}' completed in {:.2}s", name, elapsed.as_secs_f32()); + ret +} diff --git a/comms/tests/substream_stress.rs b/comms/tests/substream_stress.rs new file mode 100644 index 0000000000..cbae6e8e52 --- /dev/null +++ b/comms/tests/substream_stress.rs @@ -0,0 +1,160 @@ +// Copyright 2021, The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +mod helpers; +use helpers::create_comms; + +use futures::{channel::mpsc, future, SinkExt, StreamExt}; +use std::time::Duration; +use tari_comms::{ + framing, + protocol::{ProtocolEvent, ProtocolId, ProtocolNotificationRx}, + transports::TcpTransport, + BytesMut, + CommsNode, + Substream, +}; +use tari_shutdown::{Shutdown, ShutdownSignal}; +use tari_test_utils::unpack_enum; +use tokio::{task, time::Instant}; + +const PROTOCOL_NAME: &[u8] = b"test/dummy/protocol"; + +pub async fn spawn_node(signal: ShutdownSignal) -> (CommsNode, ProtocolNotificationRx) { + let (notif_tx, notif_rx) = mpsc::channel(1); + let comms = create_comms(signal) + .add_protocol(&[ProtocolId::from_static(PROTOCOL_NAME)], notif_tx) + .spawn_with_transport(TcpTransport::new()) + .await + .unwrap(); + + comms + .node_identity() + .set_public_address(comms.listening_address().clone()); + (comms, notif_rx) +} + +async fn run_stress_test(num_substreams: usize, num_iterations: usize, payload_size: usize, frame_size: usize) { + let shutdown = Shutdown::new(); + let (node1, _) = spawn_node(shutdown.to_signal()).await; + let (node2, mut notif_rx) = spawn_node(shutdown.to_signal()).await; + + node1 + .peer_manager() + .add_peer(node2.node_identity().to_peer()) + .await + .unwrap(); + + let mut conn = node1 + .connectivity() + .dial_peer(node2.node_identity().node_id().clone()) + .await + .unwrap(); + + let sample = { + let mut buf = BytesMut::with_capacity(payload_size); + 
buf.fill(1); + buf.freeze() + }; + + task::spawn({ + let sample = sample.clone(); + async move { + while let Some(event) = notif_rx.next().await { + unpack_enum!(ProtocolEvent::NewInboundSubstream(_n, remote_substream) = event.event); + let mut remote_substream = framing::canonical(remote_substream, frame_size); + + task::spawn({ + let sample = sample.clone(); + async move { + let mut count = 0; + while let Some(r) = remote_substream.next().await { + r.unwrap(); + count += 1; + remote_substream.send(sample.clone()).await.unwrap(); + + if count == num_iterations { + break; + } + } + + assert_eq!(count, num_iterations); + } + }); + } + } + }); + + let mut substreams = Vec::with_capacity(num_substreams); + for _ in 0..num_substreams { + let substream = conn + .open_framed_substream(&ProtocolId::from_static(PROTOCOL_NAME), frame_size) + .await + .unwrap(); + substreams.push(substream); + } + + let tasks = substreams + .into_iter() + .enumerate() + .map(|(id, mut substream)| { + let sample = sample.clone(); + task::spawn(async move { + let mut count = 1; + // Send first to get the ball rolling + substream.send(sample.clone()).await.unwrap(); + let mut total_time = Duration::from_secs(0); + while let Some(r) = substream.next().await { + r.unwrap(); + count += 1; + let t = Instant::now(); + substream.send(sample.clone()).await.unwrap(); + total_time += t.elapsed(); + if count == num_iterations { + break; + } + } + + println!("[task {}] send time = {:.2?}", id, total_time,); + assert_eq!(count, num_iterations); + total_time + }) + }) + .collect::>(); + + let send_latencies = future::join_all(tasks) + .await + .into_iter() + .map(Result::unwrap) + .collect::>(); + let avg = send_latencies.iter().sum::().as_millis() / send_latencies.len() as u128; + println!("avg t = {}ms", avg); +} + +#[tokio_macros::test] +async fn many_at_frame_limit() { + const NUM_SUBSTREAMS: usize = 20; + const NUM_ITERATIONS_PER_STREAM: usize = 100; + const MAX_FRAME_SIZE: usize = 4 * 1024 * 1024; + 
const PAYLOAD_SIZE: usize = 4 * 1024 * 1024; + run_stress_test(NUM_SUBSTREAMS, NUM_ITERATIONS_PER_STREAM, PAYLOAD_SIZE, MAX_FRAME_SIZE).await; +} diff --git a/infrastructure/derive/Cargo.toml b/infrastructure/derive/Cargo.toml index 9997d53af0..f8d892f863 100644 --- a/infrastructure/derive/Cargo.toml +++ b/infrastructure/derive/Cargo.toml @@ -6,7 +6,7 @@ repository = "https://github.com/tari-project/tari" homepage = "https://tari.com" readme = "README.md" license = "BSD-3-Clause" -version = "0.9.1" +version = "0.9.5" edition = "2018" [lib] diff --git a/infrastructure/shutdown/Cargo.toml b/infrastructure/shutdown/Cargo.toml index e7d4cd1142..1102ec037a 100644 --- a/infrastructure/shutdown/Cargo.toml +++ b/infrastructure/shutdown/Cargo.toml @@ -6,7 +6,7 @@ repository = "https://github.com/tari-project/tari" homepage = "https://tari.com" readme = "README.md" license = "BSD-3-Clause" -version = "0.9.1" +version = "0.9.5" edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/infrastructure/storage/Cargo.toml b/infrastructure/storage/Cargo.toml index 8835f466c7..5db74c5097 100644 --- a/infrastructure/storage/Cargo.toml +++ b/infrastructure/storage/Cargo.toml @@ -6,7 +6,7 @@ repository = "https://github.com/tari-project/tari" homepage = "https://tari.com" readme = "README.md" license = "BSD-3-Clause" -version = "0.9.1" +version = "0.9.5" edition = "2018" [dependencies] diff --git a/infrastructure/test_utils/Cargo.toml b/infrastructure/test_utils/Cargo.toml index 25b94e3e3e..bee18392c9 100644 --- a/infrastructure/test_utils/Cargo.toml +++ b/infrastructure/test_utils/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "tari_test_utils" description = "Utility functions used in Tari test functions" -version = "0.9.1" +version = "0.9.5" authors = ["The Tari Development Community"] edition = "2018" license = "BSD-3-Clause" @@ -9,6 +9,7 @@ license = "BSD-3-Clause" # See more keys and their definitions at 
https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +tari_shutdown = {version="*", path="../shutdown"} futures-test = { version = "^0.3.1" } futures = {version= "^0.3.1"} rand = "0.8" diff --git a/infrastructure/test_utils/src/enums.rs b/infrastructure/test_utils/src/enums.rs index d0f7829dfd..e3dae67e0b 100644 --- a/infrastructure/test_utils/src/enums.rs +++ b/infrastructure/test_utils/src/enums.rs @@ -58,13 +58,20 @@ macro_rules! unpack_enum { v => panic!("Unexpected enum variant '{:?}' given to unpack_enum", v), } }; - ($($enum_key:ident)::+ { $($idents:tt),* } = $enum:expr) => { + ($($enum_key:ident)::+ { $($idents:ident),+ ,.. } = $enum:expr) => { let ($(mut $idents),+) = match $enum { - $($enum_key)::+{$($idents),+} => ($($idents),+), + $($enum_key)::+{$($idents),+, .. } => ($($idents),+), v => panic!("Unexpected enum variant '{:?}' given to unpack_enum", v), }; }; - ($($enum_key:ident)::+ ( $($idents:tt),* ) = $enum:expr) => { + ($($enum_key:ident)::+ { $($idents:ident),+ } = $enum:expr) => { + let ($(mut $idents),+) = match $enum { + $($enum_key)::+{$($idents),+ } => ($($idents),+), + v => panic!("Unexpected enum variant '{:?}' given to unpack_enum", v), + }; + }; + + ($($enum_key:ident)::+ ( $($idents:ident),* ) = $enum:expr) => { let ($(mut $idents),+) = match $enum { $($enum_key)::+($($idents),+) => ($($idents),+), v => panic!("Unexpected enum variant '{:?}' given to unpack_enum", v), diff --git a/infrastructure/test_utils/src/futures/async_assert_eventually.rs b/infrastructure/test_utils/src/futures/async_assert_eventually.rs index a678da8ad0..cddfb29b37 100644 --- a/infrastructure/test_utils/src/futures/async_assert_eventually.rs +++ b/infrastructure/test_utils/src/futures/async_assert_eventually.rs @@ -85,4 +85,11 @@ macro_rules! async_assert { tokio::time::delay_for($interval).await; } }}; + ($check_expr:expr$(,)?) 
=> {{ + async_assert!( + $check_expr, + max_attempts = 10, + interval = std::time::Duration::from_millis(100) + ) + }}; } diff --git a/infrastructure/test_utils/src/runtime.rs b/infrastructure/test_utils/src/runtime.rs index 8c9f9c987c..7b8c5faa57 100644 --- a/infrastructure/test_utils/src/runtime.rs +++ b/infrastructure/test_utils/src/runtime.rs @@ -20,8 +20,9 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use futures::FutureExt; +use futures::{future, FutureExt}; use std::{future::Future, pin::Pin}; +use tari_shutdown::Shutdown; use tokio::{runtime, runtime::Runtime, task, task::JoinError}; pub fn create_runtime() -> Runtime { @@ -35,6 +36,17 @@ pub fn create_runtime() -> Runtime { .expect("Could not create runtime") } +pub fn spawn_until_shutdown(fut: F) -> Shutdown +where F: Future + Send + 'static { + let shutdown = Shutdown::new(); + let signal = shutdown.to_signal(); + task::spawn(async move { + futures::pin_mut!(fut); + future::select(signal, fut).await; + }); + shutdown +} + /// Create a runtime and report if it panics. If there are tasks still running after the panic, this /// will carry on running forever. // #[deprecated(note = "use tokio_macros::test instead")] diff --git a/integration_tests/.eslintrc.json b/integration_tests/.eslintrc.json index b5a271328d..647576d58c 100644 --- a/integration_tests/.eslintrc.json +++ b/integration_tests/.eslintrc.json @@ -1,12 +1,22 @@ { "extends": ["eslint:recommended", "plugin:prettier/recommended"], - "plugins": ["prettier"], + "plugins": ["prettier", "@babel"], "env": { "browser": true, "commonjs": true, "node": true, "es2021": true }, + "parser": "@babel/eslint-parser", // Tell ESLint that you want to use the @babel/eslint-parser. 
+ "parserOptions": { + "ecmaVersion": 2021, + "sourceType": "module", + "allowImportExportEverywhere": false, + "ecmaFeatures": { + "globalReturn": false + }, + "requireConfigFile": false + }, "rules": { "no-unused-vars": [ "error", diff --git a/integration_tests/features/BaseNodeAutoUpdate.feature b/integration_tests/features/BaseNodeAutoUpdate.feature index d27b50b782..4f7041ed60 100644 --- a/integration_tests/features/BaseNodeAutoUpdate.feature +++ b/integration_tests/features/BaseNodeAutoUpdate.feature @@ -1,6 +1,9 @@ @auto_update Feature: AutoUpdate + + # Not sure why this takes so long on CI + @long-running Scenario: Auto update finds a new update Given I have a node NODE_A with auto update enabled Then NODE_A has a new software update diff --git a/integration_tests/features/BaseNodeConnectivity.feature b/integration_tests/features/BaseNodeConnectivity.feature index a8cebb84f4..54f806cff8 100644 --- a/integration_tests/features/BaseNodeConnectivity.feature +++ b/integration_tests/features/BaseNodeConnectivity.feature @@ -14,3 +14,15 @@ Feature: Base Node Connectivity Then I wait for WALLET_A to have 1 node connections Then I wait for WALLET_A to have ONLINE connectivity Then SEED_A is connected to WALLET_A + + Scenario: Base node lists heights + Given I have 1 seed nodes + And I have a base node N1 connected to all seed nodes + When I mine 5 blocks on N1 + Then node N1 lists heights 1 to 5 + + Scenario: Base node lists headers + Given I have 1 seed nodes + And I have a base node BN1 connected to all seed nodes + When I mine 5 blocks on BN1 + Then node BN1 lists headers 1 to 5 with correct heights diff --git a/integration_tests/features/Mempool.feature b/integration_tests/features/Mempool.feature index 412cc084ec..36113cd9b9 100644 --- a/integration_tests/features/Mempool.feature +++ b/integration_tests/features/Mempool.feature @@ -169,3 +169,33 @@ Feature: Mempool Then SENDER has TX11 in MINED state Then SENDER has TX12 in MINED state Then SENDER has TX13 in 
MINED state + + @critical + Scenario: Mempool unconfirmed transactions + Given I have 1 seed nodes + And I have a base node BN1 connected to all seed nodes + When I mine a block on BN1 with coinbase CB1 + When I mine 5 blocks on BN1 + When I create a custom fee transaction TX1 spending CB1 to UTX1 with fee 80 + When I create a custom fee transaction TX2 spending CB1 to UTX1 with fee 80 + When I create a custom fee transaction TX3 spending CB1 to UTX1 with fee 80 + When I create a custom fee transaction TX4 spending CB1 to UTX1 with fee 80 + When I create a custom fee transaction TX5 spending CB1 to UTX1 with fee 80 + When I submit transaction TX1 to BN1 + When I submit transaction TX2 to BN1 + When I submit transaction TX3 to BN1 + When I submit transaction TX4 to BN1 + When I submit transaction TX5 to BN1 + Then I wait until base node BN1 has 5 unconfirmed transactions in its mempool + + @critical + Scenario: Mempool unconfirmed transaction to mined transaction + Given I have 1 seed nodes + And I have a base node BN1 connected to all seed nodes + When I mine a block on BN1 with coinbase CB1 + When I mine 2 blocks on BN1 + When I create a custom fee transaction TX1 spending CB1 to UTX1 with fee 80 + When I submit transaction TX1 to BN1 + Then I wait until base node BN1 has 1 unconfirmed transactions in its mempool + When I mine 1 blocks on BN1 + Then I wait until base node BN1 has 0 unconfirmed transactions in its mempool \ No newline at end of file diff --git a/integration_tests/features/Propagation.feature b/integration_tests/features/Propagation.feature index 339b9afc72..b1cc97eaeb 100644 --- a/integration_tests/features/Propagation.feature +++ b/integration_tests/features/Propagation.feature @@ -81,7 +81,7 @@ Feature: Block Propagation When I wait 10 seconds And mining node MINER mines 5 blocks When I wait 100 seconds - When I start LAG1 + When I start base node LAG1 # Wait for node to so start and get into listening mode When I wait 100 seconds Then node MINER 
is at height 6 diff --git a/integration_tests/features/Recovery.feature b/integration_tests/features/Recovery.feature new file mode 100644 index 0000000000..c123dfa48b --- /dev/null +++ b/integration_tests/features/Recovery.feature @@ -0,0 +1,17 @@ +@recovery +Feature: Recovery + + Scenario Outline: Blockchain database recovery + Given I have 2 seed nodes + And I have a base node B connected to all seed nodes + When I mine blocks on B + Then all nodes are at height + When I stop node B + And I run blockchain recovery on node B + And I start base node B + Then all nodes are at height + Examples: + | NumBlocks | + | 10 | + | 25 | + | 50 | \ No newline at end of file diff --git a/integration_tests/features/Reorgs.feature b/integration_tests/features/Reorgs.feature index a7a3f226f8..008df39e8e 100644 --- a/integration_tests/features/Reorgs.feature +++ b/integration_tests/features/Reorgs.feature @@ -57,10 +57,10 @@ Feature: Reorgs When mining node MINING1 mines 3 blocks with min difficulty 1 and max difficulty 20 And node NODE1 is at height 17 And I stop node NODE1 - And I start PNODE2 + And I start base node PNODE2 When mining node MINING2 mines 6 blocks with min difficulty 20 and max difficulty 1000000 And node PNODE2 is at height 20 - When I start NODE1 + When I start base node NODE1 Then all nodes are at height 20 @critical @reorg @@ -77,7 +77,7 @@ Feature: Reorgs And mining node MINING2 mines 19 blocks with min difficulty 20 and max difficulty 1000000 And node NODE2 is at height 20 And I stop node NODE2 - When I start NODE1 + When I start base node NODE1 And mining node MINING1 mines 3 blocks with min difficulty 1 and max difficulty 20 And node NODE1 is at height 4 When I create a transaction TX1 spending CB1 to UTX1 @@ -87,13 +87,56 @@ Feature: Reorgs And node NODE1 is at height 10 Given I have a pruned node PNODE1 connected to node NODE1 with pruning horizon set to 5 Then node PNODE1 is at height 10 - When I start NODE2 + When I start base node NODE2 Then all 
nodes are at height 20 # Because TX1 should have been re_orged out we should be able to spend CB1 again When I create a transaction TX2 spending CB1 to UTX2 When I submit transaction TX2 to PNODE1 Then PNODE1 has TX2 in MEMPOOL state + @critical @reorg + Scenario: Zero-conf reorg with spending + Given I have a base node NODE1 connected to all seed nodes + Given I have a base node NODE2 connected to node NODE1 + When I mine 14 blocks on NODE1 + When I mine a block on NODE1 with coinbase CB1 + When I mine 4 blocks on NODE1 + When I create a custom fee transaction TX1 spending CB1 to UTX1 with fee 100 + When I create a custom fee transaction TX11 spending UTX1 to UTX11 with fee 100 + When I submit transaction TX1 to NODE1 + When I submit transaction TX11 to NODE1 + When I mine 1 blocks on NODE1 + Then NODE1 has TX1 in MINED state + And NODE1 has TX11 in MINED state + And all nodes are at height 20 + And I stop node NODE1 + And node NODE2 is at height 20 + When I mine a block on NODE2 with coinbase CB2 + When I mine 3 blocks on NODE2 + When I create a custom fee transaction TX2 spending CB2 to UTX2 with fee 100 + When I create a custom fee transaction TX21 spending UTX2 to UTX21 with fee 100 + When I submit transaction TX2 to NODE2 + When I submit transaction TX21 to NODE2 + When I mine 1 blocks on NODE2 + Then node NODE2 is at height 25 + And NODE2 has TX2 in MINED state + And NODE2 has TX21 in MINED state + And I stop node NODE2 + When I start base node NODE1 + And node NODE1 is at height 20 + When I mine a block on NODE1 with coinbase CB3 + When I mine 3 blocks on NODE1 + When I create a custom fee transaction TX3 spending CB3 to UTX3 with fee 100 + When I create a custom fee transaction TX31 spending UTX3 to UTX31 with fee 100 + When I submit transaction TX3 to NODE1 + When I submit transaction TX31 to NODE1 + When I mine 1 blocks on NODE1 + Then NODE1 has TX3 in MINED state + And NODE1 has TX31 in MINED state + And node NODE1 is at height 25 + When I start base 
node NODE2 + Then all nodes are on the same chain tip + Scenario Outline: Massive multiple reorg # # Chain 1a: @@ -104,8 +147,7 @@ Feature: Reorgs And I have a base node NODE_A1 connected to seed SEED_A1 And I have a base node NODE_A2 connected to seed SEED_A1 When I mine blocks on SEED_A1 - Then node NODE_A1 is at height - Then node NODE_A2 is at height + Then all nodes are on the same chain at height # # Chain 1b: # Mine Y1 blocks (orphan_storage_capacity default set to 10) @@ -123,13 +165,8 @@ Feature: Reorgs And I connect node NODE_A1 to node NODE_A3 and wait 1 seconds And I connect node NODE_A2 to node NODE_A4 and wait 1 seconds And I connect node SEED_A1 to node SEED_A2 and wait seconds - Then node SEED_A1 is at the same height as node SEED_A2 When I mine 10 blocks on SEED_A1 - Then node SEED_A2 is at the same height as node SEED_A1 - Then node NODE_A1 is at the same height as node SEED_A1 - Then node NODE_A2 is at the same height as node SEED_A1 - Then node NODE_A3 is at the same height as node SEED_A1 - Then node NODE_A4 is at the same height as node SEED_A1 + Then all nodes are on the same chain tip # # Chain 2a: # Mine X2 blocks (orphan_storage_capacity default set to 10) @@ -158,7 +195,6 @@ Feature: Reorgs And I connect node NODE_B1 to node NODE_B3 and wait 1 seconds And I connect node NODE_B2 to node NODE_B4 and wait 1 seconds And I connect node SEED_B1 to node SEED_B2 and wait seconds - Then node SEED_B1 is at the same height as node SEED_B2 When I mine 10 blocks on SEED_B1 Then node SEED_B2 is at the same height as node SEED_B1 Then node NODE_B1 is at the same height as node SEED_B1 @@ -171,9 +207,8 @@ Feature: Reorgs And I connect node NODE_A1 to node NODE_B1 and wait 1 seconds And I connect node NODE_A3 to node NODE_B3 and wait 1 seconds And I connect node SEED_A1 to node SEED_B1 and wait seconds - Then node SEED_A1 is at the same height as node SEED_B1 When I mine 10 blocks on SEED_A1 - Then all nodes are at the same height as node SEED_A1 + Then 
all nodes are on the same chain tip @critical Examples: | X1 | Y1 | X2 | Y2 | SYNC_TIME | diff --git a/integration_tests/features/StressTest.feature b/integration_tests/features/StressTest.feature index a72d9f7a52..0425f45644 100644 --- a/integration_tests/features/StressTest.feature +++ b/integration_tests/features/StressTest.feature @@ -76,4 +76,4 @@ Feature: Stress Test # to the mempool Then while merge mining via PROXY all transactions in wallet WALLET_A are found to be Mined_Confirmed # Then wallet WALLET_B detects all transactions as Mined_Confirmed - Then while mining via NODE1 all transactions in wallet WALLET_B are found to be Mined_Confirmed \ No newline at end of file + Then while mining via NODE1 all transactions in wallet WALLET_B are found to be Mined_Confirmed diff --git a/integration_tests/features/Sync.feature b/integration_tests/features/Sync.feature index 71071dc23e..456942ac83 100644 --- a/integration_tests/features/Sync.feature +++ b/integration_tests/features/Sync.feature @@ -25,7 +25,7 @@ Feature: Block Sync # All nodes should sync to tip Then all nodes are at height 20 - @critical + @critical Scenario: Pruned mode simple sync Given I have 1 seed nodes Given I have a SHA3 miner NODE1 connected to all seed nodes @@ -36,7 +36,7 @@ Feature: Block Sync Given I have a pruned node PNODE1 connected to node NODE1 with pruning horizon set to 5 Then all nodes are at height 20 -@critical + @critical Scenario: When a new node joins the network, it should receive all peers Given I have 10 seed nodes And I have a base node NODE1 connected to all seed nodes @@ -59,10 +59,10 @@ Feature: Block Sync And mining node MINER1 mines 5 blocks with min difficulty 1 and max difficulty 1 Then node NODE1 is at height 10 Given I stop node NODE1 - And I start NODE2 + And I start base node NODE2 And mining node MINER2 mines 7 blocks with min difficulty 11 and max difficulty 100000 Then node NODE2 is at height 12 - When I start NODE1 + When I start base node NODE1 Then all 
nodes are on the same chain at height 12 @critical @reorg @long-running @@ -79,10 +79,10 @@ Feature: Block Sync And mining node MINER1 mines 1001 blocks with min difficulty 1 and max difficulty 10 Then node NODE1 is at height 1006 Given I stop node NODE1 - And I start NODE2 + And I start base node NODE2 And mining node MINER2 mines 1500 blocks with min difficulty 11 and max difficulty 100000 Then node NODE2 is at height 1505 - When I start NODE1 + When I start base node NODE1 Then all nodes are on the same chain at height 1505 @critical @@ -103,6 +103,25 @@ Feature: Block Sync When I mine 15 blocks on PNODE2 Then all nodes are at height 23 + Scenario: Node should not sync from pruned node + Given I have a base node NODE1 connected to all seed nodes + Given I have a pruned node PNODE1 connected to node NODE1 with pruning horizon set to 5 + When I mine 40 blocks on NODE1 + Then all nodes are at height 40 + When I stop node NODE1 + Given I have a base node NODE2 connected to node PNODE1 + Given I have a pruned node PNODE2 connected to node PNODE1 with pruning horizon set to 5 + When I mine 5 blocks on NODE2 + Then node NODE2 is at height 5 + Then node PNODE2 is at height 40 + When I start base node NODE1 + # We need to wait for the node to boot up and supply node 2 with blocks + And I connect node NODE2 to node NODE1 and wait 5 seconds + # NODE2 may initially try to sync from PNODE1 and PNODE2, then eventually try to sync from NODE1; mining blocks + # on NODE1 will make this test less flaky and force NODE2 to sync from NODE1 much quicker + When I mine 10 blocks on NODE1 + Then all nodes are at height 50 + Scenario Outline: Syncing node while also mining before tip sync Given I have a seed node SEED And I have wallet WALLET1 connected to seed node SEED @@ -113,7 +132,7 @@ Feature: Block Sync And I stop node SYNCER When mining node MINER mines blocks with min difficulty 1 and max difficulty 9999999999 Then node SEED is at height - When I start SYNCER + When I start base node SYNCER
# Try to mine much faster than block sync, but still producing a lower accumulated difficulty And mining node MINER2 mines blocks with min difficulty 1 and max difficulty 10 # Allow reorg to filter through @@ -136,7 +155,7 @@ Feature: Block Sync | X1 | Y1 | SYNC_TIME | | 1000 | 50 | 60 | -Scenario: Pruned mode network only + Scenario: Pruned mode network only Given I have a base node NODE1 connected to all seed nodes Given I have a pruned node PNODE1 connected to node NODE1 with pruning horizon set to 5 Given I have a pruned node PNODE2 connected to node PNODE1 with pruning horizon set to 5 @@ -151,3 +170,24 @@ Scenario: Pruned mode network only Then node PNODE2 is at height 20 Given I have a pruned node PNODE3 connected to node PNODE1 with pruning horizon set to 5 Then node PNODE3 is at height 20 + + Scenario Outline: Force sync many nodes against one peer + Given I have a base node BASE + And I have a SHA3 miner MINER connected to node BASE + And mining node MINER mines blocks + And I have base nodes with pruning horizon force syncing on node BASE + When I wait seconds + Then all nodes are at height + + @critical @long-running + Examples: + | NODES | BLOCKS | PRUNE_HORIZON | SYNC_TIME | + | 5 | 100 | 0 | 30 | + | 10 | 100 | 0 | 30 | + | 20 | 100 | 0 | 30 | + | 5 | 1001 | 0 | 60 | + | 10 | 1001 | 0 | 60 | + | 20 | 1001 | 0 | 60 | + | 5 | 1001 | 100 | 90 | + | 10 | 1001 | 100 | 90 | + | 20 | 1001 | 100 | 90 | diff --git a/integration_tests/features/TransactionInfo.feature b/integration_tests/features/TransactionInfo.feature index 5c30c5f3c7..24a632860c 100644 --- a/integration_tests/features/TransactionInfo.feature +++ b/integration_tests/features/TransactionInfo.feature @@ -12,7 +12,7 @@ Scenario: Get Transaction Info # We need to ensure the coinbase lock heights are gone; mine enough blocks When I merge mine 4 blocks via PROXY Then all nodes are at height 4 - Then I list all coinbase transactions for wallet WALLET_A + Then I list all COINBASE transactions for
wallet WALLET_A When I wait for wallet WALLET_A to have at least 1002000 uT And I send 1000000 uT from wallet WALLET_A to wallet WALLET_B at fee 100 Then wallet WALLET_A detects all transactions are at least Pending diff --git a/integration_tests/features/WalletBaseNodeSwitch.feature b/integration_tests/features/WalletBaseNodeSwitch.feature deleted file mode 100644 index 403b35f674..0000000000 --- a/integration_tests/features/WalletBaseNodeSwitch.feature +++ /dev/null @@ -1,10 +0,0 @@ -Feature: Wallet Base Node Switch - - @doit - Scenario: As a user I want to change base node for a wallet - Given I have a base node Node1 connected to all seed nodes - And I have a base node Node2 connected to all seed nodes - And I have wallet Wallet connected to base node Node1 - When I stop wallet Wallet - And change base node of Wallet to Node2 - Then I wait for Wallet to connect to Node2 diff --git a/integration_tests/features/WalletCli.feature b/integration_tests/features/WalletCli.feature new file mode 100644 index 0000000000..92bfc364ee --- /dev/null +++ b/integration_tests/features/WalletCli.feature @@ -0,0 +1,131 @@ +Feature: Wallet CLI + + Scenario: As a user I want to change base node for a wallet via command line + Given I have a base node NODE1 connected to all seed nodes + And I have a base node NODE2 connected to all seed nodes + And I have wallet WALLET connected to base node NODE1 + Then I change base node of WALLET to NODE2 via command line + + Scenario: As a user I want to set and clear custom base node for a wallet via command line + Given I have a base node NODE1 + And I have a base node NODE2 + And I have wallet WALLET connected to base node NODE1 + Then I set custom base node of WALLET to NODE2 via command line + And I clear custom base node of wallet WALLET via command line + + Scenario: As a user I want to change password via command line + Given I have wallet WALLET connected to all seed nodes + When I stop wallet WALLET + And I change the password of 
wallet WALLET to changedpwd via command line + Then the password of wallet WALLET is not kensentme + Then the password of wallet WALLET is changedpwd + + Scenario: As a user I want to get balance via command line + Given I have a base node BASE + And I have wallet WALLET connected to base node BASE + And I have mining node MINE connected to base node BASE and wallet WALLET + And mining node MINE mines 5 blocks + Then I wait for wallet WALLET to have at least 1000000 uT + And I stop wallet WALLET + Then I get balance of wallet WALLET is at least 1000000 uT via command line + + Scenario: As a user I want to send tari via command line + Given I have a seed node SEED + And I have a base node BASE connected to seed SEED + And I have wallet SENDER connected to base node BASE + And I have wallet RECEIVER connected to base node BASE + And I have mining node MINE connected to base node BASE and wallet SENDER + And mining node MINE mines 5 blocks + Then I wait for wallet SENDER to have at least 1100000 uT + # TODO: Remove this wait when the wallet CLI commands involving transactions will only commence with a valid + # TODO: base node connection. 
+ And I wait 30 seconds + And I stop wallet SENDER + And I send 1000000 uT from SENDER to RECEIVER via command line + Then wallet SENDER has at least 1 transactions that are all TRANSACTION_STATUS_BROADCAST and valid + Then wallet RECEIVER has at least 1 transactions that are all TRANSACTION_STATUS_BROADCAST and valid + And mining node MINE mines 5 blocks + Then I wait for wallet RECEIVER to have at least 1000000 uT + + Scenario: As a user I want to send one-sided via command line + Given I have a seed node SEED + And I have a base node BASE connected to seed SEED + And I have wallet SENDER connected to base node BASE + And I have wallet RECEIVER connected to base node BASE + And I have mining node MINE connected to base node BASE and wallet SENDER + And mining node MINE mines 5 blocks + Then I wait for wallet SENDER to have at least 1100000 uT + # TODO: Remove this wait when the wallet CLI commands involving transactions will only commence with a valid + # TODO: base node connection. + And I wait 30 seconds + And I stop wallet SENDER + And I send one-sided 1000000 uT from SENDER to RECEIVER via command line + Then wallet SENDER has at least 1 transactions that are all TRANSACTION_STATUS_BROADCAST and valid + And mining node MINE mines 5 blocks + Then I wait for wallet RECEIVER to have at least 1000000 uT + + Scenario: As a user I want to make-it-rain via command line + Given I have a seed node SEED + And I have a base node BASE connected to seed SEED + And I have wallet SENDER connected to base node BASE + And I have wallet RECEIVER connected to base node BASE + And I have mining node MINE connected to base node BASE and wallet SENDER + And mining node MINE mines 15 blocks + Then wallets SENDER should have 12 spendable coinbase outputs + # TODO: Remove this wait when the wallet CLI commands involving transactions will only commence with a valid + # TODO: base node connection. 
+ And I wait 30 seconds + And I stop wallet SENDER + And I make it rain from wallet SENDER 1 tx / sec 10 sec 8000 uT 100 increment to RECEIVER via command line + Then wallet SENDER has at least 10 transactions that are all TRANSACTION_STATUS_BROADCAST and valid + Then wallet RECEIVER has at least 10 transactions that are all TRANSACTION_STATUS_BROADCAST and valid + And mining node MINE mines 5 blocks + Then I wait for wallet RECEIVER to have at least 84500 uT + + Scenario: As a user I want to coin-split via command line + Given I have a seed node SEED + And I have a base node BASE connected to seed SEED + And I have wallet WALLET connected to base node BASE + And I have mining node MINE connected to base node BASE and wallet WALLET + And mining node MINE mines 4 blocks + Then I wait for wallet WALLET to have at least 1100000 uT + # TODO: Remove this wait when the wallet CLI commands involving transactions will only commence with a valid + # TODO: base node connection. + And I wait 30 seconds + And I stop wallet WALLET + And I do coin split on wallet WALLET to 10000 uT 10 coins via command line + Then wallet WALLET has at least 1 transactions that are all TRANSACTION_STATUS_BROADCAST and valid + And mining node MINE mines 5 blocks + Then wallet WALLET has at least 1 transactions that are all TRANSACTION_STATUS_MINED_CONFIRMED and valid + And I stop wallet WALLET + Then I get count of utxos of wallet WALLET and it's at least 10 via command line + + Scenario: As a user I want to count utxos via command line + Given I have a base node BASE + And I have wallet WALLET connected to base node BASE + And I have mining node MINE connected to base node BASE and wallet WALLET + And mining node MINE mines 4 blocks + Then I wait for wallet WALLET to have at least 1000000 uT + And I stop wallet WALLET + Then I get count of utxos of wallet WALLET and it's at least 1 via command line + + Scenario: As a user I want to export utxos via command line + Given I have a base node BASE + 
And I have wallet WALLET connected to base node BASE + And I have mining node MINE connected to base node BASE and wallet WALLET + And mining node MINE mines 4 blocks + Then I wait for wallet WALLET to have at least 1000000 uT + And I export the utxos of wallet WALLET via command line + + Scenario: As a user I want to discover-peer via command line + Given I have a seed node SEED + And I have a base node BASE1 connected to seed SEED + And I have a base node BASE2 connected to seed SEED + And I have wallet WALLET connected to base node BASE1 + And I discover peer BASE2 on wallet WALLET via command line + Then WALLET is connected to BASE2 + + Scenario: As a user I want to run whois via command line + Given I have a base node BASE + And I have wallet WALLET connected to base node BASE + Then I run whois BASE on wallet WALLET via command line diff --git a/integration_tests/features/WalletFFI.feature b/integration_tests/features/WalletFFI.feature new file mode 100644 index 0000000000..11b905051e --- /dev/null +++ b/integration_tests/features/WalletFFI.feature @@ -0,0 +1,129 @@ +@wallet-ffi +Feature: Wallet FFI + + Scenario: As a client I want to send Tari to a Public Key + # It's a subtest of "As a client I want to retrieve a list of transactions I have made and received" + + Scenario: As a client I want to specify a custom fee when I send tari + # It's a subtest of "As a client I want to retrieve a list of transactions I have made and received" + + Scenario: As a client I want to receive Tari via my Public Key while I am online + # It's a subtest of "As a client I want to retrieve a list of transactions I have made and received" + + @long-running @broken + Scenario: As a client I want to receive Tari via my Public Key sent while I am offline when I come back online + Given I have a base node BASE + And I have wallet SENDER connected to base node BASE + And I have mining node MINER connected to base node BASE and wallet SENDER + And mining node MINER mines 4 blocks + 
Then I wait for wallet SENDER to have at least 1000000 uT + And I have a ffi wallet FFI_WALLET connected to base node BASE + And I stop wallet FFI_WALLET + And I wait 5 seconds + And I send 2000000 uT from wallet SENDER to wallet FFI_WALLET at fee 100 + And I wait 5 seconds + And I start wallet FFI_WALLET + And wallet SENDER detects all transactions are at least Broadcast + And mining node MINER mines 10 blocks + Then I wait for ffi wallet FFI_WALLET to have at least 1000000 uT + + @long-running + Scenario: As a client I want to retrieve a list of transactions I have made and received + Given I have a base node BASE + And I have wallet SENDER connected to base node BASE + And I have mining node MINER connected to base node BASE and wallet SENDER + And mining node MINER mines 4 blocks + Then I wait for wallet SENDER to have at least 1000000 uT + And I have a ffi wallet FFI_WALLET connected to base node BASE + And I send 2000000 uT from wallet SENDER to wallet FFI_WALLET at fee 100 + And wallet SENDER detects all transactions are at least Broadcast + And mining node MINER mines 10 blocks + Then I wait for ffi wallet FFI_WALLET to have at least 1000000 uT + And Check callbacks for finished inbound tx on ffi wallet FFI_WALLET + And I have wallet RECEIVER connected to base node BASE + And I send 1000000 uT from ffi wallet FFI_WALLET to wallet RECEIVER at fee 100 + And ffi wallet FFI_WALLET has 1 broadcast transaction + And mining node MINER mines 4 blocks + Then I wait for wallet RECEIVER to have at least 1000000 uT + And Check callbacks for finished outbound tx on ffi wallet FFI_WALLET + And I have 1 received and 1 send transaction in ffi wallet FFI_WALLET + And I start STXO validation on wallet FFI_WALLET + And I start UTXO validation on wallet FFI_WALLET + + # It's just calling the encrypt function, we don't test if it's actually encrypted + Scenario: As a client I want to be able to protect my wallet with a passphrase + Given I have a base node BASE + And I have a 
ffi wallet FFI_WALLET connected to base node BASE + And I set passphrase PASSPHRASE of ffi wallet FFI_WALLET + + Scenario: As a client I want to manage contacts + Given I have a base node BASE + And I have a ffi wallet FFI_WALLET connected to base node BASE + And I have wallet WALLET connected to base node BASE + And I add contact with alias ALIAS and pubkey WALLET to ffi wallet FFI_WALLET + Then I have contact with alias ALIAS and pubkey WALLET in ffi wallet FFI_WALLET + When I remove contact with alias ALIAS from ffi wallet FFI_WALLET + Then I don't have contact with alias ALIAS in ffi wallet FFI_WALLET + + Scenario: As a client I want to set the base node (should be persisted) + Given I have a base node BASE1 + Given I have a base node BASE2 + And I have a ffi wallet FFI_WALLET connected to base node BASE1 + And I set base node BASE2 for ffi wallet FFI_WALLET + Then BASE2 is connected to FFI_WALLET + And I stop wallet FFI_WALLET + And I wait 5 seconds + And I start wallet FFI_WALLET + Then BASE2 is connected to FFI_WALLET + + Scenario: As a client I want to see my public_key, emoji ID, address (whoami) + Given I have a base node BASE + And I have a ffi wallet FFI_WALLET connected to base node BASE + Then I want to get public key of ffi wallet FFI_WALLET + And I want to get emoji id of ffi wallet FFI_WALLET + + Scenario: As a client I want to get my balance + # It's a subtest of "As a client I want to retrieve a list of transactions I have made and received" + + @long-running + Scenario: As a client I want to cancel a transaction + Given I have a base node BASE + And I have wallet SENDER connected to base node BASE + And I have mining node MINER connected to base node BASE and wallet SENDER + And mining node MINER mines 4 blocks + Then I wait for wallet SENDER to have at least 1000000 uT + And I have a ffi wallet FFI_WALLET connected to base node BASE + And I send 2000000 uT from wallet SENDER to wallet FFI_WALLET at fee 100 + And wallet SENDER detects all 
transactions are at least Broadcast + And mining node MINER mines 10 blocks + Then I wait for ffi wallet FFI_WALLET to have at least 1000000 uT + And I have wallet RECEIVER connected to base node BASE + And I stop wallet RECEIVER + And I send 1000000 uT from ffi wallet FFI_WALLET to wallet RECEIVER at fee 100 + Then I wait for ffi wallet FFI_WALLET to have 1 pending outbound transaction + Then I cancel all transactions on ffi wallet FFI_WALLET and it will cancel 1 transaction + + @long-running + Scenario: As a client I want to be able to restore my wallet from seed words + Given I have a base node BASE + And I have wallet WALLET connected to base node BASE + And I have mining node MINER connected to base node BASE and wallet WALLET + And mining node MINER mines 4 blocks + Then I wait for wallet WALLET to have at least 1000000 uT + Then I recover wallet WALLET into ffi wallet FFI_WALLET from seed words on node BASE + And I wait for recovery of wallet FFI_WALLET to finish + And I wait for ffi wallet FFI_WALLET to have at least 1000000 uT + + Scenario: As a client I want to be able to initiate TXO and TX validation with the specified base node.
+ # It's a subtest of "As a client I want to retrieve a list of transactions I have made and received" + + Scenario: As a client I want async feedback about the progress of sending and receiving a transaction + # It's a subtest of "As a client I want to retrieve a list of transactions I have made and received" + + Scenario: As a client I want async feedback about my connection status to the specified Base Node + + Scenario: As a client I want async feedback about the wallet restoration process + # As a client I want to be able to restore my wallet from seed words + + Scenario: As a client I want async feedback about TXO and TX validation processes +# It's a subtest of "As a client I want to retrieve a list of transactions I have made and received" diff --git a/integration_tests/features/WalletMonitoring.feature b/integration_tests/features/WalletMonitoring.feature index 3c6ced3ba8..09875c8306 100644 --- a/integration_tests/features/WalletMonitoring.feature +++ b/integration_tests/features/WalletMonitoring.feature @@ -1,26 +1,80 @@ @coinbase_reorg Feature: Wallet Monitoring - @long-running Scenario: Wallets monitoring coinbase after a reorg # # Chain 1: - # Collects 10 coinbases into one wallet, send 7 transactions + # Collects 10 coinbases into one wallet # Given I have a seed node SEED_A # Add multiple base nodes to ensure more robust comms And I have a base node NODE_A1 connected to seed SEED_A And I have wallet WALLET_A1 connected to seed node SEED_A - And I have wallet WALLET_A2 connected to seed node SEED_A - And I have a merge mining proxy PROXY_A connected to SEED_A and WALLET_A1 with default config - When I merge mine 10 blocks via PROXY_A + And I have mining node MINING_A connected to base node SEED_A and wallet WALLET_A1 + And mining node MINING_A mines 10 blocks Then all nodes are at height 10 - And I list all coinbase transactions for wallet WALLET_A1 + And I list all COINBASE transactions for wallet WALLET_A1 Then wallet WALLET_A1 has 10 coinbase
transactions + Then all COINBASE transactions for wallet WALLET_A1 are valid + Then wallet WALLET_A1 detects at least 7 coinbase transactions as Mined_Confirmed + When I wait 1 seconds + # + # Chain 2: + # Collects 10 coinbases into one wallet + # + And I have a seed node SEED_B + # Add multiple base nodes to ensure more robust comms + And I have a base node NODE_B1 connected to seed SEED_B + And I have wallet WALLET_B1 connected to seed node SEED_B + And I have mining node MINING_B connected to base node SEED_B and wallet WALLET_B1 + And mining node MINING_B mines 10 blocks + Then all nodes are at height 10 + And I list all COINBASE transactions for wallet WALLET_B1 + Then wallet WALLET_B1 has 10 coinbase transactions + Then all COINBASE transactions for wallet WALLET_B1 are valid + Then wallet WALLET_B1 detects at least 7 coinbase transactions as Mined_Confirmed + When I wait 1 seconds + # + # Connect Chain 1 and 2 + # + And I have a SHA3 miner NODE_C connected to all seed nodes + # Wait for the reorg to filter through + When I wait 30 seconds + Then all nodes are at height 10 + # When tip advances past required confirmations, invalid coinbases still being monitored will be cancelled. 
+ And mining node NODE_C mines 6 blocks + Then all nodes are at height 16 + # Wait for coinbase statuses to change in the wallet + When I wait 30 seconds + And I list all COINBASE transactions for wallet WALLET_A1 + And I list all COINBASE transactions for wallet WALLET_B1 + Then the number of coinbase transactions for wallet WALLET_A1 and wallet WALLET_B1 are 3 less + # TODO: Uncomment this step when wallets can handle reorg +# Then all COINBASE transactions for wallet WALLET_A1 and wallet WALLET_B1 have consistent but opposing validity + + Scenario: Wallets monitoring normal transactions after a reorg + # + # Chain 1: + # Collects 10 coinbases into one wallet, send 7 transactions + # + Given I have a seed node SEED_A + # Add multiple base nodes to ensure more robust comms + And I have a base node NODE_A1 connected to seed SEED_A + And I have wallet WALLET_A1 connected to seed node SEED_A + And I have wallet WALLET_A2 connected to seed node SEED_A + And I have mining node MINING_A connected to base node SEED_A and wallet WALLET_A1 + And mining node MINING_A mines 10 blocks + Then node SEED_A is at height 10 + Then node NODE_A1 is at height 10 Then wallet WALLET_A1 detects at least 7 coinbase transactions as Mined_Confirmed # Use 7 of the 10 coinbase UTXOs in transactions (others require 3 confirmations) And I multi-send 7 transactions of 1000000 uT from wallet WALLET_A1 to wallet WALLET_A2 at fee 100 Then wallet WALLET_A1 detects all transactions are at least Broadcast + When I mine 100 blocks on SEED_A + Then node SEED_A is at height 110 + Then node NODE_A1 is at height 110 + Then wallet WALLET_A2 detects all transactions as Mined_Confirmed + Then all NORMAL transactions for wallet WALLET_A1 are valid When I wait 1 seconds # # Chain 2: @@ -31,15 +85,19 @@ Feature: Wallet Monitoring And I have a base node NODE_B1 connected to seed SEED_B And I have wallet WALLET_B1 connected to seed node SEED_B And I have wallet WALLET_B2 connected to seed node SEED_B - And I have 
a merge mining proxy PROXY_B connected to SEED_B and WALLET_B1 with default config - When I merge mine 10 blocks via PROXY_B - Then all nodes are at height 10 - And I list all coinbase transactions for wallet WALLET_B1 - Then wallet WALLET_B1 has 10 coinbase transactions + And I have mining node MINING_B connected to base node SEED_B and wallet WALLET_B1 + And mining node MINING_B mines 10 blocks + Then node SEED_B is at height 10 + Then node NODE_B1 is at height 10 Then wallet WALLET_B1 detects at least 7 coinbase transactions as Mined_Confirmed # Use 7 of the 10 coinbase UTXOs in transactions (others require 3 confirmations) And I multi-send 7 transactions of 1000000 uT from wallet WALLET_B1 to wallet WALLET_B2 at fee 100 Then wallet WALLET_B1 detects all transactions are at least Broadcast + When I mine 100 blocks on SEED_B + Then node SEED_B is at height 110 + Then node NODE_B1 is at height 110 + Then wallet WALLET_B2 detects all transactions as Mined_Confirmed + Then all NORMAL transactions for wallet WALLET_B1 are valid When I wait 1 seconds # # Connect Chain 1 and 2 @@ -47,15 +105,21 @@ Feature: Wallet Monitoring And I have a SHA3 miner NODE_C connected to all seed nodes # Wait for the reorg to filter through When I wait 30 seconds - Then all nodes are at height 10 + Then all nodes are at height 110 # When tip advances past required confirmations, invalid coinbases still being monitored will be cancelled. 
And mining node NODE_C mines 6 blocks - Then all nodes are at height 16 + Then all nodes are at height 116 # Wait for coinbase statuses to change in the wallet When I wait 30 seconds - And I list all coinbase transactions for wallet WALLET_A1 - And I list all coinbase transactions for wallet WALLET_B1 - Then the number of coinbase transactions for wallet WALLET_A1 and wallet WALLET_B1 are 3 less + And I list all NORMAL transactions for wallet WALLET_A1 + And I list all NORMAL transactions for wallet WALLET_B1 + # TODO: Uncomment this step when wallets can handle reorg +# Then all NORMAL transactions for wallet WALLET_A1 and wallet WALLET_B1 have consistent but opposing validity + And I list all NORMAL transactions for wallet WALLET_A2 + And I list all NORMAL transactions for wallet WALLET_B2 + # TODO: Uncomment this step when wallets can handle reorg +# Then all NORMAL transactions for wallet WALLET_A2 and wallet WALLET_B2 have consistent but opposing validity + When I wait 1 seconds Scenario Outline: Verify all coinbases in hybrid mining are accounted for Given I have a seed node SEED_A @@ -70,23 +134,24 @@ Feature: Wallet Monitoring And I have mining node MINER2 connected to base node NODE2 and wallet WALLET2 When I co-mine blocks via merge mining proxy PROXY1 and mining node MINER2 - Then node NODE1 is at the same height as node NODE2 - Then node SEED_A is at the same height as node NODE1 + # This wait is here to give a chance for re-orgs to settle out + Then I wait 30 seconds + Then all nodes are on the same chain at height And mining node MINER_SEED_A mines 5 blocks - Then all nodes are at the same height as node SEED_A + Then all nodes are on the same chain at height When I wait 1 seconds - Then wallets WALLET1,WALLET2 account for all valid spendable coinbase transactions on the blockchain - @critical + Then wallets WALLET1,WALLET2 should have spendable coinbase outputs + + @flaky Examples: - | numBlocks | - | 10 | - | 100 | + | numBlocks | endBlocks | + | 10 
| 15 | + | 100 | 105 | @long-running Examples: - | numBlocks | - | 1000 | - | 4500 | - + | numBlocks | endBlocks | + | 1000 | 1005 | + | 4500 | 4505 | diff --git a/integration_tests/features/WalletPasswordChange.feature b/integration_tests/features/WalletPasswordChange.feature deleted file mode 100644 index 37625650a8..0000000000 --- a/integration_tests/features/WalletPasswordChange.feature +++ /dev/null @@ -1,8 +0,0 @@ -Feature: Wallet Password Change - - Scenario: As a user I want to change password - Given I have wallet Wallet connected to all seed nodes - When I stop wallet Wallet - And I change the password of wallet Wallet to changedpwd - Then the password of wallet Wallet is not kensentme - Then the password of wallet Wallet is changedpwd \ No newline at end of file diff --git a/integration_tests/features/WalletRecovery.feature b/integration_tests/features/WalletRecovery.feature index f52be08cce..bf6147472d 100644 --- a/integration_tests/features/WalletRecovery.feature +++ b/integration_tests/features/WalletRecovery.feature @@ -20,6 +20,24 @@ Feature: Wallet Recovery Then all nodes are at height 20 Then I wait for wallet WALLET_C to have at least 100000 uT + Scenario Outline: Multiple Wallet recovery from seed node + Given I have a seed node NODE + And I have wallet WALLET_A connected to all seed nodes + And I have a merge mining proxy PROXY connected to NODE and WALLET_A with default config + When I merge mine 15 blocks via PROXY + When I wait for wallet WALLET_A to have at least 55000000000 uT + Then all nodes are at height 15 + When I recover wallet WALLET_A into wallets connected to all seed nodes + When I wait for wallets to have at least 55000000000 uT + Then Wallet WALLET_A and wallets have the same balance + @critical + Examples: + | NumWallets | + | 1 | + | 2 | + | 5 | + | 10 | + # fails often on circle CI @critical @flaky Scenario: Recover one-sided payments diff --git a/integration_tests/features/WalletTransactions.feature 
b/integration_tests/features/WalletTransactions.feature index ab899f508a..e0ecc452b8 100644 --- a/integration_tests/features/WalletTransactions.feature +++ b/integration_tests/features/WalletTransactions.feature @@ -12,6 +12,7 @@ Feature: Wallet Transactions And I have wallet WALLET_B connected to all seed nodes Then I send a one-sided transaction of 1000000 uT from WALLET_A to WALLET_B at fee 100 Then I send a one-sided transaction of 1000000 uT from WALLET_A to WALLET_B at fee 100 + Then wallet WALLET_A detects all transactions are at least Broadcast When I merge mine 5 blocks via PROXY Then all nodes are at height 20 Then I wait for wallet WALLET_B to have at least 2000000 uT @@ -116,7 +117,31 @@ Feature: Wallet Transactions Then I wait for wallet WALLET_IMPORTED to have less than 1 uT Then I check if last imported transactions are invalid in wallet WALLET_IMPORTED -Scenario: Wallet should display all transactions made + @critical + Scenario: Wallet imports faucet UTXO + Given I have a seed node NODE + And I have 1 base nodes connected to all seed nodes + And I have wallet WALLET_A connected to all seed nodes + And I have a merge mining proxy PROXY connected to NODE and WALLET_A with default config + When I merge mine 5 blocks via PROXY + Then all nodes are at height 5 + Then I wait for wallet WALLET_A to have at least 10000000000 uT + When I have wallet WALLET_B connected to all seed nodes + And I send 1000000 uT from wallet WALLET_A to wallet WALLET_B at fee 100 + When I merge mine 5 blocks via PROXY + Then all nodes are at height 10 + Then I wait for wallet WALLET_B to have at least 1000000 uT + Then I stop wallet WALLET_B + When I have wallet WALLET_C connected to all seed nodes + Then I import WALLET_B unspent outputs as faucet outputs to WALLET_C + Then I wait for wallet WALLET_C to have at least 1000000 uT + And I send 500000 uT from wallet WALLET_C to wallet WALLET_A at fee 100 + Then wallet WALLET_C detects all transactions are at least Broadcast + When 
I merge mine 5 blocks via PROXY + Then all nodes are at height 15 + Then I wait for wallet WALLET_C to have at least 400000 uT + + Scenario: Wallet should display all transactions made Given I have a seed node NODE And I have 1 base nodes connected to all seed nodes And I have wallet WALLET_A connected to all seed nodes @@ -137,3 +162,47 @@ Scenario: Wallet should display all transactions made Then I check if wallet WALLET_B has 5 transactions Then I restart wallet WALLET_B Then I check if wallet WALLET_B has 5 transactions + + @critical + Scenario: Wallet SAF negotiation and cancellation with offline peers + Given I have a seed node NODE + And I have 1 base nodes connected to all seed nodes + And I have wallet WALLET_A connected to all seed nodes + And I have mining node MINER connected to base node NODE and wallet WALLET_A + And mining node MINER mines 5 blocks + Then all nodes are at height 5 + Then I wait for wallet WALLET_A to have at least 10000000000 uT + And I have non-default wallet WALLET_SENDER connected to all seed nodes using StoreAndForwardOnly + And I send 100000000 uT from wallet WALLET_A to wallet WALLET_SENDER at fee 100 + When wallet WALLET_SENDER detects all transactions are at least Broadcast + And mining node MINER mines 5 blocks + Then all nodes are at height 10 + Then I wait for wallet WALLET_SENDER to have at least 100000000 uT + And I have wallet WALLET_RECV connected to all seed nodes + And I stop wallet WALLET_RECV + And I send 1000000 uT from wallet WALLET_SENDER to wallet WALLET_RECV at fee 100 + When wallet WALLET_SENDER detects last transaction is Pending + Then I stop wallet WALLET_SENDER + And I start wallet WALLET_RECV + And I wait for 5 seconds + When wallet WALLET_RECV detects all transactions are at least Pending + Then I cancel last transaction in wallet WALLET_RECV + Then I stop wallet WALLET_RECV + And I start wallet WALLET_SENDER + # This is a weirdness that I haven't been able to figure out. 
When you start WALLET_SENDER on the line above it + # requests SAF messages from the base nodes the base nodes get the request and attempt to send the stored messages + # but the connection fails. It requires a second reconnection and request for the SAF messages to be delivered. + And I wait for 5 seconds + Then I restart wallet WALLET_SENDER + And I wait for 5 seconds + Then I restart wallet WALLET_SENDER + When wallet WALLET_SENDER detects all transactions are at least Broadcast + And mining node MINER mines 5 blocks + Then all nodes are at height 15 + When wallet WALLET_SENDER detects all transactions as Mined_Confirmed + And I start wallet WALLET_RECV + And I wait for 5 seconds + Then I restart wallet WALLET_RECV + And I wait for 5 seconds + Then I restart wallet WALLET_RECV + Then I wait for wallet WALLET_RECV to have at least 1000000 uT \ No newline at end of file diff --git a/integration_tests/features/support/steps.js b/integration_tests/features/support/steps.js index dd8c00fab1..fcedbaeeff 100644 --- a/integration_tests/features/support/steps.js +++ b/integration_tests/features/support/steps.js @@ -11,7 +11,7 @@ const { getTransactionOutputHash, sleep, consoleLogBalance, - consoleLogCoinbaseDetails, + consoleLogTransactionDetails, withTimeout, } = require("../../helpers/util"); const { ConnectivityStatus, PaymentType } = require("../../helpers/types"); @@ -280,6 +280,9 @@ Given( wallet.setPeerSeeds([this.seeds[seedName].peerAddress()]); await wallet.startNew(); this.addWallet(walletName, wallet); + let walletClient = await this.getWallet(walletName).connectClient(); + let walletInfo = await walletClient.identify(); + this.addWalletPubkey(walletName, walletInfo.public_key); } ); @@ -296,6 +299,9 @@ Given( wallet.setPeerSeeds([this.seedAddresses()]); await wallet.startNew(); this.addWallet(name, wallet); + let walletClient = await this.getWallet(name).connectClient(); + let walletInfo = await walletClient.identify(); + this.addWalletPubkey(name, 
walletInfo.public_key); } ); @@ -344,6 +350,9 @@ Given( wallet.setPeerSeeds([this.seedAddresses()]); await wallet.startNew(); this.addWallet(name, wallet); + let walletClient = await this.getWallet(name).connectClient(); + let walletInfo = await walletClient.identify(); + this.addWalletPubkey(name, walletInfo.public_key); } ); @@ -354,35 +363,12 @@ Given( // mechanism: DirectOnly, StoreAndForwardOnly, DirectAndStoreAndForward const promises = []; for (let i = 0; i < n; i++) { - if (i < 10) { - const wallet = new WalletProcess( - "Wallet_0" + String(i), - false, - { routingMechanism: mechanism }, - this.logFilePathWallet - ); - console.log(wallet.name, wallet.options); - wallet.setPeerSeeds([this.seedAddresses()]); - promises.push( - wallet - .startNew() - .then(() => this.addWallet("Wallet_0" + String(i), wallet)) - ); - } else { - const wallet = new WalletProcess( - "Wallet_0" + String(i), - false, - { routingMechanism: mechanism }, - this.logFilePathWallet - ); - console.log(wallet.name, wallet.options); - wallet.setPeerSeeds([this.seedAddresses()]); - promises.push( - wallet - .startNew() - .then(() => this.addWallet("Wallet_" + String(i), wallet)) - ); - } + let name = "Wallet_" + String(i).padStart(2, "0"); + promises.push( + this.createAndAddWallet(name, [this.seedAddresses()], { + routingMechanism: mechanism, + }) + ); } await Promise.all(promises); } @@ -409,8 +395,83 @@ Given( seedWords ); walletB.setPeerSeeds([this.seedAddresses()]); - walletB.startNew(); // Do not 'await' here + await walletB.startNew(); this.addWallet(walletNameB, walletB); + let walletClient = await this.getWallet(walletNameB).connectClient(); + let walletInfo = await walletClient.identify(); + this.addWalletPubkey(walletNameB, walletInfo.public_key); + } +); + +Given( + /I recover wallet (.*) into (\d+) wallets connected to all seed nodes/, + { timeout: 120 * 1000 }, + async function (walletNameA, numwallets) { + const seedWords = this.getWallet(walletNameA).getSeedWords(); + for (let
i = 1; i <= numwallets; i++) { + console.log( + "Recover " + + walletNameA + + " into wallet " + + i + + ", seed words:\n " + + seedWords + ); + const wallet = new WalletProcess( + i, + false, + {}, + this.logFilePathWallet, + seedWords + ); + wallet.setPeerSeeds([this.seedAddresses()]); + await wallet.startNew(); + this.addWallet(i, wallet); + let walletClient = await this.getWallet(i.toString()).connectClient(); + let walletInfo = await walletClient.identify(); + this.addWalletPubkey(wallet, walletInfo.public_key); + } + } +); + +Then( + /I wait for (\d+) wallets to have at least (\d+) uT/, + { timeout: 710 * 1000 }, + async function (numwallets, amount) { + for (let i = 1; i <= numwallets; i++) { + const walletClient = await this.getWallet(i.toString()).connectClient(); + console.log("\n"); + console.log( + "Waiting for wallet " + i + " balance to be at least " + amount + " uT" + ); + + await waitFor( + async () => walletClient.isBalanceAtLeast(amount), + true, + 700 * 1000, + 5 * 1000, + 5 + ); + consoleLogBalance(await walletClient.getBalance()); + if (!(await walletClient.isBalanceAtLeast(amount))) { + console.log("Balance not adequate!"); + } + expect(await walletClient.isBalanceAtLeast(amount)).to.equal(true); + } + } +); + +Then( + /Wallet (.*) and (\d+) wallets have the same balance/, + { timeout: 120 * 1000 }, + async function (wallet, numwallets) { + const walletClient = await this.getWallet(wallet).connectClient(); + let balance = await walletClient.getBalance(); + for (let i = 1; i <= numwallets; i++) { + const walletClient2 = await this.getWallet(i.toString()).connectClient(); + let balance2 = await walletClient2.getBalance(); + expect(balance === balance2); + } } ); @@ -419,6 +480,11 @@ When(/I stop wallet (.*)/, async function (walletName) { await wallet.stop(); }); +When(/I start wallet (.*)/, async function (walletName) { + let wallet = this.getWallet(walletName); + await wallet.start(); +}); + When(/I restart wallet (.*)/, async function 
(walletName) { let wallet = this.getWallet(walletName); await wallet.stop(); @@ -430,7 +496,7 @@ When( async function (walletNameA, walletNameB) { let walletA = this.getWallet(walletNameA); let walletB = this.getWallet(walletNameB); - let clientB = walletB.getClient(); + let clientB = await walletB.connectClient(); await walletA.exportSpentOutputs(); let spent_outputs = await walletA.readExportedOutputs(); @@ -444,7 +510,7 @@ When( async function (walletNameA, walletNameB) { let walletA = this.getWallet(walletNameA); let walletB = this.getWallet(walletNameB); - let clientB = walletB.getClient(); + let clientB = await walletB.connectClient(); await walletA.exportUnspentOutputs(); let outputs = await walletA.readExportedOutputs(); @@ -453,11 +519,25 @@ When( } ); +When( + /I import (.*) unspent outputs as faucet outputs to (.*)/, + async function (walletNameA, walletNameB) { + let walletA = this.getWallet(walletNameA); + let walletB = this.getWallet(walletNameB); + let clientB = await walletB.connectClient(); + + await walletA.exportUnspentOutputs(); + let outputs = await walletA.readExportedOutputsAsFaucetOutputs(); + let result = await clientB.importUtxos(outputs); + lastResult = result.tx_ids; + } +); + When( /I check if last imported transactions are invalid in wallet (.*)/, async function (walletName) { let wallet = this.getWallet(walletName); - let client = wallet.getClient(); + let client = await wallet.connectClient(); let found_txs = await client.getCompletedTransactions(); //console.log("Found: ", found_txs); let found_count = 0; @@ -480,7 +560,7 @@ When( /I check if wallet (.*) has (.*) transactions/, async function (walletName, count) { let wallet = this.getWallet(walletName); - let client = wallet.getClient(); + let client = await wallet.connectClient(); let txs = await client.getCompletedTransactions(); expect(count).to.equal(txs.length.toString()); } @@ -490,7 +570,7 @@ When( /I check if last imported transactions are valid in wallet (.*)/, async 
function (walletName) { let wallet = this.getWallet(walletName); - let client = wallet.getClient(); + let client = await wallet.connectClient(); let found_txs = await client.getCompletedTransactions(); let found_count = 0; @@ -686,10 +766,18 @@ Then("Proxy response for block header by hash is valid", function () { assert(lastResult.result.status, "OK"); }); -When(/I start (.*)/, { timeout: 20 * 1000 }, async function (name) { +When(/I start base node (.*)/, { timeout: 20 * 1000 }, async function (name) { await this.startNode(name); }); +When( + /I run blockchain recovery on node (\S*)/, + { timeout: 120 * 1000 }, + async function (name) { + await this.startNode(name, ["--rebuild-db"]); + } +); + When(/I stop node (.*)/, async function (name) { await this.stopNode(name); }); @@ -761,6 +849,7 @@ Then( await this.forEachClientAsync(async (client, name) => { await waitFor(async () => client.getTipHeight(), height, 115 * 1000); const currTip = await client.getTipHeader(); + console.log("the node is at tip ", currTip); expect(currTip.height).to.equal(height); if (!tipHash) { tipHash = currTip.hash.toString("hex"); @@ -776,6 +865,41 @@ Then( } ); +Then( + "all nodes are on the same chain tip", + { timeout: 1200 * 1000 }, + async function () { + await waitFor( + async () => { + let tipHash = null; + let height = null; + let result = true; + await this.forEachClientAsync(async (client, name) => { + await waitFor(async () => client.getTipHeight(), 115 * 1000); + const currTip = await client.getTipHeader(); + if (!tipHash) { + tipHash = currTip.hash.toString("hex"); + height = currTip.height; + console.log(`Node ${name} is at tip: #${height}, ${tipHash}`); + } else { + const currTipHash = currTip.hash.toString("hex"); + console.log( + `Node ${name} is at tip: #${currTip.height},${currTipHash} (should be #${height},${tipHash})` + ); + result = + result && currTipHash == tipHash && currTip.height == height; + } + }); + return result; + }, + true, + 600 * 1000, + 5 * 1000, + 5 + 
); + } +); + Then( "all nodes are at height {int}", { timeout: 1200 * 1000 }, @@ -986,7 +1110,7 @@ Then(/(.*) has (.*) in (.*) state/, async function (node, txn, pool) { const client = this.getClient(node); const sig = this.transactions[txn].body.kernels[0].excess_sig; await waitFor( - async () => client.transactionStateResult(sig), + async () => await client.transactionStateResult(sig), pool, 1200 * 1000 ); @@ -1007,7 +1131,7 @@ Then( await this.forEachClientAsync( async (client, name) => { await waitFor( - async () => client.transactionStateResult(sig), + async () => await client.transactionStateResult(sig), pool, 1200 * 1000 ); @@ -1044,6 +1168,20 @@ Then(/node (.*) is at tip (.*)/, async function (node, name) { ); }); +Then( + /node (.*) lists headers (\d+) to (\d+) with correct heights/, + async function (node, start, end) { + const client = this.getClient(node); + const fromHeight = end; + const numHeaders = end - start + 1; // inclusive + const headers = await client.getHeaders(fromHeight, numHeaders); + const heights = headers.map((header) => parseInt(header.height)); + for (let height = start; height <= end; height++) { + expect(heights).to.contain(height); + } + } +); + When( /I mine a block on (.*) with coinbase (.*)/, { timeout: 600 * 1000 }, @@ -1118,7 +1256,7 @@ When( { timeout: 600 * 1000 }, async function (numBlocks, walletName, nodeName) { const nodeClient = this.getClient(nodeName); - const walletClient = this.getWallet(walletName).getClient(); + const walletClient = await this.getWallet(walletName).connectClient(); for (let i = 0; i < numBlocks; i++) { await nodeClient.mineBlock(walletClient); } @@ -1145,11 +1283,12 @@ When( { timeout: 1200 * 1000 }, async function (numBlocks, mmProxy, node, wallet) { this.lastResult = this.tipHeight; - const baseNodeMiningPromise = this.baseNodeMineBlocksUntilHeightIncreasedBy( - node, - wallet, - numBlocks - ); + const baseNodeMiningPromise = + await this.baseNodeMineBlocksUntilHeightIncreasedBy( + node, + 
wallet, + numBlocks + ); const mergeMiningPromise = this.mergeMineBlocksUntilHeightIncreasedBy( mmProxy, numBlocks @@ -1297,7 +1436,7 @@ Then( /I wait for wallet (.*) to have at least (.*) uT/, { timeout: 710 * 1000 }, async function (wallet, amount) { - const walletClient = this.getWallet(wallet).getClient(); + const walletClient = await this.getWallet(wallet).connectClient(); console.log("\n"); console.log( "Waiting for " + wallet + " balance to be at least " + amount + " uT" @@ -1322,7 +1461,7 @@ Then( /I wait for wallet (.*) to have less than (.*) uT/, { timeout: 710 * 1000 }, async function (wallet, amount) { - let walletClient = this.getWallet(wallet).getClient(); + let walletClient = await this.getWallet(wallet).connectClient(); console.log("\n"); console.log( "Waiting for " + wallet + " balance to less than " + amount + " uT" @@ -1347,11 +1486,11 @@ Then( /wallet (.*) and wallet (.*) have the same balance/, { timeout: 65 * 1000 }, async function (walletNameA, walletNameB) { - const walletClientA = this.getWallet(walletNameA).getClient(); + const walletClientA = await this.getWallet(walletNameA).connectClient(); var balanceA = await walletClientA.getBalance(); console.log("\n", walletNameA, "balance:"); consoleLogBalance(balanceA); - const walletClientB = this.getWallet(walletNameB).getClient(); + const walletClientB = await this.getWallet(walletNameB).connectClient(); for (let i = 1; i <= 12; i++) { await waitFor( async () => walletClientB.isBalanceAtLeast(balanceA.available_balance), @@ -1370,34 +1509,75 @@ Then( } ); -async function send_tari(sourceWallet, destWallet, tariAmount, feePerGram) { - const sourceWalletClient = sourceWallet.getClient(); - const destInfo = await destWallet.getClient().identify(); +async function send_tari( + sourceWallet, + destWalletName, + destWalletPubkey, + tariAmount, + feePerGram, + oneSided = false, + message = "", + printMessage = true +) { + const sourceWalletClient = await sourceWallet.connectClient(); console.log( 
sourceWallet.name + " sending " + tariAmount + - "uT to " + - destWallet.name + + "uT one-sided(" + + oneSided + + ") to " + + destWalletName + " `" + - destInfo.public_key + + destWalletPubkey + "`" ); + if (printMessage) { + console.log(message); + } let success = false; let retries = 1; const retries_limit = 25; let lastResult; while (!success && retries <= retries_limit) { - lastResult = await sourceWalletClient.transfer({ - recipients: [ - { - address: destInfo.public_key, - amount: tariAmount, - fee_per_gram: feePerGram, - message: "msg", - }, - ], - }); + await waitFor( + async () => { + try { + if (!oneSided) { + lastResult = await sourceWalletClient.transfer({ + recipients: [ + { + address: destWalletPubkey, + amount: tariAmount, + fee_per_gram: feePerGram, + message: message, + }, + ], + }); + } else { + lastResult = await sourceWalletClient.transfer({ + recipients: [ + { + address: destWalletPubkey, + amount: tariAmount, + fee_per_gram: feePerGram, + message: message, + payment_type: PaymentType.ONE_SIDED, + }, + ], + }); + } + } catch (error) { + console.log(error); + return false; + } + return true; + }, + true, + 20 * 1000, + 5 * 1000, + 5 + ); success = lastResult.results[0].is_success; if (!success) { const wait_seconds = 5; @@ -1423,11 +1603,16 @@ When( /I send (.*) uT from wallet (.*) to wallet (.*) at fee (.*)/, { timeout: 25 * 5 * 1000 }, async function (tariAmount, source, dest, feePerGram) { - const sourceInfo = await this.getWallet(source).getClient().identify(); - const destInfo = await this.getWallet(dest).getClient().identify(); + const sourceWallet = this.getWallet(source); + const sourceClient = await sourceWallet.connectClient(); + const sourceInfo = await sourceClient.identify(); + + const destPublicKey = this.getWalletPubkey(dest); + this.lastResult = await send_tari( - this.getWallet(source), - this.getWallet(dest), + sourceWallet, + dest, + destPublicKey, tariAmount, feePerGram ); @@ -1437,7 +1622,7 @@ When( 
this.lastResult.results[0].transaction_id ); this.addTransaction( - destInfo.public_key, + destPublicKey, this.lastResult.results[0].transaction_id ); console.log( @@ -1455,12 +1640,15 @@ When( { timeout: 25 * 5 * 1000 }, async function (number, tariAmount, source, dest, fee) { console.log("\n"); - const sourceInfo = await this.getWallet(source).getClient().identify(); - const destInfo = await this.getWallet(dest).getClient().identify(); + const sourceClient = await this.getWallet(source).connectClient(); + const sourceInfo = await sourceClient.identify(); + const destClient = await this.getWallet(dest).connectClient(); + const destInfo = await destClient.identify(); for (let i = 0; i < number; i++) { this.lastResult = await send_tari( this.getWallet(source), - this.getWallet(dest), + destInfo.name, + destInfo.public_key, tariAmount, fee ); @@ -1483,17 +1671,19 @@ When( /I multi-send (.*) uT from wallet (.*) to all wallets at fee (.*)/, { timeout: 25 * 5 * 1000 }, async function (tariAmount, source, fee) { - const sourceWalletClient = this.getWallet(source).getClient(); + const sourceWalletClient = await this.getWallet(source).connectClient(); const sourceInfo = await sourceWalletClient.identify(); for (const wallet in this.wallets) { if (this.getWallet(source).name === this.getWallet(wallet).name) { continue; } - const destInfo = await this.getWallet(wallet).getClient().identify(); + const destClient = await this.getWallet(wallet).connectClient(); + const destInfo = await destClient.identify(); this.lastResult = await send_tari( this.getWallet(source), - this.getWallet(wallet), + destInfo.name, + destInfo.public_key, tariAmount, fee ); @@ -1516,9 +1706,9 @@ When( /I transfer (.*) uT from (.*) to (.*) and (.*) at fee (.*)/, { timeout: 25 * 5 * 1000 }, async function (tariAmount, source, dest1, dest2, feePerGram) { - const sourceClient = this.getWallet(source).getClient(); - const destClient1 = this.getWallet(dest1).getClient(); - const destClient2 = 
this.getWallet(dest2).getClient(); + const sourceClient = await this.getWallet(source).connectClient(); + const destClient1 = await this.getWallet(dest1).connectClient(); + const destClient2 = await this.getWallet(dest2).connectClient(); const sourceInfo = await sourceClient.identify(); const dest1Info = await destClient1.identify(); @@ -1534,23 +1724,39 @@ When( let success = false; let retries = 1; const retries_limit = 25; + let lastResult; while (!success && retries <= retries_limit) { - lastResult = await sourceClient.transfer({ - recipients: [ - { - address: dest1Info.public_key, - amount: tariAmount, - fee_per_gram: feePerGram, - message: "msg", - }, - { - address: dest2Info.public_key, - amount: tariAmount, - fee_per_gram: feePerGram, - message: "msg", - }, - ], - }); + await waitFor( + async () => { + try { + lastResult = await sourceClient.transfer({ + recipients: [ + { + address: dest1Info.public_key, + amount: tariAmount, + fee_per_gram: feePerGram, + message: "msg", + }, + { + address: dest2Info.public_key, + amount: tariAmount, + fee_per_gram: feePerGram, + message: "msg", + }, + ], + }); + } catch (error) { + console.log(error); + return false; + } + return true; + }, + true, + 20 * 1000, + 5 * 1000, + 5 + ); + success = lastResult.results[0].is_success && lastResult.results[1].is_success; if (!success) { @@ -1596,10 +1802,12 @@ When( /I transfer (.*) uT to self from wallet (.*) at fee (.*)/, { timeout: 25 * 5 * 1000 }, async function (tariAmount, source, feePerGram) { - const sourceInfo = await this.getWallet(source).getClient().identify(); + const sourceClient = await this.getWallet(source).connectClient(); + const sourceInfo = await sourceClient.identify(); this.lastResult = await send_tari( this.getWallet(source), - this.getWallet(source), + sourceInfo.name, + sourceInfo.public_key, tariAmount, feePerGram ); @@ -1622,19 +1830,35 @@ When( /I transfer (.*) uT from (.*) to ([A-Za-z0-9,]+) at fee (.*)/, async function (amount, source, dests, 
feePerGram) { const wallet = this.getWallet(source); - const client = wallet.getClient(); - const destWallets = dests - .split(",") - .map((dest) => this.getWallet(dest).getClient()); + const client = await wallet.connectClient(); + const destWallets = await Promise.all( + dests.split(",").map((dest) => this.getWallet(dest).connectClient()) + ); console.log("Starting Transfer of", amount, "to"); - const recipients = destWallets.map((w) => ({ - address: w.public_key, - amount: amount, - fee_per_gram: feePerGram, - message: "msg", - })); - const output = await client.transfer({ recipients }); + let output; + await waitFor( + async () => { + try { + const recipients = destWallets.map((w) => ({ + address: w.public_key, + amount: amount, + fee_per_gram: feePerGram, + message: "msg", + })); + output = await client.transfer({ recipients }); + } catch (error) { + console.log(error); + return false; + } + return true; + }, + true, + 20 * 1000, + 5 * 1000, + 5 + ); + console.log("output", output); lastResult = output; } @@ -1642,22 +1866,54 @@ When( When( /I send a one-sided transaction of (.*) uT from (.*) to (.*) at fee (.*)/, + { timeout: 65 * 1000 }, async function (amount, source, dest, feePerGram) { - let wallet = this.getWallet(source); - let sourceClient = wallet.getClient(); - let destPubkey = this.getWalletPubkey(dest); - - lastResult = await sourceClient.transfer({ - recipients: [ - { - address: destPubkey, - amount: amount, - fee_per_gram: feePerGram, - message: "msg", - payment_type: PaymentType.ONE_SIDED, - }, - ], - }); + const sourceWallet = this.getWallet(source); + const sourceClient = await sourceWallet.connectClient(); + const sourceInfo = await sourceClient.identify(); + + const destPublicKey = this.getWalletPubkey(dest); + + const oneSided = true; + const lastResult = await send_tari( + sourceWallet, + dest, + destPublicKey, + amount, + feePerGram, + oneSided + ); + expect(lastResult.results[0].is_success).to.equal(true); + + this.addTransaction( + 
sourceInfo.public_key, + lastResult.results[0].transaction_id + ); + } +); + +When( + /I cancel last transaction in wallet (.*)/, + { timeout: 25 * 5 * 1000 }, + async function (walletName) { + const wallet = this.getWallet(walletName); + const walletClient = await wallet.connectClient(); + + let lastTxId = this.lastResult.results[0].transaction_id; + console.log( + "Attempting to cancel transaction ", + lastTxId, + "from wallet", + walletName + ); + + let result = await walletClient.cancelTransaction(lastTxId); + console.log( + "Cancellation successful? ", + result.success, + result.failure_message + ); + assert(result.success, true); } ); @@ -1670,10 +1926,12 @@ When(/I wait (.*) seconds/, { timeout: 600 * 1000 }, async function (int) { Then( /Batch transfer of (.*) transactions was a success from (.*) to ([A-Za-z0-9,]+)/, async function (txCount, walletListStr) { - const clients = walletListStr.split(",").map((s) => { - const wallet = this.getWallet(s); - return wallet.getClient(); - }); + const clients = await Promise.all( + walletListStr.split(",").map((s) => { + const wallet = this.getWallet(s); + return wallet.connectClient(); + }) + ); const resultObj = lastResult.results; console.log(resultObj); @@ -1724,7 +1982,7 @@ Then( // Note: This initial step can take a long time if network conditions are not favourable // Pending -> Completed -> Broadcast -> Mined Unconfirmed -> Mined Confirmed const wallet = this.getWallet(walletName); - const walletClient = wallet.getClient(); + const walletClient = await wallet.connectClient(); const walletInfo = await walletClient.identify(); const txIds = this.transactionsMap.get(walletInfo.public_key); @@ -1752,17 +2010,15 @@ Then( " to register at least Pending in the wallet ..." 
); await waitFor( - async () => wallet.getClient().isTransactionAtLeastPending(txIds[i]), + async () => await walletClient.isTransactionAtLeastPending(txIds[i]), true, 3700 * 1000, 5 * 1000, 5 ); - const transactionPending = await wallet - .getClient() - .isTransactionAtLeastPending(txIds[i]); - // let txnDetails = await wallet.getClient().getTransactionDetails(txIds[i]); - // consoleLogTransactionDetails(txnDetails, txIds[i]); + const transactionPending = await walletClient.isTransactionAtLeastPending( + txIds[i] + ); expect(transactionPending).to.equal(true); } } @@ -1776,7 +2032,7 @@ Then( // Pending -> Completed -> Broadcast -> Mined Unconfirmed -> Mined Confirmed for (const walletName in this.wallets) { const wallet = this.getWallet(walletName); - const walletClient = wallet.getClient(); + const walletClient = await wallet.connectClient(); const walletInfo = await walletClient.identify(); const txIds = this.transactionsMap.get(walletInfo.public_key); @@ -1804,30 +2060,57 @@ Then( " to register at least Pending in the wallet ..." 
); await waitFor( - async () => wallet.getClient().isTransactionAtLeastPending(txIds[i]), + async () => walletClient.isTransactionAtLeastPending(txIds[i]), true, 3700 * 1000, 5 * 1000, 5 ); - const transactionPending = await wallet - .getClient() - .isTransactionAtLeastPending(txIds[i]); - // let txnDetails = await wallet.getClient().getTransactionDetails(txIds[i]); - // consoleLogTransactionDetails(txnDetails, txIds[i]); + const transactionPending = + await walletClient.isTransactionAtLeastPending(txIds[i]); expect(transactionPending).to.equal(true); } } } ); +Then( + /wallet (.*) detects last transaction is Pending/, + { timeout: 3800 * 1000 }, + async function (walletName) { + const wallet = this.getWallet(walletName); + const walletClient = await wallet.connectClient(); + + let lastTxId = this.lastResult.results[0].transaction_id; + console.log( + "Waiting for Transaction ", + lastTxId, + "to be pending in wallet", + walletName + ); + + await waitFor( + async () => walletClient.isTransactionPending(lastTxId), + true, + 3700 * 1000, + 5 * 1000, + 5 + ); + const transactionPending = await walletClient.isTransactionPending( + lastTxId + ); + + expect(transactionPending).to.equal(true); + } +); + Then( /wallet (.*) detects all transactions are at least Completed/, { timeout: 1200 * 1000 }, async function (walletName) { // Pending -> Completed -> Broadcast -> Mined Unconfirmed -> Mined Confirmed const wallet = this.getWallet(walletName); - const walletClient = wallet.getClient(); + const walletClient = await wallet.connectClient(); const walletInfo = await walletClient.identify(); const txIds = this.transactionsMap.get(walletInfo.public_key); @@ -1856,17 +2139,14 @@ Then( " to register at least Completed in the wallet ..." 
); await waitFor( - async () => wallet.getClient().isTransactionAtLeastCompleted(txIds[i]), + async () => walletClient.isTransactionAtLeastCompleted(txIds[i]), true, 600 * 1000, 5 * 1000, 5 ); - const transactionCompleted = await wallet - .getClient() - .isTransactionAtLeastCompleted(txIds[i]); - // let txnDetails = await wallet.getClient().getTransactionDetails(txIds[i]); - // consoleLogTransactionDetails(txnDetails, txIds[i]); + const transactionCompleted = + await walletClient.isTransactionAtLeastCompleted(txIds[i]); expect(transactionCompleted).to.equal(true); } } @@ -1879,7 +2159,7 @@ Then( // Pending -> Completed -> Broadcast -> Mined Unconfirmed -> Mined Confirmed for (const walletName in this.wallets) { const wallet = this.getWallet(walletName); - const walletClient = wallet.getClient(); + const walletClient = await wallet.connectClient(); const walletInfo = await walletClient.identify(); const txIds = this.transactionsMap.get(walletInfo.public_key); @@ -1908,18 +2188,14 @@ Then( " to register at least Completed in the wallet ..." 
); await waitFor( - async () => - wallet.getClient().isTransactionAtLeastCompleted(txIds[i]), + async () => walletClient.isTransactionAtLeastCompleted(txIds[i]), true, 1100 * 1000, 5 * 1000, 5 ); - const transactionCompleted = await wallet - .getClient() - .isTransactionAtLeastCompleted(txIds[i]); - // let txnDetails = await wallet.getClient().getTransactionDetails(txIds[i]); - // consoleLogTransactionDetails(txnDetails, txIds[i]); + const transactionCompleted = + await walletClient.isTransactionAtLeastCompleted(txIds[i]); expect(transactionCompleted).to.equal(true); } } @@ -1932,7 +2208,7 @@ Then( async function (walletName) { // Pending -> Completed -> Broadcast -> Mined Unconfirmed -> Mined Confirmed const wallet = this.getWallet(walletName); - const walletClient = wallet.getClient(); + const walletClient = await wallet.connectClient(); const walletInfo = await walletClient.identify(); let txIds = this.transactionsMap.get(walletInfo.public_key); @@ -1962,17 +2238,14 @@ Then( " to register at least Broadcast in the wallet ..." 
); await waitFor( - async () => wallet.getClient().isTransactionAtLeastBroadcast(txIds[i]), + async () => walletClient.isTransactionAtLeastBroadcast(txIds[i]), true, 600 * 1000, 5 * 1000, 5 ); - const transactionBroadcasted = await wallet - .getClient() - .isTransactionAtLeastBroadcast(txIds[i]); - // let txnDetails = await wallet.getClient().getTransactionDetails(txIds[i]); - // consoleLogTransactionDetails(txnDetails, txIds[i]); + const transactionBroadcasted = + await walletClient.isTransactionAtLeastBroadcast(txIds[i]); expect(transactionBroadcasted).to.equal(true); } } @@ -1985,7 +2258,7 @@ Then( // Pending -> Completed -> Broadcast -> Mined Unconfirmed -> Mined Confirmed for (const walletName in this.wallets) { const wallet = this.getWallet(walletName); - const walletClient = wallet.getClient(); + const walletClient = await wallet.connectClient(); const walletInfo = await walletClient.identify(); const txIds = this.transactionsMap.get(walletInfo.public_key); @@ -2014,18 +2287,14 @@ Then( " to register at least Broadcast in the wallet ..." 
); await waitFor( - async () => - wallet.getClient().isTransactionAtLeastBroadcast(txIds[i]), + async () => walletClient.isTransactionAtLeastBroadcast(txIds[i]), true, 1100 * 1000, 5 * 1000, 5 ); - const transactionBroadcasted = await wallet - .getClient() - .isTransactionAtLeastBroadcast(txIds[i]); - // let txnDetails = await wallet.getClient().getTransactionDetails(txIds[i]); - // consoleLogTransactionDetails(txnDetails, txIds[i]); + const transactionBroadcasted = + await walletClient.isTransactionAtLeastBroadcast(txIds[i]); expect(transactionBroadcasted).to.equal(true); } } @@ -2038,7 +2307,7 @@ Then( async function (walletName) { // Pending -> Completed -> Broadcast -> Mined Unconfirmed -> Mined Confirmed const wallet = this.getWallet(walletName); - const walletClient = wallet.getClient(); + const walletClient = await wallet.connectClient(); const walletInfo = await walletClient.identify(); const txIds = this.transactionsMap.get(walletInfo.public_key); @@ -2066,18 +2335,14 @@ Then( " to be detected as Mined_Unconfirmed in the wallet ..." 
); await waitFor( - async () => - wallet.getClient().isTransactionAtLeastMinedUnconfirmed(txIds[i]), + async () => walletClient.isTransactionAtLeastMinedUnconfirmed(txIds[i]), true, 600 * 1000, 5 * 1000, 5 ); - const isTransactionAtLeastMinedUnconfirmed = await wallet - .getClient() - .isTransactionAtLeastMinedUnconfirmed(txIds[i]); - // let txnDetails = await wallet.getClient().getTransactionDetails(txIds[i]); - // consoleLogTransactionDetails(txnDetails, txIds[i]); + const isTransactionAtLeastMinedUnconfirmed = + await walletClient.isTransactionAtLeastMinedUnconfirmed(txIds[i]); expect(isTransactionAtLeastMinedUnconfirmed).to.equal(true); } } @@ -2090,7 +2355,7 @@ Then( // Pending -> Completed -> Broadcast -> Mined Unconfirmed -> Mined Confirmed for (const walletName in this.wallets) { const wallet = this.getWallet(walletName); - const walletClient = wallet.getClient(); + const walletClient = await wallet.connectClient(); const walletInfo = await walletClient.identify(); const txIds = this.transactionsMap.get(walletInfo.public_key); @@ -2118,17 +2383,14 @@ Then( ); await waitFor( async () => - wallet.getClient().isTransactionAtLeastMinedUnconfirmed(txIds[i]), + walletClient.isTransactionAtLeastMinedUnconfirmed(txIds[i]), true, 1100 * 1000, 5 * 1000, 5 ); - const isTransactionAtLeastMinedUnconfirmed = await wallet - .getClient() - .isTransactionAtLeastMinedUnconfirmed(txIds[i]); - // let txnDetails = await wallet.getClient().getTransactionDetails(txIds[i]); - // consoleLogTransactionDetails(txnDetails, txIds[i]); + const isTransactionAtLeastMinedUnconfirmed = + await walletClient.isTransactionAtLeastMinedUnconfirmed(txIds[i]); expect(isTransactionAtLeastMinedUnconfirmed).to.equal(true); } } @@ -2141,7 +2403,7 @@ Then( async function (walletName) { // Pending -> Completed -> Broadcast -> Mined Unconfirmed -> Mined Confirmed const wallet = this.getWallet(walletName); - const walletClient = wallet.getClient(); + const walletClient = await wallet.connectClient(); 
const walletInfo = await walletClient.identify(); const txIds = this.transactionsMap.get(walletInfo.public_key); @@ -2169,17 +2431,14 @@ Then( " to be detected as Mined_Unconfirmed in the wallet ..." ); await waitFor( - async () => wallet.getClient().isTransactionMinedUnconfirmed(txIds[i]), + async () => walletClient.isTransactionMinedUnconfirmed(txIds[i]), true, 600 * 1000, 5 * 1000, 5 ); - const isTransactionMinedUnconfirmed = await wallet - .getClient() - .isTransactionMinedUnconfirmed(txIds[i]); - // let txnDetails = await wallet.getClient().getTransactionDetails(txIds[i]); - // consoleLogTransactionDetails(txnDetails, txIds[i]); + const isTransactionMinedUnconfirmed = + await walletClient.isTransactionMinedUnconfirmed(txIds[i]); expect(isTransactionMinedUnconfirmed).to.equal(true); } } @@ -2192,7 +2451,7 @@ Then( // Pending -> Completed -> Broadcast -> Mined Unconfirmed -> Mined Confirmed for (const walletName in this.wallets) { const wallet = this.getWallet(walletName); - const walletClient = wallet.getClient(); + const walletClient = await wallet.connectClient(); const walletInfo = await walletClient.identify(); const txIds = this.transactionsMap.get(walletInfo.public_key); @@ -2220,18 +2479,14 @@ Then( " to be detected as Mined_Unconfirmed in the wallet ..." 
); await waitFor( - async () => - wallet.getClient().isTransactionMinedUnconfirmed(txIds[i]), + async () => walletClient.isTransactionMinedUnconfirmed(txIds[i]), true, 1100 * 1000, 5 * 1000, 5 ); - const isTransactionMinedUnconfirmed = await wallet - .getClient() - .isTransactionMinedUnconfirmed(txIds[i]); - // let txnDetails = await wallet.getClient().getTransactionDetails(txIds[i]); - // consoleLogTransactionDetails(txnDetails, txIds[i]); + const isTransactionMinedUnconfirmed = + await walletClient.isTransactionMinedUnconfirmed(txIds[i]); expect(isTransactionMinedUnconfirmed).to.equal(true); } } @@ -2244,7 +2499,7 @@ Then( async function (walletName) { // Pending -> Completed -> Broadcast -> Mined Unconfirmed -> Mined Confirmed const wallet = this.getWallet(walletName); - const walletClient = wallet.getClient(); + const walletClient = await wallet.connectClient(); const walletInfo = await walletClient.identify(); const txIds = this.transactionsMap.get(walletInfo.public_key); @@ -2272,17 +2527,14 @@ Then( " to be detected as Mined_Confirmed in the wallet ..." 
); await waitFor( - async () => wallet.getClient().isTransactionMinedConfirmed(txIds[i]), + async () => walletClient.isTransactionMinedConfirmed(txIds[i]), true, 600 * 1000, 5 * 1000, 5 ); - const isTransactionMinedConfirmed = await wallet - .getClient() - .isTransactionMinedConfirmed(txIds[i]); - // let txnDetails = await wallet.getClient().getTransactionDetails(txIds[i]); - // consoleLogTransactionDetails(txnDetails, txIds[i]); + const isTransactionMinedConfirmed = + await walletClient.isTransactionMinedConfirmed(txIds[i]); expect(isTransactionMinedConfirmed).to.equal(true); } } @@ -2293,14 +2545,13 @@ Then( { timeout: 1200 * 1000 }, async function (nodeName, walletName) { const wallet = this.getWallet(walletName); - const walletClient = wallet.getClient(); + const walletClient = await wallet.connectClient(); const walletInfo = await walletClient.identify(); const nodeClient = this.getClient(nodeName); - const txIds = this.transactionsMap.get(walletInfo.public_key); if (txIds === undefined) { console.log("\nNo transactions for " + walletName + "!"); - expect(false).to.equal(true); + throw new Error("No transactions for " + walletName + "!"); } console.log( "\nDetecting", @@ -2336,11 +2587,8 @@ Then( 5 * 1000, 5 ); - const isTransactionMinedConfirmed = await wallet - .getClient() - .isTransactionMinedConfirmed(txIds[i]); - // let txnDetails = await wallet.getClient().getTransactionDetails(txIds[i]); - // consoleLogTransactionDetails(txnDetails, txIds[i]); + const isTransactionMinedConfirmed = + await walletClient.isTransactionMinedConfirmed(txIds[i]); expect(isTransactionMinedConfirmed).to.equal(true); } } @@ -2351,7 +2599,7 @@ Then( { timeout: 3600 * 1000 }, async function (mmProxy, walletName) { const wallet = this.getWallet(walletName); - const walletClient = wallet.getClient(); + const walletClient = await wallet.connectClient(); const walletInfo = await walletClient.identify(); const txIds = this.transactionsMap.get(walletInfo.public_key); @@ -2393,11 +2641,8 
@@ Then( 5 * 1000, 5 ); - const isTransactionMinedConfirmed = await wallet - .getClient() - .isTransactionMinedConfirmed(txIds[i]); - // let txnDetails = await wallet.getClient().getTransactionDetails(txIds[i]); - // consoleLogTransactionDetails(txnDetails, txIds[i]); + const isTransactionMinedConfirmed = + await walletClient.isTransactionMinedConfirmed(txIds[i]); expect(isTransactionMinedConfirmed).to.equal(true); } } @@ -2410,7 +2655,7 @@ Then( // Pending -> Completed -> Broadcast -> Mined Unconfirmed -> Mined Confirmed for (const walletName in this.wallets) { const wallet = this.getWallet(walletName); - const walletClient = wallet.getClient(); + const walletClient = await wallet.connectClient(); const walletInfo = await walletClient.identify(); const txIds = this.transactionsMap.get(walletInfo.public_key); @@ -2438,17 +2683,14 @@ Then( " to be detected as Mined_Confirmed in the wallet ..." ); await waitFor( - async () => wallet.getClient().isTransactionMinedConfirmed(txIds[i]), + async () => walletClient.isTransactionMinedConfirmed(txIds[i]), true, 1100 * 1000, 5 * 1000, 5 ); - const isTransactionMinedConfirmed = await wallet - .getClient() - .isTransactionMinedConfirmed(txIds[i]); - // let txnDetails = await wallet.getClient().getTransactionDetails(txIds[i]); - // consoleLogTransactionDetails(txnDetails, txIds[i]); + const isTransactionMinedConfirmed = + await walletClient.isTransactionMinedConfirmed(txIds[i]); expect(isTransactionMinedConfirmed).to.equal(true); } } @@ -2456,19 +2698,27 @@ Then( ); When( - /I list all coinbase transactions for wallet (.*)/, + /I list all (.*) transactions for wallet (.*)/, { timeout: 20 * 1000 }, - async function (walletName) { + async function (transaction_type, walletName) { const wallet = this.getWallet(walletName); - const walletClient = wallet.getClient(); - console.log("\nListing all coinbase transactions: ", walletName); - const transactions = await walletClient.getAllCoinbaseTransactions(); + const walletClient = await 
wallet.connectClient(); + var transactions; + var type; + if (transaction_type === "NORMAL") { + transactions = await walletClient.getAllNormalTransactions(); + type = "NORMAL"; + } else { + transactions = await walletClient.getAllCoinbaseTransactions(); + type = "COINBASE"; + } + console.log("\nListing all `" + type + "` transactions: ", walletName); if (transactions.length > 0) { for (let i = 0; i < transactions.length; i++) { - consoleLogCoinbaseDetails(transactions[i]); + consoleLogTransactionDetails(transactions[i]); } } else { - console.log(" No coinbase transactions found!"); + console.log(" No `" + type + "` transactions found!"); } } ); @@ -2477,7 +2727,7 @@ Then( /wallet (.*) has (.*) coinbase transactions/, { timeout: 20 * 1000 }, async function (walletName, count) { - const walletClient = this.getWallet(walletName).getClient(); + const walletClient = await this.getWallet(walletName).connectClient(); const transactions = await walletClient.getAllCoinbaseTransactions(); expect(transactions.length).to.equal(Number(count)); this.resultStack.push([walletName, transactions.length]); @@ -2488,7 +2738,7 @@ Then( /wallet (.*) detects at least (.*) coinbase transactions as Mined_Confirmed/, { timeout: 605 * 1000 }, async function (walletName, count) { - const walletClient = this.getWallet(walletName).getClient(); + const walletClient = await this.getWallet(walletName).connectClient(); await waitFor( async () => walletClient.areCoinbasesConfirmedAtLeast(count), true, @@ -2503,12 +2753,12 @@ Then( ); Then( - /wallets ([A-Za-z0-9,]+) account for all valid spendable coinbase transactions on the blockchain/, + /wallets ([A-Za-z0-9,]+) should have (.*) spendable coinbase outputs/, { timeout: 610 * 1000 }, - async function (wallets) { - const walletClients = wallets - .split(",") - .map((wallet) => this.getWallet(wallet).getClient()); + async function (wallets, amountOfCoinBases) { + const walletClients = await Promise.all( + wallets.split(",").map((wallet) => 
this.getWallet(wallet).connectClient()) + ); let coinbaseCount = 0; for (const client of walletClients) { coinbaseCount += await client.countAllCoinbaseTransactions(); @@ -2522,7 +2772,7 @@ Then( console.log(client.name, "count", count); spendableCoinbaseCount += count; } - return spendableCoinbaseCount === this.lastResult; + return spendableCoinbaseCount.toString() === amountOfCoinBases; }, true, 600 * 1000, @@ -2538,10 +2788,75 @@ Then( "with", spendableCoinbaseCount, "being valid and Mined_Confirmed, expected", - this.lastResult, + amountOfCoinBases, "\n" ); - expect(spendableCoinbaseCount).to.equal(this.lastResult); + expect(spendableCoinbaseCount.toString()).to.equal(amountOfCoinBases); + } +); + +Then( + /wallet (.*) has at least (.*) transactions that are all (.*) and valid/, + { timeout: 610 * 1000 }, + async function (walletName, numberOfTransactions, transactionStatus) { + const walletClient = await this.getWallet(walletName).connectClient(); + console.log( + walletName + + ": waiting for " + + numberOfTransactions + + " transactions to be " + + transactionStatus + + " and valid..." 
+ ); + var transactions; + var numberCorrect; + var statusCorrect; + await waitFor( + async () => { + numberCorrect = true; + statusCorrect = true; + transactions = await walletClient.getAllNormalTransactions(); + if (transactions.length < parseInt(numberOfTransactions)) { + console.log( + "Has", + transactions.length, + "transactions, need", + numberOfTransactions + ); + numberCorrect = false; + return false; + } + for (let i = 0; i < transactions.length; i++) { + if ( + transactions[i]["status"] !== transactionStatus || + !transactions[i]["valid"] + ) { + console.log( + "Transaction " + + i + + 1 + + " has " + + transactions[i]["status"] + + " and is valid(" + + transactions[i]["valid"] + + ")" + ); + statusCorrect = false; + return false; + } + } + return true; + }, + true, + 600 * 1000, + 5 * 1000, + 5 + ); + + if (transactions === undefined) { + expect("\nNo transactions found!").to.equal(""); + } + expect(numberCorrect && statusCorrect).to.equal(true); } ); @@ -2549,9 +2864,9 @@ Then( /the number of coinbase transactions for wallet (.*) and wallet (.*) are (.*) less/, { timeout: 20 * 1000 }, async function (walletNameA, walletNameB, count) { - const walletClientA = this.getWallet(walletNameA).getClient(); + const walletClientA = await this.getWallet(walletNameA).connectClient(); const transactionsA = await walletClientA.getAllCoinbaseTransactions(); - const walletClientB = this.getWallet(walletNameB).getClient(); + const walletClientB = await this.getWallet(walletNameB).connectClient(); const transactionsB = await walletClientB.getAllCoinbaseTransactions(); if (this.resultStack.length >= 2) { const walletStats = [this.resultStack.pop(), this.resultStack.pop()]; @@ -2575,6 +2890,78 @@ Then( } ); +Then( + /all (.*) transactions for wallet (.*) and wallet (.*) have consistent but opposing validity/, + { timeout: 20 * 1000 }, + async function (transaction_type, walletNameA, walletNameB) { + let walletClientA = await this.getWallet(walletNameA).connectClient(); + 
let walletClientB = await this.getWallet(walletNameB).connectClient(); + var transactionsA; + var transactionsB; + var type; + if (transaction_type === "NORMAL") { + transactionsA = await walletClientA.getAllNormalTransactions(); + transactionsB = await walletClientB.getAllNormalTransactions(); + type = "NORMAL"; + } else { + transactionsA = await walletClientA.getAllCoinbaseTransactions(); + transactionsB = await walletClientB.getAllCoinbaseTransactions(); + type = "COINBASE"; + } + if (transactionsA === undefined || transactionsB === undefined) { + expect("\nNo `" + type + "` transactions found!").to.equal(""); + } + let validA = transactionsA[0]["valid"]; + for (let i = 0; i < transactionsA.length; i++) { + if (validA !== transactionsA[i]["valid"]) { + expect( + "\n" + + walletNameA + + "'s `" + + type + + "` transactions do not have a consistent validity status" + ).to.equal(""); + } + } + let validB = transactionsB[0]["valid"]; + for (let i = 0; i < transactionsB.length; i++) { + if (validB !== transactionsB[i]["valid"]) { + expect( + "\n" + + walletNameB + + "'s `" + + type + + "` transactions do not have a consistent validity status" + ).to.equal(""); + } + } + expect(validA).to.equal(!validB); + } +); + +Then( + /all (.*) transactions for wallet (.*) are valid/, + { timeout: 20 * 1000 }, + async function (transaction_type, walletName) { + let walletClient = await this.getWallet(walletName).connectClient(); + var transactions; + var type; + if (transaction_type === "NORMAL") { + transactions = await walletClient.getAllNormalTransactions(); + type = "NORMAL"; + } else { + transactions = await walletClient.getAllCoinbaseTransactions(); + type = "COINBASE"; + } + if (transactions === undefined) { + expect("\nNo `" + type + "` transactions found!").to.equal(""); + } + for (let i = 0; i < transactions.length; i++) { + expect(transactions[i]["valid"]).to.equal(true); + } + } +); + When(/I request the difficulties of a node (.*)/, async function (node) { const 
client = this.getClient(node); const difficulties = await client.getNetworkDifficulties(2, 0, 2); @@ -2599,7 +2986,7 @@ When( let splitsLeft = splitNum; const wallet = this.getWallet(walletName); - const walletClient = wallet.getClient(); + const walletClient = await wallet.connectClient(); const walletInfo = await walletClient.identify(); console.log( @@ -2615,13 +3002,28 @@ When( for (let i = 0; i < numberOfSplits; i++) { const splits = Math.min(499, splitsLeft); splitsLeft -= splits; - const result = await walletClient.coin_split({ - amount_per_split: splitValue, - split_count: splits, - fee_per_gram: feePerGram, - message: "Cucumber coinsplit", - lockheight: 0, - }); + let result; + await waitFor( + async () => { + try { + result = await walletClient.coin_split({ + amount_per_split: splitValue, + split_count: splits, + fee_per_gram: feePerGram, + message: "Cucumber coinsplit", + lockheight: 0, + }); + } catch (error) { + console.log(error); + return false; + } + return true; + }, + true, + 4700 * 1000, + 5 * 1000, + 5 + ); console.log( "Coin split", i + 1, @@ -2639,11 +3041,20 @@ When( When( /I send (.*) transactions of (.*) uT each from wallet (.*) to wallet (.*) at fee_per_gram (.*)/, { timeout: 43200 * 1000 }, - async function (numTransactions, amount, sourceWallet, dest, feePerGram) { + async function ( + numTransactions, + amount, + sourceWallet, + destWallet, + feePerGram + ) { console.log("\n"); - const sourceWalletClient = this.getWallet(sourceWallet).getClient(); + const sourceWalletClient = await this.getWallet( + sourceWallet + ).connectClient(); const sourceInfo = await sourceWalletClient.identify(); - const destInfo = await this.getWallet(dest).getClient().identify(); + const destWalletClient = await this.getWallet(destWallet).connectClient(); + const destInfo = await destWalletClient.identify(); console.log( "Sending", @@ -2651,23 +3062,21 @@ When( "transactions from", sourceWallet, "to", - dest + destWallet ); let batch = 1; for (let i = 0; i < 
numTransactions; i++) { - const message = - "Transaction from " + sourceWallet + " to " + dest + " " + i; - const result = await sourceWalletClient.transfer({ - recipients: [ - { - address: destInfo.public_key, - amount: amount, - fee_per_gram: feePerGram, - message: message, - }, - ], - }); + const result = await send_tari( + this.getWallet(sourceWallet), + destInfo.name, + destInfo.public_key, + amount, + feePerGram, + false, + "Transaction from " + sourceWallet + " to " + destWallet + " " + i, + false + ); expect(result.results[0].is_success).to.equal(true); this.addTransaction( sourceInfo.public_key, @@ -2690,7 +3099,7 @@ When( ); Given( - /I change the password of wallet (.*) to (.*)/, + /I change the password of wallet (.*) to (.*) via command line/, { timeout: 20 * 1000 }, async function (name, newPassword) { let wallet = this.getWallet(name); @@ -2717,8 +3126,8 @@ When( /I wait for (.*) to connect to (.*)/, { timeout: 30 * 1000 }, async function (firstNode, secondNode) { - const firstNodeClient = this.getNodeOrWalletClient(firstNode); - const secondNodeClient = this.getNodeOrWalletClient(secondNode); + const firstNodeClient = await this.getNodeOrWalletClient(firstNode); + const secondNodeClient = await this.getNodeOrWalletClient(secondNode); const secondNodeIdentity = await secondNodeClient.identify(); await waitForPredicate(async () => { @@ -2732,8 +3141,8 @@ Then( /(.*) is connected to (.*)/, { timeout: 30 * 1000 }, async function (firstNode, secondNode) { - const firstNodeClient = this.getNodeOrWalletClient(firstNode); - const secondNodeClient = this.getNodeOrWalletClient(secondNode); + const firstNodeClient = await this.getNodeOrWalletClient(firstNode); + const secondNodeClient = await this.getNodeOrWalletClient(secondNode); const secondNodeIdentity = await secondNodeClient.identify(); let peers = await firstNodeClient.listConnectedPeers(); assert(peers.some((p) => secondNodeIdentity.public_key === p.public_key)); @@ -2744,7 +3153,7 @@ When( /I wait for 
(.*) to have (.*) connectivity/, { timeout: 30 * 1000 }, async function (nodeName, expectedStatus) { - const node = this.getNodeOrWalletClient(nodeName); + const node = await this.getNodeOrWalletClient(nodeName); const expected = ConnectivityStatus[expectedStatus.toUpperCase()]; assert( expected !== undefined, @@ -2761,7 +3170,7 @@ When( /I wait for (.*) to have (\d+) node connections/, { timeout: 30 * 1000 }, async function (nodeName, numConnections) { - const node = this.getNodeOrWalletClient(nodeName); + const node = await this.getNodeOrWalletClient(nodeName); numConnections = +numConnections; await waitForPredicate(async () => { let info = await node.getNetworkStatus(); @@ -2776,11 +3185,527 @@ When( ); Given( - /change base node of (.*) to (.*)/, + "I change base node of {word} to {word} via command line", { timeout: 20 * 1000 }, async function (wallet_name, base_node_name) { let wallet = this.getWallet(wallet_name); let base_node = this.getNode(base_node_name); - await wallet.setBaseNode(base_node.peerAddress().replace("::", " ")); + let output = await wallet.runCommand( + `set-base-node ${base_node.peerAddress().replace("::", " ")}` + ); + let parse = output.buffer.match(/Setting base node peer\.\.\./); + expect(parse, "Parsing the output buffer failed").to.not.be.null; + } +); + +async function wallet_run_command( + wallet, + command, + message = "", + printMessage = true +) { + if (message === "") { + message = "Wallet CLI command:\n '" + command + "'"; + } + if (printMessage) { + console.log(message); + } + let output; + await waitFor( + async () => { + try { + output = await wallet.runCommand(command); + } catch (error) { + console.log(error); + return false; + } + return true; + }, + true, + 45 * 1000, + 5 * 1000, + 5 + ); + return output; +} + +Then( + "I get balance of wallet {word} is at least {int} uT via command line", + { timeout: 180 * 1000 }, + async function (name, amount) { + let wallet = this.getWallet(name); + let output = await 
wallet_run_command(wallet, "get-balance"); + let parse = output.buffer.match(/Available balance: (\d*.\d*) T/); + expect(parse, "Parsing the output buffer failed").to.not.be.null; + expect(parseFloat(parse[1])).to.be.greaterThanOrEqual(amount / 1000000); + } +); + +When( + "I send {int} uT from {word} to {word} via command line", + { timeout: 180 * 1000 }, + async function (amount, sender, receiver) { + let wallet = this.getWallet(sender); + let dest_pubkey = this.getWalletPubkey(receiver); + await wallet_run_command( + wallet, + `send-tari ${amount} ${dest_pubkey} test message` + ); + // await wallet.sendTari(dest_pubkey, amount, "test message"); + } +); + +When( + "I send one-sided {int} uT from {word} to {word} via command line", + { timeout: 180 * 1000 }, + async function (amount, sender, receiver) { + let wallet = this.getWallet(sender); + let dest_pubkey = this.getWalletPubkey(receiver); + await wallet_run_command( + wallet, + `send-one-sided ${amount} ${dest_pubkey} test message` + ); + // await wallet.sendOneSided(dest_pubkey, amount, "test message"); + } +); + +Then( + "I make it rain from wallet {word} {int} tx / sec {int} sec {int} uT {int} increment to {word} via command line", + { timeout: 300 * 1000 }, + async function (sender, freq, duration, amount, amount_inc, receiver) { + let wallet = this.getWallet(sender); + let dest_pubkey = this.getWalletPubkey(receiver); + await wallet_run_command( + wallet, + `make-it-rain ${freq} ${duration} ${amount} ${amount_inc} now ${dest_pubkey} negotiated test message` + ); + } +); + +Then( + "I get count of utxos of wallet {word} and it's at least {int} via command line", + { timeout: 180 * 1000 }, + async function (name, amount) { + let wallet = this.getWallet(name); + let output = await wallet_run_command(wallet, `count-utxos`); + let parse = output.buffer.match(/Total number of UTXOs: (\d+)/); + expect(parse, "Parsing the output buffer failed").to.not.be.null; + 
expect(parseInt(parse[1])).to.be.greaterThanOrEqual(amount); + } +); + +When( + "I do coin split on wallet {word} to {int} uT {int} coins via command line", + { timeout: 180 * 1000 }, + async function (name, amount_per_coin, number_of_coins) { + let wallet = this.getWallet(name); + await wallet_run_command( + wallet, + `coin-split ${amount_per_coin} ${number_of_coins}` + ); + } +); + +When( + "I discover peer {word} on wallet {word} via command line", + { timeout: 180 * 1000 }, + async function (node, name) { + let wallet = this.getWallet(name); + let peer = this.getNode(node).peerAddress().split("::")[0]; + let output = await wallet_run_command(wallet, `discover-peer ${peer}`); + let parse = output.buffer.match(/Discovery succeeded/); + expect(parse, "Parsing the output buffer failed").to.not.be.null; + } +); + +When( + "I run whois {word} on wallet {word} via command line", + { timeout: 60 * 1000 }, + async function (who, name) { + await sleep(5000); + let wallet = this.getWallet(name); + let pubkey = this.getNode(who).peerAddress().split("::")[0]; + let output = await wallet_run_command(wallet, `whois ${pubkey}`); + let parse = output.buffer.match(/Public Key: (.+)\n/); + expect(parse, "Parsing the output buffer failed").to.not.be.null; + expect(parse[1]).to.be.equal(pubkey); + } +); + +When( + "I set custom base node of {word} to {word} via command line", + { timeout: 60 * 1000 }, + async function (wallet_name, base_node_name) { + let wallet = this.getWallet(wallet_name); + let base_node = this.getNode(base_node_name); + let output = await wallet_run_command( + wallet, + `set-custom-base-node ${base_node.peerAddress().replace("::", " ")}` + ); + let parse = output.buffer.match( + /Custom base node peer saved in wallet database\./ + ); + expect(parse, "Parsing the output buffer failed").to.not.be.null; + } +); + +When( + "I clear custom base node of wallet {word} via command line", + { timeout: 60 * 1000 }, + async function (name) { + let wallet = 
this.getWallet(name); + let output = await wallet_run_command(wallet, "clear-custom-base-node"); + let parse = output.buffer.match( + /Custom base node peer cleared from wallet database./ + ); + expect(parse, "Parsing the output buffer failed").to.not.be.null; + } +); + +When( + "I export the utxos of wallet {word} via command line", + { timeout: 60 * 1000 }, + async function (name) { + let wallet = this.getWallet(name); + let output = await wallet_run_command(wallet, "export-utxos"); + let parse_cnt = output.buffer.match(/Total number of UTXOs: (\d+)/); + expect(parse_cnt, "Parsing the output buffer failed").to.not.be.null; + let utxo_cnt = parseInt(parse_cnt[1]); + for (let i = 1; i <= utxo_cnt; ++i) { + let regex = new RegExp(`${i}. Value: \\d*.\\d* T`); + expect(output.buffer.match(regex), "Parsing the output buffer failed").to + .not.be.null; + } + } +); + +When( + "I have a ffi wallet {word} connected to base node {word}", + { timeout: 20 * 1000 }, + async function (name, node) { + let wallet = await this.createAndAddFFIWallet(name); + let peer = this.nodes[node].peerAddress().split("::"); + await wallet.addBaseNodePeer(peer[0], peer[1]); + } +); + +Then( + "I want to get public key of ffi wallet {word}", + { timeout: 20 * 1000 }, + async function (name) { + let wallet = this.getWallet(name); + let public_key = await wallet.getPublicKey(); + expect(public_key.length).to.be.equal( + 64, + `Public key has wrong length : ${public_key}` + ); + } +); + +Then( + /I wait until base node (.*) has (.*) unconfirmed transactions in its mempool/, + { timeout: 180 * 1000 }, + async function (baseNode, numTransactions) { + const client = this.getClient(baseNode); + await waitFor( + async () => { + let stats = await client.getMempoolStats(); + return stats.unconfirmed_txs; + }, + numTransactions, + 120 * 1000 + ); + + let stats = await client.getMempoolStats(); + console.log( + "Base node", + baseNode, + "has ", + stats.unconfirmed_txs, + " unconfirmed transaction in its 
mempool" + ); + expect(stats.unconfirmed_txs).to.equal(numTransactions); + } +); + +Then( + "I want to get emoji id of ffi wallet {word}", + { timeout: 20 * 1000 }, + async function (name) { + let wallet = this.getWallet(name); + let emoji_id = await wallet.getEmojiId(); + expect(emoji_id.length).to.be.equal( + 22 * 3, // 22 emojis, 3 bytes per one emoji + `Emoji id has wrong length : ${emoji_id}` + ); + } +); + +Then( + "I wait for ffi wallet {word} to have at least {int} uT", + { timeout: 60 * 1000 }, + async function (name, amount) { + let wallet = this.getWallet(name); + let retries = 1; + let balance = 0; + const retries_limit = 12; + while (retries <= retries_limit) { + balance = await wallet.getBalance(); + if (balance >= amount) { + break; + } + await sleep(5000); + ++retries; + } + expect(balance, "Balance is not enough").to.be.greaterThanOrEqual(amount); + } +); + +When( + "I send {int} uT from ffi wallet {word} to wallet {word} at fee {int}", + { timeout: 20 * 1000 }, + async function (amount, sender, receiver, fee) { + await this.getWallet(sender).sendTransaction( + await this.getWalletPubkey(receiver), + amount, + fee, + `Send from ffi ${sender} to ${receiver} at fee ${fee}` + ); + } +); + +When( + "I set passphrase {word} of ffi wallet {word}", + { timeout: 20 * 1000 }, + async function (passphrase, name) { + let wallet = this.getWallet(name); + await wallet.applyEncryption(passphrase); + } +); + +Then( + "I have {int} received and {int} send transaction in ffi wallet {word}", + { timeout: 120 * 1000 }, + async function (received, send, name) { + let wallet = this.getWallet(name); + let [outbound, inbound] = await wallet.getCompletedTransactions(); + let retries = 1; + const retries_limit = 23; + while ( + (inbound != received || outbound != send) && + retries <= retries_limit + ) { + await sleep(5000); + [outbound, inbound] = await wallet.getCompletedTransactions(); + ++retries; + } + expect(outbound, "Outbound transaction count 
mismatch").to.be.equal(send); + expect(inbound, "Inbound transaction count mismatch").to.be.equal(received); + } +); + +Then( + "ffi wallet {word} has {int} broadcast transaction", + { timeout: 120 * 1000 }, + async function (name, count) { + let wallet = this.getWallet(name); + let broadcast = await wallet.getBroadcastTransactionsCount(); + let retries = 1; + const retries_limit = 24; + while (broadcast != count && retries <= retries_limit) { + await sleep(5000); + broadcast = await wallet.getBroadcastTransactionsCount(); + ++retries; + } + expect(broadcast, "Number of broadcasted messages mismatch").to.be.equal( + count + ); + } +); + +When( + "I add contact with alias {word} and pubkey {word} to ffi wallet {word}", + { timeout: 20 * 1000 }, + async function (alias, wallet_name, ffi_wallet_name) { + let ffi_wallet = this.getWallet(ffi_wallet_name); + await ffi_wallet.addContact(alias, await this.getWalletPubkey(wallet_name)); + } +); + +Then( + "I have contact with alias {word} and pubkey {word} in ffi wallet {word}", + { timeout: 20 * 1000 }, + async function (alias, wallet_name, ffi_wallet_name) { + let ffi_wallet = this.getWallet(ffi_wallet_name); + expect(await this.getWalletPubkey(wallet_name)).to.be.equal( + await ffi_wallet.getContact(alias) + ); + } +); + +When( + "I remove contact with alias {word} from ffi wallet {word}", + { timeout: 20 * 1000 }, + async function (alias, walllet_name) { + let wallet = this.getWallet(walllet_name); + await wallet.removeContact(alias); + } +); + +Then( + "I don't have contact with alias {word} in ffi wallet {word}", + { timeout: 20 * 1000 }, + async function (alias, wallet_name) { + let wallet = this.getWallet(wallet_name); + expect(await wallet.getContact("alias")).to.be.undefined; + } +); + +Then( + /node (.*) lists heights (\d+) to (\d+)/, + async function (node, first, last) { + const client = this.getClient(node); + const start = first; + const end = last; + let heights = []; + + for (let i = start; i <= end; i++) { 
+ heights.push(i); + } + const blocks = await client.getBlocks(heights); + const results = blocks.map((result) => + parseInt(result.block.header.height) + ); + let i = 0; // for ordering check + for (let height = start; height <= end; height++) { + expect(results[i]).equal(height); + i++; + } + } +); + +When( + "I set base node {word} for ffi wallet {word}", + async function (node, wallet_name) { + let wallet = this.getWallet(wallet_name); + let peer = this.nodes[node].peerAddress().split("::"); + await wallet.addBaseNodePeer(peer[0], peer[1]); + } +); + +Then( + "I wait for ffi wallet {word} to have {int} pending outbound transaction(s)", + { timeout: 120 * 1000 }, + async function (wallet_name, count) { + let wallet = this.getWallet(wallet_name); + let broadcast = await wallet.getOutboundTransactionsCount(); + let retries = 1; + const retries_limit = 24; + while (broadcast != count && retries <= retries_limit) { + await sleep(5000); + broadcast = await wallet.getOutboundTransactionsCount(); + ++retries; + } + expect(broadcast, "Number of pending messages mismatch").to.be.equal(count); + } +); + +Then( + "I cancel all transactions on ffi wallet {word} and it will cancel {int} transaction", + async function (wallet_name, count) { + const wallet = this.getWallet(wallet_name); + expect( + await wallet.cancelAllOutboundTransactions(), + "Number of cancelled transactions" + ).to.be.equal(count); + } +); + +Then( + "I recover wallet {word} into ffi wallet {word} from seed words on node {word}", + { timeout: 20 * 1000 }, + async function (wallet_name, ffi_wallet_name, node) { + let wallet = this.getWallet(wallet_name); + const seed_words_text = wallet.getSeedWords(); + await wallet.stop(); + await sleep(1000); + let ffi_wallet = await this.createAndAddFFIWallet( + ffi_wallet_name, + seed_words_text + ); + let peer = this.nodes[node].peerAddress().split("::"); + await ffi_wallet.addBaseNodePeer(peer[0], peer[1]); + await ffi_wallet.startRecovery(peer[0]); + } +); + +Then( 
+ "I wait for recovery of wallet {word} to finish", + { timeout: 600 * 1000 }, + async function (wallet_name) { + const wallet = this.getWallet(wallet_name); + while (wallet.recoveryInProgress) { + await sleep(1000); + } + expect(wallet.recoveryProgress[1]).to.be.greaterThan(0); + expect(wallet.recoveryProgress[0]).to.be.equal(wallet.recoveryProgress[1]); + } +); + +Then("I start STXO validation on wallet {word}", async function (wallet_name) { + const wallet = this.getWallet(wallet_name); + await wallet.startStxoValidation(); + while (!wallet.stxo_validation_complete) { + await sleep(1000); + } + expect(wallet.stxo_validation_result).to.be.equal(0); +}); + +Then("I start UTXO validation on wallet {word}", async function (wallet_name) { + const wallet = this.getWallet(wallet_name); + await wallet.startUtxoValidation(); + while (!wallet.utxo_validation_complete) { + await sleep(1000); + } + expect(wallet.utxo_validation_result).to.be.equal(0); +}); + +Then( + "Check callbacks for finished inbound tx on ffi wallet {word}", + async function (wallet_name) { + const wallet = this.getWallet(wallet_name); + expect(wallet.receivedTransaction).to.be.greaterThanOrEqual(1); + expect(wallet.transactionBroadcast).to.be.greaterThanOrEqual(1); + wallet.clearCallbackCounters(); + } +); + +Then( + "Check callbacks for finished outbound tx on ffi wallet {word}", + async function (wallet_name) { + const wallet = this.getWallet(wallet_name); + expect(wallet.receivedTransactionReply).to.be.greaterThanOrEqual(1); + expect(wallet.transactionBroadcast).to.be.greaterThanOrEqual(1); + wallet.clearCallbackCounters(); + } +); + +When( + "I have {int} base nodes with pruning horizon {int} force syncing on node {word}", + { timeout: 190 * 1000 }, + async function (nodes_count, horizon, force_sync_to) { + const promises = []; + const force_sync_address = this.getNode(force_sync_to).peerAddress(); + for (let i = 0; i < nodes_count; i++) { + const base_node = this.createNode(`BaseNode${i}`, { + 
pruningHorizon: horizon, + }); + base_node.setPeerSeeds([force_sync_address]); + base_node.setForceSyncPeers([force_sync_address]); + promises.push( + base_node.startNew().then(() => this.addNode(`BaseNode${i}`, base_node)) + ); + } + await Promise.all(promises); } ); diff --git a/integration_tests/features/support/world.js b/integration_tests/features/support/world.js index a186ee5d4b..6ca3d0f699 100644 --- a/integration_tests/features/support/world.js +++ b/integration_tests/features/support/world.js @@ -3,6 +3,7 @@ const { setWorldConstructor, After, BeforeAll } = require("cucumber"); const BaseNodeProcess = require("../../helpers/baseNodeProcess"); const MergeMiningProxyProcess = require("../../helpers/mergeMiningProxyProcess"); const WalletProcess = require("../../helpers/walletProcess"); +const WalletFFIClient = require("../../helpers/walletFFIClient"); const MiningNodeProcess = require("../../helpers/miningNodeProcess"); const glob = require("glob"); const fs = require("fs"); @@ -17,6 +18,7 @@ class CustomWorld { this.proxies = {}; this.miners = {}; this.wallets = {}; + this.walletsFFI = {}; this.walletPubkeys = {}; this.clients = {}; this.headers = {}; @@ -48,7 +50,7 @@ class CustomWorld { ); await proc.startNew(); this.seeds[name] = proc; - this.clients[name] = proc.createGrpcClient(); + this.clients[name] = await proc.createGrpcClient(); } seedAddresses() { @@ -72,12 +74,12 @@ class CustomWorld { node.setPeerSeeds([addresses]); } await node.startNew(); - this.addNode(name, node); + await this.addNode(name, node); } - addNode(name, process) { + async addNode(name, process) { this.nodes[name] = process; - this.clients[name] = process.createGrpcClient(); + this.clients[name] = await process.createGrpcClient(); } addMiningNode(name, process) { @@ -88,21 +90,38 @@ class CustomWorld { this.proxies[name] = process; } - async createAndAddWallet(name, nodeAddresses) { - const wallet = new WalletProcess(name, false, {}, this.logFilePathWallet); + async 
createAndAddWallet(name, nodeAddresses, options = {}) { + const wallet = new WalletProcess( + name, + false, + options, + this.logFilePathWallet + ); wallet.setPeerSeeds([nodeAddresses]); await wallet.startNew(); this.addWallet(name, wallet); - let walletClient = wallet.getClient(); + let walletClient = await wallet.connectClient(); let walletInfo = await walletClient.identify(); this.walletPubkeys[name] = walletInfo.public_key; } + async createAndAddFFIWallet(name, seed_words) { + const wallet = new WalletFFIClient(name); + await wallet.startNew(seed_words); + this.walletsFFI[name] = wallet; + this.walletPubkeys[name] = await wallet.getPublicKey(); + return wallet; + } + addWallet(name, process) { this.wallets[name] = process; } + addWalletPubkey(name, pubkey) { + this.walletPubkeys[name] = pubkey; + } + addOutput(name, output) { this.outputs[name] = output; } @@ -115,10 +134,15 @@ class CustomWorld { ); } - baseNodeMineBlocksUntilHeightIncreasedBy(baseNode, wallet, numBlocks) { + async baseNodeMineBlocksUntilHeightIncreasedBy(baseNode, wallet, numBlocks) { + let w = null; + if (wallet) { + let tmp = this.getWallet(wallet); + w = await tmp.connectClient(); + } const promise = this.getClient(baseNode).mineBlocksUntilHeightIncreasedBy( numBlocks, - wallet ? 
this.getWallet(wallet).getClient() : null + w ); return promise; } @@ -157,48 +181,69 @@ class CustomWorld { } getClient(name) { - return this.clients[name]; + const client = this.clients[name]; + if (!client) { + throw new Error(`Node client not found with name '${name}'`); + } + return client; } getNode(name) { - return this.nodes[name] || this.seeds[name]; + const node = this.nodes[name] || this.seeds[name]; + if (!node) { + throw new Error(`Node not found with name '${name}'`); + } + return node; } getMiningNode(name) { - return this.miners[name]; + const miner = this.miners[name]; + if (!miner) { + throw new Error(`Miner not found with name '${name}'`); + } + return miner; } getWallet(name) { - return this.wallets[name]; + const wallet = this.wallets[name] || this.walletsFFI[name]; + if (!wallet) { + throw new Error(`Wallet not found with name '${name}'`); + } + return wallet; } getWalletPubkey(name) { return this.walletPubkeys[name]; } - getNodeOrWalletClient(name) { - let client = this.getClient(name.trim()); + async getNodeOrWalletClient(name) { + let client = this.clients[name.trim()]; if (client) { client.isNode = true; client.isWallet = false; return client; } - let wallet = this.getWallet(name.trim()); + let wallet = this.wallets[name.trim()]; if (wallet) { - let client = wallet.getClient(); + let client = await wallet.connectClient(); client.isNode = false; client.isWallet = true; return client; } + let ffi_wallet = this.walletsFFI[name.trim()]; + if (ffi_wallet) { + return ffi_wallet; + } + return null; } async getOrCreateWallet(name) { - const wallet = this.getWallet(name); + const wallet = this.wallets[name]; if (wallet) { return wallet; } await this.createAndAddWallet(name, this.seedAddresses()); - return this.getWallet(name); + return this.wallets[name]; } getProxy(name) { @@ -225,17 +270,18 @@ class CustomWorld { return new Promise((resolve, reject) => { for (let promise of promises) { - Promise.resolve(promise).then( - () => { + 
Promise.resolve(promise) + .then(() => { succeeded += 1; console.log(`${succeeded} of ${total} (need ${total - canFail})`); if (succeeded >= total - canFail) resolve(); - }, - () => { + }) + .catch((err) => { + console.error(err); failed += 1; - if (failed > canFail) reject("Too many failed."); - } - ); + if (failed > canFail) + reject(`Too many failed. Expected at most ${canFail} failures`); + }); } }); } @@ -245,9 +291,9 @@ class CustomWorld { await node.stop(); } - async startNode(name) { + async startNode(name, args) { const node = this.seeds[name] || this.nodes[name]; - await node.start(); + await node.start(args); } addTransaction(pubKey, txId) { @@ -292,6 +338,8 @@ BeforeAll({ timeout: 1200000 }, async function () { await miningNode.init(1, 1, 1, 1, true, 1); await miningNode.compile(); + console.log("Compiling wallet FFI..."); + await WalletFFIClient.Init(); console.log("Finished compilation."); }); @@ -301,6 +349,7 @@ After(async function (testCase) { await stopAndHandleLogs(this.nodes, testCase, this); await stopAndHandleLogs(this.proxies, testCase, this); await stopAndHandleLogs(this.wallets, testCase, this); + await stopAndHandleLogs(this.walletsFFI, testCase, this); await stopAndHandleLogs(this.miners, testCase, this); }); diff --git a/integration_tests/generate_report.js b/integration_tests/generate_report.js index b3adbbf89a..30c43143ea 100644 --- a/integration_tests/generate_report.js +++ b/integration_tests/generate_report.js @@ -1,9 +1,10 @@ const reporter = require("cucumber-html-reporter"); +const args = process.argv.slice(2); const options = { theme: "bootstrap", - jsonFile: "cucumber_output/tests.cucumber", - output: "temp/reports/cucumber_report.html", + jsonFile: args[0] || "cucumber_output/tests.cucumber", + output: args[1] || "temp/reports/cucumber_report.html", reportSuiteAsScenarios: true, scenarioTimestamp: true, launchReport: true, diff --git a/integration_tests/helpers/baseNodeClient.js b/integration_tests/helpers/baseNodeClient.js 
index c2682bc105..5ca483f65a 100644 --- a/integration_tests/helpers/baseNodeClient.js +++ b/integration_tests/helpers/baseNodeClient.js @@ -1,24 +1,20 @@ const expect = require("chai").expect; -const grpc = require("grpc"); +const grpc = require("@grpc/grpc-js"); const protoLoader = require("@grpc/proto-loader"); -const grpc_promise = require("grpc-promise"); const TransactionBuilder = require("./transactionBuilder"); const { SHA3 } = require("sha3"); -const { toLittleEndian, byteArrayToHex } = require("./util"); +const { toLittleEndian, byteArrayToHex, tryConnect } = require("./util"); const { PowAlgo } = require("./types"); const cloneDeep = require("clone-deep"); +const grpcPromise = require("grpc-promise"); class BaseNodeClient { - constructor(clientOrPort) { - if (typeof clientOrPort === "number") { - this.client = this.createGrpcClient(clientOrPort); - } else { - this.client = clientOrPort; - } + constructor() { + this.client = null; this.blockTemplates = {}; } - createGrpcClient(port) { + async connect(port) { const PROTO_PATH = __dirname + "/../../applications/tari_app_grpc/proto/base_node.proto"; const packageDefinition = protoLoader.loadSync(PROTO_PATH, { @@ -30,12 +26,17 @@ class BaseNodeClient { }); const protoDescriptor = grpc.loadPackageDefinition(packageDefinition); const tari = protoDescriptor.tari.rpc; - const client = new tari.BaseNode( - "127.0.0.1:" + port, - grpc.credentials.createInsecure() + this.client = await tryConnect( + () => + new tari.BaseNode( + "127.0.0.1:" + port, + grpc.credentials.createInsecure() + ) ); - grpc_promise.promisifyAll(client); - return client; + + grpcPromise.promisifyAll(this.client, { + metadata: new grpc.Metadata(), + }); } getHeaderAt(height) { @@ -76,6 +77,12 @@ class BaseNodeClient { }); } + async getHeaders(from_height, num_headers, sorting = 0) { + return await this.client + .listHeaders() + .sendMessage({ from_height, num_headers, sorting }); + } + getTipHeight() { return this.client .getTipInfo() @@ -426,6 
+433,21 @@ class BaseNodeClient { num_node_connections: +resp.num_node_connections, }; } + + static async create(port) { + const client = new BaseNodeClient(); + await client.connect(port); + return client; + } + + async getMempoolStats() { + const mempoolStats = await this.client.getMempoolStats().sendMessage({}); + return mempoolStats; + } + + async getBlocks(heights) { + return await this.client.getBlocks().sendMessage({ heights }); + } } module.exports = BaseNodeClient; diff --git a/integration_tests/helpers/baseNodeProcess.js b/integration_tests/helpers/baseNodeProcess.js index 4551e4c030..179225f48b 100644 --- a/integration_tests/helpers/baseNodeProcess.js +++ b/integration_tests/helpers/baseNodeProcess.js @@ -13,18 +13,23 @@ class BaseNodeProcess { this.name = name; this.logFilePath = logFilePath ? path.resolve(logFilePath) : logFilePath; this.nodeFile = nodeFile; - this.options = options; + this.options = Object.assign( + { + baseDir: "./temp/base_nodes", + }, + options || {} + ); this.excludeTestEnvars = excludeTestEnvars; } async init() { - this.port = await getFreePort(19000, 25000); - this.grpcPort = await getFreePort(19000, 25000); + this.port = await getFreePort(); + this.grpcPort = await getFreePort(); this.name = `Basenode${this.port}-${this.name}`; this.nodeFile = this.nodeFile || "nodeid.json"; do { - this.baseDir = `./temp/base_nodes/${dateFormat( + this.baseDir = `${this.options.baseDir}/${dateFormat( new Date(), "yyyymmddHHMM" )}/${this.name}`; @@ -85,6 +90,10 @@ class BaseNodeProcess { this.peerSeeds = addresses.join(","); } + setForceSyncPeers(addresses) { + this.forceSyncPeers = addresses.join(","); + } + getGrpcAddress() { const address = "127.0.0.1:" + this.grpcPort; // console.log("Base Node GRPC Address:",address); @@ -111,8 +120,11 @@ class BaseNodeProcess { this.grpcPort, this.port, "127.0.0.1:8080", + "127.0.0.1:8085", this.options, - this.peerSeeds + this.peerSeeds, + "DirectAndStoreAndForward", + this.forceSyncPeers ); } @@ -174,14 
+186,15 @@ class BaseNodeProcess { async startAndConnect() { await this.startNew(); - return this.createGrpcClient(); + return await this.createGrpcClient(); } - async start() { + async start(opts = []) { const args = ["--base-path", "."]; if (this.logFilePath) { args.push("--log-config", this.logFilePath); } + args.push(...opts); return await this.run(await this.compile(), args); } @@ -200,8 +213,8 @@ class BaseNodeProcess { }); } - createGrpcClient() { - return new BaseNodeClient(this.grpcPort); + async createGrpcClient() { + return await BaseNodeClient.create(this.grpcPort); } } diff --git a/integration_tests/helpers/config.js b/integration_tests/helpers/config.js index 3743561a22..ef06a75449 100644 --- a/integration_tests/helpers/config.js +++ b/integration_tests/helpers/config.js @@ -57,7 +57,7 @@ function mapEnvs(options) { return res; } -function baseEnvs(peerSeeds = []) { +function baseEnvs(peerSeeds = [], forceSyncPeers = []) { const envs = { RUST_BACKTRACE: 1, TARI_BASE_NODE__NETWORK: "localnet", @@ -101,6 +101,9 @@ function baseEnvs(peerSeeds = []) { TARI_MINING_NODE__VALIDATE_TIP_TIMEOUT_SEC: 2, TARI_WALLET__SCAN_FOR_UTXO_INTERVAL: 5, }; + if (forceSyncPeers.length != 0) { + envs.TARI_BASE_NODE__LOCALNET__FORCE_SYNC_PEERS = forceSyncPeers; + } if (peerSeeds.length != 0) { envs.TARI_BASE_NODE__LOCALNET__PEER_SEEDS = peerSeeds; } else { @@ -124,11 +127,13 @@ function createEnv( baseNodeGrpcPort = "8080", baseNodePort = "8081", proxyFullAddress = "127.0.0.1:8084", + transcoderFullAddress = "127.0.0.1:8085", options, peerSeeds = [], - _txnSendingMechanism = "DirectAndStoreAndForward" + _txnSendingMechanism = "DirectAndStoreAndForward", + forceSyncPeers = [] ) { - const envs = baseEnvs(peerSeeds); + const envs = baseEnvs(peerSeeds, forceSyncPeers); const network = options && options.network ? 
options.network.toUpperCase() : "LOCALNET"; @@ -149,6 +154,9 @@ function createEnv( configEnvs[ `TARI_MERGE_MINING_PROXY__${network}__PROXY_HOST_ADDRESS` ] = `${proxyFullAddress}`; + configEnvs[ + `TARI_STRATUM_TRANSCODER__${network}__TRANSCODER_HOST_ADDRESS` + ] = `${transcoderFullAddress}`; configEnvs[`TARI_BASE_NODE__${network}__TRANSPORT`] = "tcp"; configEnvs[`TARI_WALLET__${network}__TRANSPORT`] = "tcp"; configEnvs[`TARI_WALLET__${network}__TCP_LISTENER_ADDRESS`] = diff --git a/integration_tests/helpers/ffi/byteVector.js b/integration_tests/helpers/ffi/byteVector.js new file mode 100644 index 0000000000..51f5d338bd --- /dev/null +++ b/integration_tests/helpers/ffi/byteVector.js @@ -0,0 +1,29 @@ +const WalletFFI = require("./walletFFI"); + +class ByteVector { + #byte_vector_ptr; + + constructor(byte_vector_ptr) { + this.#byte_vector_ptr = byte_vector_ptr; + } + + static async fromBuffer(buffer) { + let buf = Buffer.from(buffer, "utf-8"); // get the bytes + let len = buf.length; // get the length + return new ByteVector(await WalletFFI.byteVectorCreate(buf, len)); + } + + getLength() { + return WalletFFI.byteVectorGetLength(this.#byte_vector_ptr); + } + + getAt(position) { + return WalletFFI.byteVectorGetAt(this.#byte_vector_ptr, position); + } + + destroy() { + return WalletFFI.byteVectorDestroy(this.#byte_vector_ptr); + } +} + +module.exports = ByteVector; diff --git a/integration_tests/helpers/ffi/completedTransaction.js b/integration_tests/helpers/ffi/completedTransaction.js new file mode 100644 index 0000000000..a7a21c28cd --- /dev/null +++ b/integration_tests/helpers/ffi/completedTransaction.js @@ -0,0 +1,23 @@ +const WalletFFI = require("./walletFFI"); + +class CompletedTransaction { + #tari_completed_transaction_ptr; + + constructor(tari_completed_transaction_ptr) { + this.#tari_completed_transaction_ptr = tari_completed_transaction_ptr; + } + + isOutbound() { + return WalletFFI.completedTransactionIsOutbound( + this.#tari_completed_transaction_ptr + ); 
+ } + + destroy() { + return WalletFFI.completedTransactionDestroy( + this.#tari_completed_transaction_ptr + ); + } +} + +module.exports = CompletedTransaction; diff --git a/integration_tests/helpers/ffi/completedTransactions.js b/integration_tests/helpers/ffi/completedTransactions.js new file mode 100644 index 0000000000..d2d4c96156 --- /dev/null +++ b/integration_tests/helpers/ffi/completedTransactions.js @@ -0,0 +1,39 @@ +const CompletedTransaction = require("./completedTransaction"); +const WalletFFI = require("./walletFFI"); + +class CompletedTransactions { + #tari_completed_transactions_ptr; + + constructor(tari_completed_transactions_ptr) { + this.#tari_completed_transactions_ptr = tari_completed_transactions_ptr; + } + + static async fromWallet(wallet) { + return new CompletedTransactions( + await WalletFFI.walletGetCompletedTransactions(wallet) + ); + } + + getLength() { + return WalletFFI.completedTransactionsGetLength( + this.#tari_completed_transactions_ptr + ); + } + + async getAt(position) { + return new CompletedTransaction( + await WalletFFI.completedTransactionsGetAt( + this.#tari_completed_transactions_ptr, + position + ) + ); + } + + destroy() { + return WalletFFI.completedTransactionsDestroy( + this.#tari_completed_transactions_ptr + ); + } +} + +module.exports = CompletedTransactions; diff --git a/integration_tests/helpers/ffi/contact.js b/integration_tests/helpers/ffi/contact.js new file mode 100644 index 0000000000..184c684a2b --- /dev/null +++ b/integration_tests/helpers/ffi/contact.js @@ -0,0 +1,33 @@ +const PublicKey = require("./publicKey"); +const WalletFFI = require("./walletFFI"); + +class Contact { + #tari_contact_ptr; + + constructor(tari_contact_ptr) { + this.#tari_contact_ptr = tari_contact_ptr; + } + + getPtr() { + return this.#tari_contact_ptr; + } + + async getAlias() { + const alias = await WalletFFI.contactGetAlias(this.#tari_contact_ptr); + const result = alias.readCString(); + await WalletFFI.stringDestroy(alias); + return 
result; + } + + async getPubkey() { + return new PublicKey( + await WalletFFI.contactGetPublicKey(this.#tari_contact_ptr) + ); + } + + destroy() { + return WalletFFI.contactDestroy(this.#tari_contact_ptr); + } +} + +module.exports = Contact; diff --git a/integration_tests/helpers/ffi/contacts.js b/integration_tests/helpers/ffi/contacts.js new file mode 100644 index 0000000000..d8803874ab --- /dev/null +++ b/integration_tests/helpers/ffi/contacts.js @@ -0,0 +1,30 @@ +const Contact = require("./contact"); +const WalletFFI = require("./walletFFI"); + +class Contacts { + #tari_contacts_ptr; + + constructor(tari_contacts_ptr) { + this.#tari_contacts_ptr = tari_contacts_ptr; + } + + static async fromWallet(wallet) { + return new Contacts(await WalletFFI.walletGetContacts(wallet)); + } + + getLength() { + return WalletFFI.contactsGetLength(this.#tari_contacts_ptr); + } + + async getAt(position) { + return new Contact( + await WalletFFI.contactsGetAt(this.#tari_contacts_ptr, position) + ); + } + + destroy() { + return WalletFFI.contactsDestroy(this.#tari_contacts_ptr); + } +} + +module.exports = Contacts; diff --git a/integration_tests/helpers/ffi/pendingInboundTransaction.js b/integration_tests/helpers/ffi/pendingInboundTransaction.js new file mode 100644 index 0000000000..32cae202fb --- /dev/null +++ b/integration_tests/helpers/ffi/pendingInboundTransaction.js @@ -0,0 +1,24 @@ +const WalletFFI = require("./walletFFI"); + +class PendingInboundTransaction { + #tari_pending_inbound_transaction_ptr; + + constructor(tari_pending_inbound_transaction_ptr) { + this.#tari_pending_inbound_transaction_ptr = + tari_pending_inbound_transaction_ptr; + } + + getStatus() { + return WalletFFI.pendingInboundTransactionGetStatus( + this.#tari_pending_inbound_transaction_ptr + ); + } + + destroy() { + return WalletFFI.pendingInboundTransactionDestroy( + this.#tari_pending_inbound_transaction_ptr + ); + } +} + +module.exports = PendingInboundTransaction; diff --git 
a/integration_tests/helpers/ffi/pendingInboundTransactions.js b/integration_tests/helpers/ffi/pendingInboundTransactions.js new file mode 100644 index 0000000000..6246b03429 --- /dev/null +++ b/integration_tests/helpers/ffi/pendingInboundTransactions.js @@ -0,0 +1,40 @@ +const PendingInboundTransaction = require("./pendingInboundTransaction"); +const WalletFFI = require("./walletFFI"); + +class PendingInboundTransactions { + #tari_pending_inbound_transactions_ptr; + + constructor(tari_pending_inbound_transactions_ptr) { + this.#tari_pending_inbound_transactions_ptr = + tari_pending_inbound_transactions_ptr; + } + + static async fromWallet(wallet) { + return new PendingInboundTransactions( + await WalletFFI.walletGetPendingInboundTransactions(wallet) + ); + } + + getLength() { + return WalletFFI.pendingInboundTransactionsGetLength( + this.#tari_pending_inbound_transactions_ptr + ); + } + + async getAt(position) { + return new PendingInboundTransaction( + await WalletFFI.pendingInboundTransactionsGetAt( + this.#tari_pending_inbound_transactions_ptr, + position + ) + ); + } + + destroy() { + return WalletFFI.pendingInboundTransactionsDestroy( + this.#tari_pending_inbound_transactions_ptr + ); + } +} + +module.exports = PendingInboundTransactions; diff --git a/integration_tests/helpers/ffi/pendingOutboundTransaction.js b/integration_tests/helpers/ffi/pendingOutboundTransaction.js new file mode 100644 index 0000000000..eed2d722bb --- /dev/null +++ b/integration_tests/helpers/ffi/pendingOutboundTransaction.js @@ -0,0 +1,30 @@ +const WalletFFI = require("./walletFFI"); + +class PendingOutboundTransaction { + #tari_pending_outbound_transaction_ptr; + + constructor(tari_pending_outbound_transaction_ptr) { + this.#tari_pending_outbound_transaction_ptr = + tari_pending_outbound_transaction_ptr; + } + + getTransactionId() { + return WalletFFI.pendingOutboundTransactionGetTransactionId( + this.#tari_pending_outbound_transaction_ptr + ); + } + + getStatus() { + return 
WalletFFI.pendingOutboundTransactionGetStatus( + this.#tari_pending_outbound_transaction_ptr + ); + } + + destroy() { + return WalletFFI.pendingOutboundTransactionDestroy( + this.#tari_pending_outbound_transaction_ptr + ); + } +} + +module.exports = PendingOutboundTransaction; diff --git a/integration_tests/helpers/ffi/pendingOutboundTransactions.js b/integration_tests/helpers/ffi/pendingOutboundTransactions.js new file mode 100644 index 0000000000..28e408563d --- /dev/null +++ b/integration_tests/helpers/ffi/pendingOutboundTransactions.js @@ -0,0 +1,40 @@ +const PendingOutboundTransaction = require("./pendingOutboundTransaction"); +const WalletFFI = require("./walletFFI"); + +class PendingOutboundTransactions { + #tari_pending_outbound_transactions_ptr; + + constructor(tari_pending_outbound_transactions_ptr) { + this.#tari_pending_outbound_transactions_ptr = + tari_pending_outbound_transactions_ptr; + } + + static async fromWallet(wallet) { + return new PendingOutboundTransactions( + await WalletFFI.walletGetPendingOutboundTransactions(wallet) + ); + } + + getLength() { + return WalletFFI.pendingOutboundTransactionsGetLength( + this.#tari_pending_outbound_transactions_ptr + ); + } + + async getAt(position) { + return new PendingOutboundTransaction( + await WalletFFI.pendingOutboundTransactionsGetAt( + this.#tari_pending_outbound_transactions_ptr, + position + ) + ); + } + + destroy() { + return WalletFFI.pendingOutboundTransactionsDestroy( + this.#tari_pending_outbound_transactions_ptr + ); + } +} + +module.exports = PendingOutboundTransactions; diff --git a/integration_tests/helpers/ffi/publicKey.js b/integration_tests/helpers/ffi/publicKey.js new file mode 100644 index 0000000000..1165aa193d --- /dev/null +++ b/integration_tests/helpers/ffi/publicKey.js @@ -0,0 +1,65 @@ +const WalletFFI = require("./walletFFI"); +const ByteVector = require("./byteVector"); +const utf8 = require("utf8"); + +class PublicKey { + #tari_public_key_ptr; + + constructor(public_key) { + 
this.#tari_public_key_ptr = public_key; + } + + static fromPubkey(public_key) { + return new PublicKey(public_key); + } + + static async fromWallet(wallet) { + return new PublicKey(await WalletFFI.walletGetPublicKey(wallet)); + } + + static async fromString(public_key_hex) { + let sanitize = utf8.encode(public_key_hex); // Make sure it's not UTF-16 encoded (JS default) + return new PublicKey(await WalletFFI.publicKeyFromHex(sanitize)); + } + + static async fromBytes(bytes) { + return new PublicKey(await WalletFFI.publicKeyCreate(bytes)); + } + + getPtr() { + return this.#tari_public_key_ptr; + } + + async getBytes() { + return new ByteVector( + await WalletFFI.publicKeyGetBytes(this.#tari_public_key_ptr) + ); + } + + async getHex() { + const bytes = await this.getBytes(); + const length = await bytes.getLength(); + let byte_array = new Uint8Array(length); + for (let i = 0; i < length; ++i) { + byte_array[i] = await bytes.getAt(i); + } + await bytes.destroy(); + let buffer = Buffer.from(byte_array, 0); + return buffer.toString("hex"); + } + + async getEmojiId() { + const emoji_id = await WalletFFI.publicKeyToEmojiId( + this.#tari_public_key_ptr + ); + const result = emoji_id.readCString(); + await WalletFFI.stringDestroy(emoji_id); + return result; + } + + destroy() { + return WalletFFI.publicKeyDestroy(this.#tari_public_key_ptr); + } +} + +module.exports = PublicKey; diff --git a/integration_tests/helpers/ffi/seedWords.js b/integration_tests/helpers/ffi/seedWords.js new file mode 100644 index 0000000000..86c05cab48 --- /dev/null +++ b/integration_tests/helpers/ffi/seedWords.js @@ -0,0 +1,46 @@ +const WalletFFI = require("./walletFFI"); + +class SeedWords { + #tari_seed_words_ptr; + + constructor(tari_seed_words_ptr) { + this.#tari_seed_words_ptr = tari_seed_words_ptr; + } + + static async fromString(seed_words_text) { + const seed_words = await WalletFFI.seedWordsCreate(); + const seed_words_list = seed_words_text.split(" "); + for (const seed_word of 
seed_words_list) { + await WalletFFI.seedWordsPushWord(seed_words, seed_word); + } + return new SeedWords(seed_words); + } + + static async fromWallet(wallet) { + return new SeedWords(await WalletFFI.walletGetSeedWords(wallet)); + } + + getLength() { + return WalletFFI.seedWordsGetLength(this.#tari_seed_words_ptr); + } + + getPtr() { + return this.#tari_seed_words_ptr; + } + + async getAt(position) { + const seed_word = await WalletFFI.seedWordsGetAt( + this.#tari_seed_words_ptr, + position + ); + const result = seed_word.readCString(); + await WalletFFI.stringDestroy(seed_word); + return result; + } + + destroy() { + return WalletFFI.seedWordsDestroy(this.#tari_seed_words_ptr); + } +} + +module.exports = SeedWords; diff --git a/integration_tests/helpers/ffi/walletFFI.js b/integration_tests/helpers/ffi/walletFFI.js new file mode 100644 index 0000000000..7816253500 --- /dev/null +++ b/integration_tests/helpers/ffi/walletFFI.js @@ -0,0 +1,2074 @@ +/** + * This library was AUTO-GENERATED. Do not modify manually! 
+ */ + +const { expect } = require("chai"); +const ffi = require("ffi-napi"); +const ref = require("ref-napi"); +const dateFormat = require("dateformat"); +const { spawn } = require("child_process"); +const fs = require("fs"); + +class WalletFFI { + static byte_vector = ref.types.void; + static byte_vector_ptr = ref.refType(this.byte_vector); + static tari_comms_config = ref.types.void; + static tari_comms_config_ptr = ref.refType(this.tari_comms_config); + static tari_private_key = ref.types.void; + static tari_private_key_ptr = ref.refType(this.tari_private_key); + static tari_wallet = ref.types.void; + static tari_wallet_ptr = ref.refType(this.tari_wallet); + static tari_public_key = ref.types.void; + static tari_public_key_ptr = ref.refType(this.tari_public_key); + static tari_contacts = ref.types.void; + static tari_contacts_ptr = ref.refType(this.tari_contacts); + static tari_contact = ref.types.void; + static tari_contact_ptr = ref.refType(this.tari_contact); + static tari_completed_transactions = ref.types.void; + static tari_completed_transactions_ptr = ref.refType( + this.tari_completed_transactions + ); + static tari_completed_transaction = ref.types.void; + static tari_completed_transaction_ptr = ref.refType( + this.tari_completed_transaction + ); + static tari_pending_outbound_transactions = ref.types.void; + static tari_pending_outbound_transactions_ptr = ref.refType( + this.tari_pending_outbound_transactions + ); + static tari_pending_outbound_transaction = ref.types.void; + static tari_pending_outbound_transaction_ptr = ref.refType( + this.tari_pending_outbound_transaction + ); + static tari_pending_inbound_transactions = ref.types.void; + static tari_pending_inbound_transactions_ptr = ref.refType( + this.tari_pending_inbound_transactions + ); + static tari_pending_inbound_transaction = ref.types.void; + static tari_pending_inbound_transaction_ptr = ref.refType( + this.tari_pending_inbound_transaction + ); + static tari_transport_type = 
ref.types.void; + static tari_transport_type_ptr = ref.refType(this.tari_transport_type); + static tari_seed_words = ref.types.void; + static tari_seed_words_ptr = ref.refType(this.tari_seed_words); + static emoji_set = ref.types.void; + static emoji_set_ptr = ref.refType(this.emoji_set); + static tari_excess = ref.types.void; + static tari_excess_ptr = ref.refType(this.tari_excess); + static tari_excess_public_nonce = ref.types.void; + static tari_excess_public_nonce_ptr = ref.refType( + this.tari_excess_public_nonce + ); + static tari_excess_signature = ref.types.void; + static tari_excess_signature_ptr = ref.refType(this.tari_excess_signature); + + static #fn; + static error = ref.alloc(ref.types.int); + static recovery_in_progress = ref.alloc(ref.types.bool); + static NULL = ref.NULL; + static #loaded = false; + static #ps = null; + + static checkAsyncRes(resolve, reject, error_name) { + return (err, res) => { + if (err) reject(err); + expect(this.error.deref()).to.equal(0, `Error in ${error_name}`); + resolve(res); + }; + } + + static compile() { + return new Promise((resolve, _reject) => { + const cmd = "cargo"; + const args = [ + "build", + "--release", + "--package", + "tari_wallet_ffi", + "-Z", + "unstable-options", + "--out-dir", + process.cwd() + "/temp/out", + ]; + const baseDir = `./temp/base_nodes/${dateFormat( + new Date(), + "yyyymmddHHMM" + )}/WalletFFI-compile`; + if (!fs.existsSync(baseDir)) { + fs.mkdirSync(baseDir, { recursive: true }); + fs.mkdirSync(baseDir + "/log", { recursive: true }); + } + const ps = spawn(cmd, args, { + cwd: baseDir, + env: { ...process.env }, + }); + ps.on("close", (_code) => { + resolve(ps); + }); + ps.stderr.on("data", (data) => { + console.log("stderr : ", data.toString()); + }); + ps.on("error", (error) => { + console.log("error : ", error.toString()); + }); + expect(ps.error).to.be.an("undefined"); + this.#ps = ps; + }); + } + + static async Init() { + if (this.#loaded) { + return; + } + + this.#loaded = true; + 
await this.compile(); + const outputProcess = `${process.cwd()}/temp/out/${ + process.platform === "win32" ? "" : "lib" + }tari_wallet_ffi`; + + // Init callbacks + + this.createCallbackReceivedTransaction = (callback) => + ffi.Callback( + "void", + [this.tari_pending_inbound_transaction_ptr], + callback + ); + this.createCallbackReceivedTransactionReply = (callback) => + ffi.Callback("void", [this.tari_completed_transaction_ptr], callback); + this.createCallbackReceivedFinalizedTransaction = (callback) => + ffi.Callback("void", [this.tari_completed_transaction_ptr], callback); + this.createCallbackTransactionBroadcast = (callback) => + ffi.Callback("void", [this.tari_completed_transaction_ptr], callback); + this.createCallbackTransactionMined = (callback) => + ffi.Callback("void", [this.tari_completed_transaction_ptr], callback); + this.createCallbackTransactionMinedUnconfirmed = (callback) => + ffi.Callback( + "void", + [this.tari_completed_transaction_ptr, "uint64"], + callback + ); + this.createCallbackDirectSendResult = (callback) => + ffi.Callback("void", ["uint64", "bool"], callback); + this.createCallbackStoreAndForwardSendResult = (callback) => + ffi.Callback("void", ["uint64", "bool"], callback); + this.createCallbackTransactionCancellation = (callback) => + ffi.Callback("void", [this.tari_completed_transaction_ptr], callback); + this.createCallbackUtxoValidationComplete = (callback) => + ffi.Callback("void", ["uint64", "uchar"], callback); + this.createCallbackStxoValidationComplete = (callback) => + ffi.Callback("void", ["uint64", "uchar"], callback); + this.createCallbackInvalidTxoValidationComplete = (callback) => + ffi.Callback("void", ["uint64", "uchar"], callback); + this.createCallbackTransactionValidationComplete = (callback) => + ffi.Callback("void", ["uint64", "uchar"], callback); + this.createCallbackSafMessageReceived = (callback) => + ffi.Callback("void", [], callback); + this.createRecoveryProgressCallback = (callback) => + 
ffi.Callback("void", ["uchar", "uint64", "uint64"], callback); + // Load the library + this.#fn = ffi.Library(outputProcess, { + transport_memory_create: [this.tari_transport_type_ptr, []], + transport_tcp_create: [this.tari_transport_type_ptr, ["string", "int*"]], + transport_tor_create: [ + this.tari_transport_type_ptr, + ["string", this.byte_vector_ptr, "ushort", "string", "string", "int*"], + ], + transport_memory_get_address: [ + "char*", + [this.tari_transport_type_ptr, "int*"], + ], + transport_type_destroy: ["void", [this.tari_transport_type_ptr]], + string_destroy: ["void", ["string"]], + byte_vector_create: [this.byte_vector_ptr, ["uchar*", "uint", "int*"]], + byte_vector_get_at: ["uchar", [this.byte_vector_ptr, "uint", "int*"]], + byte_vector_get_length: ["uint", [this.byte_vector_ptr, "int*"]], + byte_vector_destroy: ["void", [this.byte_vector_ptr]], + public_key_create: [ + this.tari_public_key_ptr, + [this.byte_vector_ptr, "int*"], + ], + public_key_get_bytes: [ + this.byte_vector_ptr, + [this.tari_public_key_ptr, "int*"], + ], + public_key_from_private_key: [ + this.tari_public_key_ptr, + [this.tari_private_key_ptr, "int*"], + ], + public_key_from_hex: [this.tari_public_key_ptr, ["string", "int*"]], + public_key_destroy: ["void", [this.tari_public_key_ptr]], + public_key_to_emoji_id: ["char*", [this.tari_public_key_ptr, "int*"]], + emoji_id_to_public_key: [this.tari_public_key_ptr, ["string", "int*"]], + private_key_create: [ + this.tari_private_key_ptr, + [this.byte_vector_ptr, "int*"], + ], + private_key_generate: [this.tari_private_key_ptr, []], + private_key_get_bytes: [ + this.byte_vector_ptr, + [this.tari_private_key_ptr, "int*"], + ], + private_key_from_hex: [this.tari_private_key_ptr, ["string", "int*"]], + private_key_destroy: ["void", [this.tari_private_key_ptr]], + seed_words_create: [this.tari_seed_words_ptr, []], + seed_words_get_length: ["uint", [this.tari_seed_words_ptr, "int*"]], + seed_words_get_at: ["char*", 
[this.tari_seed_words_ptr, "uint", "int*"]], + seed_words_push_word: [ + "uchar", + [this.tari_seed_words_ptr, "string", "int*"], + ], + seed_words_destroy: ["void", [this.tari_seed_words_ptr]], + contact_create: [ + this.tari_contact_ptr, + ["string", this.tari_public_key_ptr, "int*"], + ], + contact_get_alias: ["char*", [this.tari_contact_ptr, "int*"]], + contact_get_public_key: [ + this.tari_public_key_ptr, + [this.tari_contact_ptr, "int*"], + ], + contact_destroy: ["void", [this.tari_contact_ptr]], + contacts_get_length: ["uint", [this.tari_contacts_ptr, "int*"]], + contacts_get_at: [ + this.tari_contact_ptr, + [this.tari_contacts_ptr, "uint", "int*"], + ], + contacts_destroy: ["void", [this.tari_contacts_ptr]], + completed_transaction_get_destination_public_key: [ + this.tari_public_key_ptr, + [this.tari_completed_transaction_ptr, "int*"], + ], + completed_transaction_get_source_public_key: [ + this.tari_public_key_ptr, + [this.tari_completed_transaction_ptr, "int*"], + ], + completed_transaction_get_amount: [ + "uint64", + [this.tari_completed_transaction_ptr, "int*"], + ], + completed_transaction_get_fee: [ + "uint64", + [this.tari_completed_transaction_ptr, "int*"], + ], + completed_transaction_get_message: [ + "char*", + [this.tari_completed_transaction_ptr, "int*"], + ], + completed_transaction_get_status: [ + "int", + [this.tari_completed_transaction_ptr, "int*"], + ], + completed_transaction_get_transaction_id: [ + "uint64", + [this.tari_completed_transaction_ptr, "int*"], + ], + completed_transaction_get_timestamp: [ + "uint64", + [this.tari_completed_transaction_ptr, "int*"], + ], + completed_transaction_is_valid: [ + "bool", + [this.tari_completed_transaction_ptr, "int*"], + ], + completed_transaction_is_outbound: [ + "bool", + [this.tari_completed_transaction_ptr, "int*"], + ], + completed_transaction_get_confirmations: [ + "uint64", + [this.tari_completed_transaction_ptr, "int*"], + ], + completed_transaction_destroy: [ + "void", + 
[this.tari_completed_transaction_ptr], + ], + completed_transaction_get_excess: [ + this.tari_excess_ptr, + [this.tari_completed_transaction_ptr, "int*"], + ], + completed_transaction_get_public_nonce: [ + this.tari_excess_public_nonce_ptr, + [this.tari_completed_transaction_ptr, "int*"], + ], + completed_transaction_get_signature: [ + this.tari_excess_signature_ptr, + [this.tari_completed_transaction_ptr, "int*"], + ], + excess_destroy: ["void", [this.tari_excess_ptr]], + nonce_destroy: ["void", [this.tari_excess_public_nonce_ptr]], + signature_destroy: ["void", [this.tari_excess_signature_ptr]], + completed_transactions_get_length: [ + "uint", + [this.tari_completed_transactions_ptr, "int*"], + ], + completed_transactions_get_at: [ + this.tari_completed_transaction_ptr, + [this.tari_completed_transactions_ptr, "uint", "int*"], + ], + completed_transactions_destroy: [ + "void", + [this.tari_completed_transactions_ptr], + ], + pending_outbound_transaction_get_transaction_id: [ + "uint64", + [this.tari_pending_outbound_transaction_ptr, "int*"], + ], + pending_outbound_transaction_get_destination_public_key: [ + this.tari_public_key_ptr, + [this.tari_pending_outbound_transaction_ptr, "int*"], + ], + pending_outbound_transaction_get_amount: [ + "uint64", + [this.tari_pending_outbound_transaction_ptr, "int*"], + ], + pending_outbound_transaction_get_fee: [ + "uint64", + [this.tari_pending_outbound_transaction_ptr, "int*"], + ], + pending_outbound_transaction_get_message: [ + "char*", + [this.tari_pending_outbound_transaction_ptr, "int*"], + ], + pending_outbound_transaction_get_timestamp: [ + "uint64", + [this.tari_pending_outbound_transaction_ptr, "int*"], + ], + pending_outbound_transaction_get_status: [ + "int", + [this.tari_pending_outbound_transaction_ptr, "int*"], + ], + pending_outbound_transaction_destroy: [ + "void", + [this.tari_pending_outbound_transaction_ptr], + ], + pending_outbound_transactions_get_length: [ + "uint", + 
[this.tari_pending_outbound_transactions_ptr, "int*"], + ], + pending_outbound_transactions_get_at: [ + this.tari_pending_outbound_transaction_ptr, + [this.tari_pending_outbound_transactions_ptr, "uint", "int*"], + ], + pending_outbound_transactions_destroy: [ + "void", + [this.tari_pending_outbound_transactions_ptr], + ], + pending_inbound_transaction_get_transaction_id: [ + "uint64", + [this.tari_pending_inbound_transaction_ptr, "int*"], + ], + pending_inbound_transaction_get_source_public_key: [ + this.tari_public_key_ptr, + [this.tari_pending_inbound_transaction_ptr, "int*"], + ], + pending_inbound_transaction_get_message: [ + "char*", + [this.tari_pending_inbound_transaction_ptr, "int*"], + ], + pending_inbound_transaction_get_amount: [ + "uint64", + [this.tari_pending_inbound_transaction_ptr, "int*"], + ], + pending_inbound_transaction_get_timestamp: [ + "uint64", + [this.tari_pending_inbound_transaction_ptr, "int*"], + ], + pending_inbound_transaction_get_status: [ + "int", + [this.tari_pending_inbound_transaction_ptr, "int*"], + ], + pending_inbound_transaction_destroy: [ + "void", + [this.tari_pending_inbound_transaction_ptr], + ], + pending_inbound_transactions_get_length: [ + "uint", + [this.tari_pending_inbound_transactions_ptr, "int*"], + ], + pending_inbound_transactions_get_at: [ + this.tari_pending_inbound_transaction_ptr, + [this.tari_pending_inbound_transactions_ptr, "uint", "int*"], + ], + pending_inbound_transactions_destroy: [ + "void", + [this.tari_pending_inbound_transactions_ptr], + ], + comms_config_create: [ + this.tari_comms_config_ptr, + [ + "string", + this.tari_transport_type_ptr, + "string", + "string", + "uint64", + "uint64", + "string", + "int*", + ], + ], + comms_config_destroy: ["void", [this.tari_comms_config_ptr]], + wallet_create: [ + this.tari_wallet_ptr, + [ + this.tari_comms_config_ptr, + "string", + "uint", + "uint", + "string", + this.tari_seed_words_ptr, + "pointer", + "pointer", + "pointer", + "pointer", + "pointer", + 
"pointer", + "pointer", + "pointer", + "pointer", + "pointer", + "pointer", + "pointer", + "pointer", + "pointer", + "bool*", + "int*", + ], + ], + wallet_sign_message: ["char*", [this.tari_wallet_ptr, "string", "int*"]], + wallet_verify_message_signature: [ + "bool", + [ + this.tari_wallet_ptr, + this.tari_public_key_ptr, + "string", + "string", + "int*", + ], + ], + wallet_add_base_node_peer: [ + "bool", + [this.tari_wallet_ptr, this.tari_public_key_ptr, "string", "int*"], + ], + wallet_upsert_contact: [ + "bool", + [this.tari_wallet_ptr, this.tari_contact_ptr, "int*"], + ], + wallet_remove_contact: [ + "bool", + [this.tari_wallet_ptr, this.tari_contact_ptr, "int*"], + ], + wallet_get_available_balance: ["uint64", [this.tari_wallet_ptr, "int*"]], + wallet_get_pending_incoming_balance: [ + "uint64", + [this.tari_wallet_ptr, "int*"], + ], + wallet_get_pending_outgoing_balance: [ + "uint64", + [this.tari_wallet_ptr, "int*"], + ], + wallet_get_fee_estimate: [ + "uint64", + [this.tari_wallet_ptr, "uint64", "uint64", "uint64", "uint64", "int*"], + ], + wallet_get_num_confirmations_required: [ + "uint64", + [this.tari_wallet_ptr, "int*"], + ], + wallet_set_num_confirmations_required: [ + "void", + [this.tari_wallet_ptr, "uint64", "int*"], + ], + wallet_send_transaction: [ + "uint64", + [ + this.tari_wallet_ptr, + this.tari_public_key_ptr, + "uint64", + "uint64", + "string", + "int*", + ], + ], + wallet_get_contacts: [ + this.tari_contacts_ptr, + [this.tari_wallet_ptr, "int*"], + ], + wallet_get_completed_transactions: [ + this.tari_completed_transactions_ptr, + [this.tari_wallet_ptr, "int*"], + ], + wallet_get_pending_outbound_transactions: [ + this.tari_pending_outbound_transactions_ptr, + [this.tari_wallet_ptr, "int*"], + ], + wallet_get_public_key: [ + this.tari_public_key_ptr, + [this.tari_wallet_ptr, "int*"], + ], + wallet_get_pending_inbound_transactions: [ + this.tari_pending_inbound_transactions_ptr, + [this.tari_wallet_ptr, "int*"], + ], + 
wallet_get_cancelled_transactions: [ + this.tari_completed_transactions_ptr, + [this.tari_wallet_ptr, "int*"], + ], + wallet_get_completed_transaction_by_id: [ + this.tari_completed_transaction_ptr, + [this.tari_wallet_ptr, "uint64", "int*"], + ], + wallet_get_pending_outbound_transaction_by_id: [ + this.tari_pending_outbound_transaction_ptr, + [this.tari_wallet_ptr, "uint64", "int*"], + ], + wallet_get_pending_inbound_transaction_by_id: [ + this.tari_pending_inbound_transaction_ptr, + [this.tari_wallet_ptr, "uint64", "int*"], + ], + wallet_get_cancelled_transaction_by_id: [ + this.tari_completed_transaction_ptr, + [this.tari_wallet_ptr, "uint64", "int*"], + ], + wallet_import_utxo: [ + "uint64", + [ + this.tari_wallet_ptr, + "uint64", + this.tari_private_key_ptr, + this.tari_public_key_ptr, + "string", + "int*", + ], + ], + wallet_start_utxo_validation: ["uint64", [this.tari_wallet_ptr, "int*"]], + wallet_start_stxo_validation: ["uint64", [this.tari_wallet_ptr, "int*"]], + wallet_start_invalid_txo_validation: [ + "uint64", + [this.tari_wallet_ptr, "int*"], + ], + wallet_start_transaction_validation: [ + "uint64", + [this.tari_wallet_ptr, "int*"], + ], + wallet_restart_transaction_broadcast: [ + "bool", + [this.tari_wallet_ptr, "int*"], + ], + wallet_set_low_power_mode: ["void", [this.tari_wallet_ptr, "int*"]], + wallet_set_normal_power_mode: ["void", [this.tari_wallet_ptr, "int*"]], + wallet_cancel_pending_transaction: [ + "bool", + [this.tari_wallet_ptr, "uint64", "int*"], + ], + wallet_coin_split: [ + "uint64", + [ + this.tari_wallet_ptr, + "uint64", + "uint64", + "uint64", + "string", + "uint64", + "int*", + ], + ], + wallet_get_seed_words: [ + this.tari_seed_words_ptr, + [this.tari_wallet_ptr, "int*"], + ], + wallet_apply_encryption: [ + "void", + [this.tari_wallet_ptr, "string", "int*"], + ], + wallet_remove_encryption: ["void", [this.tari_wallet_ptr, "int*"]], + wallet_set_key_value: [ + "bool", + [this.tari_wallet_ptr, "string", "string", "int*"], + ], + 
wallet_get_value: ["char*", [this.tari_wallet_ptr, "string", "int*"]], + wallet_clear_value: ["bool", [this.tari_wallet_ptr, "string", "int*"]], + wallet_is_recovery_in_progress: ["bool", [this.tari_wallet_ptr, "int*"]], + wallet_start_recovery: [ + "bool", + [this.tari_wallet_ptr, this.tari_public_key_ptr, "pointer", "int*"], + ], + wallet_destroy: ["void", [this.tari_wallet_ptr]], + file_partial_backup: ["void", ["string", "string", "int*"]], + log_debug_message: ["void", ["string"]], + get_emoji_set: [this.emoji_set_ptr, []], + emoji_set_destroy: ["void", [this.emoji_set_ptr]], + emoji_set_get_at: [ + this.byte_vector_ptr, + [this.emoji_set_ptr, "uint", "int*"], + ], + emoji_set_get_length: ["uint", [this.emoji_set_ptr, "int*"]], + }); + } + + static transportMemoryCreate() { + return new Promise((resolve, reject) => + this.#fn.transport_memory_create.async( + this.checkAsyncRes(resolve, reject, "transportMemoryCreate") + ) + ); + } + + static transportTcpCreate(listener_address) { + return new Promise((resolve, reject) => + this.#fn.transport_tcp_create.async( + listener_address, + this.error, + this.checkAsyncRes(resolve, reject, "transportTcpCreate") + ) + ); + } + + static transportTorCreate( + control_server_address, + tor_cookie, + tor_port, + socks_username, + socks_password + ) { + return new Promise((resolve, reject) => + this.#fn.transport_tor_create.async( + control_server_address, + tor_cookie, + tor_port, + socks_username, + socks_password, + this.error, + this.checkAsyncRes(resolve, reject, "transportTorCreate") + ) + ); + } + + static transportMemoryGetAddress(transport) { + return new Promise((resolve, reject) => + this.#fn.transport_memory_get_address.async( + transport, + this.error, + this.checkAsyncRes(resolve, reject, "transportMemoryGetAddress") + ) + ); + } + + static transportTypeDestroy(transport) { + return new Promise((resolve, reject) => + this.#fn.transport_type_destroy.async( + transport, + this.checkAsyncRes(resolve, reject, 
"transportTypeDestroy") + ) + ); + } + + static stringDestroy(s) { + return new Promise((resolve, reject) => + this.#fn.string_destroy.async( + s, + this.checkAsyncRes(resolve, reject, "stringDestroy") + ) + ); + } + + static byteVectorCreate(byte_array, element_count) { + return new Promise((resolve, reject) => + this.#fn.byte_vector_create.async( + byte_array, + element_count, + this.error, + this.checkAsyncRes(resolve, reject, "byteVectorCreate") + ) + ); + } + + static byteVectorGetAt(ptr, i) { + return new Promise((resolve, reject) => + this.#fn.byte_vector_get_at.async( + ptr, + i, + this.error, + this.checkAsyncRes(resolve, reject, "byteVectorGetAt") + ) + ); + } + + static byteVectorGetLength(vec) { + return new Promise((resolve, reject) => + this.#fn.byte_vector_get_length.async( + vec, + this.error, + this.checkAsyncRes(resolve, reject, "byteVectorGetLength") + ) + ); + } + + static byteVectorDestroy(bytes) { + return new Promise((resolve, reject) => + this.#fn.byte_vector_destroy.async( + bytes, + this.checkAsyncRes(resolve, reject, "byteVectorDestroy") + ) + ); + } + + static publicKeyCreate(bytes) { + return new Promise((resolve, reject) => + this.#fn.public_key_create.async( + bytes, + this.error, + this.checkAsyncRes(resolve, reject, "publicKeyCreate") + ) + ); + } + + static publicKeyGetBytes(public_key) { + return new Promise((resolve, reject) => + this.#fn.public_key_get_bytes.async( + public_key, + this.error, + this.checkAsyncRes(resolve, reject, "publicKeyGetBytes") + ) + ); + } + + static publicKeyFromPrivateKey(secret_key) { + return new Promise((resolve, reject) => + this.#fn.public_key_from_private_key.async( + secret_key, + this.error, + this.checkAsyncRes(resolve, reject, "publicKeyFromPrivateKey") + ) + ); + } + + static publicKeyFromHex(hex) { + return new Promise((resolve, reject) => + this.#fn.public_key_from_hex.async( + hex, + this.error, + this.checkAsyncRes(resolve, reject, "publicKeyFromHex") + ) + ); + } + + static 
publicKeyDestroy(pk) { + return new Promise((resolve, reject) => + this.#fn.public_key_destroy.async( + pk, + this.checkAsyncRes(resolve, reject, "publicKeyDestroy") + ) + ); + } + + static publicKeyToEmojiId(pk) { + return new Promise((resolve, reject) => + this.#fn.public_key_to_emoji_id.async( + pk, + this.error, + this.checkAsyncRes(resolve, reject, "publicKeyToEmojiId") + ) + ); + } + + static emojiIdToPublicKey(emoji) { + return new Promise((resolve, reject) => + this.#fn.emoji_id_to_public_key.async( + emoji, + this.error, + this.checkAsyncRes(resolve, reject, "emojiIdToPublicKey") + ) + ); + } + + static privateKeyCreate(bytes) { + return new Promise((resolve, reject) => + this.#fn.private_key_create.async( + bytes, + this.error, + this.checkAsyncRes(resolve, reject, "privateKeyCreate") + ) + ); + } + + static privateKeyGenerate() { + return new Promise((resolve, reject) => + this.#fn.private_key_generate.async( + this.checkAsyncRes(resolve, reject, "privateKeyGenerate") + ) + ); + } + + static privateKeyGetBytes(private_key) { + return new Promise((resolve, reject) => + this.#fn.private_key_get_bytes.async( + private_key, + this.error, + this.checkAsyncRes(resolve, reject, "privateKeyGetBytes") + ) + ); + } + + static privateKeyFromHex(hex) { + return new Promise((resolve, reject) => + this.#fn.private_key_from_hex.async( + hex, + this.error, + this.checkAsyncRes(resolve, reject, "privateKeyFromHex") + ) + ); + } + + static privateKeyDestroy(pk) { + return new Promise((resolve, reject) => + this.#fn.private_key_destroy.async( + pk, + this.checkAsyncRes(resolve, reject, "privateKeyDestroy") + ) + ); + } + + static seedWordsCreate() { + return new Promise((resolve, reject) => + this.#fn.seed_words_create.async( + this.checkAsyncRes(resolve, reject, "seedWordsCreate") + ) + ); + } + + static seedWordsGetLength(seed_words) { + return new Promise((resolve, reject) => + this.#fn.seed_words_get_length.async( + seed_words, + this.error, + 
this.checkAsyncRes(resolve, reject, "seedWordsGetLength") + ) + ); + } + + static seedWordsGetAt(seed_words, position) { + return new Promise((resolve, reject) => + this.#fn.seed_words_get_at.async( + seed_words, + position, + this.error, + this.checkAsyncRes(resolve, reject, "seedWordsGetAt") + ) + ); + } + + static seedWordsPushWord(seed_words, word) { + return new Promise((resolve, reject) => + this.#fn.seed_words_push_word.async( + seed_words, + word, + this.error, + this.checkAsyncRes(resolve, reject, "seedWordsPushWord") + ) + ); + } + + static seedWordsDestroy(seed_words) { + return new Promise((resolve, reject) => + this.#fn.seed_words_destroy.async( + seed_words, + this.checkAsyncRes(resolve, reject, "seedWordsDestroy") + ) + ); + } + + static contactCreate(alias, public_key) { + return new Promise((resolve, reject) => + this.#fn.contact_create.async( + alias, + public_key, + this.error, + this.checkAsyncRes(resolve, reject, "contactCreate") + ) + ); + } + + static contactGetAlias(contact) { + return new Promise((resolve, reject) => + this.#fn.contact_get_alias.async( + contact, + this.error, + this.checkAsyncRes(resolve, reject, "contactGetAlias") + ) + ); + } + + static contactGetPublicKey(contact) { + return new Promise((resolve, reject) => + this.#fn.contact_get_public_key.async( + contact, + this.error, + this.checkAsyncRes(resolve, reject, "contactGetPublicKey") + ) + ); + } + + static contactDestroy(contact) { + return new Promise((resolve, reject) => + this.#fn.contact_destroy.async( + contact, + this.checkAsyncRes(resolve, reject, "contactDestroy") + ) + ); + } + + static contactsGetLength(contacts) { + return new Promise((resolve, reject) => + this.#fn.contacts_get_length.async( + contacts, + this.error, + this.checkAsyncRes(resolve, reject, "contactsGetLength") + ) + ); + } + + static contactsGetAt(contacts, position) { + return new Promise((resolve, reject) => + this.#fn.contacts_get_at.async( + contacts, + position, + this.error, + 
this.checkAsyncRes(resolve, reject, "contactsGetAt") + ) + ); + } + + static contactsDestroy(contacts) { + return new Promise((resolve, reject) => + this.#fn.contacts_destroy.async( + contacts, + this.checkAsyncRes(resolve, reject, "contactsDestroy") + ) + ); + } + + static completedTransactionGetDestinationPublicKey(transaction) { + return new Promise((resolve, reject) => + this.#fn.completed_transaction_get_destination_public_key.async( + transaction, + this.error, + this.checkAsyncRes( + resolve, + reject, + "completedTransactionGetDestinationPublicKey" + ) + ) + ); + } + + static completedTransactionGetSourcePublicKey(transaction) { + return new Promise((resolve, reject) => + this.#fn.completed_transaction_get_source_public_key.async( + transaction, + this.error, + this.checkAsyncRes( + resolve, + reject, + "completedTransactionGetSourcePublicKey" + ) + ) + ); + } + + static completedTransactionGetAmount(transaction) { + return new Promise((resolve, reject) => + this.#fn.completed_transaction_get_amount.async( + transaction, + this.error, + this.checkAsyncRes(resolve, reject, "completedTransactionGetAmount") + ) + ); + } + + static completedTransactionGetFee(transaction) { + return new Promise((resolve, reject) => + this.#fn.completed_transaction_get_fee.async( + transaction, + this.error, + this.checkAsyncRes(resolve, reject, "completedTransactionGetFee") + ) + ); + } + + static completedTransactionGetMessage(transaction) { + return new Promise((resolve, reject) => + this.#fn.completed_transaction_get_message.async( + transaction, + this.error, + this.checkAsyncRes(resolve, reject, "completedTransactionGetMessage") + ) + ); + } + + static completedTransactionGetStatus(transaction) { + return new Promise((resolve, reject) => + this.#fn.completed_transaction_get_status.async( + transaction, + this.error, + this.checkAsyncRes(resolve, reject, "completedTransactionGetStatus") + ) + ); + } + + static completedTransactionGetTransactionId(transaction) { + return new 
Promise((resolve, reject) => + this.#fn.completed_transaction_get_transaction_id.async( + transaction, + this.error, + this.checkAsyncRes( + resolve, + reject, + "completedTransactionGetTransactionId" + ) + ) + ); + } + + static completedTransactionGetTimestamp(transaction) { + return new Promise((resolve, reject) => + this.#fn.completed_transaction_get_timestamp.async( + transaction, + this.error, + this.checkAsyncRes(resolve, reject, "completedTransactionGetTimestamp") + ) + ); + } + + static completedTransactionIsValid(tx) { + return new Promise((resolve, reject) => + this.#fn.completed_transaction_is_valid.async( + tx, + this.error, + this.checkAsyncRes(resolve, reject, "completedTransactionIsValid") + ) + ); + } + + static completedTransactionIsOutbound(tx) { + return new Promise((resolve, reject) => + this.#fn.completed_transaction_is_outbound.async( + tx, + this.error, + this.checkAsyncRes(resolve, reject, "completedTransactionIsOutbound") + ) + ); + } + + static completedTransactionGetConfirmations(transaction) { + return new Promise((resolve, reject) => + this.#fn.completed_transaction_get_confirmations.async( + transaction, + this.error, + this.checkAsyncRes( + resolve, + reject, + "completedTransactionGetConfirmations" + ) + ) + ); + } + + static completedTransactionDestroy(transaction) { + return new Promise((resolve, reject) => + this.#fn.completed_transaction_destroy.async( + transaction, + this.checkAsyncRes(resolve, reject, "completedTransactionDestroy") + ) + ); + } + + static completedTransactionGetExcess(transaction) { + return new Promise((resolve, reject) => + this.#fn.completed_transaction_get_excess.async( + transaction, + this.error, + this.checkAsyncRes(resolve, reject, "completedTransactionGetExcess") + ) + ); + } + + static completedTransactionGetPublicNonce(transaction) { + return new Promise((resolve, reject) => + this.#fn.completed_transaction_get_public_nonce.async( + transaction, + this.error, + this.checkAsyncRes( + resolve, + 
reject, + "completedTransactionGetPublicNonce" + ) + ) + ); + } + + static completedTransactionGetSignature(transaction) { + return new Promise((resolve, reject) => + this.#fn.completed_transaction_get_signature.async( + transaction, + this.error, + this.checkAsyncRes(resolve, reject, "completedTransactionGetSignature") + ) + ); + } + + static excessDestroy(excess) { + return new Promise((resolve, reject) => + this.#fn.excess_destroy.async( + excess, + this.checkAsyncRes(resolve, reject, "excessDestroy") + ) + ); + } + + static nonceDestroy(nonce) { + return new Promise((resolve, reject) => + this.#fn.nonce_destroy.async( + nonce, + this.checkAsyncRes(resolve, reject, "nonceDestroy") + ) + ); + } + + static signatureDestroy(signature) { + return new Promise((resolve, reject) => + this.#fn.signature_destroy.async( + signature, + this.checkAsyncRes(resolve, reject, "signatureDestroy") + ) + ); + } + + static completedTransactionsGetLength(transactions) { + return new Promise((resolve, reject) => + this.#fn.completed_transactions_get_length.async( + transactions, + this.error, + this.checkAsyncRes(resolve, reject, "completedTransactionsGetLength") + ) + ); + } + + static completedTransactionsGetAt(transactions, position) { + return new Promise((resolve, reject) => + this.#fn.completed_transactions_get_at.async( + transactions, + position, + this.error, + this.checkAsyncRes(resolve, reject, "completedTransactionsGetAt") + ) + ); + } + + static completedTransactionsDestroy(transactions) { + return new Promise((resolve, reject) => + this.#fn.completed_transactions_destroy.async( + transactions, + this.checkAsyncRes(resolve, reject, "completedTransactionsDestroy") + ) + ); + } + + static pendingOutboundTransactionGetTransactionId(transaction) { + return new Promise((resolve, reject) => + this.#fn.pending_outbound_transaction_get_transaction_id.async( + transaction, + this.error, + this.checkAsyncRes( + resolve, + reject, + "pendingOutboundTransactionGetTransactionId" + ) 
+ ) + ); + } + + static pendingOutboundTransactionGetDestinationPublicKey(transaction) { + return new Promise((resolve, reject) => + this.#fn.pending_outbound_transaction_get_destination_public_key.async( + transaction, + this.error, + this.checkAsyncRes( + resolve, + reject, + "pendingOutboundTransactionGetDestinationPublicKey" + ) + ) + ); + } + + static pendingOutboundTransactionGetAmount(transaction) { + return new Promise((resolve, reject) => + this.#fn.pending_outbound_transaction_get_amount.async( + transaction, + this.error, + this.checkAsyncRes( + resolve, + reject, + "pendingOutboundTransactionGetAmount" + ) + ) + ); + } + + static pendingOutboundTransactionGetFee(transaction) { + return new Promise((resolve, reject) => + this.#fn.pending_outbound_transaction_get_fee.async( + transaction, + this.error, + this.checkAsyncRes(resolve, reject, "pendingOutboundTransactionGetFee") + ) + ); + } + + static pendingOutboundTransactionGetMessage(transaction) { + return new Promise((resolve, reject) => + this.#fn.pending_outbound_transaction_get_message.async( + transaction, + this.error, + this.checkAsyncRes( + resolve, + reject, + "pendingOutboundTransactionGetMessage" + ) + ) + ); + } + + static pendingOutboundTransactionGetTimestamp(transaction) { + return new Promise((resolve, reject) => + this.#fn.pending_outbound_transaction_get_timestamp.async( + transaction, + this.error, + this.checkAsyncRes( + resolve, + reject, + "pendingOutboundTransactionGetTimestamp" + ) + ) + ); + } + + static pendingOutboundTransactionGetStatus(transaction) { + return new Promise((resolve, reject) => + this.#fn.pending_outbound_transaction_get_status.async( + transaction, + this.error, + this.checkAsyncRes( + resolve, + reject, + "pendingOutboundTransactionGetStatus" + ) + ) + ); + } + + static pendingOutboundTransactionDestroy(transaction) { + return new Promise((resolve, reject) => + this.#fn.pending_outbound_transaction_destroy.async( + transaction, + this.checkAsyncRes(resolve, 
reject, "pendingOutboundTransactionDestroy") + ) + ); + } + + static pendingOutboundTransactionsGetLength(transactions) { + return new Promise((resolve, reject) => + this.#fn.pending_outbound_transactions_get_length.async( + transactions, + this.error, + this.checkAsyncRes( + resolve, + reject, + "pendingOutboundTransactionsGetLength" + ) + ) + ); + } + + static pendingOutboundTransactionsGetAt(transactions, position) { + return new Promise((resolve, reject) => + this.#fn.pending_outbound_transactions_get_at.async( + transactions, + position, + this.error, + this.checkAsyncRes(resolve, reject, "pendingOutboundTransactionsGetAt") + ) + ); + } + + static pendingOutboundTransactionsDestroy(transactions) { + return new Promise((resolve, reject) => + this.#fn.pending_outbound_transactions_destroy.async( + transactions, + this.checkAsyncRes( + resolve, + reject, + "pendingOutboundTransactionsDestroy" + ) + ) + ); + } + + static pendingInboundTransactionGetTransactionId(transaction) { + return new Promise((resolve, reject) => + this.#fn.pending_inbound_transaction_get_transaction_id.async( + transaction, + this.error, + this.checkAsyncRes( + resolve, + reject, + "pendingInboundTransactionGetTransactionId" + ) + ) + ); + } + + static pendingInboundTransactionGetSourcePublicKey(transaction) { + return new Promise((resolve, reject) => + this.#fn.pending_inbound_transaction_get_source_public_key.async( + transaction, + this.error, + this.checkAsyncRes( + resolve, + reject, + "pendingInboundTransactionGetSourcePublicKey" + ) + ) + ); + } + + static pendingInboundTransactionGetMessage(transaction) { + return new Promise((resolve, reject) => + this.#fn.pending_inbound_transaction_get_message.async( + transaction, + this.error, + this.checkAsyncRes( + resolve, + reject, + "pendingInboundTransactionGetMessage" + ) + ) + ); + } + + static pendingInboundTransactionGetAmount(transaction) { + return new Promise((resolve, reject) => + 
this.#fn.pending_inbound_transaction_get_amount.async( + transaction, + this.error, + this.checkAsyncRes( + resolve, + reject, + "pendingInboundTransactionGetAmount" + ) + ) + ); + } + + static pendingInboundTransactionGetTimestamp(transaction) { + return new Promise((resolve, reject) => + this.#fn.pending_inbound_transaction_get_timestamp.async( + transaction, + this.error, + this.checkAsyncRes( + resolve, + reject, + "pendingInboundTransactionGetTimestamp" + ) + ) + ); + } + + static pendingInboundTransactionGetStatus(transaction) { + return new Promise((resolve, reject) => + this.#fn.pending_inbound_transaction_get_status.async( + transaction, + this.error, + this.checkAsyncRes( + resolve, + reject, + "pendingInboundTransactionGetStatus" + ) + ) + ); + } + + static pendingInboundTransactionDestroy(transaction) { + return new Promise((resolve, reject) => + this.#fn.pending_inbound_transaction_destroy.async( + transaction, + this.checkAsyncRes(resolve, reject, "pendingInboundTransactionDestroy") + ) + ); + } + + static pendingInboundTransactionsGetLength(transactions) { + return new Promise((resolve, reject) => + this.#fn.pending_inbound_transactions_get_length.async( + transactions, + this.error, + this.checkAsyncRes( + resolve, + reject, + "pendingInboundTransactionsGetLength" + ) + ) + ); + } + + static pendingInboundTransactionsGetAt(transactions, position) { + return new Promise((resolve, reject) => + this.#fn.pending_inbound_transactions_get_at.async( + transactions, + position, + this.error, + this.checkAsyncRes(resolve, reject, "pendingInboundTransactionsGetAt") + ) + ); + } + + static pendingInboundTransactionsDestroy(transactions) { + return new Promise((resolve, reject) => + this.#fn.pending_inbound_transactions_destroy.async( + transactions, + this.checkAsyncRes(resolve, reject, "pendingInboundTransactionsDestroy") + ) + ); + } + + static commsConfigCreate( + public_address, + transport, + database_name, + datastore_path, + discovery_timeout_in_secs, + 
saf_message_duration_in_secs, + network + ) { + return new Promise((resolve, reject) => + this.#fn.comms_config_create.async( + public_address, + transport, + database_name, + datastore_path, + discovery_timeout_in_secs, + saf_message_duration_in_secs, + network, + this.error, + this.checkAsyncRes(resolve, reject, "commsConfigCreate") + ) + ); + } + + static commsConfigDestroy(wc) { + return new Promise((resolve, reject) => + this.#fn.comms_config_destroy.async( + wc, + this.checkAsyncRes(resolve, reject, "commsConfigDestroy") + ) + ); + } + + static walletCreate( + config, + log_path, + num_rolling_log_files, + size_per_log_file_bytes, + passphrase, + seed_words, + callback_received_transaction, + callback_received_transaction_reply, + callback_received_finalized_transaction, + callback_transaction_broadcast, + callback_transaction_mined, + callback_transaction_mined_unconfirmed, + callback_direct_send_result, + callback_store_and_forward_send_result, + callback_transaction_cancellation, + callback_utxo_validation_complete, + callback_stxo_validation_complete, + callback_invalid_txo_validation_complete, + callback_transaction_validation_complete, + callback_saf_message_received + ) { + return new Promise((resolve, reject) => + this.#fn.wallet_create.async( + config, + log_path, + num_rolling_log_files, + size_per_log_file_bytes, + passphrase, + seed_words, + callback_received_transaction, + callback_received_transaction_reply, + callback_received_finalized_transaction, + callback_transaction_broadcast, + callback_transaction_mined, + callback_transaction_mined_unconfirmed, + callback_direct_send_result, + callback_store_and_forward_send_result, + callback_transaction_cancellation, + callback_utxo_validation_complete, + callback_stxo_validation_complete, + callback_invalid_txo_validation_complete, + callback_transaction_validation_complete, + callback_saf_message_received, + this.recovery_in_progress, + this.error, + this.checkAsyncRes(resolve, reject, 
"walletCreate") + ) + ); + } + + static walletSignMessage(wallet, msg) { + return new Promise((resolve, reject) => + this.#fn.wallet_sign_message.async( + wallet, + msg, + this.error, + this.checkAsyncRes(resolve, reject, "walletSignMessage") + ) + ); + } + + static walletVerifyMessageSignature(wallet, public_key, hex_sig_nonce, msg) { + return new Promise((resolve, reject) => + this.#fn.wallet_verify_message_signature.async( + wallet, + public_key, + hex_sig_nonce, + msg, + this.error, + this.checkAsyncRes(resolve, reject, "walletVerifyMessageSignature") + ) + ); + } + + static walletAddBaseNodePeer(wallet, public_key, address) { + return new Promise((resolve, reject) => + this.#fn.wallet_add_base_node_peer.async( + wallet, + public_key, + address, + this.error, + this.checkAsyncRes(resolve, reject, "walletAddBaseNodePeer") + ) + ); + } + + static walletUpsertContact(wallet, contact) { + return new Promise((resolve, reject) => + this.#fn.wallet_upsert_contact.async( + wallet, + contact, + this.error, + this.checkAsyncRes(resolve, reject, "walletUpsertContact") + ) + ); + } + + static walletRemoveContact(wallet, contact) { + return new Promise((resolve, reject) => + this.#fn.wallet_remove_contact.async( + wallet, + contact, + this.error, + this.checkAsyncRes(resolve, reject, "walletRemoveContact") + ) + ); + } + + static walletGetAvailableBalance(wallet) { + return new Promise((resolve, reject) => + this.#fn.wallet_get_available_balance.async( + wallet, + this.error, + this.checkAsyncRes(resolve, reject, "walletGetAvailableBalance") + ) + ); + } + + static walletGetPendingIncomingBalance(wallet) { + return new Promise((resolve, reject) => + this.#fn.wallet_get_pending_incoming_balance.async( + wallet, + this.error, + this.checkAsyncRes(resolve, reject, "walletGetPendingIncomingBalance") + ) + ); + } + + static walletGetPendingOutgoingBalance(wallet) { + return new Promise((resolve, reject) => + this.#fn.wallet_get_pending_outgoing_balance.async( + wallet, + 
this.error, + this.checkAsyncRes(resolve, reject, "walletGetPendingOutgoingBalance") + ) + ); + } + + static walletGetFeeEstimate( + wallet, + amount, + fee_per_gram, + num_kernels, + num_outputs + ) { + return new Promise((resolve, reject) => + this.#fn.wallet_get_fee_estimate.async( + wallet, + amount, + fee_per_gram, + num_kernels, + num_outputs, + this.error, + this.checkAsyncRes(resolve, reject, "walletGetFeeEstimate") + ) + ); + } + + static walletGetNumConfirmationsRequired(wallet) { + return new Promise((resolve, reject) => + this.#fn.wallet_get_num_confirmations_required.async( + wallet, + this.error, + this.checkAsyncRes(resolve, reject, "walletGetNumConfirmationsRequired") + ) + ); + } + + static walletSetNumConfirmationsRequired(wallet, num) { + return new Promise((resolve, reject) => + this.#fn.wallet_set_num_confirmations_required.async( + wallet, + num, + this.error, + this.checkAsyncRes(resolve, reject, "walletSetNumConfirmationsRequired") + ) + ); + } + + static walletSendTransaction( + wallet, + destination, + amount, + fee_per_gram, + message + ) { + return new Promise((resolve, reject) => + this.#fn.wallet_send_transaction.async( + wallet, + destination, + amount, + fee_per_gram, + message, + this.error, + this.checkAsyncRes(resolve, reject, "walletSendTransaction") + ) + ); + } + + static walletGetContacts(wallet) { + return new Promise((resolve, reject) => + this.#fn.wallet_get_contacts.async( + wallet, + this.error, + this.checkAsyncRes(resolve, reject, "walletGetContacts") + ) + ); + } + + static walletGetCompletedTransactions(wallet) { + return new Promise((resolve, reject) => + this.#fn.wallet_get_completed_transactions.async( + wallet, + this.error, + this.checkAsyncRes(resolve, reject, "walletGetCompletedTransactions") + ) + ); + } + + static walletGetPendingOutboundTransactions(wallet) { + return new Promise((resolve, reject) => + this.#fn.wallet_get_pending_outbound_transactions.async( + wallet, + this.error, + this.checkAsyncRes( + 
resolve, + reject, + "walletGetPendingOutboundTransactions" + ) + ) + ); + } + + static walletGetPublicKey(wallet) { + return new Promise((resolve, reject) => + this.#fn.wallet_get_public_key.async( + wallet, + this.error, + this.checkAsyncRes(resolve, reject, "walletGetPublicKey") + ) + ); + } + + static walletGetPendingInboundTransactions(wallet) { + return new Promise((resolve, reject) => + this.#fn.wallet_get_pending_inbound_transactions.async( + wallet, + this.error, + this.checkAsyncRes( + resolve, + reject, + "walletGetPendingInboundTransactions" + ) + ) + ); + } + + static walletGetCancelledTransactions(wallet) { + return new Promise((resolve, reject) => + this.#fn.wallet_get_cancelled_transactions.async( + wallet, + this.error, + this.checkAsyncRes(resolve, reject, "walletGetCancelledTransactions") + ) + ); + } + + static walletGetCompletedTransactionById(wallet, transaction_id) { + return new Promise((resolve, reject) => + this.#fn.wallet_get_completed_transaction_by_id.async( + wallet, + transaction_id, + this.error, + this.checkAsyncRes(resolve, reject, "walletGetCompletedTransactionById") + ) + ); + } + + static walletGetPendingOutboundTransactionById(wallet, transaction_id) { + return new Promise((resolve, reject) => + this.#fn.wallet_get_pending_outbound_transaction_by_id.async( + wallet, + transaction_id, + this.error, + this.checkAsyncRes( + resolve, + reject, + "walletGetPendingOutboundTransactionById" + ) + ) + ); + } + + static walletGetPendingInboundTransactionById(wallet, transaction_id) { + return new Promise((resolve, reject) => + this.#fn.wallet_get_pending_inbound_transaction_by_id.async( + wallet, + transaction_id, + this.error, + this.checkAsyncRes( + resolve, + reject, + "walletGetPendingInboundTransactionById" + ) + ) + ); + } + + static walletGetCancelledTransactionById(wallet, transaction_id) { + return new Promise((resolve, reject) => + this.#fn.wallet_get_cancelled_transaction_by_id.async( + wallet, + transaction_id, + this.error, 
+ this.checkAsyncRes(resolve, reject, "walletGetCancelledTransactionById") + ) + ); + } + + static walletImportUtxo( + wallet, + amount, + spending_key, + source_public_key, + message + ) { + return new Promise((resolve, reject) => + this.#fn.wallet_import_utxo.async( + wallet, + amount, + spending_key, + source_public_key, + message, + this.error, + this.checkAsyncRes(resolve, reject, "walletImportUtxo") + ) + ); + } + + static walletStartUtxoValidation(wallet) { + return new Promise((resolve, reject) => + this.#fn.wallet_start_utxo_validation.async( + wallet, + this.error, + this.checkAsyncRes(resolve, reject, "walletStartUtxoValidation") + ) + ); + } + + static walletStartStxoValidation(wallet) { + return new Promise((resolve, reject) => + this.#fn.wallet_start_stxo_validation.async( + wallet, + this.error, + this.checkAsyncRes(resolve, reject, "walletStartStxoValidation") + ) + ); + } + + static walletStartInvalidTxoValidation(wallet) { + return new Promise((resolve, reject) => + this.#fn.wallet_start_invalid_txo_validation.async( + wallet, + this.error, + this.checkAsyncRes(resolve, reject, "walletStartInvalidTxoValidation") + ) + ); + } + + static walletStartTransactionValidation(wallet) { + return new Promise((resolve, reject) => + this.#fn.wallet_start_transaction_validation.async( + wallet, + this.error, + this.checkAsyncRes(resolve, reject, "walletStartTransactionValidation") + ) + ); + } + + static walletRestartTransactionBroadcast(wallet) { + return new Promise((resolve, reject) => + this.#fn.wallet_restart_transaction_broadcast.async( + wallet, + this.error, + this.checkAsyncRes(resolve, reject, "walletRestartTransactionBroadcast") + ) + ); + } + + static walletSetLowPowerMode(wallet) { + return new Promise((resolve, reject) => + this.#fn.wallet_set_low_power_mode.async( + wallet, + this.error, + this.checkAsyncRes(resolve, reject, "walletSetLowPowerMode") + ) + ); + } + + static walletSetNormalPowerMode(wallet) { + return new Promise((resolve, reject) 
=> + this.#fn.wallet_set_normal_power_mode.async( + wallet, + this.error, + this.checkAsyncRes(resolve, reject, "walletSetNormalPowerMode") + ) + ); + } + + static walletCancelPendingTransaction(wallet, transaction_id) { + return new Promise((resolve, reject) => + this.#fn.wallet_cancel_pending_transaction.async( + wallet, + transaction_id, + this.error, + this.checkAsyncRes(resolve, reject, "walletCancelPendingTransaction") + ) + ); + } + + static walletCoinSplit(wallet, amount, count, fee, msg, lock_height) { + return new Promise((resolve, reject) => + this.#fn.wallet_coin_split.async( + wallet, + amount, + count, + fee, + msg, + lock_height, + this.error, + this.checkAsyncRes(resolve, reject, "walletCoinSplit") + ) + ); + } + + static walletGetSeedWords(wallet) { + return new Promise((resolve, reject) => + this.#fn.wallet_get_seed_words.async( + wallet, + this.error, + this.checkAsyncRes(resolve, reject, "walletGetSeedWords") + ) + ); + } + + static walletApplyEncryption(wallet, passphrase) { + return new Promise((resolve, reject) => + this.#fn.wallet_apply_encryption.async( + wallet, + passphrase, + this.error, + this.checkAsyncRes(resolve, reject, "walletApplyEncryption") + ) + ); + } + + static walletRemoveEncryption(wallet) { + return new Promise((resolve, reject) => + this.#fn.wallet_remove_encryption.async( + wallet, + this.error, + this.checkAsyncRes(resolve, reject, "walletRemoveEncryption") + ) + ); + } + + static walletSetKeyValue(wallet, key, value) { + return new Promise((resolve, reject) => + this.#fn.wallet_set_key_value.async( + wallet, + key, + value, + this.error, + this.checkAsyncRes(resolve, reject, "walletSetKeyValue") + ) + ); + } + + static walletGetValue(wallet, key) { + return new Promise((resolve, reject) => + this.#fn.wallet_get_value.async( + wallet, + key, + this.error, + this.checkAsyncRes(resolve, reject, "walletGetValue") + ) + ); + } + + static walletClearValue(wallet, key) { + return new Promise((resolve, reject) => + 
this.#fn.wallet_clear_value.async( + wallet, + key, + this.error, + this.checkAsyncRes(resolve, reject, "walletClearValue") + ) + ); + } + + static walletIsRecoveryInProgress(wallet) { + return new Promise((resolve, reject) => + this.#fn.wallet_is_recovery_in_progress.async( + wallet, + this.error, + this.checkAsyncRes(resolve, reject, "walletIsRecoveryInProgress") + ) + ); + } + + static walletStartRecovery( + wallet, + base_node_public_key, + recovery_progress_callback + ) { + return new Promise((resolve, reject) => + this.#fn.wallet_start_recovery.async( + wallet, + base_node_public_key, + recovery_progress_callback, + this.error, + this.checkAsyncRes(resolve, reject, "walletStartRecovery") + ) + ); + } + + static walletDestroy(wallet) { + return new Promise((resolve, reject) => + this.#fn.wallet_destroy.async( + wallet, + this.checkAsyncRes(resolve, reject, "walletDestroy") + ) + ); + } + + static filePartialBackup(original_file_path, backup_file_path) { + return new Promise((resolve, reject) => + this.#fn.file_partial_backup.async( + original_file_path, + backup_file_path, + this.error, + this.checkAsyncRes(resolve, reject, "filePartialBackup") + ) + ); + } + + static logDebugMessage(msg) { + return new Promise((resolve, reject) => + this.#fn.log_debug_message.async( + msg, + this.checkAsyncRes(resolve, reject, "logDebugMessage") + ) + ); + } + + static getEmojiSet() { + return new Promise((resolve, reject) => + this.#fn.get_emoji_set.async( + this.checkAsyncRes(resolve, reject, "getEmojiSet") + ) + ); + } + + static emojiSetDestroy(emoji_set) { + return new Promise((resolve, reject) => + this.#fn.emoji_set_destroy.async( + emoji_set, + this.checkAsyncRes(resolve, reject, "emojiSetDestroy") + ) + ); + } + + static emojiSetGetAt(emoji_set, position) { + return new Promise((resolve, reject) => + this.#fn.emoji_set_get_at.async( + emoji_set, + position, + this.error, + this.checkAsyncRes(resolve, reject, "emojiSetGetAt") + ) + ); + } + + static 
emojiSetGetLength(emoji_set) { + return new Promise((resolve, reject) => + this.#fn.emoji_set_get_length.async( + emoji_set, + this.error, + this.checkAsyncRes(resolve, reject, "emojiSetGetLength") + ) + ); + } +} +module.exports = WalletFFI; diff --git a/integration_tests/helpers/mergeMiningProxyClient.js b/integration_tests/helpers/mergeMiningProxyClient.js index 185c16bc08..b990d9e4e5 100644 --- a/integration_tests/helpers/mergeMiningProxyClient.js +++ b/integration_tests/helpers/mergeMiningProxyClient.js @@ -90,9 +90,12 @@ class MergeMiningProxyClient { const template = await this.getBlockTemplate(); const block = template.blocktemplate_blob; // Need to insert a nonce into the template as xmrig would for it to be a valid block. - await this.submitBlock(block); tipHeight = parseInt(await this.baseNodeClient.getTipHeight()); - } while (tipHeight < height); + if (tipHeight >= height) { + break; + } + await this.submitBlock(block); + } while (tipHeight + 1 < height); return await this.baseNodeClient.getTipHeight(); } } diff --git a/integration_tests/helpers/mergeMiningProxyProcess.js b/integration_tests/helpers/mergeMiningProxyProcess.js index d26a081668..7d29b8d866 100644 --- a/integration_tests/helpers/mergeMiningProxyProcess.js +++ b/integration_tests/helpers/mergeMiningProxyProcess.js @@ -29,7 +29,7 @@ class MergeMiningProxyProcess { } async init() { - this.port = await getFreePort(19000, 25000); + this.port = await getFreePort(); this.name = `MMProxy${this.port}-${this.name}`; this.baseDir = `./temp/base_nodes/${dateFormat( new Date(), @@ -46,6 +46,7 @@ class MergeMiningProxyProcess { } const proxyAddress = "127.0.0.1:" + this.port; + const envs = createEnv( this.name, false, @@ -57,12 +58,15 @@ class MergeMiningProxyProcess { this.nodeGrpcPort, this.baseNodePort, proxyAddress, + "127.0.0.1:8085", [], [] ); const extraEnvs = { TARI_MERGE_MINING_PROXY__LOCALNET__PROXY_SUBMIT_TO_ORIGIN: this.submitOrigin, + TARI_MERGE_MINING_PROXY__LOCALNET__monerod_url: + 
"http://3.104.4.129:18081", }; const completeEnvs = { ...envs, ...extraEnvs }; const ps = spawn(cmd, args, { diff --git a/integration_tests/helpers/miningNodeProcess.js b/integration_tests/helpers/miningNodeProcess.js index faf05c7429..edee1a9301 100644 --- a/integration_tests/helpers/miningNodeProcess.js +++ b/integration_tests/helpers/miningNodeProcess.js @@ -69,6 +69,7 @@ class MiningNodeProcess { this.nodeGrpcPort, this.baseNodePort, "127.0.0.1:8084", + "127.0.0.1:8085", { mineOnTipOnly: this.mineOnTipOnly, numMiningThreads: this.numMiningThreads, diff --git a/integration_tests/helpers/transactionBuilder.js b/integration_tests/helpers/transactionBuilder.js index 3e6f97f2df..a9fef132ca 100644 --- a/integration_tests/helpers/transactionBuilder.js +++ b/integration_tests/helpers/transactionBuilder.js @@ -167,13 +167,15 @@ class TransactionBuilder { flags: 0, maturity: 0, }; - let key = Math.floor(Math.random() * 500 + 1); + let key = Math.floor(Math.random() * 500000000000 + 1); let privateKey = Buffer.from(toLittleEndian(key, 256)).toString("hex"); - let scriptKey = Math.floor(Math.random() * 500 + 1); + let scriptKey = Math.floor(Math.random() * 500000000000 + 1); let scriptPrivateKey = Buffer.from(toLittleEndian(scriptKey, 256)).toString( "hex" ); - let scriptOffsetPrivateKeyNum = Math.floor(Math.random() * 500 + 1); + let scriptOffsetPrivateKeyNum = Math.floor( + Math.random() * 500000000000 + 1 + ); let scriptOffsetPrivateKey = Buffer.from( toLittleEndian(scriptOffsetPrivateKeyNum, 256) ).toString("hex"); @@ -339,7 +341,9 @@ class TransactionBuilder { flags: 1, maturity: lockHeight, }; - let scriptOffsetPrivateKeyNum = Math.floor(Math.random() * 500 + 1); + let scriptOffsetPrivateKeyNum = Math.floor( + Math.random() * 500000000000 + 1 + ); let scriptOffsetPrivateKey = Buffer.from( toLittleEndian(scriptOffsetPrivateKeyNum, 256) ).toString("hex"); diff --git a/integration_tests/helpers/util.js b/integration_tests/helpers/util.js index 7943f46994..d18dcebe38 
100644 --- a/integration_tests/helpers/util.js +++ b/integration_tests/helpers/util.js @@ -1,6 +1,4 @@ const net = require("net"); -const yargs = require("yargs"); -const { hideBin } = require("yargs/helpers"); const { blake2bInit, blake2bUpdate, blake2bFinal } = require("blakejs"); @@ -28,6 +26,50 @@ function withTimeout(ms, promise, message = "") { return Promise.race([timeout, promise]); } +async function tryConnect(makeClient, opts = {}) { + const options = Object.assign( + { + deadline: Infinity, + maxAttempts: 3, + }, + opts + ); + let attempts = 0; + for (;;) { + let client = makeClient(); + + // Don't log the uninteresting case + if (attempts > 0) { + console.warn( + `GRPC connection attempt ${attempts + 1}/${options.maxAttempts}` + ); + } + let error = await new Promise((resolve) => { + client.waitForReady(options.deadline, (err) => { + if (err) { + return resolve(err); + } + resolve(null); + }); + }); + + if (error) { + if (attempts >= options.maxAttempts) { + throw error; + } + attempts++; + console.error( + `Failed connection attempt ${attempts + 1}/${options.maxAttempts}` + ); + console.error(error); + await sleep(1000); + continue; + } + + return client; + } +} + async function waitFor( asyncTestFn, toBe, @@ -55,7 +97,7 @@ async function waitFor( } catch (e) { if (i > 1) { if (e && e.code && e.code === NO_CONNECTION) { - console.log("No connection yet (waitFor)..."); + // console.log("No connection yet (waitFor)..."); } else { console.error("Error in waitFor: ", e); } @@ -121,52 +163,23 @@ function hexSwitchEndianness(val) { return res; } -// Thanks to https://stackoverflow.com/questions/29860354/in-nodejs-how-do-i-check-if-a-port-is-listening-or-in-use -const portInUse = function (port, callback) { - const server = net.createServer(function (socket) { - socket.write("Echo server\r\n"); - socket.pipe(socket); - }); - - server.listen(port, "127.0.0.1"); - server.on("error", function () { - callback(true); - }); - server.on("listening", function () { - 
server.close(); - callback(false); - }); -}; - -let index = 0; -const getFreePort = async function (from, to) { - function testPort(port) { - return new Promise((r) => { - portInUse(port, (v) => { - if (v) { - r(false); +const getFreePort = function () { + return new Promise((resolve, reject) => { + const srv = net.createServer(function (_sock) {}); + srv.listen(0, function () { + let { port } = srv.address(); + srv.close((err) => { + if (err) { + reject(err); + } else { + resolve(port); } - r(true); }); }); - } - - let port = from + index; - if (port > to) { - index = from; - port = from; - } - while (port < to) { - // let port = getRandomInt(from, to); - // await sleep(100); - port++; - index++; - const notInUse = await testPort(port); - // console.log("Port not in use:", notInUse); - if (notInUse) { - return port; - } - } + srv.on("error", function (err) { + reject(err); + }); + }); }; const getTransactionOutputHash = function (output) { @@ -188,23 +201,17 @@ const getTransactionOutputHash = function (output) { return Buffer.from(final); }; -function consoleLogTransactionDetails(txnDetails, txId) { - const found = txnDetails[0]; - const status = txnDetails[1]; - if (found) { - console.log( - " Transaction " + - pad("'" + status.transactions[0].tx_id + "'", 24) + - " has status " + - pad("'" + status.transactions[0].status + "'", 40) + - " and " + - pad("is_cancelled(" + status.transactions[0].is_cancelled + ")", 21) + - " and " + - pad("is_valid(" + status.transactions[0].valid + ")", 16) - ); - } else { - console.log(" Transaction '" + txId + "' " + status); - } +function consoleLogTransactionDetails(txnDetails) { + console.log( + " Transaction " + + pad("'" + txnDetails.tx_id + "'", 24) + + " has status " + + pad("'" + txnDetails.status + "'", 40) + + " and " + + pad("is_cancelled(" + txnDetails.is_cancelled + ")", 21) + + " and " + + pad("is_valid(" + txnDetails.valid + ")", 16) + ); } function consoleLogBalance(balance) { @@ -219,19 +226,6 @@ function 
consoleLogBalance(balance) { ); } -function consoleLogCoinbaseDetails(txnDetails) { - console.log( - " Transaction " + - pad("'" + txnDetails.tx_id + "'", 24) + - " has status " + - pad("'" + txnDetails.status + "'", 40) + - " and " + - pad("is_cancelled(" + txnDetails.is_cancelled + ")", 21) + - " and " + - pad("is_valid(" + txnDetails.valid + ")", 16) - ); -} - function pad(str, length, padLeft = true) { const padding = Array(length).join(" "); if (typeof str === "undefined") return padding; @@ -280,14 +274,12 @@ module.exports = { getTransactionOutputHash, hexSwitchEndianness, consoleLogTransactionDetails, + tryConnect, consoleLogBalance, - consoleLogCoinbaseDetails, withTimeout, combineTwoTariKeys, byteArrayToHex, waitForPredicate, - yargs: () => yargs(hideBin(process.argv)), - NO_CONNECTION, }; diff --git a/integration_tests/helpers/walletClient.js b/integration_tests/helpers/walletClient.js index 3b363ca44b..f3a38ff35a 100644 --- a/integration_tests/helpers/walletClient.js +++ b/integration_tests/helpers/walletClient.js @@ -1,5 +1,5 @@ const { Client } = require("wallet-grpc-client"); -const { byteArrayToHex } = require("./util"); +const { byteArrayToHex, tryConnect } = require("./util"); function transactionStatus() { return [ @@ -14,11 +14,15 @@ function transactionStatus() { } class WalletClient { - constructor(walletAddress, name) { - this.client = Client.connect(walletAddress); + constructor(name) { + this.client = null; this.name = name; } + async connect(walletAddress) { + this.client = await tryConnect(() => Client.connect(walletAddress)); + } + async getVersion() { return await this.client.getVersion(); } @@ -121,7 +125,7 @@ class WalletClient { } async getAllNormalTransactions() { - const data = this.getCompletedTransactions(); + const data = await this.getCompletedTransactions(); const transactions = []; for (let i = 0; i < data.length; i++) { if ( @@ -218,6 +222,25 @@ class WalletClient { } } + async isTransactionPending(tx_id) { + try { + const 
txnDetails = await this.getTransactionInfo({ + transaction_ids: [tx_id.toString()], + }); + if ( + transactionStatus().indexOf(txnDetails.transactions[0].status) == 2 && + txnDetails.transactions[0].valid + ) { + return true; + } else { + return false; + } + } catch (err) { + // Any error here must be treated as if the required status was not achieved + return false; + } + } + async isTransactionAtLeastCompleted(tx_id) { try { const txnDetails = await this.getTransactionInfo({ @@ -348,6 +371,22 @@ class WalletClient { num_node_connections: +resp.num_node_connections, }; } + async cancelTransaction(tx_id) { + try { + const result = await this.client.cancelTransaction({ + tx_id: tx_id, + }); + return { + success: result.is_success, + failure_message: result.failure_message, + }; + } catch (err) { + return { + success: false, + failure_message: err, + }; + } + } } module.exports = WalletClient; diff --git a/integration_tests/helpers/walletFFIClient.js b/integration_tests/helpers/walletFFIClient.js new file mode 100644 index 0000000000..60596c8e34 --- /dev/null +++ b/integration_tests/helpers/walletFFIClient.js @@ -0,0 +1,441 @@ +const WalletFFI = require("./ffi/walletFFI"); +const { getFreePort } = require("./util"); +const dateFormat = require("dateformat"); +const { expect } = require("chai"); +const PublicKey = require("./ffi/publicKey"); +const CompletedTransactions = require("./ffi/completedTransactions"); +const PendingOutboundTransactions = require("./ffi/pendingOutboundTransactions"); +const Contact = require("./ffi/contact"); +const Contacts = require("./ffi/contacts"); +const SeedWords = require("./ffi/seedWords"); + +class WalletFFIClient { + #name; + #wallet; + #comms_config; + #port; + #callback_received_transaction; + #callback_received_transaction_reply; + #callback_received_finalized_transaction; + #callback_transaction_broadcast; + #callback_transaction_mined; + #callback_transaction_mined_unconfirmed; + #callback_direct_send_result; + 
#callback_store_and_forward_send_result; + #callback_transaction_cancellation; + #callback_utxo_validation_complete; + #callback_stxo_validation_complete; + #callback_invalid_txo_validation_complete; + #callback_transaction_validation_complete; + #callback_saf_message_received; + #recovery_progress_callback; + + #callbackReceivedTransaction = (..._args) => { + console.log(`${new Date().toISOString()} callbackReceivedTransaction`); + this.receivedTransaction += 1; + }; + #callbackReceivedTransactionReply = (..._args) => { + console.log(`${new Date().toISOString()} callbackReceivedTransactionReply`); + this.receivedTransactionReply += 1; + }; + #callbackReceivedFinalizedTransaction = (..._args) => { + console.log( + `${new Date().toISOString()} callbackReceivedFinalizedTransaction` + ); + }; + #callbackTransactionBroadcast = (..._args) => { + console.log(`${new Date().toISOString()} callbackTransactionBroadcast`); + this.transactionBroadcast += 1; + }; + #callbackTransactionMined = (..._args) => { + console.log(`${new Date().toISOString()} callbackTransactionMined`); + this.transactionMined += 1; + }; + #callbackTransactionMinedUnconfirmed = (..._args) => { + console.log( + `${new Date().toISOString()} callbackTransactionMinedUnconfirmed` + ); + }; + #callbackDirectSendResult = (..._args) => { + console.log(`${new Date().toISOString()} callbackDirectSendResult`); + }; + #callbackStoreAndForwardSendResult = (..._args) => { + console.log( + `${new Date().toISOString()} callbackStoreAndForwardSendResult` + ); + }; + #callbackTransactionCancellation = (..._args) => { + console.log(`${new Date().toISOString()} callbackTransactionCancellation`); + }; + #callbackUtxoValidationComplete = (_request_key, validation_results) => { + console.log(`${new Date().toISOString()} callbackUtxoValidationComplete`); + this.utxo_validation_complete = true; + this.utxo_validation_result = validation_results; + }; + #callbackStxoValidationComplete = (_request_key, validation_results) => { + 
console.log(`${new Date().toISOString()} callbackStxoValidationComplete`); + this.stxo_validation_complete = true; + this.stxo_validation_result = validation_results; + }; + #callbackInvalidTxoValidationComplete = (..._args) => { + console.log( + `${new Date().toISOString()} callbackInvalidTxoValidationComplete` + ); + }; + #callbackTransactionValidationComplete = (..._args) => { + console.log( + `${new Date().toISOString()} callbackTransactionValidationComplete` + ); + }; + #callbackSafMessageReceived = (..._args) => { + console.log(`${new Date().toISOString()} callbackSafMessageReceived`); + }; + #recoveryProgressCallback = (a, b, c) => { + console.log(`${new Date().toISOString()} recoveryProgressCallback`); + if (a == 3) + // Progress + this.recoveryProgress = [b, c]; + if (a == 4) + // Completed + this.recoveryInProgress = false; + }; + + clearCallbackCounters() { + this.receivedTransaction = + this.receivedTransactionReply = + this.transactionBroadcast = + this.transactionMined = + 0; + } + + constructor(name) { + this.#wallet = null; + this.#name = name; + this.baseDir = ""; + this.clearCallbackCounters(); + + // Create the ffi callbacks + this.#callback_received_transaction = + WalletFFI.createCallbackReceivedTransaction( + this.#callbackReceivedTransaction + ); + this.#callback_received_transaction_reply = + WalletFFI.createCallbackReceivedTransactionReply( + this.#callbackReceivedTransactionReply + ); + this.#callback_received_finalized_transaction = + WalletFFI.createCallbackReceivedFinalizedTransaction( + this.#callbackReceivedFinalizedTransaction + ); + this.#callback_transaction_broadcast = + WalletFFI.createCallbackTransactionBroadcast( + this.#callbackTransactionBroadcast + ); + this.#callback_transaction_mined = WalletFFI.createCallbackTransactionMined( + this.#callbackTransactionMined + ); + this.#callback_transaction_mined_unconfirmed = + WalletFFI.createCallbackTransactionMinedUnconfirmed( + this.#callbackTransactionMinedUnconfirmed + ); + 
this.#callback_direct_send_result = + WalletFFI.createCallbackDirectSendResult(this.#callbackDirectSendResult); + this.#callback_store_and_forward_send_result = + WalletFFI.createCallbackStoreAndForwardSendResult( + this.#callbackStoreAndForwardSendResult + ); + this.#callback_transaction_cancellation = + WalletFFI.createCallbackTransactionCancellation( + this.#callbackTransactionCancellation + ); + this.#callback_utxo_validation_complete = + WalletFFI.createCallbackUtxoValidationComplete( + this.#callbackUtxoValidationComplete + ); + this.#callback_stxo_validation_complete = + WalletFFI.createCallbackStxoValidationComplete( + this.#callbackStxoValidationComplete + ); + this.#callback_invalid_txo_validation_complete = + WalletFFI.createCallbackInvalidTxoValidationComplete( + this.#callbackInvalidTxoValidationComplete + ); + this.#callback_transaction_validation_complete = + WalletFFI.createCallbackTransactionValidationComplete( + this.#callbackTransactionValidationComplete + ); + this.#callback_saf_message_received = + WalletFFI.createCallbackSafMessageReceived( + this.#callbackSafMessageReceived + ); + this.#recovery_progress_callback = WalletFFI.createRecoveryProgressCallback( + this.#recoveryProgressCallback + ); + } + + static async Init() { + await WalletFFI.Init(); + } + + async startNew(seed_words_text) { + this.#port = await getFreePort(19000, 25000); + const name = `WalletFFI${this.#port}-${this.#name}`; + this.baseDir = `./temp/base_nodes/${dateFormat( + new Date(), + "yyyymmddHHMM" + )}/${name}`; + const tcp = await WalletFFI.transportTcpCreate( + `/ip4/0.0.0.0/tcp/${this.#port}` + ); + this.#comms_config = await WalletFFI.commsConfigCreate( + `/ip4/0.0.0.0/tcp/${this.#port}`, + tcp, + "wallet.dat", + this.baseDir, + 30, + 600, + "localnet" + ); + await this.start(seed_words_text); + } + + async start(seed_words_text) { + let seed_words; + let seed_words_ptr = WalletFFI.NULL; + if (seed_words_text) { + seed_words = await 
SeedWords.fromString(seed_words_text); + seed_words_ptr = seed_words.getPtr(); + } + this.#wallet = await WalletFFI.walletCreate( + this.#comms_config, + `${this.baseDir}/log/wallet.log`, + 50, + 102400, + WalletFFI.NULL, + seed_words_ptr, + this.#callback_received_transaction, + this.#callback_received_transaction_reply, + this.#callback_received_finalized_transaction, + this.#callback_transaction_broadcast, + this.#callback_transaction_mined, + this.#callback_transaction_mined_unconfirmed, + this.#callback_direct_send_result, + this.#callback_store_and_forward_send_result, + this.#callback_transaction_cancellation, + this.#callback_utxo_validation_complete, + this.#callback_stxo_validation_complete, + this.#callback_invalid_txo_validation_complete, + this.#callback_transaction_validation_complete, + this.#callback_saf_message_received + ); + if (seed_words) await seed_words.destroy(); + } + + async startRecovery(base_node_pubkey) { + const node_pubkey = await PublicKey.fromString(base_node_pubkey); + expect( + await WalletFFI.walletStartRecovery( + this.#wallet, + node_pubkey.getPtr(), + this.#recovery_progress_callback + ) + ).to.be.true; + node_pubkey.destroy(); + this.recoveryInProgress = true; + } + + recoveryInProgress() { + return this.recoveryInProgress; + } + + async stop() { + await WalletFFI.walletDestroy(this.#wallet); + } + + async getPublicKey() { + const public_key = await PublicKey.fromWallet(this.#wallet); + const public_key_hex = public_key.getHex(); + public_key.destroy(); + return public_key_hex; + } + + async getEmojiId() { + const public_key = await PublicKey.fromWallet(this.#wallet); + const emoji_id = await public_key.getEmojiId(); + public_key.destroy(); + return emoji_id; + } + + async getBalance() { + return await WalletFFI.walletGetAvailableBalance(this.#wallet); + } + + async addBaseNodePeer(public_key_hex, address) { + const public_key = await PublicKey.fromString(public_key_hex); + expect( + await WalletFFI.walletAddBaseNodePeer( + 
this.#wallet, + public_key.getPtr(), + address + ) + ).to.be.true; + await public_key.destroy(); + } + + async sendTransaction(destination, amount, fee_per_gram, message) { + const dest_public_key = await PublicKey.fromString(destination); + const result = await WalletFFI.walletSendTransaction( + this.#wallet, + dest_public_key.getPtr(), + amount, + fee_per_gram, + message + ); + await dest_public_key.destroy(); + return result; + } + + async applyEncryption(passphrase) { + await WalletFFI.walletApplyEncryption(this.#wallet, passphrase); + } + + async getCompletedTransactions() { + const txs = await CompletedTransactions.fromWallet(this.#wallet); + const length = await txs.getLength(); + let outbound = 0; + let inbound = 0; + for (let i = 0; i < length; ++i) { + const tx = await txs.getAt(i); + if (await tx.isOutbound()) { + ++outbound; + } else { + ++inbound; + } + tx.destroy(); + } + txs.destroy(); + return [outbound, inbound]; + } + + async getBroadcastTransactionsCount() { + let broadcast_tx_cnt = 0; + const txs = await PendingOutboundTransactions.fromWallet(this.#wallet); + const length = await txs.getLength(); + for (let i = 0; i < length; ++i) { + const tx = await txs.getAt(i); + const status = await tx.getStatus(); + tx.destroy(); + if (status === 1) { + // Broadcast + broadcast_tx_cnt += 1; + } + } + await txs.destroy(); + return broadcast_tx_cnt; + } + + async getOutboundTransactionsCount() { + let outbound_tx_cnt = 0; + const txs = await PendingOutboundTransactions.fromWallet(this.#wallet); + const length = await txs.getLength(); + for (let i = 0; i < length; ++i) { + const tx = await txs.getAt(i); + const status = await tx.getStatus(); + if (status === 4) { + // Pending + outbound_tx_cnt += 1; + } + tx.destroy(); + } + await txs.destroy(); + return outbound_tx_cnt; + } + + async addContact(alias, pubkey_hex) { + const public_key = await PublicKey.fromString(pubkey_hex); + const contact = new Contact( + await WalletFFI.contactCreate(alias, 
public_key.getPtr()) + ); + public_key.destroy(); + expect(await WalletFFI.walletUpsertContact(this.#wallet, contact.getPtr())) + .to.be.true; + contact.destroy(); + } + + async #findContact(lookup_alias) { + const contacts = await Contacts.fromWallet(this.#wallet); + const length = await contacts.getLength(); + let contact; + for (let i = 0; i < length; ++i) { + contact = await contacts.getAt(i); + const alias = await contact.getAlias(); + const found = alias === lookup_alias; + if (found) { + break; + } + contact.destroy(); + contact = undefined; + } + contacts.destroy(); + return contact; + } + + async getContact(alias) { + const contact = await this.#findContact(alias); + if (contact) { + const pubkey = await contact.getPubkey(); + const pubkey_hex = pubkey.getHex(); + pubkey.destroy(); + contact.destroy(); + return pubkey_hex; + } + } + + async removeContact(alias) { + const contact = await this.#findContact(alias); + if (contact) { + expect( + await WalletFFI.walletRemoveContact(this.#wallet, contact.getPtr()) + ).to.be.true; + contact.destroy(); + } + } + + async identify() { + return { + public_key: await this.getPublicKey(), + }; + } + + async cancelAllOutboundTransactions() { + const txs = await PendingOutboundTransactions.fromWallet(this.#wallet); + const length = await txs.getLength(); + let cancelled = 0; + for (let i = 0; i < length; ++i) { + const tx = await txs.getAt(i); + if ( + await WalletFFI.walletCancelPendingTransaction( + this.#wallet, + await tx.getTransactionId() + ) + ) { + ++cancelled; + } + tx.destroy(); + } + txs.destroy(); + return cancelled; + } + + startUtxoValidation() { + this.utxo_validation_complete = false; + return WalletFFI.walletStartUtxoValidation(this.#wallet); + } + + startStxoValidation() { + this.stxo_validation_complete = false; + return WalletFFI.walletStartStxoValidation(this.#wallet); + } +} + +module.exports = WalletFFIClient; diff --git a/integration_tests/helpers/walletProcess.js 
b/integration_tests/helpers/walletProcess.js index 80c291c407..4dd70c6932 100644 --- a/integration_tests/helpers/walletProcess.js +++ b/integration_tests/helpers/walletProcess.js @@ -7,13 +7,19 @@ const { expect } = require("chai"); const { createEnv } = require("./config"); const WalletClient = require("./walletClient"); const csvParser = require("csv-parser"); +var tari_crypto = require("tari_crypto"); let outputProcess; class WalletProcess { constructor(name, excludeTestEnvars, options, logFilePath, seedWords) { this.name = name; - this.options = options; + this.options = Object.assign( + { + baseDir: "./temp/base_nodes", + }, + options || {} + ); this.logFilePath = logFilePath ? path.resolve(logFilePath) : logFilePath; this.recoverWallet = !!seedWords; this.seedWords = seedWords; @@ -21,10 +27,10 @@ class WalletProcess { } async init() { - this.port = await getFreePort(19000, 25000); + this.port = await getFreePort(); this.name = `Wallet${this.port}-${this.name}`; - this.grpcPort = await getFreePort(19000, 25000); - this.baseDir = `./temp/base_nodes/${dateFormat( + this.grpcPort = await getFreePort(); + this.baseDir = `${this.options.baseDir}/${dateFormat( new Date(), "yyyymmddHHMM" )}/${this.name}`; @@ -35,8 +41,10 @@ class WalletProcess { return "127.0.0.1:" + this.grpcPort; } - getClient() { - return new WalletClient(this.getGrpcAddress(), this.name); + async connectClient() { + let client = new WalletClient(this.name); + await client.connect(this.getGrpcAddress()); + return client; } getSeedWords() { @@ -51,7 +59,7 @@ class WalletProcess { this.peerSeeds = addresses.join(","); } - run(cmd, args, saveFile, input_buffer) { + run(cmd, args, saveFile, input_buffer, output) { return new Promise((resolve, reject) => { if (!fs.existsSync(this.baseDir)) { fs.mkdirSync(this.baseDir, { recursive: true }); @@ -59,6 +67,11 @@ class WalletProcess { } let envs = {}; + const network = + this.options && this.options.network + ? 
this.options.network.toUpperCase() + : "LOCALNET"; + envs[`TARI_BASE_NODE__COMMON__NETWORK`] = network; if (!this.excludeTestEnvars) { envs = createEnv( this.name, @@ -71,17 +84,15 @@ class WalletProcess { "8080", "8081", "127.0.0.1:8084", + "127.0.0.1:8085", this.options, this.peerSeeds ); } else if (this.options["grpc_console_wallet_address"]) { - const network = - this.options && this.options.network - ? this.options.network.toUpperCase() - : "LOCALNET"; - envs[`TARI_BASE_NODE__${network}__GRPC_CONSOLE_WALLET_ADDRESS`] = this.options["grpc_console_wallet_address"]; + this.grpcPort = + this.options["grpc_console_wallet_address"].split(":")[1]; } if (saveFile) { @@ -100,6 +111,9 @@ class WalletProcess { } ps.stdout.on("data", (data) => { //console.log(`stdout: ${data}`); + if (output !== undefined && output.buffer !== undefined) { + output.buffer += data; + } fs.appendFileSync(`${this.baseDir}/log/stdout.log`, data.toString()); if ( (!this.recoverWallet && @@ -212,21 +226,25 @@ class WalletProcess { ); } - async setBaseNode(baseNode) { + async runCommand(command) { + // we need to quit the wallet before running a command + await this.stop(); const args = [ "--base-path", ".", "--password", "kensentme", "--command", - `set-base-node ${baseNode}`, + command, "--non-interactive", ]; if (this.logFilePath) { args.push("--log-config", this.logFilePath); } - // After the change of base node, the console is awaiting confirmation (Enter) or quit (q). - return await this.run(await this.compile(), args, true, "\n"); + let output = { buffer: "" }; + // In case we killed the wallet fast send enter. Because it will ask for the logs again (e.g. 
whois test) + await this.run(await this.compile(), args, true, "\n", output); + return output; } async exportSpentOutputs() { @@ -300,6 +318,43 @@ class WalletProcess { return unblinded_outputs; } + + // Faucet outputs are only provided with an amount and spending key so we zero out the other output data + // and update the input data to be the public key of the spending key, make the script private key the spending key + // and then we can test if this output is still spendable when imported into the wallet. + async readExportedOutputsAsFaucetOutputs() { + let outputs = await this.readExportedOutputs(); + for (let i = 0; i < outputs.length; i++) { + outputs[i].metadata_signature = { + public_nonce_commitment: Buffer.from( + "0000000000000000000000000000000000000000000000000000000000000000", + "hex" + ), + signature_u: Buffer.from( + "0000000000000000000000000000000000000000000000000000000000000000", + "hex" + ), + signature_v: Buffer.from( + "0000000000000000000000000000000000000000000000000000000000000000", + "hex" + ), + }; + outputs[i].sender_offset_public_key = Buffer.from( + "0000000000000000000000000000000000000000000000000000000000000000", + "hex" + ); + outputs[i].script_private_key = outputs[i].spending_key; + let scriptPublicKey = tari_crypto.pubkey_from_secret( + outputs[i].spending_key.toString("hex") + ); + let input_data = Buffer.concat([ + Buffer.from([0x04]), + Buffer.from(scriptPublicKey, "hex"), + ]); + outputs[i].input_data = input_data; + } + return outputs; + } } module.exports = WalletProcess; diff --git a/integration_tests/log4rs/base_node.yml b/integration_tests/log4rs/base_node.yml index 02fca7f6f9..78802da9cc 100644 --- a/integration_tests/log4rs/base_node.yml +++ b/integration_tests/log4rs/base_node.yml @@ -113,6 +113,11 @@ loggers: appenders: - base_layer additive: false + base_node: + level: debug + appenders: + - base_layer + additive: false # network comms: level: debug diff --git a/integration_tests/package-lock.json 
b/integration_tests/package-lock.json index 404f24f76c..b4b81f666d 100644 --- a/integration_tests/package-lock.json +++ b/integration_tests/package-lock.json @@ -5,6 +5,7 @@ "requires": true, "packages": { "": { + "name": "integration_tests", "version": "1.0.0", "license": "ISC", "dependencies": { @@ -15,14 +16,18 @@ "dateformat": "^3.0.3", "glob": "^7.1.7", "hex64": "^0.4.0", - "jszip": "^3.6.0", + "jszip": "^3.7.0", "sha3": "^2.1.3", "synchronized-promise": "^0.3.1", "tari_crypto": "^0.9.1", - "wallet-grpc-client": "file:../clients/wallet_grpc_client", - "yargs": "^17.0.1" + "utf8": "^3.0.0", + "wallet-grpc-client": "file:../clients/wallet_grpc_client" }, "devDependencies": { + "@babel/core": "^7.15.0", + "@babel/eslint-parser": "^7.15.0", + "@babel/eslint-plugin": "^7.14.5", + "@grpc/grpc-js": "^1.3.6", "@grpc/proto-loader": "^0.5.5", "blakejs": "^1.1.0", "chai": "^4.2.0", @@ -36,25 +41,25 @@ "eslint-plugin-node": "^11.1.0", "eslint-plugin-prettier": "^3.4.0", "eslint-plugin-promise": "^4.3.1", - "grpc": "^1.24.3", + "ffi-napi": "^4.0.3", "grpc-promise": "^1.4.0", "husky": "^6.0.0", - "prettier": "^2.2.1" + "prettier": "^2.2.1", + "ref-napi": "^3.0.3" } }, "../clients/wallet_grpc_client": { "name": "@tari/wallet-grpc-client", "version": "0.0.1", "dependencies": { - "@grpc/grpc-js": "^1.2.3", + "@grpc/grpc-js": "^1.3.6", "@grpc/proto-loader": "^0.5.5", "grpc-promise": "^1.4.0" } }, "../clients/wallet_grpc_client/node_modules/@grpc/grpc-js": { - "version": "1.3.4", - "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.3.4.tgz", - "integrity": "sha512-AxtZcm0mArQhY9z8T3TynCYVEaSKxNCa9mVhVwBCUnsuUEe8Zn94bPYYKVQSLt+hJJ1y0ukr3mUvtWfcATL/IQ==", + "version": "1.3.6", + "integrity": "sha512-v7+LQFbqZKmd/Tvf5/j1Xlbq6jXL/4d+gUtm2TNX4QiEC3ELWADmGr2dGlUyLl6aKTuYfsN72vAsO5zmavYkEg==", "dependencies": { "@types/node": ">=12.12.47" }, @@ -134,9 +139,8 @@ "integrity": "sha512-5tXH6Bx/kNGd3MgffdmP4dy2Z+G4eaXw0SE81Tq3BNadtnMR5/ySMzX4SLEzHJzSmPNn4HIdpQsBvXMUykr58w==" 
}, "../clients/wallet_grpc_client/node_modules/@types/node": { - "version": "16.3.0", - "resolved": "https://registry.npmjs.org/@types/node/-/node-16.3.0.tgz", - "integrity": "sha512-OydMCocGMGqw/1BnWbhtK+AtwyWTOigtrQlRe57OQmTNcI3HKlVI5FGlh+c4mSqInMPLynFrTlYjfajPu9O/eQ==" + "version": "16.3.2", + "integrity": "sha512-jJs9ErFLP403I+hMLGnqDRWT0RYKSvArxuBVh2veudHV7ifEC1WAmjJADacZ7mRbA2nWgHtn8xyECMAot0SkAw==" }, "../clients/wallet_grpc_client/node_modules/grpc-promise": { "version": "1.4.0", @@ -187,11 +191,310 @@ "@babel/highlight": "^7.10.4" } }, + "node_modules/@babel/compat-data": { + "version": "7.15.0", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.15.0.tgz", + "integrity": "sha512-0NqAC1IJE0S0+lL1SWFMxMkz1pKCNCjI4tr2Zx4LJSXxCLAdr6KyArnY+sno5m3yH9g737ygOyPABDsnXkpxiA==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.15.0", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.15.0.tgz", + "integrity": "sha512-tXtmTminrze5HEUPn/a0JtOzzfp0nk+UEXQ/tqIJo3WDGypl/2OFQEMll/zSFU8f/lfmfLXvTaORHF3cfXIQMw==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.14.5", + "@babel/generator": "^7.15.0", + "@babel/helper-compilation-targets": "^7.15.0", + "@babel/helper-module-transforms": "^7.15.0", + "@babel/helpers": "^7.14.8", + "@babel/parser": "^7.15.0", + "@babel/template": "^7.14.5", + "@babel/traverse": "^7.15.0", + "@babel/types": "^7.15.0", + "convert-source-map": "^1.7.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.1.2", + "semver": "^6.3.0", + "source-map": "^0.5.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/core/node_modules/@babel/code-frame": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.14.5.tgz", + "integrity": 
"sha512-9pzDqyc6OLDaqe+zbACgFkb6fKMNG6CObKpnYXChRsvYGyEdc7CA2BaqeOM+vOtCS5ndmJicPJhKAwYRI6UfFw==", + "dev": true, + "dependencies": { + "@babel/highlight": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core/node_modules/semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/eslint-parser": { + "version": "7.15.0", + "resolved": "https://registry.npmjs.org/@babel/eslint-parser/-/eslint-parser-7.15.0.tgz", + "integrity": "sha512-+gSPtjSBxOZz4Uh8Ggqu7HbfpB8cT1LwW0DnVVLZEJvzXauiD0Di3zszcBkRmfGGrLdYeHUwcflG7i3tr9kQlw==", + "dev": true, + "dependencies": { + "eslint-scope": "^5.1.1", + "eslint-visitor-keys": "^2.1.0", + "semver": "^6.3.0" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || >=14.0.0" + }, + "peerDependencies": { + "@babel/core": ">=7.11.0", + "eslint": ">=7.5.0" + } + }, + "node_modules/@babel/eslint-parser/node_modules/semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/eslint-plugin": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/eslint-plugin/-/eslint-plugin-7.14.5.tgz", + "integrity": "sha512-nzt/YMnOOIRikvSn2hk9+W2omgJBy6U8TN0R+WTTmqapA+HnZTuviZaketdTE9W7/k/+E/DfZlt1ey1NSE39pg==", + "dev": true, + "dependencies": { + "eslint-rule-composer": "^0.3.0" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || >=14.0.0" + }, + "peerDependencies": { + "@babel/eslint-parser": ">=7.11.0", + "eslint": ">=7.5.0" + } + }, + "node_modules/@babel/generator": { + "version": "7.15.0", + "resolved": 
"https://registry.npmjs.org/@babel/generator/-/generator-7.15.0.tgz", + "integrity": "sha512-eKl4XdMrbpYvuB505KTta4AV9g+wWzmVBW69tX0H2NwKVKd2YJbKgyK6M8j/rgLbmHOYJn6rUklV677nOyJrEQ==", + "dev": true, + "dependencies": { + "@babel/types": "^7.15.0", + "jsesc": "^2.5.1", + "source-map": "^0.5.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.15.0", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.15.0.tgz", + "integrity": "sha512-h+/9t0ncd4jfZ8wsdAsoIxSa61qhBYlycXiHWqJaQBCXAhDCMbPRSMTGnZIkkmt1u4ag+UQmuqcILwqKzZ4N2A==", + "dev": true, + "dependencies": { + "@babel/compat-data": "^7.15.0", + "@babel/helper-validator-option": "^7.14.5", + "browserslist": "^4.16.6", + "semver": "^6.3.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-compilation-targets/node_modules/semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-function-name": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.14.5.tgz", + "integrity": "sha512-Gjna0AsXWfFvrAuX+VKcN/aNNWonizBj39yGwUzVDVTlMYJMK2Wp6xdpy72mfArFq5uK+NOuexfzZlzI1z9+AQ==", + "dev": true, + "dependencies": { + "@babel/helper-get-function-arity": "^7.14.5", + "@babel/template": "^7.14.5", + "@babel/types": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-get-function-arity": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/helper-get-function-arity/-/helper-get-function-arity-7.14.5.tgz", + "integrity": 
"sha512-I1Db4Shst5lewOM4V+ZKJzQ0JGGaZ6VY1jYvMghRjqs6DWgxLCIyFt30GlnKkfUeFLpJt2vzbMVEXVSXlIFYUg==", + "dev": true, + "dependencies": { + "@babel/types": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-hoist-variables": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.14.5.tgz", + "integrity": "sha512-R1PXiz31Uc0Vxy4OEOm07x0oSjKAdPPCh3tPivn/Eo8cvz6gveAeuyUUPB21Hoiif0uoPQSSdhIPS3352nvdyQ==", + "dev": true, + "dependencies": { + "@babel/types": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-member-expression-to-functions": { + "version": "7.15.0", + "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.15.0.tgz", + "integrity": "sha512-Jq8H8U2kYiafuj2xMTPQwkTBnEEdGKpT35lJEQsRRjnG0LW3neucsaMWLgKcwu3OHKNeYugfw+Z20BXBSEs2Lg==", + "dev": true, + "dependencies": { + "@babel/types": "^7.15.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.14.5.tgz", + "integrity": "sha512-SwrNHu5QWS84XlHwGYPDtCxcA0hrSlL2yhWYLgeOc0w7ccOl2qv4s/nARI0aYZW+bSwAL5CukeXA47B/1NKcnQ==", + "dev": true, + "dependencies": { + "@babel/types": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.15.0", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.15.0.tgz", + "integrity": "sha512-RkGiW5Rer7fpXv9m1B3iHIFDZdItnO2/BLfWVW/9q7+KqQSDY5kUfQEbzdXM1MVhJGcugKV7kRrNVzNxmk7NBg==", + "dev": true, + "dependencies": { + "@babel/helper-module-imports": "^7.14.5", + "@babel/helper-replace-supers": "^7.15.0", + "@babel/helper-simple-access": "^7.14.8", + "@babel/helper-split-export-declaration": 
"^7.14.5", + "@babel/helper-validator-identifier": "^7.14.9", + "@babel/template": "^7.14.5", + "@babel/traverse": "^7.15.0", + "@babel/types": "^7.15.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-optimise-call-expression": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.14.5.tgz", + "integrity": "sha512-IqiLIrODUOdnPU9/F8ib1Fx2ohlgDhxnIDU7OEVi+kAbEZcyiF7BLU8W6PfvPi9LzztjS7kcbzbmL7oG8kD6VA==", + "dev": true, + "dependencies": { + "@babel/types": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-replace-supers": { + "version": "7.15.0", + "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.15.0.tgz", + "integrity": "sha512-6O+eWrhx+HEra/uJnifCwhwMd6Bp5+ZfZeJwbqUTuqkhIT6YcRhiZCOOFChRypOIe0cV46kFrRBlm+t5vHCEaA==", + "dev": true, + "dependencies": { + "@babel/helper-member-expression-to-functions": "^7.15.0", + "@babel/helper-optimise-call-expression": "^7.14.5", + "@babel/traverse": "^7.15.0", + "@babel/types": "^7.15.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-simple-access": { + "version": "7.14.8", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.14.8.tgz", + "integrity": "sha512-TrFN4RHh9gnWEU+s7JloIho2T76GPwRHhdzOWLqTrMnlas8T9O7ec+oEDNsRXndOmru9ymH9DFrEOxpzPoSbdg==", + "dev": true, + "dependencies": { + "@babel/types": "^7.14.8" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-split-export-declaration": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.14.5.tgz", + "integrity": "sha512-hprxVPu6e5Kdp2puZUmvOGjaLv9TCe58E/Fl6hRq4YiVQxIcNvuq6uTM2r1mT/oPskuS9CgR+I94sqAYv0NGKA==", + "dev": true, + "dependencies": { + "@babel/types": "^7.14.5" + }, + 
"engines": { + "node": ">=6.9.0" + } + }, "node_modules/@babel/helper-validator-identifier": { + "version": "7.14.9", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.14.9.tgz", + "integrity": "sha512-pQYxPY0UP6IHISRitNe8bsijHex4TWZXi2HwKVsjPiltzlhse2znVcm9Ace510VT1kxIHjGJCZZQBX2gJDbo0g==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { "version": "7.14.5", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.14.5.tgz", - "integrity": "sha512-5lsetuxCLilmVGyiLEfoHBRX8UCFD+1m2x3Rj97WrW3V7H3u4RWRXA4evMjImCsin2J2YT0QaVDGf+z8ondbAg==", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.14.5.tgz", + "integrity": "sha512-OX8D5eeX4XwcroVW45NMvoYaIuFI+GQpA2a8Gi+X/U/cDUIRsV37qQfF905F0htTRCREQIB4KqPeaveRJUl3Ow==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.14.8", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.14.8.tgz", + "integrity": "sha512-ZRDmI56pnV+p1dH6d+UN6GINGz7Krps3+270qqI9UJ4wxYThfAIcI5i7j5vXC4FJ3Wap+S9qcebxeYiqn87DZw==", "dev": true, + "dependencies": { + "@babel/template": "^7.14.5", + "@babel/traverse": "^7.14.8", + "@babel/types": "^7.14.8" + }, "engines": { "node": ">=6.9.0" } @@ -210,6 +513,18 @@ "node": ">=6.9.0" } }, + "node_modules/@babel/parser": { + "version": "7.15.2", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.15.2.tgz", + "integrity": "sha512-bMJXql1Ss8lFnvr11TZDH4ArtwlAS5NG9qBmdiFW2UHHm6MVoR+GDc5XE2b9K938cyjc9O6/+vjjcffLDtfuDg==", + "dev": true, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, "node_modules/@babel/runtime-corejs3": { "version": "7.14.7", "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.14.7.tgz", @@ -223,6 +538,86 @@ 
"node": ">=6.9.0" } }, + "node_modules/@babel/template": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.14.5.tgz", + "integrity": "sha512-6Z3Po85sfxRGachLULUhOmvAaOo7xCvqGQtxINai2mEGPFm6pQ4z5QInFnUrRpfoSV60BnjyF5F3c+15fxFV1g==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.14.5", + "@babel/parser": "^7.14.5", + "@babel/types": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/template/node_modules/@babel/code-frame": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.14.5.tgz", + "integrity": "sha512-9pzDqyc6OLDaqe+zbACgFkb6fKMNG6CObKpnYXChRsvYGyEdc7CA2BaqeOM+vOtCS5ndmJicPJhKAwYRI6UfFw==", + "dev": true, + "dependencies": { + "@babel/highlight": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.15.0", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.15.0.tgz", + "integrity": "sha512-392d8BN0C9eVxVWd8H6x9WfipgVH5IaIoLp23334Sc1vbKKWINnvwRpb4us0xtPaCumlwbTtIYNA0Dv/32sVFw==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.14.5", + "@babel/generator": "^7.15.0", + "@babel/helper-function-name": "^7.14.5", + "@babel/helper-hoist-variables": "^7.14.5", + "@babel/helper-split-export-declaration": "^7.14.5", + "@babel/parser": "^7.15.0", + "@babel/types": "^7.15.0", + "debug": "^4.1.0", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse/node_modules/@babel/code-frame": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.14.5.tgz", + "integrity": "sha512-9pzDqyc6OLDaqe+zbACgFkb6fKMNG6CObKpnYXChRsvYGyEdc7CA2BaqeOM+vOtCS5ndmJicPJhKAwYRI6UfFw==", + "dev": true, + "dependencies": { + "@babel/highlight": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse/node_modules/globals": { + "version": 
"11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/types": { + "version": "7.15.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.15.0.tgz", + "integrity": "sha512-OBvfqnllOIdX4ojTHpwZbpvz4j3EWyjkZEdmjH0/cgsd6QOdSgU8rLSk6ard/pcW7rlmjdVSX/AWOaORR1uNOQ==", + "dev": true, + "dependencies": { + "@babel/helper-validator-identifier": "^7.14.9", + "to-fast-properties": "^2.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, "node_modules/@eslint/eslintrc": { "version": "0.4.2", "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-0.4.2.tgz", @@ -243,6 +638,17 @@ "node": "^10.12.0 || >=12.0.0" } }, + "node_modules/@grpc/grpc-js": { + "version": "1.3.6", + "integrity": "sha512-v7+LQFbqZKmd/Tvf5/j1Xlbq6jXL/4d+gUtm2TNX4QiEC3ELWADmGr2dGlUyLl6aKTuYfsN72vAsO5zmavYkEg==", + "dev": true, + "dependencies": { + "@types/node": ">=12.12.47" + }, + "engines": { + "node": "^8.13.0 || >=10.10.0" + } + }, "node_modules/@grpc/proto-loader": { "version": "0.5.6", "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.5.6.tgz", @@ -276,26 +682,6 @@ "integrity": "sha512-wdppn25U8z/2yiaT6YGquE6X8sSv7hNMWSXYSSU1jGv/yd6XqjXgTDJ8KP4NgjTXfJ3GbRjeeb8RTV7a/VpM+w==", "dev": true }, - "node_modules/@mapbox/node-pre-gyp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/@mapbox/node-pre-gyp/-/node-pre-gyp-1.0.5.tgz", - "integrity": "sha512-4srsKPXWlIxp5Vbqz5uLfBN+du2fJChBoYn/f2h991WLdk7jUvcSk/McVLSv/X+xQIPI8eGD5GjrnygdyHnhPA==", - "dev": true, - "dependencies": { - "detect-libc": "^1.0.3", - "https-proxy-agent": "^5.0.0", - "make-dir": "^3.1.0", - "node-fetch": "^2.6.1", - "nopt": "^5.0.0", - "npmlog": "^4.1.2", - "rimraf": "^3.0.2", - "semver": "^7.3.4", - "tar": "^6.1.0" - }, - "bin": { - "node-pre-gyp": 
"bin/node-pre-gyp" - } - }, "node_modules/@protobufjs/aspromise": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", @@ -360,16 +746,6 @@ "integrity": "sha1-p3c2C1s5oaLlEG+OhY8v0tBgxXA=", "dev": true }, - "node_modules/@types/bytebuffer": { - "version": "5.0.42", - "resolved": "https://registry.npmjs.org/@types/bytebuffer/-/bytebuffer-5.0.42.tgz", - "integrity": "sha512-lEgKojWUAc/MG2t649oZS5AfYFP2xRNPoDuwDBlBMjHXd8MaGPgFgtCXUK7inZdBOygmVf10qxc1Us8GXC96aw==", - "dev": true, - "dependencies": { - "@types/long": "*", - "@types/node": "*" - } - }, "node_modules/@types/long": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/@types/long/-/long-4.0.1.tgz", @@ -382,12 +758,6 @@ "integrity": "sha512-OydMCocGMGqw/1BnWbhtK+AtwyWTOigtrQlRe57OQmTNcI3HKlVI5FGlh+c4mSqInMPLynFrTlYjfajPu9O/eQ==", "dev": true }, - "node_modules/abbrev": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", - "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==", - "dev": true - }, "node_modules/acorn": { "version": "7.4.1", "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz", @@ -409,18 +779,6 @@ "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" } }, - "node_modules/agent-base": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", - "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", - "dev": true, - "dependencies": { - "debug": "4" - }, - "engines": { - "node": ">= 6.0.0" - } - }, "node_modules/ajv": { "version": "6.12.6", "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", @@ -473,12 +831,6 @@ "integrity": "sha1-q8av7tzqUugJzcA3au0845Y10X8=", "dev": true }, - "node_modules/aproba": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/aproba/-/aproba-1.2.0.tgz", - "integrity": 
"sha512-Y9J6ZjXtoYh8RnXVCMOU/ttDmk1aBjunq9vO0ta5x85WDQiQfUF9sIPBITdbiiIVcBo03Hi3jMxigBtsddlXRw==", - "dev": true - }, "node_modules/archiver": { "version": "5.3.0", "resolved": "https://registry.npmjs.org/archiver/-/archiver-5.3.0.tgz", @@ -530,35 +882,10 @@ "util-deprecate": "~1.0.1" } }, - "node_modules/are-we-there-yet": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/are-we-there-yet/-/are-we-there-yet-1.1.5.tgz", - "integrity": "sha512-5hYdAkZlcG8tOLujVDTgCT+uPX0VnpAH28gWsLfzpXYm7wP6mp5Q/gYyR7YQ0cKVJcXJnl3j2kpBan13PtQf6w==", - "dev": true, - "dependencies": { - "delegates": "^1.0.0", - "readable-stream": "^2.0.6" - } - }, - "node_modules/are-we-there-yet/node_modules/readable-stream": { - "version": "2.3.7", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", - "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", - "dev": true, - "dependencies": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "node_modules/argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", "dev": true, "dependencies": { "sprintf-js": "~1.0.2" @@ -600,16 +927,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/ascli": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/ascli/-/ascli-1.0.1.tgz", - "integrity": "sha1-vPpZdKYvGOgcq660lzKrSoj5Brw=", - "dev": true, - "dependencies": { - 
"colour": "~0.7.1", - "optjs": "~3.2.2" - } - }, "node_modules/assert-plus": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", @@ -730,6 +1047,29 @@ "concat-map": "0.0.1" } }, + "node_modules/browserslist": { + "version": "4.16.7", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.16.7.tgz", + "integrity": "sha512-7I4qVwqZltJ7j37wObBe3SoTz+nS8APaNcrBOlgoirb6/HbEU2XxW/LpUDTCngM6iauwFqmRTuOMfyKnFGY5JA==", + "dev": true, + "dependencies": { + "caniuse-lite": "^1.0.30001248", + "colorette": "^1.2.2", + "electron-to-chromium": "^1.3.793", + "escalade": "^3.1.1", + "node-releases": "^1.1.73" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + } + }, "node_modules/buffer": { "version": "5.7.1", "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", @@ -761,27 +1101,6 @@ "node": "*" } }, - "node_modules/bytebuffer": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/bytebuffer/-/bytebuffer-5.0.1.tgz", - "integrity": "sha1-WC7qSxqHO20CCkjVjfhfC7ps/d0=", - "dev": true, - "dependencies": { - "long": "~3" - }, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/bytebuffer/node_modules/long": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/long/-/long-3.2.0.tgz", - "integrity": "sha1-2CG3E4yhy1gcFymQ7xTbIAtcR0s=", - "dev": true, - "engines": { - "node": ">=0.6" - } - }, "node_modules/call-bind": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", @@ -804,13 +1123,14 @@ "node": ">=6" } }, - "node_modules/camelcase": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-2.1.1.tgz", - "integrity": "sha1-fB0W1nmhu+WcoCys7PsBHiAfWh8=", + "node_modules/caniuse-lite": { + "version": "1.0.30001249", + "resolved": 
"https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001249.tgz", + "integrity": "sha512-vcX4U8lwVXPdqzPWi6cAJ3FnQaqXbBqy/GZseKNQzRj37J7qZdGcBtxq/QLFNLLlfsoXLUdHw8Iwenri86Tagw==", "dev": true, - "engines": { - "node": ">=0.10.0" + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" } }, "node_modules/chai": { @@ -862,15 +1182,6 @@ "node": "*" } }, - "node_modules/chownr": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/chownr/-/chownr-2.0.0.tgz", - "integrity": "sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ==", - "dev": true, - "engines": { - "node": ">=10" - } - }, "node_modules/cli-table3": { "version": "0.5.1", "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.5.1.tgz", @@ -887,56 +1198,6 @@ "colors": "^1.1.2" } }, - "node_modules/cliui": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", - "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", - "dependencies": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.0", - "wrap-ansi": "^7.0.0" - } - }, - "node_modules/cliui/node_modules/ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/cliui/node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/cliui/node_modules/string-width": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz", 
- "integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/cliui/node_modules/strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "dependencies": { - "ansi-regex": "^5.0.0" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/clone-deep": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/clone-deep/-/clone-deep-4.0.1.tgz", @@ -950,15 +1211,6 @@ "node": ">=6" } }, - "node_modules/code-point-at": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz", - "integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/color-convert": { "version": "1.9.3", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", @@ -974,6 +1226,12 @@ "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", "dev": true }, + "node_modules/colorette": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/colorette/-/colorette-1.2.2.tgz", + "integrity": "sha512-MKGMzyfeuutC/ZJ1cba9NqcNpfeqMUcYmyF1ZFY6/Cn7CNSAKx6a+s48sqLqyAiZuaP2TcqMhoo+dlwFnVxT9w==", + "dev": true + }, "node_modules/colors": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/colors/-/colors-1.4.0.tgz", @@ -983,15 +1241,6 @@ "node": ">=0.1.90" } }, - "node_modules/colour": { - "version": "0.7.1", - "resolved": "https://registry.npmjs.org/colour/-/colour-0.7.1.tgz", - "integrity": "sha1-nLFpkX7F0SwHNtPoaFdG3xyt93g=", - "dev": true, - "engines": { - "node": ">=0.8" - } - }, "node_modules/commander": { "version": "3.0.2", "resolved": 
"https://registry.npmjs.org/commander/-/commander-3.0.2.tgz", @@ -1017,11 +1266,14 @@ "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" }, - "node_modules/console-control-strings": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/console-control-strings/-/console-control-strings-1.1.0.tgz", - "integrity": "sha1-PXz0Rk22RG6mRL9LOVB/mFEAjo4=", - "dev": true + "node_modules/convert-source-map": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.8.0.tgz", + "integrity": "sha512-+OQdjP49zViI/6i7nIJpA8rAl4sV/JdPfU9nZs3VqOwGIgizICvuN2ru6fMd+4llL0tar18UYJXfZ/TWtmhUjA==", + "dev": true, + "dependencies": { + "safe-buffer": "~5.1.1" + } }, "node_modules/core-js-pure": { "version": "3.15.2", @@ -1231,15 +1483,6 @@ } } }, - "node_modules/decamelize": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", - "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/deep-eql": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-3.0.1.tgz", @@ -1270,24 +1513,6 @@ "node": ">= 0.4" } }, - "node_modules/delegates": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz", - "integrity": "sha1-hMbhWbgZBP3KWaDvRM2HDTElD5o=", - "dev": true - }, - "node_modules/detect-libc": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-1.0.3.tgz", - "integrity": "sha1-+hN8S9aY7fVc1c0CrFWfkaTEups=", - "dev": true, - "bin": { - "detect-libc": "bin/detect-libc.js" - }, - "engines": { - "node": ">=0.10" - } - }, "node_modules/diff": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz", @@ -1319,10 +1544,17 @@ "es5-ext": "~0.10.46" } }, + "node_modules/electron-to-chromium": { + "version": "1.3.801", + 
"resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.801.tgz", + "integrity": "sha512-xapG8ekC+IAHtJrGBMQSImNuN+dm+zl7UP1YbhvTkwQn8zf/yYuoxfTSAEiJ9VDD+kjvXaAhNDPSxJ+VImtAJA==", + "dev": true + }, "node_modules/emoji-regex": { "version": "8.0.0", "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true }, "node_modules/end-of-stream": { "version": "1.4.4", @@ -1443,8 +1675,8 @@ }, "node_modules/escalade": { "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", + "dev": true, "engines": { "node": ">=6" } @@ -1743,6 +1975,15 @@ "node": ">=6" } }, + "node_modules/eslint-rule-composer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/eslint-rule-composer/-/eslint-rule-composer-0.3.0.tgz", + "integrity": "sha512-bt+Sh8CtDmn2OajxvNO+BX7Wn4CIWMpTRm3MaiKPCQcnnlm0CS2mhui6QaoeQugs+3Kj2ESKEEGJUdVafwhiCg==", + "dev": true, + "engines": { + "node": ">=4.0.0" + } + }, "node_modules/eslint-scope": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", @@ -2044,6 +2285,30 @@ "integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=", "dev": true }, + "node_modules/ffi-napi": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/ffi-napi/-/ffi-napi-4.0.3.tgz", + "integrity": "sha512-PMdLCIvDY9mS32RxZ0XGb95sonPRal8aqRhLbeEtWKZTe2A87qRFG9HjOhvG8EX2UmQw5XNRMIOT+1MYlWmdeg==", + "dev": true, + "hasInstallScript": true, + "dependencies": { + "debug": "^4.1.1", + "get-uv-event-loop-napi-h": "^1.0.5", + "node-addon-api": "^3.0.0", + "node-gyp-build": "^4.2.1", + "ref-napi": "^2.0.1 || 
^3.0.2", + "ref-struct-di": "^1.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/ffi-napi/node_modules/node-addon-api": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-3.2.1.tgz", + "integrity": "sha512-mmcei9JghVNDYydghQmeDX8KoAm0FAiYyIcUt/N4nhyAipB17pllZQDOJD2fotxABnt4Mdz+dKTO7eftLg4d0A==", + "dev": true + }, "node_modules/figures": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", @@ -2172,18 +2437,6 @@ "graceful-fs": "^4.1.6" } }, - "node_modules/fs-minipass": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", - "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", - "dev": true, - "dependencies": { - "minipass": "^3.0.0" - }, - "engines": { - "node": ">= 8" - } - }, "node_modules/fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", @@ -2201,75 +2454,13 @@ "integrity": "sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc=", "dev": true }, - "node_modules/gauge": { - "version": "2.7.4", - "resolved": "https://registry.npmjs.org/gauge/-/gauge-2.7.4.tgz", - "integrity": "sha1-LANAXHU4w51+s3sxcCLjJfsBi/c=", - "dev": true, - "dependencies": { - "aproba": "^1.0.3", - "console-control-strings": "^1.0.0", - "has-unicode": "^2.0.0", - "object-assign": "^4.1.0", - "signal-exit": "^3.0.0", - "string-width": "^1.0.1", - "strip-ansi": "^3.0.1", - "wide-align": "^1.1.0" - } - }, - "node_modules/gauge/node_modules/ansi-regex": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", - "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/gauge/node_modules/is-fullwidth-code-point": { - "version": "1.0.0", - "resolved": 
"https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz", - "integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=", - "dev": true, - "dependencies": { - "number-is-nan": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/gauge/node_modules/string-width": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz", - "integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=", - "dev": true, - "dependencies": { - "code-point-at": "^1.0.0", - "is-fullwidth-code-point": "^1.0.0", - "strip-ansi": "^3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/gauge/node_modules/strip-ansi": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", - "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", "dev": true, - "dependencies": { - "ansi-regex": "^2.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", "engines": { - "node": "6.* || 8.* || >= 10.*" + "node": ">=6.9.0" } }, "node_modules/get-func-name": { @@ -2295,6 +2486,21 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/get-symbol-from-current-process-h": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/get-symbol-from-current-process-h/-/get-symbol-from-current-process-h-1.0.2.tgz", + "integrity": "sha512-syloC6fsCt62ELLrr1VKBM1ggOpMdetX9hTrdW77UQdcApPHLmf7CI7OKcN1c9kYuNxKcDe4iJ4FY9sX3aw2xw==", + "dev": true + }, + 
"node_modules/get-uv-event-loop-napi-h": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/get-uv-event-loop-napi-h/-/get-uv-event-loop-napi-h-1.0.6.tgz", + "integrity": "sha512-t5c9VNR84nRoF+eLiz6wFrEp1SE2Acg0wS+Ysa2zF0eROes+LzOfuTaVHxGy8AbS8rq7FHEJzjnCZo1BupwdJg==", + "dev": true, + "dependencies": { + "get-symbol-from-current-process-h": "^1.0.1" + } + }, "node_modules/gherkin": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/gherkin/-/gherkin-5.0.0.tgz", @@ -2367,175 +2573,46 @@ "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.6.tgz", "integrity": "sha512-nTnJ528pbqxYanhpDYsi4Rd8MAeaBA67+RZ10CM1m3bTAVFEDcd5AuA4a6W5YkGZ1iNXHzZz8T6TBKLeBuNriQ==" }, - "node_modules/grpc": { - "version": "1.24.10", - "resolved": "https://registry.npmjs.org/grpc/-/grpc-1.24.10.tgz", - "integrity": "sha512-mTR+P5IL3WO3oCgNwxKFE5ksXEJfCYP+dk0aIbjB494f7OnHTmssU5r9vznsSq3+cdLcxAzGFskOj5CaPwi8KA==", - "deprecated": "This library will not receive further updates other than security fixes. 
We recommend using @grpc/grpc-js instead.", - "dev": true, - "hasInstallScript": true, - "dependencies": { - "@mapbox/node-pre-gyp": "^1.0.4", - "@types/bytebuffer": "^5.0.40", - "lodash.camelcase": "^4.3.0", - "lodash.clone": "^4.5.0", - "nan": "^2.13.2", - "protobufjs": "^5.0.3" - }, - "engines": { - "node": ">=4" - } - }, "node_modules/grpc-promise": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/grpc-promise/-/grpc-promise-1.4.0.tgz", "integrity": "sha512-4BBXHXb5OjjBh7luylu8vFqL6H6aPn/LeqpQaSBeRzO/Xv95wHW/WkU9TJRqaCTMZ5wq9jTSvlJWp0vRJy1pVA==", "dev": true }, - "node_modules/grpc/node_modules/ansi-regex": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", - "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", + "node_modules/has": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", + "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", "dev": true, + "dependencies": { + "function-bind": "^1.1.1" + }, "engines": { - "node": ">=0.10.0" + "node": ">= 0.4.0" } }, - "node_modules/grpc/node_modules/cliui": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-3.2.0.tgz", - "integrity": "sha1-EgYBU3qRbSmUD5NNo7SNWFo5IT0=", + "node_modules/has-bigints": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.1.tgz", + "integrity": "sha512-LSBS2LjbNBTf6287JEbEzvJgftkF5qFkmCo9hDRpAzKhUOlJ+hx8dd4USs00SgsUNwc4617J9ki5YtEClM2ffA==", "dev": true, - "dependencies": { - "string-width": "^1.0.1", - "strip-ansi": "^3.0.1", - "wrap-ansi": "^2.0.0" + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/grpc/node_modules/is-fullwidth-code-point": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz", - "integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=", + 
"node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", "dev": true, - "dependencies": { - "number-is-nan": "^1.0.0" - }, "engines": { - "node": ">=0.10.0" + "node": ">=4" } }, - "node_modules/grpc/node_modules/protobufjs": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-5.0.3.tgz", - "integrity": "sha512-55Kcx1MhPZX0zTbVosMQEO5R6/rikNXd9b6RQK4KSPcrSIIwoXTtebIczUrXlwaSrbz4x8XUVThGPob1n8I4QA==", - "dev": true, - "dependencies": { - "ascli": "~1", - "bytebuffer": "~5", - "glob": "^7.0.5", - "yargs": "^3.10.0" - }, - "bin": { - "pbjs": "bin/pbjs" - }, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/grpc/node_modules/string-width": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz", - "integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=", - "dev": true, - "dependencies": { - "code-point-at": "^1.0.0", - "is-fullwidth-code-point": "^1.0.0", - "strip-ansi": "^3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/grpc/node_modules/strip-ansi": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", - "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", - "dev": true, - "dependencies": { - "ansi-regex": "^2.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/grpc/node_modules/wrap-ansi": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-2.1.0.tgz", - "integrity": "sha1-2Pw9KE3QV5T+hJc8rs3Rz4JP3YU=", - "dev": true, - "dependencies": { - "string-width": "^1.0.1", - "strip-ansi": "^3.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/grpc/node_modules/y18n": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-3.2.2.tgz", - "integrity": 
"sha512-uGZHXkHnhF0XeeAPgnKfPv1bgKAYyVvmNL1xlKsPYZPaIHxGti2hHqvOCQv71XMsLxu1QjergkqogUnms5D3YQ==", - "dev": true - }, - "node_modules/grpc/node_modules/yargs": { - "version": "3.32.0", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-3.32.0.tgz", - "integrity": "sha1-AwiOnr+edWtpdRYR0qXvWRSCyZU=", - "dev": true, - "dependencies": { - "camelcase": "^2.0.1", - "cliui": "^3.0.3", - "decamelize": "^1.1.1", - "os-locale": "^1.4.0", - "string-width": "^1.0.1", - "window-size": "^0.1.4", - "y18n": "^3.2.0" - } - }, - "node_modules/has": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", - "dev": true, - "dependencies": { - "function-bind": "^1.1.1" - }, - "engines": { - "node": ">= 0.4.0" - } - }, - "node_modules/has-bigints": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.1.tgz", - "integrity": "sha512-LSBS2LjbNBTf6287JEbEzvJgftkF5qFkmCo9hDRpAzKhUOlJ+hx8dd4USs00SgsUNwc4617J9ki5YtEClM2ffA==", - "dev": true, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/has-symbols": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.2.tgz", - "integrity": "sha512-chXa79rL/UC2KlX17jo3vRGz0azaWEx5tGqZg5pO3NUyEJVB17dMruQlzCCOfUvElghKcm5194+BCRvi2Rv/Gw==", + "node_modules/has-symbols": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.2.tgz", + "integrity": "sha512-chXa79rL/UC2KlX17jo3vRGz0azaWEx5tGqZg5pO3NUyEJVB17dMruQlzCCOfUvElghKcm5194+BCRvi2Rv/Gw==", "dev": true, "engines": { "node": ">= 0.4" @@ -2544,12 +2621,6 @@ "url": 
"https://github.com/sponsors/ljharb" } }, - "node_modules/has-unicode": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/has-unicode/-/has-unicode-2.0.1.tgz", - "integrity": "sha1-4Ob+aijPUROIVeCG0Wkedx3iqLk=", - "dev": true - }, "node_modules/hex64": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/hex64/-/hex64-0.4.0.tgz", @@ -2564,19 +2635,6 @@ "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==", "dev": true }, - "node_modules/https-proxy-agent": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.0.tgz", - "integrity": "sha512-EkYm5BcKUGiduxzSt3Eppko+PiNWNEpa4ySk9vTC6wDsQJW9rHSa+UhGNJoRYp7bz6Ht1eaRIa6QaJqO5rCFbA==", - "dev": true, - "dependencies": { - "agent-base": "6", - "debug": "4" - }, - "engines": { - "node": ">= 6" - } - }, "node_modules/husky": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/husky/-/husky-6.0.0.tgz", @@ -2670,15 +2728,6 @@ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" }, - "node_modules/invert-kv": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/invert-kv/-/invert-kv-1.0.0.tgz", - "integrity": "sha1-EEqOSqym09jNFXqO+L+rLXo//bY=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/is-arrayish": { "version": "0.2.1", "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", @@ -2921,6 +2970,18 @@ "js-yaml": "bin/js-yaml.js" } }, + "node_modules/jsesc": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", + "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", + "dev": true, + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=4" + } + }, "node_modules/json-parse-better-errors": { 
"version": "1.0.2", "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", @@ -2967,9 +3028,9 @@ } }, "node_modules/jszip": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/jszip/-/jszip-3.6.0.tgz", - "integrity": "sha512-jgnQoG9LKnWO3mnVNBnfhkh0QknICd1FGSrXcgrl67zioyJ4wgx25o9ZqwNtrROSflGBCGYnJfjrIyRIby1OoQ==", + "version": "3.7.0", + "resolved": "https://registry.npmjs.org/jszip/-/jszip-3.7.0.tgz", + "integrity": "sha512-Y2OlFIzrDOPWUnpU0LORIcDn2xN7rC9yKffFM/7pGhQuhO+SUhfm2trkJ/S5amjFvem0Y+1EALz/MEPkvHXVNw==", "dependencies": { "lie": "~3.3.0", "pako": "~1.0.2", @@ -3033,18 +3094,6 @@ "util-deprecate": "~1.0.1" } }, - "node_modules/lcid": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/lcid/-/lcid-1.0.0.tgz", - "integrity": "sha1-MIrMr6C8SDo4Z7S28rlQYlHRuDU=", - "dev": true, - "dependencies": { - "invert-kv": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/levn": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", @@ -3106,12 +3155,6 @@ "integrity": "sha1-soqmKIorn8ZRA1x3EfZathkDMaY=", "dev": true }, - "node_modules/lodash.clone": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.clone/-/lodash.clone-4.5.0.tgz", - "integrity": "sha1-GVhwRQ9aExkkeN9Lw9I9LeoZB7Y=", - "dev": true - }, "node_modules/lodash.clonedeep": { "version": "4.5.0", "resolved": "https://registry.npmjs.org/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz", @@ -3185,30 +3228,6 @@ "node": ">=10" } }, - "node_modules/make-dir": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", - "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", - "dev": true, - "dependencies": { - "semver": "^6.0.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - 
"node_modules/make-dir/node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "dev": true, - "bin": { - "semver": "bin/semver.js" - } - }, "node_modules/minimatch": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", @@ -3225,43 +3244,6 @@ "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==" }, - "node_modules/minipass": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.1.3.tgz", - "integrity": "sha512-Mgd2GdMVzY+x3IJ+oHnVM+KG3lA5c8tnabyJKmHSaG2kAGpudxuOf8ToDkhumF7UzME7DecbQE9uOZhNm7PuJg==", - "dev": true, - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/minizlib": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", - "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", - "dev": true, - "dependencies": { - "minipass": "^3.0.0", - "yallist": "^4.0.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/mkdirp": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", - "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", - "dev": true, - "bin": { - "mkdirp": "bin/cmd.js" - }, - "engines": { - "node": ">=10" - } - }, "node_modules/ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", @@ -3279,12 +3261,6 @@ "thenify-all": "^1.0.0" } }, - "node_modules/nan": { - "version": "2.14.2", - "resolved": "https://registry.npmjs.org/nan/-/nan-2.14.2.tgz", - "integrity": 
"sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ==", - "dev": true - }, "node_modules/natural-compare": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", @@ -3320,30 +3296,23 @@ "lodash.toarray": "^4.4.0" } }, - "node_modules/node-fetch": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.1.tgz", - "integrity": "sha512-V4aYg89jEoVRxRb2fJdAg8FHvI7cEyYdVAh94HH0UIK8oJxUfkjlDQN9RbMx+bEjP7+ggMiFRprSti032Oipxw==", - "dev": true, - "engines": { - "node": "4.x || >=6.0.0" - } - }, - "node_modules/nopt": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/nopt/-/nopt-5.0.0.tgz", - "integrity": "sha512-Tbj67rffqceeLpcRXrT7vKAN8CwfPeIBgM7E6iBkmKLV7bEMwpGgYLGv0jACUsECaa/vuxP0IjEont6umdMgtQ==", + "node_modules/node-gyp-build": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.2.3.tgz", + "integrity": "sha512-MN6ZpzmfNCRM+3t57PTJHgHyw/h4OWnZ6mR8P5j/uZtqQr46RRuDE/P+g3n0YR/AiYXeWixZZzaip77gdICfRg==", "dev": true, - "dependencies": { - "abbrev": "1" - }, "bin": { - "nopt": "bin/nopt.js" - }, - "engines": { - "node": ">=6" + "node-gyp-build": "bin.js", + "node-gyp-build-optional": "optional.js", + "node-gyp-build-test": "build-test.js" } }, + "node_modules/node-releases": { + "version": "1.1.73", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.1.73.tgz", + "integrity": "sha512-uW7fodD6pyW2FZNZnp/Z3hvWKeEW1Y8R1+1CnErE8cXFXzl5blBOoVB41CvMer6P6Q0S5FXDwcHgFd1Wj0U9zg==", + "dev": true + }, "node_modules/normalize-package-data": { "version": "2.5.0", "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", @@ -3373,27 +3342,6 @@ "node": ">=0.10.0" } }, - "node_modules/npmlog": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/npmlog/-/npmlog-4.1.2.tgz", - "integrity": 
"sha512-2uUqazuKlTaSI/dC8AzicUck7+IrEaOnN/e0jd3Xtt1KcGpwx30v50mL7oPyr/h9bL3E4aZccVwpwP+5W9Vjkg==", - "dev": true, - "dependencies": { - "are-we-there-yet": "~1.1.2", - "console-control-strings": "~1.1.0", - "gauge": "~2.7.3", - "set-blocking": "~2.0.0" - } - }, - "node_modules/number-is-nan": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz", - "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/object-assign": { "version": "4.1.1", "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", @@ -3493,24 +3441,6 @@ "node": ">= 0.8.0" } }, - "node_modules/optjs": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/optjs/-/optjs-3.2.2.tgz", - "integrity": "sha1-aabOicRCpEQDFBrS+bNwvVu29O4=", - "dev": true - }, - "node_modules/os-locale": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/os-locale/-/os-locale-1.4.0.tgz", - "integrity": "sha1-IPnxeuKe00XoveWDsT0gCYA8FNk=", - "dev": true, - "dependencies": { - "lcid": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/p-limit": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", @@ -3813,6 +3743,46 @@ "minimatch": "^3.0.4" } }, + "node_modules/ref-napi": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/ref-napi/-/ref-napi-3.0.3.tgz", + "integrity": "sha512-LiMq/XDGcgodTYOMppikEtJelWsKQERbLQsYm0IOOnzhwE9xYZC7x8txNnFC9wJNOkPferQI4vD4ZkC0mDyrOA==", + "dev": true, + "hasInstallScript": true, + "dependencies": { + "debug": "^4.1.1", + "get-symbol-from-current-process-h": "^1.0.2", + "node-addon-api": "^3.0.0", + "node-gyp-build": "^4.2.1" + }, + "engines": { + "node": ">= 10.0" + } + }, + "node_modules/ref-napi/node_modules/node-addon-api": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-3.2.1.tgz", + "integrity": 
"sha512-mmcei9JghVNDYydghQmeDX8KoAm0FAiYyIcUt/N4nhyAipB17pllZQDOJD2fotxABnt4Mdz+dKTO7eftLg4d0A==", + "dev": true + }, + "node_modules/ref-struct-di": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ref-struct-di/-/ref-struct-di-1.1.1.tgz", + "integrity": "sha512-2Xyn/0Qgz89VT+++WP0sTosdm9oeowLP23wRJYhG4BFdMUrLj3jhwHZNEytYNYgtPKLNTP3KJX4HEgBvM1/Y2g==", + "dev": true, + "dependencies": { + "debug": "^3.1.0" + } + }, + "node_modules/ref-struct-di/node_modules/debug": { + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", + "dev": true, + "dependencies": { + "ms": "^2.1.1" + } + }, "node_modules/regenerator-runtime": { "version": "0.13.7", "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.7.tgz", @@ -3840,14 +3810,6 @@ "node": ">=0.10" } }, - "node_modules/require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=", - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/require-from-string": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", @@ -3932,12 +3894,6 @@ "node": ">=8" } }, - "node_modules/set-blocking": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", - "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=", - "dev": true - }, "node_modules/set-immediate-shim": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/set-immediate-shim/-/set-immediate-shim-1.0.1.tgz", @@ -4009,12 +3965,6 @@ "node": ">=8" } }, - "node_modules/signal-exit": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.3.tgz", - "integrity": 
"sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA==", - "dev": true - }, "node_modules/slice-ansi": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-4.0.0.tgz", @@ -4358,23 +4308,6 @@ "node": ">=8" } }, - "node_modules/tar": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/tar/-/tar-6.1.0.tgz", - "integrity": "sha512-DUCttfhsnLCjwoDoFcI+B2iJgYa93vBnDUATYEeRx6sntCTdN01VnqsIuTlALXla/LWooNg0yEGeB+Y8WdFxGA==", - "dev": true, - "dependencies": { - "chownr": "^2.0.0", - "fs-minipass": "^2.0.0", - "minipass": "^3.0.0", - "minizlib": "^2.1.1", - "mkdirp": "^1.0.3", - "yallist": "^4.0.0" - }, - "engines": { - "node": ">= 10" - } - }, "node_modules/tar-stream": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.2.0.tgz", @@ -4432,6 +4365,15 @@ "upper-case": "^1.0.3" } }, + "node_modules/to-fast-properties": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", + "integrity": "sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4=", + "dev": true, + "engines": { + "node": ">=4" + } + }, "node_modules/traverse-chain": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/traverse-chain/-/traverse-chain-0.1.0.tgz", @@ -4524,6 +4466,11 @@ "punycode": "^2.1.0" } }, + "node_modules/utf8": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/utf8/-/utf8-3.0.0.tgz", + "integrity": "sha512-E8VjFIQ/TyQgp+TZfS6l8yp/xWppSAHzidGiRrqe4bK4XP9pTRyKFgGJpO3SN7zdX4DeomTrwaseCHovfpFcqQ==" + }, "node_modules/util-arity": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/util-arity/-/util-arity-1.1.0.tgz", @@ -4610,27 +4557,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/wide-align": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.3.tgz", - "integrity": 
"sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA==", - "dev": true, - "dependencies": { - "string-width": "^1.0.2 || 2" - } - }, - "node_modules/window-size": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/window-size/-/window-size-0.1.4.tgz", - "integrity": "sha1-+OGqHuWlPsW/FR/6CXQqatdpeHY=", - "dev": true, - "bin": { - "window-size": "cli.js" - }, - "engines": { - "node": ">= 0.10.0" - } - }, "node_modules/word-wrap": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", @@ -4640,92 +4566,6 @@ "node": ">=0.10.0" } }, - "node_modules/wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/wrap-ansi/node_modules/ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/wrap-ansi/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/wrap-ansi/node_modules/color-convert": { - "version": "2.0.1", - "resolved": 
"https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/wrap-ansi/node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, - "node_modules/wrap-ansi/node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/wrap-ansi/node_modules/string-width": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz", - "integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/wrap-ansi/node_modules/strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "dependencies": { - "ansi-regex": "^5.0.0" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", @@ -4740,85 +4580,12 @@ "@babel/runtime-corejs3": "^7.12.1" } }, - "node_modules/y18n": { - "version": "5.0.8", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", - "integrity": 
"sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", - "engines": { - "node": ">=10" - } - }, "node_modules/yallist": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", "dev": true }, - "node_modules/yargs": { - "version": "17.0.1", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.0.1.tgz", - "integrity": "sha512-xBBulfCc8Y6gLFcrPvtqKz9hz8SO0l1Ni8GgDekvBX2ro0HRQImDGnikfc33cgzcYUSncapnNcZDjVFIH3f6KQ==", - "dependencies": { - "cliui": "^7.0.2", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.0", - "y18n": "^5.0.5", - "yargs-parser": "^20.2.2" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/yargs-parser": { - "version": "20.2.9", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", - "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==", - "engines": { - "node": ">=10" - } - }, - "node_modules/yargs/node_modules/ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/yargs/node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "engines": { - "node": ">=8" - } - }, - "node_modules/yargs/node_modules/string-width": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz", - "integrity": 
"sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/yargs/node_modules/strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "dependencies": { - "ansi-regex": "^5.0.0" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/zip-stream": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/zip-stream/-/zip-stream-4.1.0.tgz", @@ -4843,12 +4610,236 @@ "@babel/highlight": "^7.10.4" } }, + "@babel/compat-data": { + "version": "7.15.0", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.15.0.tgz", + "integrity": "sha512-0NqAC1IJE0S0+lL1SWFMxMkz1pKCNCjI4tr2Zx4LJSXxCLAdr6KyArnY+sno5m3yH9g737ygOyPABDsnXkpxiA==", + "dev": true + }, + "@babel/core": { + "version": "7.15.0", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.15.0.tgz", + "integrity": "sha512-tXtmTminrze5HEUPn/a0JtOzzfp0nk+UEXQ/tqIJo3WDGypl/2OFQEMll/zSFU8f/lfmfLXvTaORHF3cfXIQMw==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.14.5", + "@babel/generator": "^7.15.0", + "@babel/helper-compilation-targets": "^7.15.0", + "@babel/helper-module-transforms": "^7.15.0", + "@babel/helpers": "^7.14.8", + "@babel/parser": "^7.15.0", + "@babel/template": "^7.14.5", + "@babel/traverse": "^7.15.0", + "@babel/types": "^7.15.0", + "convert-source-map": "^1.7.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.1.2", + "semver": "^6.3.0", + "source-map": "^0.5.0" + }, + "dependencies": { + "@babel/code-frame": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.14.5.tgz", + "integrity": 
"sha512-9pzDqyc6OLDaqe+zbACgFkb6fKMNG6CObKpnYXChRsvYGyEdc7CA2BaqeOM+vOtCS5ndmJicPJhKAwYRI6UfFw==", + "dev": true, + "requires": { + "@babel/highlight": "^7.14.5" + } + }, + "semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "dev": true + } + } + }, + "@babel/eslint-parser": { + "version": "7.15.0", + "resolved": "https://registry.npmjs.org/@babel/eslint-parser/-/eslint-parser-7.15.0.tgz", + "integrity": "sha512-+gSPtjSBxOZz4Uh8Ggqu7HbfpB8cT1LwW0DnVVLZEJvzXauiD0Di3zszcBkRmfGGrLdYeHUwcflG7i3tr9kQlw==", + "dev": true, + "requires": { + "eslint-scope": "^5.1.1", + "eslint-visitor-keys": "^2.1.0", + "semver": "^6.3.0" + }, + "dependencies": { + "semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "dev": true + } + } + }, + "@babel/eslint-plugin": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/eslint-plugin/-/eslint-plugin-7.14.5.tgz", + "integrity": "sha512-nzt/YMnOOIRikvSn2hk9+W2omgJBy6U8TN0R+WTTmqapA+HnZTuviZaketdTE9W7/k/+E/DfZlt1ey1NSE39pg==", + "dev": true, + "requires": { + "eslint-rule-composer": "^0.3.0" + } + }, + "@babel/generator": { + "version": "7.15.0", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.15.0.tgz", + "integrity": "sha512-eKl4XdMrbpYvuB505KTta4AV9g+wWzmVBW69tX0H2NwKVKd2YJbKgyK6M8j/rgLbmHOYJn6rUklV677nOyJrEQ==", + "dev": true, + "requires": { + "@babel/types": "^7.15.0", + "jsesc": "^2.5.1", + "source-map": "^0.5.0" + } + }, + "@babel/helper-compilation-targets": { + "version": "7.15.0", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.15.0.tgz", + "integrity": 
"sha512-h+/9t0ncd4jfZ8wsdAsoIxSa61qhBYlycXiHWqJaQBCXAhDCMbPRSMTGnZIkkmt1u4ag+UQmuqcILwqKzZ4N2A==", + "dev": true, + "requires": { + "@babel/compat-data": "^7.15.0", + "@babel/helper-validator-option": "^7.14.5", + "browserslist": "^4.16.6", + "semver": "^6.3.0" + }, + "dependencies": { + "semver": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", + "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "dev": true + } + } + }, + "@babel/helper-function-name": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.14.5.tgz", + "integrity": "sha512-Gjna0AsXWfFvrAuX+VKcN/aNNWonizBj39yGwUzVDVTlMYJMK2Wp6xdpy72mfArFq5uK+NOuexfzZlzI1z9+AQ==", + "dev": true, + "requires": { + "@babel/helper-get-function-arity": "^7.14.5", + "@babel/template": "^7.14.5", + "@babel/types": "^7.14.5" + } + }, + "@babel/helper-get-function-arity": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/helper-get-function-arity/-/helper-get-function-arity-7.14.5.tgz", + "integrity": "sha512-I1Db4Shst5lewOM4V+ZKJzQ0JGGaZ6VY1jYvMghRjqs6DWgxLCIyFt30GlnKkfUeFLpJt2vzbMVEXVSXlIFYUg==", + "dev": true, + "requires": { + "@babel/types": "^7.14.5" + } + }, + "@babel/helper-hoist-variables": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.14.5.tgz", + "integrity": "sha512-R1PXiz31Uc0Vxy4OEOm07x0oSjKAdPPCh3tPivn/Eo8cvz6gveAeuyUUPB21Hoiif0uoPQSSdhIPS3352nvdyQ==", + "dev": true, + "requires": { + "@babel/types": "^7.14.5" + } + }, + "@babel/helper-member-expression-to-functions": { + "version": "7.15.0", + "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.15.0.tgz", + "integrity": 
"sha512-Jq8H8U2kYiafuj2xMTPQwkTBnEEdGKpT35lJEQsRRjnG0LW3neucsaMWLgKcwu3OHKNeYugfw+Z20BXBSEs2Lg==", + "dev": true, + "requires": { + "@babel/types": "^7.15.0" + } + }, + "@babel/helper-module-imports": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.14.5.tgz", + "integrity": "sha512-SwrNHu5QWS84XlHwGYPDtCxcA0hrSlL2yhWYLgeOc0w7ccOl2qv4s/nARI0aYZW+bSwAL5CukeXA47B/1NKcnQ==", + "dev": true, + "requires": { + "@babel/types": "^7.14.5" + } + }, + "@babel/helper-module-transforms": { + "version": "7.15.0", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.15.0.tgz", + "integrity": "sha512-RkGiW5Rer7fpXv9m1B3iHIFDZdItnO2/BLfWVW/9q7+KqQSDY5kUfQEbzdXM1MVhJGcugKV7kRrNVzNxmk7NBg==", + "dev": true, + "requires": { + "@babel/helper-module-imports": "^7.14.5", + "@babel/helper-replace-supers": "^7.15.0", + "@babel/helper-simple-access": "^7.14.8", + "@babel/helper-split-export-declaration": "^7.14.5", + "@babel/helper-validator-identifier": "^7.14.9", + "@babel/template": "^7.14.5", + "@babel/traverse": "^7.15.0", + "@babel/types": "^7.15.0" + } + }, + "@babel/helper-optimise-call-expression": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.14.5.tgz", + "integrity": "sha512-IqiLIrODUOdnPU9/F8ib1Fx2ohlgDhxnIDU7OEVi+kAbEZcyiF7BLU8W6PfvPi9LzztjS7kcbzbmL7oG8kD6VA==", + "dev": true, + "requires": { + "@babel/types": "^7.14.5" + } + }, + "@babel/helper-replace-supers": { + "version": "7.15.0", + "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.15.0.tgz", + "integrity": "sha512-6O+eWrhx+HEra/uJnifCwhwMd6Bp5+ZfZeJwbqUTuqkhIT6YcRhiZCOOFChRypOIe0cV46kFrRBlm+t5vHCEaA==", + "dev": true, + "requires": { + "@babel/helper-member-expression-to-functions": "^7.15.0", + "@babel/helper-optimise-call-expression": "^7.14.5", + 
"@babel/traverse": "^7.15.0", + "@babel/types": "^7.15.0" + } + }, + "@babel/helper-simple-access": { + "version": "7.14.8", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.14.8.tgz", + "integrity": "sha512-TrFN4RHh9gnWEU+s7JloIho2T76GPwRHhdzOWLqTrMnlas8T9O7ec+oEDNsRXndOmru9ymH9DFrEOxpzPoSbdg==", + "dev": true, + "requires": { + "@babel/types": "^7.14.8" + } + }, + "@babel/helper-split-export-declaration": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.14.5.tgz", + "integrity": "sha512-hprxVPu6e5Kdp2puZUmvOGjaLv9TCe58E/Fl6hRq4YiVQxIcNvuq6uTM2r1mT/oPskuS9CgR+I94sqAYv0NGKA==", + "dev": true, + "requires": { + "@babel/types": "^7.14.5" + } + }, "@babel/helper-validator-identifier": { + "version": "7.14.9", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.14.9.tgz", + "integrity": "sha512-pQYxPY0UP6IHISRitNe8bsijHex4TWZXi2HwKVsjPiltzlhse2znVcm9Ace510VT1kxIHjGJCZZQBX2gJDbo0g==", + "dev": true + }, + "@babel/helper-validator-option": { "version": "7.14.5", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.14.5.tgz", - "integrity": "sha512-5lsetuxCLilmVGyiLEfoHBRX8UCFD+1m2x3Rj97WrW3V7H3u4RWRXA4evMjImCsin2J2YT0QaVDGf+z8ondbAg==", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.14.5.tgz", + "integrity": "sha512-OX8D5eeX4XwcroVW45NMvoYaIuFI+GQpA2a8Gi+X/U/cDUIRsV37qQfF905F0htTRCREQIB4KqPeaveRJUl3Ow==", "dev": true }, + "@babel/helpers": { + "version": "7.14.8", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.14.8.tgz", + "integrity": "sha512-ZRDmI56pnV+p1dH6d+UN6GINGz7Krps3+270qqI9UJ4wxYThfAIcI5i7j5vXC4FJ3Wap+S9qcebxeYiqn87DZw==", + "dev": true, + "requires": { + "@babel/template": "^7.14.5", + "@babel/traverse": "^7.14.8", + "@babel/types": 
"^7.14.8" + } + }, "@babel/highlight": { "version": "7.14.5", "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.14.5.tgz", @@ -4860,6 +4851,12 @@ "js-tokens": "^4.0.0" } }, + "@babel/parser": { + "version": "7.15.2", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.15.2.tgz", + "integrity": "sha512-bMJXql1Ss8lFnvr11TZDH4ArtwlAS5NG9qBmdiFW2UHHm6MVoR+GDc5XE2b9K938cyjc9O6/+vjjcffLDtfuDg==", + "dev": true + }, "@babel/runtime-corejs3": { "version": "7.14.7", "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.14.7.tgz", @@ -4870,6 +4867,72 @@ "regenerator-runtime": "^0.13.4" } }, + "@babel/template": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.14.5.tgz", + "integrity": "sha512-6Z3Po85sfxRGachLULUhOmvAaOo7xCvqGQtxINai2mEGPFm6pQ4z5QInFnUrRpfoSV60BnjyF5F3c+15fxFV1g==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.14.5", + "@babel/parser": "^7.14.5", + "@babel/types": "^7.14.5" + }, + "dependencies": { + "@babel/code-frame": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.14.5.tgz", + "integrity": "sha512-9pzDqyc6OLDaqe+zbACgFkb6fKMNG6CObKpnYXChRsvYGyEdc7CA2BaqeOM+vOtCS5ndmJicPJhKAwYRI6UfFw==", + "dev": true, + "requires": { + "@babel/highlight": "^7.14.5" + } + } + } + }, + "@babel/traverse": { + "version": "7.15.0", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.15.0.tgz", + "integrity": "sha512-392d8BN0C9eVxVWd8H6x9WfipgVH5IaIoLp23334Sc1vbKKWINnvwRpb4us0xtPaCumlwbTtIYNA0Dv/32sVFw==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.14.5", + "@babel/generator": "^7.15.0", + "@babel/helper-function-name": "^7.14.5", + "@babel/helper-hoist-variables": "^7.14.5", + "@babel/helper-split-export-declaration": "^7.14.5", + "@babel/parser": "^7.15.0", + "@babel/types": "^7.15.0", + "debug": "^4.1.0", + "globals": "^11.1.0" + }, + "dependencies": { + 
"@babel/code-frame": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.14.5.tgz", + "integrity": "sha512-9pzDqyc6OLDaqe+zbACgFkb6fKMNG6CObKpnYXChRsvYGyEdc7CA2BaqeOM+vOtCS5ndmJicPJhKAwYRI6UfFw==", + "dev": true, + "requires": { + "@babel/highlight": "^7.14.5" + } + }, + "globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "dev": true + } + } + }, + "@babel/types": { + "version": "7.15.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.15.0.tgz", + "integrity": "sha512-OBvfqnllOIdX4ojTHpwZbpvz4j3EWyjkZEdmjH0/cgsd6QOdSgU8rLSk6ard/pcW7rlmjdVSX/AWOaORR1uNOQ==", + "dev": true, + "requires": { + "@babel/helper-validator-identifier": "^7.14.9", + "to-fast-properties": "^2.0.0" + } + }, "@eslint/eslintrc": { "version": "0.4.2", "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-0.4.2.tgz", @@ -4887,6 +4950,14 @@ "strip-json-comments": "^3.1.1" } }, + "@grpc/grpc-js": { + "version": "1.3.6", + "integrity": "sha512-v7+LQFbqZKmd/Tvf5/j1Xlbq6jXL/4d+gUtm2TNX4QiEC3ELWADmGr2dGlUyLl6aKTuYfsN72vAsO5zmavYkEg==", + "dev": true, + "requires": { + "@types/node": ">=12.12.47" + } + }, "@grpc/proto-loader": { "version": "0.5.6", "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.5.6.tgz", @@ -4910,26 +4981,9 @@ }, "@humanwhocodes/object-schema": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-1.2.0.tgz", - "integrity": "sha512-wdppn25U8z/2yiaT6YGquE6X8sSv7hNMWSXYSSU1jGv/yd6XqjXgTDJ8KP4NgjTXfJ3GbRjeeb8RTV7a/VpM+w==", - "dev": true - }, - "@mapbox/node-pre-gyp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/@mapbox/node-pre-gyp/-/node-pre-gyp-1.0.5.tgz", - "integrity": 
"sha512-4srsKPXWlIxp5Vbqz5uLfBN+du2fJChBoYn/f2h991WLdk7jUvcSk/McVLSv/X+xQIPI8eGD5GjrnygdyHnhPA==", - "dev": true, - "requires": { - "detect-libc": "^1.0.3", - "https-proxy-agent": "^5.0.0", - "make-dir": "^3.1.0", - "node-fetch": "^2.6.1", - "nopt": "^5.0.0", - "npmlog": "^4.1.2", - "rimraf": "^3.0.2", - "semver": "^7.3.4", - "tar": "^6.1.0" - } + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-1.2.0.tgz", + "integrity": "sha512-wdppn25U8z/2yiaT6YGquE6X8sSv7hNMWSXYSSU1jGv/yd6XqjXgTDJ8KP4NgjTXfJ3GbRjeeb8RTV7a/VpM+w==", + "dev": true }, "@protobufjs/aspromise": { "version": "1.1.2", @@ -4995,16 +5049,6 @@ "integrity": "sha1-p3c2C1s5oaLlEG+OhY8v0tBgxXA=", "dev": true }, - "@types/bytebuffer": { - "version": "5.0.42", - "resolved": "https://registry.npmjs.org/@types/bytebuffer/-/bytebuffer-5.0.42.tgz", - "integrity": "sha512-lEgKojWUAc/MG2t649oZS5AfYFP2xRNPoDuwDBlBMjHXd8MaGPgFgtCXUK7inZdBOygmVf10qxc1Us8GXC96aw==", - "dev": true, - "requires": { - "@types/long": "*", - "@types/node": "*" - } - }, "@types/long": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/@types/long/-/long-4.0.1.tgz", @@ -5017,12 +5061,6 @@ "integrity": "sha512-OydMCocGMGqw/1BnWbhtK+AtwyWTOigtrQlRe57OQmTNcI3HKlVI5FGlh+c4mSqInMPLynFrTlYjfajPu9O/eQ==", "dev": true }, - "abbrev": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", - "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==", - "dev": true - }, "acorn": { "version": "7.4.1", "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.4.1.tgz", @@ -5036,15 +5074,6 @@ "dev": true, "requires": {} }, - "agent-base": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", - "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", - "dev": true, - "requires": { - "debug": "4" - } - }, "ajv": { "version": 
"6.12.6", "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", @@ -5084,12 +5113,6 @@ "integrity": "sha1-q8av7tzqUugJzcA3au0845Y10X8=", "dev": true }, - "aproba": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/aproba/-/aproba-1.2.0.tgz", - "integrity": "sha512-Y9J6ZjXtoYh8RnXVCMOU/ttDmk1aBjunq9vO0ta5x85WDQiQfUF9sIPBITdbiiIVcBo03Hi3jMxigBtsddlXRw==", - "dev": true - }, "archiver": { "version": "5.3.0", "resolved": "https://registry.npmjs.org/archiver/-/archiver-5.3.0.tgz", @@ -5137,33 +5160,6 @@ } } }, - "are-we-there-yet": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/are-we-there-yet/-/are-we-there-yet-1.1.5.tgz", - "integrity": "sha512-5hYdAkZlcG8tOLujVDTgCT+uPX0VnpAH28gWsLfzpXYm7wP6mp5Q/gYyR7YQ0cKVJcXJnl3j2kpBan13PtQf6w==", - "dev": true, - "requires": { - "delegates": "^1.0.0", - "readable-stream": "^2.0.6" - }, - "dependencies": { - "readable-stream": { - "version": "2.3.7", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", - "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", - "dev": true, - "requires": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - } - } - }, "argparse": { "version": "1.0.10", "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", @@ -5197,16 +5193,6 @@ "es-abstract": "^1.18.0-next.1" } }, - "ascli": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/ascli/-/ascli-1.0.1.tgz", - "integrity": "sha1-vPpZdKYvGOgcq660lzKrSoj5Brw=", - "dev": true, - "requires": { - "colour": "~0.7.1", - "optjs": "~3.2.2" - } - }, "assert-plus": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", @@ -5304,6 +5290,19 @@ "concat-map": "0.0.1" } }, + "browserslist": { + "version": 
"4.16.7", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.16.7.tgz", + "integrity": "sha512-7I4qVwqZltJ7j37wObBe3SoTz+nS8APaNcrBOlgoirb6/HbEU2XxW/LpUDTCngM6iauwFqmRTuOMfyKnFGY5JA==", + "dev": true, + "requires": { + "caniuse-lite": "^1.0.30001248", + "colorette": "^1.2.2", + "electron-to-chromium": "^1.3.793", + "escalade": "^3.1.1", + "node-releases": "^1.1.73" + } + }, "buffer": { "version": "5.7.1", "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", @@ -5318,23 +5317,6 @@ "resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz", "integrity": "sha1-DTM+PwDqxQqhRUq9MO+MKl2ackI=" }, - "bytebuffer": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/bytebuffer/-/bytebuffer-5.0.1.tgz", - "integrity": "sha1-WC7qSxqHO20CCkjVjfhfC7ps/d0=", - "dev": true, - "requires": { - "long": "~3" - }, - "dependencies": { - "long": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/long/-/long-3.2.0.tgz", - "integrity": "sha1-2CG3E4yhy1gcFymQ7xTbIAtcR0s=", - "dev": true - } - } - }, "call-bind": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", @@ -5351,10 +5333,10 @@ "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", "dev": true }, - "camelcase": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-2.1.1.tgz", - "integrity": "sha1-fB0W1nmhu+WcoCys7PsBHiAfWh8=", + "caniuse-lite": { + "version": "1.0.30001249", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001249.tgz", + "integrity": "sha512-vcX4U8lwVXPdqzPWi6cAJ3FnQaqXbBqy/GZseKNQzRj37J7qZdGcBtxq/QLFNLLlfsoXLUdHw8Iwenri86Tagw==", "dev": true }, "chai": { @@ -5396,12 +5378,6 @@ "integrity": "sha1-V00xLt2Iu13YkS6Sht1sCu1KrII=", "dev": true }, - "chownr": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/chownr/-/chownr-2.0.0.tgz", - "integrity": 
"sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ==", - "dev": true - }, "cli-table3": { "version": "0.5.1", "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.5.1.tgz", @@ -5413,46 +5389,6 @@ "string-width": "^2.1.1" } }, - "cliui": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", - "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", - "requires": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.0", - "wrap-ansi": "^7.0.0" - }, - "dependencies": { - "ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==" - }, - "is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==" - }, - "string-width": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz", - "integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==", - "requires": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.0" - } - }, - "strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "requires": { - "ansi-regex": "^5.0.0" - } - } - } - }, "clone-deep": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/clone-deep/-/clone-deep-4.0.1.tgz", @@ -5463,12 +5399,6 @@ "shallow-clone": "^3.0.0" } }, - "code-point-at": { - "version": "1.1.0", - "resolved": 
"https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz", - "integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=", - "dev": true - }, "color-convert": { "version": "1.9.3", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", @@ -5484,18 +5414,18 @@ "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", "dev": true }, + "colorette": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/colorette/-/colorette-1.2.2.tgz", + "integrity": "sha512-MKGMzyfeuutC/ZJ1cba9NqcNpfeqMUcYmyF1ZFY6/Cn7CNSAKx6a+s48sqLqyAiZuaP2TcqMhoo+dlwFnVxT9w==", + "dev": true + }, "colors": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/colors/-/colors-1.4.0.tgz", "integrity": "sha512-a+UqTh4kgZg/SlGvfbzDHpgRu7AAQOmmqRHJnxhRZICKFUT91brVhNNt58CMWU9PsBbv3PDCZUHbVxuDiH2mtA==", "dev": true }, - "colour": { - "version": "0.7.1", - "resolved": "https://registry.npmjs.org/colour/-/colour-0.7.1.tgz", - "integrity": "sha1-nLFpkX7F0SwHNtPoaFdG3xyt93g=", - "dev": true - }, "commander": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/commander/-/commander-3.0.2.tgz", @@ -5518,11 +5448,14 @@ "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" }, - "console-control-strings": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/console-control-strings/-/console-control-strings-1.1.0.tgz", - "integrity": "sha1-PXz0Rk22RG6mRL9LOVB/mFEAjo4=", - "dev": true + "convert-source-map": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.8.0.tgz", + "integrity": "sha512-+OQdjP49zViI/6i7nIJpA8rAl4sV/JdPfU9nZs3VqOwGIgizICvuN2ru6fMd+4llL0tar18UYJXfZ/TWtmhUjA==", + "dev": true, + "requires": { + "safe-buffer": "~5.1.1" + } }, "core-js-pure": { "version": "3.15.2", @@ -5684,12 +5617,6 @@ "ms": "2.1.2" } }, - "decamelize": { - "version": "1.2.0", - "resolved": 
"https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", - "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=", - "dev": true - }, "deep-eql": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-3.0.1.tgz", @@ -5714,18 +5641,6 @@ "object-keys": "^1.0.12" } }, - "delegates": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz", - "integrity": "sha1-hMbhWbgZBP3KWaDvRM2HDTElD5o=", - "dev": true - }, - "detect-libc": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-1.0.3.tgz", - "integrity": "sha1-+hN8S9aY7fVc1c0CrFWfkaTEups=", - "dev": true - }, "diff": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz", @@ -5751,10 +5666,17 @@ "es5-ext": "~0.10.46" } }, + "electron-to-chromium": { + "version": "1.3.801", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.801.tgz", + "integrity": "sha512-xapG8ekC+IAHtJrGBMQSImNuN+dm+zl7UP1YbhvTkwQn8zf/yYuoxfTSAEiJ9VDD+kjvXaAhNDPSxJ+VImtAJA==", + "dev": true + }, "emoji-regex": { "version": "8.0.0", "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true }, "end-of-stream": { "version": "1.4.4", @@ -5860,8 +5782,8 @@ }, "escalade": { "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==" + "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", + "dev": true }, "escape-string-regexp": { "version": "2.0.0", @@ -6153,6 +6075,12 @@ "integrity": 
"sha512-bY2sGqyptzFBDLh/GMbAxfdJC+b0f23ME63FOE4+Jao0oZ3E1LEwFtWJX/1pGMJLiTtrSSern2CRM/g+dfc0eQ==", "dev": true }, + "eslint-rule-composer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/eslint-rule-composer/-/eslint-rule-composer-0.3.0.tgz", + "integrity": "sha512-bt+Sh8CtDmn2OajxvNO+BX7Wn4CIWMpTRm3MaiKPCQcnnlm0CS2mhui6QaoeQugs+3Kj2ESKEEGJUdVafwhiCg==", + "dev": true + }, "eslint-scope": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", @@ -6309,6 +6237,28 @@ "integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=", "dev": true }, + "ffi-napi": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/ffi-napi/-/ffi-napi-4.0.3.tgz", + "integrity": "sha512-PMdLCIvDY9mS32RxZ0XGb95sonPRal8aqRhLbeEtWKZTe2A87qRFG9HjOhvG8EX2UmQw5XNRMIOT+1MYlWmdeg==", + "dev": true, + "requires": { + "debug": "^4.1.1", + "get-uv-event-loop-napi-h": "^1.0.5", + "node-addon-api": "^3.0.0", + "node-gyp-build": "^4.2.1", + "ref-napi": "^2.0.1 || ^3.0.2", + "ref-struct-di": "^1.1.0" + }, + "dependencies": { + "node-addon-api": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-3.2.1.tgz", + "integrity": "sha512-mmcei9JghVNDYydghQmeDX8KoAm0FAiYyIcUt/N4nhyAipB17pllZQDOJD2fotxABnt4Mdz+dKTO7eftLg4d0A==", + "dev": true + } + } + }, "figures": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", @@ -6406,15 +6356,6 @@ } } }, - "fs-minipass": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", - "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", - "dev": true, - "requires": { - "minipass": "^3.0.0" - } - }, "fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", @@ -6432,63 +6373,11 @@ "integrity": "sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc=", "dev": true }, - "gauge": { - "version": 
"2.7.4", - "resolved": "https://registry.npmjs.org/gauge/-/gauge-2.7.4.tgz", - "integrity": "sha1-LANAXHU4w51+s3sxcCLjJfsBi/c=", - "dev": true, - "requires": { - "aproba": "^1.0.3", - "console-control-strings": "^1.0.0", - "has-unicode": "^2.0.0", - "object-assign": "^4.1.0", - "signal-exit": "^3.0.0", - "string-width": "^1.0.1", - "strip-ansi": "^3.0.1", - "wide-align": "^1.1.0" - }, - "dependencies": { - "ansi-regex": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", - "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", - "dev": true - }, - "is-fullwidth-code-point": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz", - "integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=", - "dev": true, - "requires": { - "number-is-nan": "^1.0.0" - } - }, - "string-width": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz", - "integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=", - "dev": true, - "requires": { - "code-point-at": "^1.0.0", - "is-fullwidth-code-point": "^1.0.0", - "strip-ansi": "^3.0.0" - } - }, - "strip-ansi": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", - "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", - "dev": true, - "requires": { - "ansi-regex": "^2.0.0" - } - } - } - }, - "get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==" + "gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true }, "get-func-name": { "version": "2.0.0", @@ -6507,161 +6396,71 @@ 
"has-symbols": "^1.0.1" } }, - "gherkin": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/gherkin/-/gherkin-5.0.0.tgz", - "integrity": "sha1-lt70EZjsOQgli1Ea909lWidk0qE=", + "get-symbol-from-current-process-h": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/get-symbol-from-current-process-h/-/get-symbol-from-current-process-h-1.0.2.tgz", + "integrity": "sha512-syloC6fsCt62ELLrr1VKBM1ggOpMdetX9hTrdW77UQdcApPHLmf7CI7OKcN1c9kYuNxKcDe4iJ4FY9sX3aw2xw==", "dev": true }, - "glob": { - "version": "7.1.7", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz", - "integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==", - "requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - } - }, - "glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", - "dev": true, - "requires": { - "is-glob": "^4.0.1" - } - }, - "globals": { - "version": "13.10.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.10.0.tgz", - "integrity": "sha512-piHC3blgLGFjvOuMmWZX60f+na1lXFDhQXBf1UYp2fXPXqvEUbOhNwi6BsQ0bQishwedgnjkwv1d9zKf+MWw3g==", + "get-uv-event-loop-napi-h": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/get-uv-event-loop-napi-h/-/get-uv-event-loop-napi-h-1.0.6.tgz", + "integrity": "sha512-t5c9VNR84nRoF+eLiz6wFrEp1SE2Acg0wS+Ysa2zF0eROes+LzOfuTaVHxGy8AbS8rq7FHEJzjnCZo1BupwdJg==", "dev": true, "requires": { - "type-fest": "^0.20.2" - }, - "dependencies": { - "type-fest": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", - "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", - 
"dev": true - } + "get-symbol-from-current-process-h": "^1.0.1" } }, - "graceful-fs": { - "version": "4.2.6", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.6.tgz", - "integrity": "sha512-nTnJ528pbqxYanhpDYsi4Rd8MAeaBA67+RZ10CM1m3bTAVFEDcd5AuA4a6W5YkGZ1iNXHzZz8T6TBKLeBuNriQ==" + "gherkin": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/gherkin/-/gherkin-5.0.0.tgz", + "integrity": "sha1-lt70EZjsOQgli1Ea909lWidk0qE=", + "dev": true }, - "grpc": { - "version": "1.24.10", - "resolved": "https://registry.npmjs.org/grpc/-/grpc-1.24.10.tgz", - "integrity": "sha512-mTR+P5IL3WO3oCgNwxKFE5ksXEJfCYP+dk0aIbjB494f7OnHTmssU5r9vznsSq3+cdLcxAzGFskOj5CaPwi8KA==", - "dev": true, - "requires": { - "@mapbox/node-pre-gyp": "^1.0.4", - "@types/bytebuffer": "^5.0.40", - "lodash.camelcase": "^4.3.0", - "lodash.clone": "^4.5.0", - "nan": "^2.13.2", - "protobufjs": "^5.0.3" - }, - "dependencies": { - "ansi-regex": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", - "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", - "dev": true - }, - "cliui": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-3.2.0.tgz", - "integrity": "sha1-EgYBU3qRbSmUD5NNo7SNWFo5IT0=", - "dev": true, - "requires": { - "string-width": "^1.0.1", - "strip-ansi": "^3.0.1", - "wrap-ansi": "^2.0.0" - } - }, - "is-fullwidth-code-point": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz", - "integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=", - "dev": true, - "requires": { - "number-is-nan": "^1.0.0" - } - }, - "protobufjs": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-5.0.3.tgz", - "integrity": "sha512-55Kcx1MhPZX0zTbVosMQEO5R6/rikNXd9b6RQK4KSPcrSIIwoXTtebIczUrXlwaSrbz4x8XUVThGPob1n8I4QA==", - "dev": true, - "requires": { - "ascli": "~1", - "bytebuffer": "~5", - "glob": "^7.0.5", - 
"yargs": "^3.10.0" - } - }, - "string-width": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz", - "integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=", - "dev": true, - "requires": { - "code-point-at": "^1.0.0", - "is-fullwidth-code-point": "^1.0.0", - "strip-ansi": "^3.0.0" - } - }, - "strip-ansi": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", - "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", - "dev": true, - "requires": { - "ansi-regex": "^2.0.0" - } - }, - "wrap-ansi": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-2.1.0.tgz", - "integrity": "sha1-2Pw9KE3QV5T+hJc8rs3Rz4JP3YU=", - "dev": true, - "requires": { - "string-width": "^1.0.1", - "strip-ansi": "^3.0.1" - } - }, - "y18n": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-3.2.2.tgz", - "integrity": "sha512-uGZHXkHnhF0XeeAPgnKfPv1bgKAYyVvmNL1xlKsPYZPaIHxGti2hHqvOCQv71XMsLxu1QjergkqogUnms5D3YQ==", + "glob": { + "version": "7.1.7", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz", + "integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==", + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "requires": { + "is-glob": "^4.0.1" + } + }, + "globals": { + "version": "13.10.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.10.0.tgz", + "integrity": "sha512-piHC3blgLGFjvOuMmWZX60f+na1lXFDhQXBf1UYp2fXPXqvEUbOhNwi6BsQ0bQishwedgnjkwv1d9zKf+MWw3g==", + "dev": true, + "requires": { + 
"type-fest": "^0.20.2" + }, + "dependencies": { + "type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", "dev": true - }, - "yargs": { - "version": "3.32.0", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-3.32.0.tgz", - "integrity": "sha1-AwiOnr+edWtpdRYR0qXvWRSCyZU=", - "dev": true, - "requires": { - "camelcase": "^2.0.1", - "cliui": "^3.0.3", - "decamelize": "^1.1.1", - "os-locale": "^1.4.0", - "string-width": "^1.0.1", - "window-size": "^0.1.4", - "y18n": "^3.2.0" - } } } }, + "graceful-fs": { + "version": "4.2.6", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.6.tgz", + "integrity": "sha512-nTnJ528pbqxYanhpDYsi4Rd8MAeaBA67+RZ10CM1m3bTAVFEDcd5AuA4a6W5YkGZ1iNXHzZz8T6TBKLeBuNriQ==" + }, "grpc-promise": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/grpc-promise/-/grpc-promise-1.4.0.tgz", @@ -6695,12 +6494,6 @@ "integrity": "sha512-chXa79rL/UC2KlX17jo3vRGz0azaWEx5tGqZg5pO3NUyEJVB17dMruQlzCCOfUvElghKcm5194+BCRvi2Rv/Gw==", "dev": true }, - "has-unicode": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/has-unicode/-/has-unicode-2.0.1.tgz", - "integrity": "sha1-4Ob+aijPUROIVeCG0Wkedx3iqLk=", - "dev": true - }, "hex64": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/hex64/-/hex64-0.4.0.tgz", @@ -6712,16 +6505,6 @@ "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==", "dev": true }, - "https-proxy-agent": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.0.tgz", - "integrity": "sha512-EkYm5BcKUGiduxzSt3Eppko+PiNWNEpa4ySk9vTC6wDsQJW9rHSa+UhGNJoRYp7bz6Ht1eaRIa6QaJqO5rCFbA==", - "dev": true, - "requires": { - "agent-base": "6", - "debug": "4" - } - }, "husky": { "version": "6.0.0", "resolved": 
"https://registry.npmjs.org/husky/-/husky-6.0.0.tgz", @@ -6780,12 +6563,6 @@ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" }, - "invert-kv": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/invert-kv/-/invert-kv-1.0.0.tgz", - "integrity": "sha1-EEqOSqym09jNFXqO+L+rLXo//bY=", - "dev": true - }, "is-arrayish": { "version": "0.2.1", "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", @@ -6950,6 +6727,12 @@ "esprima": "^4.0.0" } }, + "jsesc": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", + "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", + "dev": true + }, "json-parse-better-errors": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", @@ -6988,9 +6771,9 @@ } }, "jszip": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/jszip/-/jszip-3.6.0.tgz", - "integrity": "sha512-jgnQoG9LKnWO3mnVNBnfhkh0QknICd1FGSrXcgrl67zioyJ4wgx25o9ZqwNtrROSflGBCGYnJfjrIyRIby1OoQ==", + "version": "3.7.0", + "resolved": "https://registry.npmjs.org/jszip/-/jszip-3.7.0.tgz", + "integrity": "sha512-Y2OlFIzrDOPWUnpU0LORIcDn2xN7rC9yKffFM/7pGhQuhO+SUhfm2trkJ/S5amjFvem0Y+1EALz/MEPkvHXVNw==", "requires": { "lie": "~3.3.0", "pako": "~1.0.2", @@ -7052,15 +6835,6 @@ } } }, - "lcid": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/lcid/-/lcid-1.0.0.tgz", - "integrity": "sha1-MIrMr6C8SDo4Z7S28rlQYlHRuDU=", - "dev": true, - "requires": { - "invert-kv": "^1.0.0" - } - }, "levn": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", @@ -7113,12 +6887,6 @@ "integrity": "sha1-soqmKIorn8ZRA1x3EfZathkDMaY=", "dev": true }, - "lodash.clone": { - "version": "4.5.0", - "resolved": 
"https://registry.npmjs.org/lodash.clone/-/lodash.clone-4.5.0.tgz", - "integrity": "sha1-GVhwRQ9aExkkeN9Lw9I9LeoZB7Y=", - "dev": true - }, "lodash.clonedeep": { "version": "4.5.0", "resolved": "https://registry.npmjs.org/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz", @@ -7189,23 +6957,6 @@ "yallist": "^4.0.0" } }, - "make-dir": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", - "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", - "dev": true, - "requires": { - "semver": "^6.0.0" - }, - "dependencies": { - "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "dev": true - } - } - }, "minimatch": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", @@ -7219,31 +6970,6 @@ "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==" }, - "minipass": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.1.3.tgz", - "integrity": "sha512-Mgd2GdMVzY+x3IJ+oHnVM+KG3lA5c8tnabyJKmHSaG2kAGpudxuOf8ToDkhumF7UzME7DecbQE9uOZhNm7PuJg==", - "dev": true, - "requires": { - "yallist": "^4.0.0" - } - }, - "minizlib": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", - "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", - "dev": true, - "requires": { - "minipass": "^3.0.0", - "yallist": "^4.0.0" - } - }, - "mkdirp": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", - "integrity": 
"sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", - "dev": true - }, "ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", @@ -7261,12 +6987,6 @@ "thenify-all": "^1.0.0" } }, - "nan": { - "version": "2.14.2", - "resolved": "https://registry.npmjs.org/nan/-/nan-2.14.2.tgz", - "integrity": "sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ==", - "dev": true - }, "natural-compare": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", @@ -7302,20 +7022,17 @@ "lodash.toarray": "^4.4.0" } }, - "node-fetch": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.1.tgz", - "integrity": "sha512-V4aYg89jEoVRxRb2fJdAg8FHvI7cEyYdVAh94HH0UIK8oJxUfkjlDQN9RbMx+bEjP7+ggMiFRprSti032Oipxw==", + "node-gyp-build": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.2.3.tgz", + "integrity": "sha512-MN6ZpzmfNCRM+3t57PTJHgHyw/h4OWnZ6mR8P5j/uZtqQr46RRuDE/P+g3n0YR/AiYXeWixZZzaip77gdICfRg==", "dev": true }, - "nopt": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/nopt/-/nopt-5.0.0.tgz", - "integrity": "sha512-Tbj67rffqceeLpcRXrT7vKAN8CwfPeIBgM7E6iBkmKLV7bEMwpGgYLGv0jACUsECaa/vuxP0IjEont6umdMgtQ==", - "dev": true, - "requires": { - "abbrev": "1" - } + "node-releases": { + "version": "1.1.73", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.1.73.tgz", + "integrity": "sha512-uW7fodD6pyW2FZNZnp/Z3hvWKeEW1Y8R1+1CnErE8cXFXzl5blBOoVB41CvMer6P6Q0S5FXDwcHgFd1Wj0U9zg==", + "dev": true }, "normalize-package-data": { "version": "2.5.0", @@ -7342,24 +7059,6 @@ "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==" }, - "npmlog": { - "version": "4.1.2", - 
"resolved": "https://registry.npmjs.org/npmlog/-/npmlog-4.1.2.tgz", - "integrity": "sha512-2uUqazuKlTaSI/dC8AzicUck7+IrEaOnN/e0jd3Xtt1KcGpwx30v50mL7oPyr/h9bL3E4aZccVwpwP+5W9Vjkg==", - "dev": true, - "requires": { - "are-we-there-yet": "~1.1.2", - "console-control-strings": "~1.1.0", - "gauge": "~2.7.3", - "set-blocking": "~2.0.0" - } - }, - "number-is-nan": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz", - "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=", - "dev": true - }, "object-assign": { "version": "4.1.1", "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", @@ -7432,21 +7131,6 @@ "word-wrap": "^1.2.3" } }, - "optjs": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/optjs/-/optjs-3.2.2.tgz", - "integrity": "sha1-aabOicRCpEQDFBrS+bNwvVu29O4=", - "dev": true - }, - "os-locale": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/os-locale/-/os-locale-1.4.0.tgz", - "integrity": "sha1-IPnxeuKe00XoveWDsT0gCYA8FNk=", - "dev": true, - "requires": { - "lcid": "^1.0.0" - } - }, "p-limit": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", @@ -7669,6 +7353,46 @@ "minimatch": "^3.0.4" } }, + "ref-napi": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/ref-napi/-/ref-napi-3.0.3.tgz", + "integrity": "sha512-LiMq/XDGcgodTYOMppikEtJelWsKQERbLQsYm0IOOnzhwE9xYZC7x8txNnFC9wJNOkPferQI4vD4ZkC0mDyrOA==", + "dev": true, + "requires": { + "debug": "^4.1.1", + "get-symbol-from-current-process-h": "^1.0.2", + "node-addon-api": "^3.0.0", + "node-gyp-build": "^4.2.1" + }, + "dependencies": { + "node-addon-api": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-3.2.1.tgz", + "integrity": "sha512-mmcei9JghVNDYydghQmeDX8KoAm0FAiYyIcUt/N4nhyAipB17pllZQDOJD2fotxABnt4Mdz+dKTO7eftLg4d0A==", + "dev": true + } + } + }, + "ref-struct-di": { + "version": "1.1.1", + 
"resolved": "https://registry.npmjs.org/ref-struct-di/-/ref-struct-di-1.1.1.tgz", + "integrity": "sha512-2Xyn/0Qgz89VT+++WP0sTosdm9oeowLP23wRJYhG4BFdMUrLj3jhwHZNEytYNYgtPKLNTP3KJX4HEgBvM1/Y2g==", + "dev": true, + "requires": { + "debug": "^3.1.0" + }, + "dependencies": { + "debug": { + "version": "3.2.7", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", + "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", + "dev": true, + "requires": { + "ms": "^2.1.1" + } + } + } + }, "regenerator-runtime": { "version": "0.13.7", "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.7.tgz", @@ -7687,11 +7411,6 @@ "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc=", "dev": true }, - "require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=" - }, "require-from-string": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", @@ -7752,12 +7471,6 @@ "type-fest": "^0.3.0" } }, - "set-blocking": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", - "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=", - "dev": true - }, "set-immediate-shim": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/set-immediate-shim/-/set-immediate-shim-1.0.1.tgz", @@ -7805,12 +7518,6 @@ "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", "dev": true }, - "signal-exit": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.3.tgz", - "integrity": "sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA==", - "dev": true - }, "slice-ansi": { "version": "4.0.0", "resolved": 
"https://registry.npmjs.org/slice-ansi/-/slice-ansi-4.0.0.tgz", @@ -8088,20 +7795,6 @@ } } }, - "tar": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/tar/-/tar-6.1.0.tgz", - "integrity": "sha512-DUCttfhsnLCjwoDoFcI+B2iJgYa93vBnDUATYEeRx6sntCTdN01VnqsIuTlALXla/LWooNg0yEGeB+Y8WdFxGA==", - "dev": true, - "requires": { - "chownr": "^2.0.0", - "fs-minipass": "^2.0.0", - "minipass": "^3.0.0", - "minizlib": "^2.1.1", - "mkdirp": "^1.0.3", - "yallist": "^4.0.0" - } - }, "tar-stream": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.2.0.tgz", @@ -8153,6 +7846,12 @@ "upper-case": "^1.0.3" } }, + "to-fast-properties": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", + "integrity": "sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4=", + "dev": true + }, "traverse-chain": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/traverse-chain/-/traverse-chain-0.1.0.tgz", @@ -8230,6 +7929,11 @@ "punycode": "^2.1.0" } }, + "utf8": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/utf8/-/utf8-3.0.0.tgz", + "integrity": "sha512-E8VjFIQ/TyQgp+TZfS6l8yp/xWppSAHzidGiRrqe4bK4XP9pTRyKFgGJpO3SN7zdX4DeomTrwaseCHovfpFcqQ==" + }, "util-arity": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/util-arity/-/util-arity-1.1.0.tgz", @@ -8277,15 +7981,14 @@ "wallet-grpc-client": { "version": "file:../clients/wallet_grpc_client", "requires": { - "@grpc/grpc-js": "^1.2.3", + "@grpc/grpc-js": "^1.3.6", "@grpc/proto-loader": "^0.5.5", "grpc-promise": "^1.4.0" }, "dependencies": { "@grpc/grpc-js": { - "version": "1.3.4", - "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.3.4.tgz", - "integrity": "sha512-AxtZcm0mArQhY9z8T3TynCYVEaSKxNCa9mVhVwBCUnsuUEe8Zn94bPYYKVQSLt+hJJ1y0ukr3mUvtWfcATL/IQ==", + "version": "1.3.6", + "integrity": "sha512-v7+LQFbqZKmd/Tvf5/j1Xlbq6jXL/4d+gUtm2TNX4QiEC3ELWADmGr2dGlUyLl6aKTuYfsN72vAsO5zmavYkEg==", "requires": { 
"@types/node": ">=12.12.47" } @@ -8359,9 +8062,8 @@ "integrity": "sha512-5tXH6Bx/kNGd3MgffdmP4dy2Z+G4eaXw0SE81Tq3BNadtnMR5/ySMzX4SLEzHJzSmPNn4HIdpQsBvXMUykr58w==" }, "@types/node": { - "version": "16.3.0", - "resolved": "https://registry.npmjs.org/@types/node/-/node-16.3.0.tgz", - "integrity": "sha512-OydMCocGMGqw/1BnWbhtK+AtwyWTOigtrQlRe57OQmTNcI3HKlVI5FGlh+c4mSqInMPLynFrTlYjfajPu9O/eQ==" + "version": "16.3.2", + "integrity": "sha512-jJs9ErFLP403I+hMLGnqDRWT0RYKSvArxuBVh2veudHV7ifEC1WAmjJADacZ7mRbA2nWgHtn8xyECMAot0SkAw==" }, "grpc-promise": { "version": "1.4.0", @@ -8422,88 +8124,12 @@ "is-symbol": "^1.0.3" } }, - "wide-align": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.3.tgz", - "integrity": "sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA==", - "dev": true, - "requires": { - "string-width": "^1.0.2 || 2" - } - }, - "window-size": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/window-size/-/window-size-0.1.4.tgz", - "integrity": "sha1-+OGqHuWlPsW/FR/6CXQqatdpeHY=", - "dev": true - }, "word-wrap": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", "dev": true }, - "wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "requires": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "dependencies": { - "ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==" - }, - "ansi-styles": { - "version": "4.3.0", - "resolved": 
"https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "requires": { - "color-convert": "^2.0.1" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, - "is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==" - }, - "string-width": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz", - "integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==", - "requires": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.0" - } - }, - "strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "requires": { - "ansi-regex": "^5.0.0" - } - } - } - }, "wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", @@ -8518,66 +8144,12 @@ "@babel/runtime-corejs3": "^7.12.1" } }, - "y18n": { - "version": "5.0.8", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", - "integrity": 
"sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==" - }, "yallist": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", "dev": true }, - "yargs": { - "version": "17.0.1", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.0.1.tgz", - "integrity": "sha512-xBBulfCc8Y6gLFcrPvtqKz9hz8SO0l1Ni8GgDekvBX2ro0HRQImDGnikfc33cgzcYUSncapnNcZDjVFIH3f6KQ==", - "requires": { - "cliui": "^7.0.2", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.0", - "y18n": "^5.0.5", - "yargs-parser": "^20.2.2" - }, - "dependencies": { - "ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==" - }, - "is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==" - }, - "string-width": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz", - "integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==", - "requires": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.0" - } - }, - "strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "requires": { - "ansi-regex": "^5.0.0" - } - } - } - }, - "yargs-parser": { - "version": "20.2.9", - "resolved": 
"https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", - "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==" - }, "zip-stream": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/zip-stream/-/zip-stream-4.1.0.tgz", diff --git a/integration_tests/package.json b/integration_tests/package.json index 31d45481a7..9437b3061f 100644 --- a/integration_tests/package.json +++ b/integration_tests/package.json @@ -14,6 +14,10 @@ "author": "The Tari Project", "license": "ISC", "devDependencies": { + "@babel/core": "^7.15.0", + "@babel/eslint-parser": "^7.15.0", + "@babel/eslint-plugin": "^7.14.5", + "@grpc/grpc-js": "^1.3.6", "@grpc/proto-loader": "^0.5.5", "blakejs": "^1.1.0", "chai": "^4.2.0", @@ -27,10 +31,11 @@ "eslint-plugin-node": "^11.1.0", "eslint-plugin-prettier": "^3.4.0", "eslint-plugin-promise": "^4.3.1", - "grpc": "^1.24.3", + "ffi-napi": "^4.0.3", "grpc-promise": "^1.4.0", "husky": "^6.0.0", - "prettier": "^2.2.1" + "prettier": "^2.2.1", + "ref-napi": "^3.0.3" }, "dependencies": { "archiver": "^5.3.0", @@ -40,11 +45,11 @@ "dateformat": "^3.0.3", "glob": "^7.1.7", "hex64": "^0.4.0", - "jszip": "^3.6.0", + "jszip": "^3.7.0", "sha3": "^2.1.3", "synchronized-promise": "^0.3.1", "tari_crypto": "^0.9.1", - "wallet-grpc-client": "file:../clients/wallet_grpc_client", - "yargs": "^17.0.1" + "utf8": "^3.0.0", + "wallet-grpc-client": "file:../clients/wallet_grpc_client" } }