From 1a919d4820c8ff2a22b9ed829f2aff587f4be179 Mon Sep 17 00:00:00 2001
From: Lucas B
Date: Thu, 25 Aug 2022 17:18:46 -0500
Subject: [PATCH] jito patch

only reroute if relayer connected (#123)
feat: add client tls config (#121)
remove extra val (#129)
fix clippy (#130)
copy all binaries to docker-output (#131)
Ledger tool halts at slot passed to create-snapshot (#118)
update program submodule (#133)
quick fix for tips and clearing old bundles (#135)
update submodule to new program (#136)
Improve stake-meta-generator usability (#134)
pinning submodule head (#140)
Use BundleAccountLocker when handling tip txs (#147)
Add metrics for relayer + block engine proxy (#149)
Build claim-mev in docker (#141)
Rework bundle receiving and add metrics (#152) (#154)
update submodule + dev files (#158)
Deterministically find tip amounts, add meta to stake info, and cleanup pubkey/strings in MEV tips (#159)
update jito-programs submodule (#160)
Separate MEV tip related workflow (#161)
Add block builder fee protos (#162)
fix jito programs (#163)
update submodule so autosnapshot exits out of ledger tool early (#164)
Pipe through block builder fee (#167)
pull in new snapshot code (#171)
block builder bug (#172)
update submodule add accountsdb conn submod (#170)
new submodules (#180) (#181)
Fixed broken links to repositories (#184) (#186)
Changed from ssh to https transfer for clone
Co-authored-by: Tomas Eminger <86430113+Tomas-Eminger@users.noreply.github.com>
Seg/update submods (#187) (#189)
* Seg/update submods (#187)
* fix tests (#190)
* rm geyser submod (#192)
* rm dangling geyser references
* fix syntax err
* use deterministic req ids in batch calls
Seg/v1.14 backport (#202)
* update submod
* added arg --rpc-max-request-payload-size to validator (#26377)
* added ability to pass --rpc-max-request-payload-size to validator
* fixed lint errors
* more lint fix
* patch
Co-authored-by: ultd
Co-authored-by: Justin Starry
Co-authored-by: Ahmad <12675427+ultd@users.noreply.github.com>
Co-authored-by: Justin Starry
use efficient data structures when calling batch_simulate_bundles (#206) (#207)
add config to simulate on top of working bank (#213)
rm frozen bank check
simulate_bundle rpc bugfixes (#214) (#215)
rm frozen bank check in simulate_bundle rpc method
bump versions, rm dead code, and build
[JIT-519] Store ClaimStatus address in merkle-root-json (#210)
* add files
* switch to include bump
update autosnapshot (#222)
Print TX error in Bundles (#223)
add new args to support single relayer and block-engine endpoints (#224)
point to new jito-programs submod and invoke updated init tda instruction (#228)
update lockfiles, address clippy
fix validator start scripts (#232)
update lockfile
Backport #247 on v1.14 (#249)
rebase issues
use updated anchor submodule
Backport #225 on v1.14 (#256)
* buffer bundles that exceed cost model
Seg/v1.14 (#259)
* bump jito-programs submod
* update Cargo lock
clear qos failed bundles buffer if not leader soon (#260)
Backport #240 on 1.14 (#261)
Add missing rustfmt install for rust-version toolchain (#263)
fix shellcheck and cargo lock
update jito-programs submodule
fix simulate_bundle client and better error handling (#267) (#269)
backport Dockerfile (#276)
Backport #238 onto v1.14 (#264)
* Preallocate Bundle Cost (#238)
update Cargo lock
fix lockfile
update jito-programs submod
add reclaim rent workflow (#283)
downgrade clap
fix clippy errs
bump to 1.14.18
Fix CVE-2023-26964 (#296)
Backport #299 onto v1.14 (#300)
Backport #290 to v1.14 (#302)
Backport #304 onto v1.14 (#307)
Jl/backport 306 v1.14 (#308)
Backport #309 (#312)
Co-authored-by: segfaultdoctor <17258903+segfaultdoc@users.noreply.github.com>
Backport tip distributor scripts updates from master (#318)
--ignore RUSTSEC-2022-0093
---
 .dockerignore | 1 +
 .github/dependabot.yml | 41 -
 .github/workflows/client-targets.yml | 6 +-
 .gitignore | 5 +-
 .gitmodules | 9 +
 Cargo.lock | 770 ++++--
 Cargo.toml | 4 +
 README.md | 12 +-
 anchor | 1 +
 banking-bench/src/main.rs | 15 +-
 banks-server/Cargo.toml | 4 +
 banks-server/src/banks_server.rs | 5 +-
 banks-server/src/rpc_banks_service.rs | 33 +-
 bootstrap | 21 +
 ci/buildkite-pipeline-in-disk.sh | 36 +-
 ci/buildkite-pipeline.sh | 36 +-
 ci/buildkite-solana-private.sh | 31 +-
 ci/channel-info.sh | 2 +-
 ci/do-audit.sh | 3 +
 ci/docker-rust/Dockerfile | 1 +
 ci/rust-version.sh | 1 +
 ci/test-stable.sh | 2 +-
 client/src/http_sender.rs | 210 +-
 client/src/mock_sender.rs | 7 +
 client/src/nonblocking/rpc_client.rs | 127 +-
 client/src/rpc_client.rs | 30 +
 client/src/rpc_config.rs | 52 +-
 client/src/rpc_request.rs | 3 +
 client/src/rpc_response.rs | 49 +-
 client/src/rpc_sender.rs | 4 +
 core/Cargo.toml | 15 +
 core/benches/banking_stage.rs | 17 +-
 core/benches/cluster_info.rs | 1 +
 core/benches/proto_to_packet.rs | 56 +
 core/benches/retransmit_stage.rs | 1 +
 core/src/admin_rpc_post_init.rs | 9 +-
 core/src/banking_stage.rs | 196 +-
 core/src/broadcast_stage.rs | 55 +-
 .../broadcast_duplicates_run.rs | 1 +
 .../broadcast_fake_shreds_run.rs | 1 +
 core/src/broadcast_stage/broadcast_utils.rs | 60 +-
 .../fail_entry_verification_broadcast_run.rs | 4 +-
 .../broadcast_stage/standard_broadcast_run.rs | 30 +-
 core/src/bundle_account_locker.rs | 333 +++
 core/src/bundle_sanitizer.rs | 617 +++++
 core/src/bundle_stage.rs | 2145 +++++++++++++++++
 core/src/bundle_stage_leader_stats.rs | 326 +++
 core/src/consensus_cache_updater.rs | 52 +
 core/src/lib.rs | 47 +
 core/src/packet_bundle.rs | 7 +
 core/src/proxy/auth.rs | 185 ++
 core/src/proxy/block_engine_stage.rs | 561 +++++
 core/src/proxy/fetch_stage_manager.rs | 161 ++
 core/src/proxy/mod.rs | 100 +
 core/src/proxy/relayer_stage.rs | 524 ++++
 core/src/qos_service.rs | 44 +-
 core/src/retransmit_stage.rs | 17 +-
 core/src/tip_manager.rs | 472 ++++
 core/src/tpu.rs | 108 +-
 core/src/tvu.rs | 5 +-
 core/src/validator.rs | 32 +-
 core/tests/snapshots.rs | 2 +
 deploy_programs | 17 +
 dev/Dockerfile | 48 +
 entry/src/entry.rs | 2 +-
 entry/src/poh.rs | 29 +-
 f | 30 +
 gossip/src/cluster_info.rs | 4 +
 jito-programs | 1 +
 jito-protos/Cargo.toml | 14 +
 jito-protos/build.rs | 17 +
 jito-protos/protos | 1 +
 jito-protos/src/lib.rs | 25 +
 ledger-tool/src/main.rs | 27 +-
 ledger/src/bank_forks_utils.rs | 4 +-
 ledger/src/blockstore_processor.rs | 4 +-
 ledger/src/token_balances.rs | 58 +-
 .../src/local_cluster_snapshot_utils.rs | 6 +-
 local-cluster/src/validator_configs.rs | 5 +
 local-cluster/tests/local_cluster.rs | 12 +-
 merkle-tree/src/merkle_tree.rs | 46 +-
 multinode-demo/bootstrap-validator.sh | 40 +
 multinode-demo/validator.sh | 34 +
 perf/src/sigverify.rs | 2 +-
 poh/src/poh_recorder.rs | 126 +-
 poh/src/poh_service.rs | 34 +-
 programs/bpf/Cargo.lock | 441 +++-
 programs/bpf/tests/programs.rs | 4 +-
 rpc/src/rpc.rs | 436 +++-
 rpc/src/rpc_service.rs | 6 +-
 runtime/src/accounts.rs | 96 +-
 runtime/src/bank.rs | 1146 ++++++++-
 runtime/src/builtins.rs | 2 +-
 runtime/src/cost_tracker.rs | 8 +
 runtime/src/snapshot_utils.rs | 30 +-
 runtime/src/stake_account.rs | 4 +-
 runtime/src/stakes.rs | 12 +-
 rustfmt.toml | 5 +
 s | 15 +
 scripts/coverage.sh | 17 +-
 scripts/increment-cargo-version.sh | 2 +
 scripts/run.sh | 4 +
 sdk/Cargo.toml | 1 +
 sdk/src/bundle/error.rs | 51 +
 sdk/src/bundle/mod.rs | 12 +
 sdk/src/bundle/sanitized.rs | 20 +
 sdk/src/bundle/utils.rs | 20 +
 sdk/src/lib.rs | 1 +
 sdk/src/transaction/error.rs | 8 +
 send-transaction-service/Cargo.toml | 2 +
 .../src/send_transaction_service.rs | 51 +-
 start | 9 +
 start_multi | 29 +
 storage-bigtable/Cargo.toml | 4 +-
 storage-proto/proto/transaction_by_addr.proto | 2 +
 storage-proto/src/convert.rs | 8 +
 tip-distributor/Cargo.toml | 51 +
 tip-distributor/README.md | 43 +
 tip-distributor/src/bin/claim-mev-tips.rs | 52 +
 .../src/bin/merkle-root-generator.rs | 34 +
 .../src/bin/merkle-root-uploader.rs | 50 +
 tip-distributor/src/bin/reclaim-rent.rs | 62 +
 .../src/bin/stake-meta-generator.rs | 67 +
 tip-distributor/src/claim_mev_workflow.rs | 151 ++
 tip-distributor/src/lib.rs | 890 +++++++
 .../src/merkle_root_generator_workflow.rs | 54 +
 .../src/merkle_root_upload_workflow.rs | 134 +
 tip-distributor/src/reclaim_rent_workflow.rs | 168 ++
 .../src/stake_meta_generator_workflow.rs | 951 ++++++++
 validator/Cargo.toml | 1 +
 validator/src/admin_rpc_service.rs | 115 +-
 validator/src/bootstrap.rs | 8 +-
 validator/src/dashboard.rs | 1 +
 validator/src/main.rs | 431 +++-
 134 files changed, 13297 insertions(+), 703 deletions(-)
 create mode 100644 .dockerignore
 delete mode 100644 .github/dependabot.yml
 create mode 100644 .gitmodules
 create mode 160000 anchor
 create mode 100755 bootstrap
 create mode 100644 core/benches/proto_to_packet.rs
 create mode 100644 core/src/bundle_account_locker.rs
 create mode 100644 core/src/bundle_sanitizer.rs
 create mode 100644 core/src/bundle_stage.rs
 create mode 100644 core/src/bundle_stage_leader_stats.rs
 create mode 100644 core/src/consensus_cache_updater.rs
 create mode 100644 core/src/packet_bundle.rs
 create mode 100644 core/src/proxy/auth.rs
 create mode 100644 core/src/proxy/block_engine_stage.rs
 create mode 100644 core/src/proxy/fetch_stage_manager.rs
 create mode 100644 core/src/proxy/mod.rs
 create mode 100644 core/src/proxy/relayer_stage.rs
 create mode 100644 core/src/tip_manager.rs
 create mode 100755 deploy_programs
 create mode 100644 dev/Dockerfile
 create mode 100755 f
 create mode 160000 jito-programs
 create mode 100644 jito-protos/Cargo.toml
 create mode 100644 jito-protos/build.rs
 create mode 160000 jito-protos/protos
 create mode 100644 jito-protos/src/lib.rs
 create mode 100755 s
 create mode 100644 sdk/src/bundle/error.rs
 create mode 100644 sdk/src/bundle/mod.rs
 create mode 100644 sdk/src/bundle/sanitized.rs
 create mode 100644 sdk/src/bundle/utils.rs
 create mode 100755 start
 create mode 100755 start_multi
 create mode 100644 tip-distributor/Cargo.toml
 create mode 100644 tip-distributor/README.md
 create mode 100644 tip-distributor/src/bin/claim-mev-tips.rs
 create mode 100644 tip-distributor/src/bin/merkle-root-generator.rs
 create mode 100644 tip-distributor/src/bin/merkle-root-uploader.rs
 create mode 100644 tip-distributor/src/bin/reclaim-rent.rs
 create mode 100644 tip-distributor/src/bin/stake-meta-generator.rs
 create mode 100644 tip-distributor/src/claim_mev_workflow.rs
 create mode 100644 tip-distributor/src/lib.rs
 create mode 100644 tip-distributor/src/merkle_root_generator_workflow.rs
 create mode 100644 tip-distributor/src/merkle_root_upload_workflow.rs
 create mode 100644 tip-distributor/src/reclaim_rent_workflow.rs
 create mode 100644 tip-distributor/src/stake_meta_generator_workflow.rs

diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000000..2f7896d1d1
--- /dev/null
+++ b/.dockerignore
@@
-0,0 +1 @@ +target/ diff --git a/.github/dependabot.yml b/.github/dependabot.yml deleted file mode 100644 index c2fc36a3e6..0000000000 --- a/.github/dependabot.yml +++ /dev/null @@ -1,41 +0,0 @@ -# To get started with Dependabot version updates, you'll need to specify which -# package ecosystems to update and where the package manifests are located. -# Please see the documentation for all configuration options: -# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates - -version: 2 -updates: -- package-ecosystem: cargo - directory: "/" - schedule: - interval: daily - time: "01:00" - timezone: America/Los_Angeles - #labels: - # - "automerge" - open-pull-requests-limit: 3 - -- package-ecosystem: npm - directory: "/web3.js" - schedule: - interval: daily - time: "01:00" - timezone: America/Los_Angeles - labels: - - "automerge" - commit-message: - prefix: "chore:" - open-pull-requests-limit: 3 - -- package-ecosystem: npm - directory: "/explorer" - schedule: - interval: daily - time: "01:00" - timezone: America/Los_Angeles - labels: - - "automerge" - commit-message: - prefix: "chore:" - include: "scope" - open-pull-requests-limit: 3 diff --git a/.github/workflows/client-targets.yml b/.github/workflows/client-targets.yml index c5c8532461..8e4448ca39 100644 --- a/.github/workflows/client-targets.yml +++ b/.github/workflows/client-targets.yml @@ -45,8 +45,10 @@ jobs: platform: android os: ubuntu-latest steps: - - name: Checkout code - uses: actions/checkout@v2 + - uses: actions/checkout@v3 + with: + submodules: recursive + ssh-key: ${{ secrets.DEPLOYER_SSH_KEY }} - uses: actions-rs/toolchain@v1 with: toolchain: stable diff --git a/.gitignore b/.gitignore index 124358b46f..92281a9a08 100644 --- a/.gitignore +++ b/.gitignore @@ -3,7 +3,7 @@ /solana-release.tar.bz2 /solana-metrics/ /solana-metrics.tar.bz2 -/target/ +**/target/ /test-ledger/ **/*.rs.bk @@ -30,3 +30,6 @@ log-*/ .DS_Store # scripts that may be generated by cargo *-bpf commands **/cargo-*-bpf-child-script-*.sh + +.env +docker-output/ diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000000..e31fc7fccd --- /dev/null +++ b/.gitmodules @@ -0,0 +1,9 @@ +[submodule "anchor"] + path = anchor + url = https://github.com/jito-foundation/anchor.git +[submodule "jito-programs"] + path = jito-programs + url = https://github.com/jito-foundation/jito-programs.git +[submodule "jito-protos/protos"] + path = jito-protos/protos + url = https://github.com/jito-labs/mev-protos.git diff --git a/Cargo.lock b/Cargo.lock index 720fc934a2..5cb41cf8a3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -95,6 +95,145 @@ dependencies = [ "alloc-no-stdlib", ] +[[package]] +name = "anchor-attribute-access-control" +version = "0.24.2" +dependencies = [ + "anchor-syn", + "anyhow", + "proc-macro2 1.0.66", + "quote 1.0.31", + "regex", + "syn 1.0.109", +] + +[[package]] +name = "anchor-attribute-account" +version = "0.24.2" +dependencies = [ + "anchor-syn", + "anyhow", + "bs58 0.4.0", + "proc-macro2 1.0.66", + "quote 1.0.31", + "rustversion", + "syn 1.0.109", +] + +[[package]] +name = "anchor-attribute-constant" +version = "0.24.2" +dependencies = [ + "anchor-syn", + "proc-macro2 1.0.66", + "syn 1.0.109", +] + +[[package]] +name = "anchor-attribute-error" +version = "0.24.2" +dependencies = [ + "anchor-syn", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 1.0.109", +] + +[[package]] +name = "anchor-attribute-event" +version = "0.24.2" +dependencies = [ + "anchor-syn", + "anyhow", + "proc-macro2 1.0.66", + "quote 
1.0.31", + "syn 1.0.109", +] + +[[package]] +name = "anchor-attribute-interface" +version = "0.24.2" +dependencies = [ + "anchor-syn", + "anyhow", + "heck 0.3.3", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 1.0.109", +] + +[[package]] +name = "anchor-attribute-program" +version = "0.24.2" +dependencies = [ + "anchor-syn", + "anyhow", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 1.0.109", +] + +[[package]] +name = "anchor-attribute-state" +version = "0.24.2" +dependencies = [ + "anchor-syn", + "anyhow", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 1.0.109", +] + +[[package]] +name = "anchor-derive-accounts" +version = "0.24.2" +dependencies = [ + "anchor-syn", + "anyhow", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 1.0.109", +] + +[[package]] +name = "anchor-lang" +version = "0.24.2" +dependencies = [ + "anchor-attribute-access-control", + "anchor-attribute-account", + "anchor-attribute-constant", + "anchor-attribute-error", + "anchor-attribute-event", + "anchor-attribute-interface", + "anchor-attribute-program", + "anchor-attribute-state", + "anchor-derive-accounts", + "arrayref", + "base64 0.13.0", + "bincode", + "borsh", + "bytemuck", + "solana-program 1.14.24", + "thiserror", +] + +[[package]] +name = "anchor-syn" +version = "0.24.2" +dependencies = [ + "anyhow", + "bs58 0.3.1", + "heck 0.3.3", + "proc-macro2 1.0.66", + "proc-macro2-diagnostics", + "quote 1.0.31", + "serde", + "serde_json", + "sha2 0.9.9", + "syn 1.0.109", + "thiserror", +] + [[package]] name = "ansi_term" version = "0.11.0" @@ -156,8 +295,8 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", "synstructure", ] @@ -168,8 +307,8 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -232,8 +371,8 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "648ed8c8d2ce5409ccd57453d9d1b214b342a0d69376a6feda1fd6cae3299308" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -243,8 +382,8 @@ version = "0.1.56" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96cf8829f67d2eab0b2dfa42c5d0ef737e0724e4a82b01b3e292456202b19716" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -372,8 +511,8 @@ dependencies = [ "lazy_static", "lazycell", "peeking_take_while", - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "regex", "rustc-hash", "shlex", @@ -488,7 +627,7 @@ dependencies = [ "borsh-derive-internal", "borsh-schema-derive-internal", "proc-macro-crate 0.1.5", - "proc-macro2 1.0.52", + "proc-macro2 1.0.66", "syn 1.0.109", ] @@ -498,8 +637,8 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5449c28a7b352f2d1e592a8a28bf139bc71afb0764a14f3c02500935d8c44065" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -509,8 +648,8 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"cdbd5696d8bfa21d53d9fe39a714a18538bad11492a42d066dbbc395fb1951c0" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -535,6 +674,12 @@ dependencies = [ "alloc-stdlib", ] +[[package]] +name = "bs58" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "476e9cd489f9e121e02ffa6014a8ef220ecb15c05ed23fc34cca13925dc283fb" + [[package]] name = "bs58" version = "0.4.0" @@ -605,8 +750,8 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1aca418a974d83d40a0c1f0c5cba6ff4bc28d8df099109ca459a2118d40b6322" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -800,9 +945,9 @@ dependencies = [ [[package]] name = "clap" -version = "3.2.23" +version = "3.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5" +checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" dependencies = [ "atty", "bitflags", @@ -817,14 +962,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "3.2.18" +version = "3.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea0c8bce528c4be4da13ea6fead8965e95b6073585a2f05204bd8f4119f82a65" +checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008" dependencies = [ "heck 0.4.0", "proc-macro-error", - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -906,8 +1051,8 @@ version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef196d5d972878a48da7decb7686eded338b4858fbabeed513d63a7c98b2b82d" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "unicode-xid 0.2.2", ] @@ -1160,8 +1305,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "40eebddd2156ce1bb37b20bbe5151340a31828b1f2d22ba4141f3531710e38df" dependencies = [ "convert_case", - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "rustc_version 0.3.3", "syn 1.0.109", ] @@ -1248,8 +1393,8 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3bf95dc3f046b9da4f2d51833c0d3547d8564ef6910f5c1ed130306a75b92886" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -1330,8 +1475,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f86b50932a01e7ec5c06160492ab660fb19b6bb2a7878030dd6cd68d21df9d4d" dependencies = [ "enum-ordinalize", - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -1371,8 +1516,8 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8958699f9359f0b04e691a13850d48b7de329138023876d07cbd024c2c820598" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -1384,8 +1529,8 @@ checksum = "0b166c9e378360dd5a6666a9604bb4f54ae0cac39023ffbac425e917a2a04fef" dependencies = [ "num-bigint 0.4.3", "num-traits", - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -1396,8 +1541,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0eb359f1476bf611266ac1f5355bc14aeca37b299d0ebccc038ee7058891c9cb" dependencies = [ "once_cell", - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -1427,13 +1572,13 @@ dependencies = [ [[package]] name = "errno" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d6a0976c999d473fe89ad888d5a284e55366d9dc9038b1ba2aa15128c4afa0" +checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" dependencies = [ "errno-dragonfly", "libc", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -1531,6 +1676,12 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "fixedbitset" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" + [[package]] name = "fixedbitset" version = "0.4.0" @@ -1651,8 +1802,8 @@ version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -2241,6 +2392,28 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" +[[package]] +name = "jito-programs-vote-state" +version = "0.1.2" +dependencies = [ + "anchor-lang", + "bincode", + "serde", + "serde_derive", + "solana-program 1.14.24", +] + +[[package]] +name = "jito-protos" +version = "1.14.24" +dependencies = [ + "bytes", + "prost 0.8.0", + "prost-types 0.8.0", + "tonic 0.5.2", + "tonic-build 0.5.2", +] + [[package]] name = "jobserver" version = "0.1.24" @@ -2321,8 +2494,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b939a78fa820cdfcb7ee7484466746a7377760970f6f9c6fe19f9edcc8a38d2" dependencies = [ "proc-macro-crate 0.1.5", - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -2420,9 +2593,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.140" +version = "0.2.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99227334921fae1a979cf0bfdfcc6b3e5ce376ef57e16fb6fb3ea2ed6095f80c" +checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3" [[package]] name = "libloading" @@ -2697,8 +2870,8 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a7d5f7076603ebc68de2dc6a650ec331a062a13abaa346975be747bbfa4b789" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -2821,8 +2994,8 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -2894,8 +3067,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2be1598bf1c313dcdd12092e3f1920f463462525a21b7b4e11b4168353d0123e" dependencies = [ "proc-macro-crate 1.1.0", - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -2962,8 +3135,8 @@ version = "0.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -3039,8 +3212,8 @@ checksum = "084fd65d5dd8b3772edccb5ffd1e4b7eba43897ecd0f9401e330e8c542959408" dependencies = [ "Inflector", "proc-macro-error", - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -3187,8 +3360,8 @@ checksum = "99b8db626e31e5b81787b9783425769681b347011cc59471e33ea46d2ea0cf55" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -3203,13 +3376,23 @@ dependencies = [ "sha-1 0.8.2", ] +[[package]] +name = "petgraph" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "467d164a6de56270bd7c4d070df81d07beace25012d5103ced4e9ff08d6afdb7" +dependencies = [ + "fixedbitset 0.2.0", + "indexmap", +] + [[package]] name = "petgraph" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a13a2fa9d0b63e5f22328828741e523766fff0ee9e779316902290dff3f824f" dependencies = [ - "fixedbitset", + "fixedbitset 0.4.0", "indexmap", ] @@ -3238,8 +3421,8 @@ version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e8fe8163d14ce7f0cdac2e040116f22eac817edabff0be91e8aff7e9accf389" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -3335,7 +3518,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b83ec2d0af5c5c556257ff52c9f98934e243b9fd39604bfb2a9b75ec2e97f18" dependencies = [ - "proc-macro2 1.0.52", + "proc-macro2 1.0.66", "syn 1.0.109", ] @@ -3365,8 +3548,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", "version_check", ] @@ -3377,8 +3560,8 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "version_check", ] @@ -3393,13 +3576,26 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.52" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224" +checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" dependencies = [ "unicode-ident", ] +[[package]] +name = "proc-macro2-diagnostics" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bf29726d67464d49fa6224a1d07936a8c08bb3fba727c7493f6cf1616fdaada" +dependencies = [ + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 1.0.109", + "version_check", + "yansi", +] + [[package]] name = "proptest" version = "1.0.0" @@ -3420,6 +3616,16 @@ dependencies = [ "tempfile", ] +[[package]] +name = "prost" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de5e2533f59d08fcf364fd374ebda0692a70bd6d7e66ef97f306f45c6c5d8020" +dependencies = [ + "bytes", + "prost-derive 
0.8.0", +] + [[package]] name = "prost" version = "0.9.0" @@ -3440,6 +3646,24 @@ dependencies = [ "prost-derive 0.11.0", ] +[[package]] +name = "prost-build" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "355f634b43cdd80724ee7848f95770e7e70eefa6dcf14fea676216573b8fd603" +dependencies = [ + "bytes", + "heck 0.3.3", + "itertools", + "log", + "multimap", + "petgraph 0.5.1", + "prost 0.8.0", + "prost-types 0.8.0", + "tempfile", + "which", +] + [[package]] name = "prost-build" version = "0.9.0" @@ -3452,7 +3676,7 @@ dependencies = [ "lazy_static", "log", "multimap", - "petgraph", + "petgraph 0.6.0", "prost 0.9.0", "prost-types 0.9.0", "regex", @@ -3472,7 +3696,7 @@ dependencies = [ "lazy_static", "log", "multimap", - "petgraph", + "petgraph 0.6.0", "prost 0.11.0", "prost-types 0.11.0", "regex", @@ -3480,6 +3704,19 @@ dependencies = [ "which", ] +[[package]] +name = "prost-derive" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "600d2f334aa05acb02a755e217ef1ab6dea4d51b58b7846588b747edec04efba" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2 1.0.66", + "quote 1.0.31", + "syn 1.0.109", +] + [[package]] name = "prost-derive" version = "0.9.0" @@ -3488,8 +3725,8 @@ checksum = "f9cc1a3263e07e0bf68e96268f37665207b49560d98739662cdfaae215c720fe" dependencies = [ "anyhow", "itertools", - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -3501,11 +3738,21 @@ checksum = "7345d5f0e08c0536d7ac7229952590239e77abf0a0100a1b1d890add6ea96364" dependencies = [ "anyhow", "itertools", - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] +[[package]] +name = "prost-types" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "603bbd6394701d13f3f25aada59c7de9d35a6a5887cfc156181234a44002771b" +dependencies = [ + "bytes", + "prost 0.8.0", +] + [[package]] name = "prost-types" version = "0.9.0" @@ -3594,7 +3841,7 @@ dependencies = [ "rand 0.8.5", "ring", "rustls 0.20.6", - "rustls-native-certs", + "rustls-native-certs 0.6.1", "rustls-pemfile 0.2.1", "slab", "thiserror", @@ -3629,11 +3876,11 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.18" +version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1" +checksum = "5fe8a65d69dd0808184ebb5f836ab526bb259db23c657efa38711b1072ee47f0" dependencies = [ - "proc-macro2 1.0.52", + "proc-macro2 1.0.66", ] [[package]] @@ -3787,7 +4034,7 @@ dependencies = [ name = "rbpf-cli" version = "1.14.24" dependencies = [ - "clap 3.2.23", + "clap 3.2.25", "serde", "serde_json", "solana-bpf-loader-program", @@ -3924,7 +4171,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots", + "webpki-roots 0.22.1", "winreg", ] @@ -4025,7 +4272,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e78cc525325c06b4a7ff02db283472f3c042b7ff0c391f96c6d5ac6f4f91b75" dependencies = [ "bitflags", - "errno 0.3.0", + "errno 0.3.1", "io-lifetimes 1.0.9", "libc", "linux-raw-sys 0.3.1", @@ -4057,6 +4304,18 @@ dependencies = [ "webpki 0.22.0", ] +[[package]] +name = "rustls-native-certs" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a07b7c1885bd8ed3831c289b7870b13ef46fe0e856d288c30d9cc17d75a2092" +dependencies = [ + "openssl-probe", + 
"rustls 0.19.1", + "schannel", + "security-framework", +] + [[package]] name = "rustls-native-certs" version = "0.6.1" @@ -4151,8 +4410,8 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bdbda6ac5cd1321e724fa9cee216f3a61885889b896f073b8f82322789c5250e" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -4250,8 +4509,8 @@ version = "1.0.138" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "023e9b1467aef8a10fb88f25611870ada9800ef7e22afce356bb0d2387b6f27c" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -4310,8 +4569,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1b95bb2f4f624565e8fe8140c789af7e2082c0e0561b5a82a1b678baa9703dc" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "rustversion", "syn 1.0.109", ] @@ -4514,7 +4773,7 @@ dependencies = [ "Inflector", "base64 0.13.0", "bincode", - "bs58", + "bs58 0.4.0", "bv", "lazy_static", "serde", @@ -4605,7 +4864,7 @@ dependencies = [ name = "solana-banking-bench" version = "1.14.24" dependencies = [ - "clap 3.2.23", + "clap 3.2.25", "crossbeam-channel", "log", "rand 0.7.3", @@ -4659,9 +4918,11 @@ dependencies = [ "futures 0.3.21", "solana-banks-interface", "solana-client", + "solana-gossip", "solana-runtime", "solana-sdk 1.14.24", "solana-send-transaction-service", + "solana-streamer", "tarpc", "tokio", "tokio-serde", @@ -4672,7 +4933,7 @@ dependencies = [ name = "solana-bench-streamer" version = "1.14.24" dependencies = [ - "clap 3.2.23", + "clap 3.2.25", "crossbeam-channel", "solana-net-utils", "solana-streamer", @@ -4779,7 +5040,7 @@ name = "solana-cargo-build-bpf" version = "1.14.24" dependencies = [ "cargo_metadata", - "clap 3.2.23", + "clap 3.2.25", "solana-sdk 1.14.24", ] @@ -4789,7 +5050,7 @@ version = "1.14.24" dependencies = [ "bzip2", "cargo_metadata", - "clap 3.2.23", + "clap 3.2.25", "log", "regex", "serial_test", @@ -4804,7 +5065,7 @@ name = "solana-cargo-test-bpf" version = "1.14.24" dependencies = [ "cargo_metadata", - "clap 3.2.23", + "clap 3.2.25", ] [[package]] @@ -4812,7 +5073,7 @@ name = "solana-cargo-test-sbf" version = "1.14.24" dependencies = [ "cargo_metadata", - "clap 3.2.23", + "clap 3.2.25", ] [[package]] @@ -4837,7 +5098,7 @@ name = "solana-clap-v3-utils" version = "1.14.24" dependencies = [ "chrono", - "clap 3.2.23", + "clap 3.2.25", "rpassword", "solana-perf", "solana-remote-wallet", @@ -4854,7 +5115,7 @@ name = "solana-cli" version = "1.14.24" dependencies = [ "bincode", - "bs58", + "bs58 0.4.0", "clap 2.33.3", "console", "const_format", @@ -4946,7 +5207,7 @@ dependencies = [ "async-trait", "base64 0.13.0", "bincode", - "bs58", + "bs58 0.4.0", "bytes", "clap 2.33.3", "crossbeam-channel", @@ -5045,23 +5306,33 @@ name = "solana-core" version = "1.14.24" dependencies = [ "ahash", + "anchor-lang", "base64 0.13.0", "bincode", - "bs58", + "bs58 0.4.0", + "bytes", "chrono", + "clap 3.2.25", "crossbeam-channel", "dashmap", "eager", "etcd-client", "fs_extra", + "futures 0.3.21", + "futures-util", + "h2", "histogram", + "indexmap", "itertools", + "jito-protos", "lazy_static", "log", "lru", "matches", "min-max-heap", "num_enum", + "prost 0.8.0", + "prost-types 0.8.0", "rand 0.7.3", "rand_chacha 0.2.2", "raptorq", @@ -5104,7 +5375,12 @@ dependencies = [ "tempfile", "test-case", "thiserror", + 
"tip-distribution", + "tip-payment", "tokio", + "tokio-stream", + "tonic 0.5.2", + "tonic-build 0.5.2", "trees", ] @@ -5113,7 +5389,7 @@ name = "solana-dos" version = "1.14.24" dependencies = [ "bincode", - "clap 3.2.23", + "clap 3.2.25", "crossbeam-channel", "itertools", "log", @@ -5213,7 +5489,7 @@ dependencies = [ "ahash", "blake3", "block-buffer 0.9.0", - "bs58", + "bs58 0.4.0", "bv", "byteorder", "cc", @@ -5245,7 +5521,7 @@ dependencies = [ "ahash", "blake3", "block-buffer 0.9.0", - "bs58", + "bs58 0.4.0", "bv", "byteorder", "cc", @@ -5277,8 +5553,8 @@ version = "1.14.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57892538250428ad3dc3cbe05f6cd75ad14f4f16734fcb91bc7cd5fbb63d6315" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "rustc_version 0.4.0", "syn 1.0.109", ] @@ -5287,8 +5563,8 @@ dependencies = [ name = "solana-frozen-abi-macro" version = "1.14.24" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "rustc_version 0.4.0", "syn 1.0.109", ] @@ -5338,7 +5614,7 @@ dependencies = [ name = "solana-geyser-plugin-manager" version = "1.14.24" dependencies = [ - "bs58", + "bs58 0.4.0", "crossbeam-channel", "json5", "libloading", @@ -5437,8 +5713,8 @@ dependencies = [ name = "solana-keygen" version = "1.14.24" dependencies = [ - "bs58", - "clap 3.2.23", + "bs58 0.4.0", + "clap 3.2.25", "dirs-next", "num_cpus", "solana-clap-v3-utils", @@ -5456,7 +5732,7 @@ dependencies = [ "assert_matches", "bincode", "bitflags", - "bs58", + "bs58 0.4.0", "byteorder", "chrono", "chrono-humanize", @@ -5582,7 +5858,7 @@ name = "solana-log-analyzer" version = "1.14.24" dependencies = [ "byte-unit", - "clap 3.2.23", + "clap 3.2.25", "serde", "serde_json", "solana-logger 1.14.24", @@ -5659,7 +5935,7 @@ dependencies = [ name = "solana-net-shaper" version = "1.14.24" dependencies = [ - "clap 3.2.23", + "clap 3.2.25", "rand 0.7.3", "serde", "serde_json", @@ -5671,7 +5947,7 @@ name = "solana-net-utils" version = "1.14.24" dependencies = [ "bincode", - "clap 3.2.23", + "clap 3.2.25", "crossbeam-channel", "log", "nix", @@ -5750,7 +6026,7 @@ dependencies = [ name = "solana-poh-bench" version = "1.14.24" dependencies = [ - "clap 3.2.23", + "clap 3.2.25", "log", "rand 0.7.3", "rayon", @@ -5774,7 +6050,7 @@ dependencies = [ "blake3", "borsh", "borsh-derive", - "bs58", + "bs58 0.4.0", "bv", "bytemuck", "cc", @@ -5823,7 +6099,7 @@ dependencies = [ "blake3", "borsh", "borsh-derive", - "bs58", + "bs58 0.4.0", "bv", "bytemuck", "cc", @@ -5944,7 +6220,7 @@ version = "1.14.24" dependencies = [ "base64 0.13.0", "bincode", - "bs58", + "bs58 0.4.0", "crossbeam-channel", "dashmap", "itertools", @@ -5998,7 +6274,7 @@ name = "solana-rpc-test" version = "1.14.24" dependencies = [ "bincode", - "bs58", + "bs58 0.4.0", "crossbeam-channel", "futures-util", "log", @@ -6091,7 +6367,7 @@ dependencies = [ "bincode", "bitflags", "borsh", - "bs58", + "bs58 0.4.0", "bytemuck", "byteorder", "chrono", @@ -6135,13 +6411,14 @@ dependencies = [ name = "solana-sdk" version = "1.14.24" dependencies = [ + "anchor-lang", "anyhow", "assert_matches", "base64 0.13.0", "bincode", "bitflags", "borsh", - "bs58", + "bs58 0.4.0", "bytemuck", "byteorder", "chrono", @@ -6191,9 +6468,9 @@ version = "1.14.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d41a09b9cecd0a4df63c78a192adee99ebf2d3757c19713a68246e1d9789c7c" dependencies = [ - "bs58", - "proc-macro2 1.0.52", - "quote 1.0.18", + "bs58 0.4.0", 
+ "proc-macro2 1.0.66", + "quote 1.0.31", "rustversion", "syn 1.0.109", ] @@ -6202,9 +6479,9 @@ dependencies = [ name = "solana-sdk-macro" version = "1.14.24" dependencies = [ - "bs58", - "proc-macro2 1.0.52", - "quote 1.0.18", + "bs58 0.4.0", + "proc-macro2 1.0.66", + "quote 1.0.31", "rustversion", "syn 1.0.109", ] @@ -6216,11 +6493,13 @@ dependencies = [ "crossbeam-channel", "log", "solana-client", + "solana-gossip", "solana-logger 1.14.24", "solana-measure", "solana-metrics", "solana-runtime", "solana-sdk 1.14.24", + "solana-streamer", ] [[package]] @@ -6299,7 +6578,7 @@ name = "solana-storage-proto" version = "1.14.24" dependencies = [ "bincode", - "bs58", + "bs58 0.4.0", "enum-iterator", "prost 0.11.0", "protobuf-src", @@ -6388,6 +6667,35 @@ dependencies = [ "tokio", ] +[[package]] +name = "solana-tip-distributor" +version = "1.14.24" +dependencies = [ + "anchor-lang", + "clap 3.2.25", + "env_logger", + "futures 0.3.21", + "im", + "itertools", + "log", + "num-traits", + "serde", + "serde_json", + "solana-client", + "solana-genesis-utils", + "solana-ledger", + "solana-merkle-tree", + "solana-metrics", + "solana-program 1.14.24", + "solana-runtime", + "solana-sdk 1.14.24", + "solana-stake-program", + "thiserror", + "tip-distribution", + "tip-payment", + "tokio", +] + [[package]] name = "solana-tokens" version = "1.14.24" @@ -6453,7 +6761,7 @@ dependencies = [ "base64 0.13.0", "bincode", "borsh", - "bs58", + "bs58 0.4.0", "lazy_static", "log", "serde", @@ -6529,6 +6837,7 @@ dependencies = [ "solana-vote-program", "symlink", "tikv-jemallocator", + "tonic 0.5.2", ] [[package]] @@ -6819,8 +7128,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6878079b17446e4d3eba6192bb0a2950d5b14f0ed8424b852310e5a94345d0ef" dependencies = [ "heck 0.4.0", - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "rustversion", "syn 1.0.109", ] @@ -6854,8 +7163,8 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "unicode-ident", ] @@ -6871,8 +7180,8 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", "unicode-xid 0.2.2", ] @@ -6955,8 +7264,8 @@ version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ee42b4e559f17bce0385ebf511a7beb67d5cc33c12c96b7f4e9789919d9c10f" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -7015,8 +7324,8 @@ checksum = "8dd461f47ade621665c9f4e44b20449341769911c253275dc5cb03726cbb852c" dependencies = [ "cfg-if 1.0.0", "proc-macro-error", - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -7050,8 +7359,8 @@ version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -7153,6 +7462,22 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +[[package]] +name = "tip-distribution" +version = "0.1.0" +dependencies = [ + "anchor-lang", + "jito-programs-vote-state", + "solana-program 1.14.24", +] + +[[package]] +name = "tip-payment" +version = "0.1.0" +dependencies = [ + "anchor-lang", +] + [[package]] name = "tokio" version = "1.14.1" @@ -7189,8 +7514,8 @@ version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -7266,7 +7591,7 @@ dependencies = [ "tokio-rustls 0.23.3", "tungstenite", "webpki 0.22.0", - "webpki-roots", + "webpki-roots 0.22.1", ] [[package]] @@ -7308,6 +7633,40 @@ dependencies = [ "serde", ] +[[package]] +name = "tonic" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "796c5e1cd49905e65dd8e700d4cb1dffcbfdb4fc9d017de08c1a537afd83627c" +dependencies = [ + "async-stream", + "async-trait", + "base64 0.13.0", + "bytes", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-timeout", + "percent-encoding 2.1.0", + "pin-project", + "prost 0.8.0", + "prost-derive 0.8.0", + "rustls-native-certs 0.5.0", + "tokio", + "tokio-rustls 0.22.0", + "tokio-stream", + "tokio-util 0.6.9", + "tower", + "tower-layer", + "tower-service", + "tracing", + "tracing-futures", + "webpki-roots 0.21.1", +] + [[package]] name = "tonic" version = "0.6.2" @@ -7374,15 +7733,27 @@ dependencies = [ "tracing-futures", ] +[[package]] +name = "tonic-build" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12b52d07035516c2b74337d2ac7746075e7dcae7643816c1b12c5ff8a7484c08" +dependencies = [ + "proc-macro2 1.0.66", + "prost-build 0.8.0", + "quote 1.0.31", + "syn 1.0.109", +] + [[package]] name = "tonic-build" version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9403f1bafde247186684b230dc6f38b5cd514584e8bec1dd32514be4745fa757" dependencies = [ - "proc-macro2 1.0.52", + "proc-macro2 1.0.66", "prost-build 0.9.0", - "quote 1.0.18", + "quote 1.0.31", "syn 1.0.109", ] @@ -7393,9 +7764,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2fbcd2800e34e743b9ae795867d5f77b535d3a3be69fd731e39145719752df8c" dependencies = [ "prettyplease", - "proc-macro2 1.0.52", + "proc-macro2 1.0.66", "prost-build 0.11.0", - "quote 1.0.18", + "quote 1.0.31", "syn 1.0.109", ] @@ -7469,8 +7840,8 @@ version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4f480b8f81512e825f337ad51e94c1eb5d3bbdf2b363dcd01e2b19a9ffe3f8e" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", ] @@ -7547,7 +7918,7 @@ dependencies = [ "url 2.2.2", "utf-8", "webpki 0.22.0", - "webpki-roots", + "webpki-roots 0.22.1", ] [[package]] @@ -7790,8 +8161,8 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", "wasm-bindgen-shared", ] @@ -7814,7 +8185,7 @@ version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b260f13d3012071dfb1512849c033b1925038373aea48ced3012c09df952c602" dependencies = [ - "quote 1.0.18", + "quote 1.0.31", "wasm-bindgen-macro-support", ] @@ -7824,8 +8195,8 @@ version = "0.2.82" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5be8e654bdd9b79216c2929ab90721aa82faf65c48cdf08bdc4e7f51357b80da" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", "wasm-bindgen-backend", "wasm-bindgen-shared", @@ -7867,6 +8238,15 @@ dependencies = [ "untrusted", ] +[[package]] +name = "webpki-roots" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aabe153544e473b775453675851ecc86863d2a81d786d741f6b76778f2a48940" +dependencies = [ + "webpki 0.21.4", +] + [[package]] name = "webpki-roots" version = "0.22.1" @@ -7962,7 +8342,16 @@ version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" dependencies = [ - "windows-targets", + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.1", ] [[package]] @@ -7971,21 +8360,42 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ - "windows_aarch64_gnullvm", + "windows_aarch64_gnullvm 0.42.2", "windows_aarch64_msvc 0.42.2", "windows_i686_gnu 0.42.2", "windows_i686_msvc 0.42.2", "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm", + "windows_x86_64_gnullvm 0.42.2", "windows_x86_64_msvc 0.42.2", ] +[[package]] +name = "windows-targets" +version = "0.48.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" +dependencies = [ + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + "windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" + [[package]] name = "windows_aarch64_msvc" version = "0.30.0" @@ -8004,6 +8414,12 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" + [[package]] name = "windows_i686_gnu" version = "0.30.0" @@ -8022,6 +8438,12 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" +[[package]] +name = "windows_i686_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" + [[package]] name = "windows_i686_msvc" version = "0.30.0" @@ -8040,6 +8462,12 @@ version = "0.42.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" +[[package]] +name = "windows_i686_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" + [[package]] name = "windows_x86_64_gnu" version = "0.30.0" @@ -8058,12 +8486,24 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" + [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" + [[package]] name = "windows_x86_64_msvc" version = "0.30.0" @@ -8082,6 +8522,12 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" + [[package]] name = "winreg" version = "0.10.1" @@ -8127,6 +8573,12 @@ dependencies = [ "linked-hash-map", ] +[[package]] +name = "yansi" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" + [[package]] name = "yasna" version = "0.5.0" @@ -8151,8 +8603,8 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bdff2024a851a322b08f179173ae2ba620445aef1e838f0c196820eade4ae0c7" dependencies = [ - "proc-macro2 1.0.52", - "quote 1.0.18", + "proc-macro2 1.0.66", + "quote 1.0.31", "syn 1.0.109", "synstructure", ] diff --git a/Cargo.toml b/Cargo.toml index 219a27a411..f27d9528c6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,6 +30,7 @@ members = [ "geyser-plugin-manager", "gossip", "install", + "jito-protos", "keygen", "ledger", "ledger-tool", @@ -80,6 +81,7 @@ members = [ "streamer", "sys-tuner", "test-validator", + "tip-distributor", "tokens", "transaction-dos", "transaction-status", @@ -91,6 +93,8 @@ members = [ ] exclude = [ + "anchor", + "jito-programs", "programs/bpf", ] diff --git a/README.md b/README.md index 0e939f533f..81863bae57 100644 --- a/README.md +++ b/README.md @@ -4,10 +4,14 @@

-[![Solana crate](https://img.shields.io/crates/v/solana-core.svg)](https://crates.io/crates/solana-core)
-[![Solana documentation](https://docs.rs/solana-core/badge.svg)](https://docs.rs/solana-core)
-[![Build status](https://badge.buildkite.com/8cc350de251d61483db98bdfc895b9ea0ac8ffa4a32ee850ed.svg?branch=master)](https://buildkite.com/solana-labs/solana/builds?branch=master)
-[![codecov](https://codecov.io/gh/solana-labs/solana/branch/master/graph/badge.svg)](https://codecov.io/gh/solana-labs/solana)
+[![Build status](https://badge.buildkite.com/a6981eb34c6e0c7c09e3a3cb4bda09579f0ff2dcb1bd74b2ad.svg?branch=master)](https://buildkite.com/jito-labs/jito-solana)
+
+[//]: # ([![Solana crate](https://img.shields.io/crates/v/solana-core.svg)](https://crates.io/crates/solana-core))
+[//]: # ([![Solana documentation](https://docs.rs/solana-core/badge.svg)](https://docs.rs/solana-core))
+[//]: # ([![codecov](https://codecov.io/gh/solana-labs/solana/branch/master/graph/badge.svg)](https://codecov.io/gh/solana-labs/solana))
+
+# About
+This repository contains Jito Foundation's fork of the Solana validator.
 
 # Building

diff --git a/anchor b/anchor
new file mode 160000
index 0000000000..25a497e4f0
--- /dev/null
+++ b/anchor
@@ -0,0 +1 @@
+Subproject commit 25a497e4f0f300eb363b44a52f5c794e7085b0f4
diff --git a/banking-bench/src/main.rs b/banking-bench/src/main.rs
index 2806a8a9e0..64a19c289a 100644
--- a/banking-bench/src/main.rs
+++ b/banking-bench/src/main.rs
@@ -6,7 +6,7 @@ use {
     rand::{thread_rng, Rng},
     rayon::prelude::*,
     solana_client::connection_cache::{ConnectionCache, DEFAULT_TPU_CONNECTION_POOL_SIZE},
-    solana_core::banking_stage::BankingStage,
+    solana_core::{banking_stage::BankingStage, bundle_account_locker::BundleAccountLocker},
     solana_gossip::cluster_info::{ClusterInfo, Node},
     solana_ledger::{
         blockstore::Blockstore,
@@ -30,6 +30,7 @@ use {
     },
     solana_streamer::socket::SocketAddrSpace,
     std::{
+        collections::HashSet,
         sync::{atomic::Ordering, Arc, RwLock},
         thread::sleep,
         time::{Duration, Instant},
@@ -45,9 +46,15 @@ fn check_txs(
     let now = Instant::now();
     let mut no_bank = false;
     loop {
-        if let Ok((_bank, (entry, _tick_height))) = receiver.recv_timeout(Duration::from_millis(10))
+        if let Ok(WorkingBankEntry {
+            bank: _,
+            entries_ticks,
+        }) = receiver.recv_timeout(Duration::from_millis(10))
         {
-            total += entry.transactions.len();
+            total += entries_ticks
+                .iter()
+                .map(|e| e.0.transactions.len())
+                .sum::<usize>();
         }
         if total >= ref_tx_count {
             break;
@@ -359,6 +366,8 @@ fn main() {
             None,
             Arc::new(connection_cache),
             bank_forks.clone(),
+            HashSet::default(),
+            BundleAccountLocker::default(),
         );
 
         poh_recorder.write().unwrap().set_bank(&bank, false);
diff --git a/banks-server/Cargo.toml b/banks-server/Cargo.toml
index f65be6f48d..31dd561746 100644
--- a/banks-server/Cargo.toml
+++ b/banks-server/Cargo.toml
@@ -15,6 +15,7 @@ crossbeam-channel = "0.5"
 futures = "0.3"
 solana-banks-interface = { path = "../banks-interface", version = "=1.14.24" }
 solana-client = { path = "../client", version = "=1.14.24" }
+solana-gossip = { path = "../gossip", version = "=1.14.24" }
 solana-runtime = { path = "../runtime", version = "=1.14.24" }
 solana-sdk = { path = "../sdk", version = "=1.14.24" }
 solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.14.24" }
@@ -23,6 +24,9 @@ tokio = { version = "1", features = ["full"] }
 tokio-serde = { version = "0.8", features = ["bincode"] }
 tokio-stream = "0.1"
 
+[dev-dependencies]
+solana-streamer = { path = "../streamer", version = "=1.14.24" }
+
 [lib]
 crate-type = ["lib"]
 name = "solana_banks_server"
diff --git a/banks-server/src/banks_server.rs b/banks-server/src/banks_server.rs
index cf0a1c6083..08a5478781 100644
--- a/banks-server/src/banks_server.rs
+++ b/banks-server/src/banks_server.rs
@@ -7,6 +7,7 @@ use {
         TransactionConfirmationStatus, TransactionSimulationDetails, TransactionStatus,
     },
     solana_client::connection_cache::ConnectionCache,
+    solana_gossip::cluster_info::ClusterInfo,
     solana_runtime::{
         bank::{Bank, TransactionSimulationResult},
         bank_forks::BankForks,
@@ -391,7 +392,7 @@ pub async fn start_local_server(
 
 pub async fn start_tcp_server(
     listen_addr: SocketAddr,
-    tpu_addr: SocketAddr,
+    cluster_info: Arc<ClusterInfo>,
     bank_forks: Arc<RwLock<BankForks>>,
     block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
     connection_cache: Arc<ConnectionCache>,
@@ -416,7 +417,7 @@ pub async fn start_tcp_server(
             let (sender, receiver) = unbounded();
 
             SendTransactionService::new::<NullTpuInfo>(
-                tpu_addr,
+                cluster_info.clone(),
                 &bank_forks,
                 None,
                 receiver,
diff --git a/banks-server/src/rpc_banks_service.rs b/banks-server/src/rpc_banks_service.rs
index f3224fb64e..4d4ad61dae 100644
--- a/banks-server/src/rpc_banks_service.rs
+++ b/banks-server/src/rpc_banks_service.rs
@@ -4,6 +4,7 @@ use {
     crate::banks_server::start_tcp_server,
     futures::{future::FutureExt, pin_mut, prelude::stream::StreamExt, select},
     solana_client::connection_cache::ConnectionCache,
+    solana_gossip::cluster_info::ClusterInfo,
     solana_runtime::{bank_forks::BankForks, commitment::BlockCommitmentCache},
     std::{
         net::SocketAddr,
@@ -27,7 +28,7 @@ pub struct RpcBanksService {
 /// Run the TCP service until `exit` is set to true
 async fn start_abortable_tcp_server(
     listen_addr: SocketAddr,
-    tpu_addr: SocketAddr,
+    cluster_info: Arc<ClusterInfo>,
     bank_forks: Arc<RwLock<BankForks>>,
     block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
     connection_cache: Arc<ConnectionCache>,
@@ -35,7 +36,7 @@ ) {
     let server = start_tcp_server(
         listen_addr,
-        tpu_addr,
+        cluster_info,
         bank_forks.clone(),
         block_commitment_cache.clone(),
         connection_cache,
@@ -59,7 +60,7 @@ impl RpcBanksService {
     fn run(
         listen_addr: SocketAddr,
-        tpu_addr: SocketAddr,
+        cluster_info: Arc<ClusterInfo>,
         bank_forks: Arc<RwLock<BankForks>>,
         block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
         connection_cache: Arc<ConnectionCache>,
@@ -67,7 +68,7 @@ impl RpcBanksService {
     ) {
         let server = start_abortable_tcp_server(
             listen_addr,
-            tpu_addr,
+            cluster_info,
             bank_forks,
             block_commitment_cache,
             connection_cache,
@@ -78,7 +79,7 @@ impl RpcBanksService {
 
     pub fn new(
         listen_addr: SocketAddr,
-        tpu_addr: SocketAddr,
+        cluster_info: Arc<ClusterInfo>,
         bank_forks: &Arc<RwLock<BankForks>>,
         block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
         connection_cache: &Arc<ConnectionCache>,
@@ -93,7 +94,7 @@ impl RpcBanksService {
             .spawn(move || {
                 Self::run(
                     listen_addr,
-                    tpu_addr,
+                    cluster_info,
                     bank_forks,
                     block_commitment_cache,
                     connection_cache,
@@ -112,7 +113,14 @@ impl RpcBanksService {
 
 #[cfg(test)]
 mod tests {
-    use {super::*, solana_runtime::bank::Bank};
+    use {
+        super::*,
+        solana_gossip::legacy_contact_info::LegacyContactInfo as ContactInfo,
+        solana_runtime::bank::Bank,
+        solana_sdk::signature::Keypair,
+        solana_streamer::socket::SocketAddrSpace,
+        std::net::{IpAddr, Ipv4Addr},
+    };
 
     #[test]
     fn test_rpc_banks_server_exit() {
@@ -121,9 +129,18 @@ mod tests {
         let connection_cache = Arc::new(ConnectionCache::default());
         let exit = Arc::new(AtomicBool::new(false));
         let addr = "127.0.0.1:0".parse().unwrap();
+        let contact_info = ContactInfo {
+            tpu: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080),
+            ..ContactInfo::default()
+        };
+        let cluster_info: Arc<ClusterInfo> = Arc::new(ClusterInfo::new(
+            contact_info,
+            Arc::new(Keypair::new()),
SocketAddrSpace::new(false), + )); let service = RpcBanksService::new( addr, - addr, + cluster_info, &bank_forks, &block_commitment_cache, &connection_cache, diff --git a/bootstrap b/bootstrap new file mode 100755 index 0000000000..b01bc6734f --- /dev/null +++ b/bootstrap @@ -0,0 +1,21 @@ +#!/usr/bin/env sh +bank_hash=$(./target/release/solana-ledger-tool -l config/bootstrap-validator bank-hash) + +# NOTE: make sure tip-payment and tip-distribution program are deployed using the correct pubkeys +RUST_LOG=INFO,solana_core::bundle_stage=DEBUG \ + NDEBUG=1 ./multinode-demo/bootstrap-validator.sh \ + --wait-for-supermajority 0 \ + --expected-bank-hash $bank_hash \ + --block-engine-address http://127.0.0.1:1003 \ + --block-engine-auth-service-address http://127.0.0.1:1005 \ + --relayer-auth-service-address http://127.0.0.1:11226 \ + --relayer-address http://127.0.0.1:11226 \ + --rpc-pubsub-enable-block-subscription \ + --enable-rpc-transaction-history \ + --tip-payment-program-pubkey 6veFRUKJBNGMR58LEcKn5Bc6MR17WZF4rsgD4Lqq7fsU \ + --tip-distribution-program-pubkey 3PX9z1qPj37eNZqH7e5fyaVDyG7ARqkjkYEe1a4xsBkA \ + --commission-bps 0 \ + --shred-receiver-address 127.0.0.1:1002 \ + --allow-private-addr \ + --trust-relayer-packets \ + --trust-block-engine-packets diff --git a/ci/buildkite-pipeline-in-disk.sh b/ci/buildkite-pipeline-in-disk.sh index fcb4eb6118..694c2dab46 100644 --- a/ci/buildkite-pipeline-in-disk.sh +++ b/ci/buildkite-pipeline-in-disk.sh @@ -185,7 +185,7 @@ all_test_steps() { queue: "gcp" EOF else - annotate --style info \ + annotate --style info --context test-stable-bpf \ "Stable-BPF skipped as no relevant files were modified" fi @@ -203,16 +203,20 @@ EOF ^programs/ \ ^sdk/ \ ; then - cat >> "$output_file" <<"EOF" - - command: "ci/test-stable-perf.sh" - name: "stable-perf" - timeout_in_minutes: 20 - artifact_paths: "log-*.txt" - agents: - queue: "cuda" -EOF + +annotate --style warning --context test-stable-perf \ + "test-stable-perf is currently disabled because it requires GPUs (LB)" +#cat >> "$output_file" <<"EOF" +# - command: "ci/test-stable-perf.sh" +# name: "stable-perf" +# timeout_in_minutes: 20 +# artifact_paths: "log-*.txt" +# agents: +# queue: "cuda" +#EOF + else - annotate --style info \ + annotate --style info --context test-stable-perf \ "Stable-perf skipped as no relevant files were modified" fi @@ -237,7 +241,7 @@ EOF timeout_in_minutes: 30 EOF else - annotate --style info \ + annotate --style info --context test-downstream-projects \ "downstream-projects skipped as no relevant files were modified" fi @@ -247,9 +251,11 @@ EOF ^ci/test-stable.sh \ ^sdk/ \ ; then - command_step wasm ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-wasm.sh" 20 + annotate --style warning --context test-wasm \ + "test-wasm is currently disabled because it times out (LB)" +# command_step wasm ". 
ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-wasm.sh" 20 else - annotate --style info \ + annotate --style info --context test-wasm \ "wasm skipped as no relevant files were modified" fi @@ -312,7 +318,7 @@ if [[ -n $BUILDKITE_TAG ]]; then "https://github.com/solana-labs/solana/releases/$BUILDKITE_TAG" # Jump directly to the secondary build to publish release artifacts quickly - trigger_secondary_step +# trigger_secondary_step exit 0 fi @@ -340,5 +346,5 @@ fi start_pipeline "Push pipeline for ${BUILDKITE_BRANCH:-?unknown branch?}" pull_or_push_steps wait_step -trigger_secondary_step +#trigger_secondary_step exit 0 diff --git a/ci/buildkite-pipeline.sh b/ci/buildkite-pipeline.sh index 71bdb01b60..a719b14ef4 100755 --- a/ci/buildkite-pipeline.sh +++ b/ci/buildkite-pipeline.sh @@ -190,7 +190,7 @@ all_test_steps() { queue: "solana" EOF else - annotate --style info \ + annotate --style info --context test-stable-bpf \ "Stable-BPF skipped as no relevant files were modified" fi @@ -208,16 +208,20 @@ EOF ^programs/ \ ^sdk/ \ ; then - cat >> "$output_file" <<"EOF" - - command: "ci/test-stable-perf.sh" - name: "stable-perf" - timeout_in_minutes: 20 - artifact_paths: "log-*.txt" - agents: - queue: "cuda" -EOF + +annotate --style warning --context test-stable-perf \ + "test-stable-perf is currently disabled because it requires GPUs (LB)" +#cat >> "$output_file" <<"EOF" +# - command: "ci/test-stable-perf.sh" +# name: "stable-perf" +# timeout_in_minutes: 20 +# artifact_paths: "log-*.txt" +# agents: +# queue: "cuda" +#EOF + else - annotate --style info \ + annotate --style info --context test-stable-perf \ "Stable-perf skipped as no relevant files were modified" fi @@ -244,7 +248,7 @@ EOF queue: "solana" EOF else - annotate --style info \ + annotate --style info --context test-downstream-projects \ "downstream-projects skipped as no relevant files were modified" fi @@ -254,9 +258,11 @@ EOF ^ci/test-stable.sh \ ^sdk/ \ ; then - command_step wasm ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-wasm.sh" 20 + annotate --style warning --context test-wasm \ + "test-wasm is currently disabled because it times out (LB)" +# command_step wasm ". 
ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-wasm.sh" 20 else - annotate --style info \ + annotate --style info --context test-wasm \ "wasm skipped as no relevant files were modified" fi @@ -342,7 +348,7 @@ if [[ -n $BUILDKITE_TAG ]]; then "https://github.com/solana-labs/solana/releases/$BUILDKITE_TAG" # Jump directly to the secondary build to publish release artifacts quickly - trigger_secondary_step +# trigger_secondary_step exit 0 fi @@ -370,5 +376,5 @@ fi start_pipeline "Push pipeline for ${BUILDKITE_BRANCH:-?unknown branch?}" pull_or_push_steps wait_step -trigger_secondary_step +#trigger_secondary_step exit 0 diff --git a/ci/buildkite-solana-private.sh b/ci/buildkite-solana-private.sh index 7a2f19d45f..026ebdaf86 100644 --- a/ci/buildkite-solana-private.sh +++ b/ci/buildkite-solana-private.sh @@ -185,7 +185,7 @@ all_test_steps() { queue: "sol-private" EOF else - annotate --style info \ + annotate --style info --context test-stable-bpf \ "Stable-BPF skipped as no relevant files were modified" fi @@ -203,16 +203,19 @@ EOF ^programs/ \ ^sdk/ \ ; then - cat >> "$output_file" <<"EOF" - - command: "ci/test-stable-perf.sh" - name: "stable-perf" - timeout_in_minutes: 35 - artifact_paths: "log-*.txt" - agents: - queue: "sol-private" -EOF + +annotate --style warning --context test-stable-perf \ + "test-stable-perf is currently disabled because it requires GPUs (LB)" +# cat >> "$output_file" <<"EOF" +# - command: "ci/test-stable-perf.sh" +# name: "stable-perf" +# timeout_in_minutes: 35 +# artifact_paths: "log-*.txt" +# agents: +# queue: "sol-private" +#EOF else - annotate --style info \ + annotate --style info --context test-stable-perf \ "Stable-perf skipped as no relevant files were modified" fi @@ -239,7 +242,7 @@ EOF queue: "sol-private" EOF else - annotate --style info \ + annotate --style info --context test-downstream-projects \ "downstream-projects skipped as no relevant files were modified" fi @@ -249,9 +252,11 @@ EOF ^ci/test-stable.sh \ ^sdk/ \ ; then - command_step wasm ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-wasm.sh" 20 + annotate --style warning --context test-wasm \ + "test-wasm is currently disabled because it times out (LB)" +# command_step wasm ". 
ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-wasm.sh" 20
   else
-    annotate --style info \
+    annotate --style info --context test-wasm \
       "wasm skipped as no relevant files were modified"
   fi
diff --git a/ci/channel-info.sh b/ci/channel-info.sh
index c82806454d..101583307f 100755
--- a/ci/channel-info.sh
+++ b/ci/channel-info.sh
@@ -11,7 +11,7 @@ here="$(dirname "$0")"
 # shellcheck source=ci/semver_bash/semver.sh
 source "$here"/semver_bash/semver.sh
 
-remote=https://github.com/solana-labs/solana.git
+remote=https://github.com/jito-foundation/jito-solana.git
 
 # Fetch all vX.Y.Z tags
 #
diff --git a/ci/do-audit.sh b/ci/do-audit.sh
index a4da2b5b2a..293aeff11f 100755
--- a/ci/do-audit.sh
+++ b/ci/do-audit.sh
@@ -43,6 +43,9 @@ cargo_audit_ignores=(
   #
   # Not worth upgrading tokio version on a stable branch
   --ignore RUSTSEC-2023-0001
+
+
+  --ignore RUSTSEC-2022-0093
 )
 scripts/cargo-for-all-lock-files.sh stable audit "${cargo_audit_ignores[@]}" | $dep_tree_filter
 # we want the `cargo audit` exit code, not `$dep_tree_filter`'s
diff --git a/ci/docker-rust/Dockerfile b/ci/docker-rust/Dockerfile
index 6805f85fcd..cd638e5c28 100644
--- a/ci/docker-rust/Dockerfile
+++ b/ci/docker-rust/Dockerfile
@@ -40,6 +40,7 @@ RUN set -x \
   && cargo install mdbook-linkcheck \
   && cargo install svgbob_cli \
   && cargo install wasm-pack \
+  && cargo install sccache \
   && rustc --version \
   && cargo --version \
   && curl -OL https://github.com/google/protobuf/releases/download/v$PROTOC_VERSION/$PROTOC_ZIP \
diff --git a/ci/rust-version.sh b/ci/rust-version.sh
index dc3570fa93..aec772f68f 100644
--- a/ci/rust-version.sh
+++ b/ci/rust-version.sh
@@ -42,6 +42,7 @@ export rust_nightly_docker_image=solanalabs/rust-nightly:"$nightly_version"
     echo "$0: Missing toolchain? Installing...: $toolchain" >&2
     rustup install "$toolchain"
     cargo +"$toolchain" -V
+    rustup component add rustfmt --toolchain "$toolchain"
   fi
 }
diff --git a/ci/test-stable.sh b/ci/test-stable.sh
index e3a630599d..dd70804696 100755
--- a/ci/test-stable.sh
+++ b/ci/test-stable.sh
@@ -119,7 +119,7 @@ test-stable-bpf)
   # latest mainnet-beta release version.
   solana_program_count=$(grep -c 'solana-program v' cargo.log)
   rm -f cargo.log
-  if ((solana_program_count > 10)); then
+  if ((solana_program_count > 20)); then
     echo "Regression of build redundancy ${solana_program_count}."
     echo "Review dependency features that trigger redundant rebuilds of solana-program."
exit 1 diff --git a/client/src/http_sender.rs b/client/src/http_sender.rs index 18caad62d6..c732208c1a 100644 --- a/client/src/http_sender.rs +++ b/client/src/http_sender.rs @@ -71,6 +71,110 @@ impl HttpSender { stats: RwLock::new(RpcTransportStats::default()), } } + + fn check_response(json: &serde_json::Value) -> Result<()> { + if json["error"].is_object() { + return match serde_json::from_value::(json["error"].clone()) { + Ok(rpc_error_object) => { + let data = match rpc_error_object.code { + rpc_custom_error::JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE => { + match serde_json::from_value::( + json["error"]["data"].clone(), + ) { + Ok(data) => { + RpcResponseErrorData::SendTransactionPreflightFailure(data) + } + Err(err) => { + debug!( + "Failed to deserialize RpcSimulateTransactionResult: {:?}", + err + ); + RpcResponseErrorData::Empty + } + } + } + rpc_custom_error::JSON_RPC_SERVER_ERROR_NODE_UNHEALTHY => { + match serde_json::from_value::( + json["error"]["data"].clone(), + ) { + Ok(rpc_custom_error::NodeUnhealthyErrorData { num_slots_behind }) => { + RpcResponseErrorData::NodeUnhealthy { num_slots_behind } + } + Err(_err) => RpcResponseErrorData::Empty, + } + } + _ => RpcResponseErrorData::Empty, + }; + + Err(RpcError::RpcResponseError { + request_id: json["id"].as_u64().unwrap(), + code: rpc_error_object.code, + message: rpc_error_object.message, + data, + } + .into()) + } + Err(err) => Err(RpcError::RpcRequestError(format!( + "Failed to deserialize RPC error response: {} [{}]", + serde_json::to_string(&json["error"]).unwrap(), + err + )) + .into()), + }; + } + Ok(()) + } + + async fn do_send_with_retry( + &self, + request: serde_json::Value, + ) -> reqwest::Result { + let mut stats_updater = StatsUpdater::new(&self.stats); + let mut too_many_requests_retries = 5; + loop { + let response = { + let client = self.client.clone(); + let request = request.to_string(); + client + .post(&self.url) + .header(CONTENT_TYPE, "application/json") + .body(request) + .send() + .await + }?; + + if !response.status().is_success() { + if response.status() == StatusCode::TOO_MANY_REQUESTS + && too_many_requests_retries > 0 + { + let mut duration = Duration::from_millis(500); + if let Some(retry_after) = response.headers().get(RETRY_AFTER) { + if let Ok(retry_after) = retry_after.to_str() { + if let Ok(retry_after) = retry_after.parse::() { + if retry_after < 120 { + duration = Duration::from_secs(retry_after); + } + } + } + } + + too_many_requests_retries -= 1; + debug!( + "Too many requests: server responded with {:?}, {} retries left, pausing for {:?}", + response, too_many_requests_retries, duration + ); + + sleep(duration).await; + stats_updater.add_rate_limited_time(duration); + + continue; + } + return Err(response.error_for_status().unwrap_err()); + } + + return response.json::().await; + } + } } #[derive(Deserialize, Debug)] @@ -110,103 +214,37 @@ impl<'a> Drop for StatsUpdater<'a> { #[async_trait] impl RpcSender for HttpSender { - fn get_transport_stats(&self) -> RpcTransportStats { - self.stats.read().unwrap().clone() - } - async fn send( &self, request: RpcRequest, params: serde_json::Value, ) -> Result { - let mut stats_updater = StatsUpdater::new(&self.stats); - let request_id = self.request_id.fetch_add(1, Ordering::Relaxed); - let request_json = request.build_request_json(request_id, params).to_string(); + let request = request.build_request_json(request_id, params); + let mut resp = self.do_send_with_retry(request).await?; + Self::check_response(&resp)?; - let mut 
too_many_requests_retries = 5; - loop { - let response = { - let client = self.client.clone(); - let request_json = request_json.clone(); - client - .post(&self.url) - .header(CONTENT_TYPE, "application/json") - .body(request_json) - .send() - .await - }?; + Ok(resp["result"].take()) + } - if !response.status().is_success() { - if response.status() == StatusCode::TOO_MANY_REQUESTS - && too_many_requests_retries > 0 - { - let mut duration = Duration::from_millis(500); - if let Some(retry_after) = response.headers().get(RETRY_AFTER) { - if let Ok(retry_after) = retry_after.to_str() { - if let Ok(retry_after) = retry_after.parse::() { - if retry_after < 120 { - duration = Duration::from_secs(retry_after); - } - } - } - } + async fn send_batch( + &self, + requests_and_params: Vec<(RpcRequest, serde_json::Value)>, + ) -> Result { + let mut batch_request = vec![]; + for (request_id, req) in requests_and_params.into_iter().enumerate() { + batch_request.push(req.0.build_request_json(request_id as u64, req.1)); + } - too_many_requests_retries -= 1; - debug!( - "Too many requests: server responded with {:?}, {} retries left, pausing for {:?}", - response, too_many_requests_retries, duration - ); + let resp = self + .do_send_with_retry(serde_json::Value::Array(batch_request)) + .await?; - sleep(duration).await; - stats_updater.add_rate_limited_time(duration); - continue; - } - return Err(response.error_for_status().unwrap_err().into()); - } + Ok(resp) + } - let mut json = response.json::().await?; - if json["error"].is_object() { - return match serde_json::from_value::(json["error"].clone()) { - Ok(rpc_error_object) => { - let data = match rpc_error_object.code { - rpc_custom_error::JSON_RPC_SERVER_ERROR_SEND_TRANSACTION_PREFLIGHT_FAILURE => { - match serde_json::from_value::(json["error"]["data"].clone()) { - Ok(data) => RpcResponseErrorData::SendTransactionPreflightFailure(data), - Err(err) => { - debug!("Failed to deserialize RpcSimulateTransactionResult: {:?}", err); - RpcResponseErrorData::Empty - } - } - }, - rpc_custom_error::JSON_RPC_SERVER_ERROR_NODE_UNHEALTHY => { - match serde_json::from_value::(json["error"]["data"].clone()) { - Ok(rpc_custom_error::NodeUnhealthyErrorData {num_slots_behind}) => RpcResponseErrorData::NodeUnhealthy {num_slots_behind}, - Err(_err) => { - RpcResponseErrorData::Empty - } - } - }, - _ => RpcResponseErrorData::Empty - }; - - Err(RpcError::RpcResponseError { - code: rpc_error_object.code, - message: rpc_error_object.message, - data, - } - .into()) - } - Err(err) => Err(RpcError::RpcRequestError(format!( - "Failed to deserialize RPC error response: {} [{}]", - serde_json::to_string(&json["error"]).unwrap(), - err - )) - .into()), - }; - } - return Ok(json["result"].take()); - } + fn get_transport_stats(&self) -> RpcTransportStats { + self.stats.read().unwrap().clone() } fn url(&self) -> String { diff --git a/client/src/mock_sender.rs b/client/src/mock_sender.rs index 057dbed5c8..2a26c8c0b4 100644 --- a/client/src/mock_sender.rs +++ b/client/src/mock_sender.rs @@ -484,4 +484,11 @@ impl RpcSender for MockSender { fn url(&self) -> String { format!("MockSender: {}", self.url) } + + async fn send_batch( + &self, + _requests_and_params: Vec<(RpcRequest, serde_json::Value)>, + ) -> Result { + todo!() + } } diff --git a/client/src/nonblocking/rpc_client.rs b/client/src/nonblocking/rpc_client.rs index e47908016f..014d6cf118 100644 --- a/client/src/nonblocking/rpc_client.rs +++ b/client/src/nonblocking/rpc_client.rs @@ -37,6 +37,7 @@ use { }, solana_sdk::{ account::Account, 
+ bundle::VersionedBundle, clock::{Epoch, Slot, UnixTimestamp, DEFAULT_MS_PER_SLOT, MAX_HASH_AGE_IN_SECONDS}, commitment_config::{CommitmentConfig, CommitmentLevel}, epoch_info::EpochInfo, @@ -45,7 +46,7 @@ use { hash::Hash, pubkey::Pubkey, signature::Signature, - transaction, + transaction::{self, VersionedTransaction}, }, solana_transaction_status::{ EncodedConfirmedBlock, EncodedConfirmedTransactionWithStatusMeta, TransactionStatus, @@ -960,6 +961,7 @@ impl RpcClient { code, message, data, + .. }) = &err.kind { debug!("{} {}", code, message); @@ -1406,6 +1408,113 @@ impl RpcClient { .await } + pub async fn batch_simulate_bundle( + &self, + bundles: &[VersionedBundle], + ) -> BatchRpcResult { + let configs = bundles + .iter() + .map(|b| RpcSimulateBundleConfig { + simulation_bank: Some(SimulationSlotConfig::Commitment(self.commitment())), + pre_execution_accounts_configs: vec![None; b.transactions.len()], + post_execution_accounts_configs: vec![None; b.transactions.len()], + ..RpcSimulateBundleConfig::default() + }) + .collect::>(); + + self.batch_simulate_bundle_with_config(bundles.iter().zip(configs).collect()) + .await + } + + pub async fn batch_simulate_bundle_with_config( + &self, + bundles_and_configs: Vec<(&VersionedBundle, RpcSimulateBundleConfig)>, + ) -> BatchRpcResult { + let mut params = vec![]; + for (bundle, config) in bundles_and_configs { + let transaction_encoding = if let Some(encoding) = config.transaction_encoding { + encoding + } else { + self.default_cluster_transaction_encoding().await? + }; + + let simulation_bank = config.simulation_bank.unwrap_or_default(); + + let config = RpcSimulateBundleConfig { + transaction_encoding: Some(transaction_encoding), + simulation_bank: Some(simulation_bank), + ..config + }; + + let encoded_transactions = bundle + .transactions + .iter() + .map(|tx| serialize_and_encode::(tx, transaction_encoding)) + .collect::, ClientError>>()?; + let rpc_bundle_request = RpcBundleRequest { + encoded_transactions, + }; + + params.push(json!([rpc_bundle_request, config])); + } + + let requests_and_params = vec![RpcRequest::SimulateBundle; params.len()] + .into_iter() + .zip(params) + .collect(); + self.send_batch(requests_and_params).await + } + + pub async fn simulate_bundle( + &self, + bundle: &VersionedBundle, + ) -> RpcResult { + self.simulate_bundle_with_config( + bundle, + RpcSimulateBundleConfig { + simulation_bank: Some(SimulationSlotConfig::Commitment(self.commitment())), + pre_execution_accounts_configs: vec![None; bundle.transactions.len()], + post_execution_accounts_configs: vec![None; bundle.transactions.len()], + ..RpcSimulateBundleConfig::default() + }, + ) + .await + } + + pub async fn simulate_bundle_with_config( + &self, + bundle: &VersionedBundle, + config: RpcSimulateBundleConfig, + ) -> RpcResult { + let transaction_encoding = if let Some(enc) = config.transaction_encoding { + enc + } else { + self.default_cluster_transaction_encoding().await? 
+ }; + let simulation_bank = Some(config.simulation_bank.unwrap_or_default()); + + let encoded_transactions = bundle + .transactions + .iter() + .map(|tx| serialize_and_encode::(tx, transaction_encoding)) + .collect::>>()?; + let rpc_bundle_request = RpcBundleRequest { + encoded_transactions, + }; + + let config = RpcSimulateBundleConfig { + transaction_encoding: Some(transaction_encoding), + simulation_bank, + ..config + }; + + self.send( + RpcRequest::SimulateBundle, + json!([rpc_bundle_request, config]), + ) + .await + } + /// Returns the highest slot information that the node has snapshots for. /// /// This will find the highest full snapshot slot, and the highest incremental snapshot slot @@ -5455,6 +5564,22 @@ impl RpcClient { .map_err(|err| ClientError::new_with_request(err.into(), request)) } + pub async fn send_batch( + &self, + requests_and_params: Vec<(RpcRequest, Value)>, + ) -> ClientResult + where + T: serde::de::DeserializeOwned, + { + let response = self.sender.send_batch(requests_and_params).await?; + debug!("response: {:?}", response); + + serde_json::from_value(response).map_err(|err| ClientError { + request: None, + kind: err.into(), + }) + } + pub fn get_transport_stats(&self) -> RpcTransportStats { self.sender.get_transport_stats() } diff --git a/client/src/rpc_client.rs b/client/src/rpc_client.rs index bac070ce6f..d2efc9ad1e 100644 --- a/client/src/rpc_client.rs +++ b/client/src/rpc_client.rs @@ -28,6 +28,7 @@ use { }, solana_sdk::{ account::Account, + bundle::VersionedBundle, clock::{Epoch, Slot, UnixTimestamp}, commitment_config::CommitmentConfig, epoch_info::EpochInfo, @@ -1151,6 +1152,35 @@ impl RpcClient { ) } + pub fn batch_simulate_bundle( + &self, + bundles: &[VersionedBundle], + ) -> BatchRpcResult { + self.invoke(self.rpc_client.batch_simulate_bundle(bundles)) + } + + pub fn batch_simulate_bundle_with_config( + &self, + bundles_and_configs: Vec<(&VersionedBundle, RpcSimulateBundleConfig)>, + ) -> BatchRpcResult { + self.invoke( + self.rpc_client + .batch_simulate_bundle_with_config(bundles_and_configs), + ) + } + + pub fn simulate_bundle(&self, bundle: &VersionedBundle) -> RpcResult { + self.invoke(self.rpc_client.simulate_bundle(bundle)) + } + + pub fn simulate_bundle_with_config( + &self, + bundle: &VersionedBundle, + config: RpcSimulateBundleConfig, + ) -> RpcResult { + self.invoke(self.rpc_client.simulate_bundle_with_config(bundle, config)) + } + /// Returns the highest slot information that the node has snapshots for. /// /// This will find the highest full snapshot slot, and the highest incremental snapshot slot diff --git a/client/src/rpc_config.rs b/client/src/rpc_config.rs index d5bc986a21..8877a789cb 100644 --- a/client/src/rpc_config.rs +++ b/client/src/rpc_config.rs @@ -46,7 +46,57 @@ pub struct RpcSimulateTransactionConfig { pub min_context_slot: Option, } -#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Serialize, Deserialize, Clone, Copy, Debug, Eq, PartialEq)] +#[serde(rename_all = "camelCase")] +pub enum SimulationSlotConfig { + /// Simulate on top of bank with the provided commitment. + Commitment(CommitmentConfig), + + /// Simulate on the provided slot's bank. + Slot(Slot), + + /// Simulates on top of the RPC's highest slot's bank i.e. the working bank. 
+    Tip,
+}
+
+impl Default for SimulationSlotConfig {
+    fn default() -> Self {
+        Self::Commitment(CommitmentConfig {
+            commitment: CommitmentLevel::Confirmed,
+        })
+    }
+}
+
+#[derive(Debug, PartialEq, Default, Eq, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct RpcBundleRequest {
+    pub encoded_transactions: Vec<String>,
+}
+
+#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct RpcSimulateBundleConfig {
+    /// Gives the state of accounts pre/post transaction execution.
+    /// The length of each of these must be equal to the number of transactions.
+    pub pre_execution_accounts_configs: Vec<Option<RpcSimulateTransactionAccountsConfig>>,
+    pub post_execution_accounts_configs: Vec<Option<RpcSimulateTransactionAccountsConfig>>,
+
+    /// Specifies the encoding scheme of the contained transactions.
+    pub transaction_encoding: Option<UiTransactionEncoding>,
+
+    /// Specifies the bank to run simulation against.
+    pub simulation_bank: Option<SimulationSlotConfig>,
+
+    /// Opt to skip sig-verify for faster performance.
+    #[serde(default)]
+    pub skip_sig_verify: bool,
+
+    /// Replace recent blockhash to simulate old transactions without resigning.
+    #[serde(default)]
+    pub replace_recent_blockhash: bool,
+}
+
+#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcRequestAirdropConfig {
     pub recent_blockhash: Option<String>, // base-58 encoded blockhash
diff --git a/client/src/rpc_request.rs b/client/src/rpc_request.rs
index 42cd79686b..842ebf728c 100644
--- a/client/src/rpc_request.rs
+++ b/client/src/rpc_request.rs
@@ -112,6 +112,7 @@ pub enum RpcRequest {
     RequestAirdrop,
     SendTransaction,
     SimulateTransaction,
+    SimulateBundle,
     SignVote,
 }
 
@@ -187,6 +188,7 @@ impl fmt::Display for RpcRequest {
             RpcRequest::RequestAirdrop => "requestAirdrop",
             RpcRequest::SendTransaction => "sendTransaction",
             RpcRequest::SimulateTransaction => "simulateTransaction",
+            RpcRequest::SimulateBundle => "simulateBundle",
             RpcRequest::SignVote => "signVote",
         };
 
@@ -252,6 +254,7 @@ pub enum RpcError {
     RpcRequestError(String),
     #[error("RPC response error {code}: {message} {data}")]
     RpcResponseError {
+        request_id: u64,
         code: i64,
         message: String,
         data: RpcResponseErrorData,
diff --git a/client/src/rpc_response.rs b/client/src/rpc_response.rs
index 557ec2891c..e7d08db73b 100644
--- a/client/src/rpc_response.rs
+++ b/client/src/rpc_response.rs
@@ -3,6 +3,7 @@ use {
     serde::{Deserialize, Deserializer, Serialize, Serializer},
     solana_account_decoder::{parse_token::UiTokenAmount, UiAccount},
     solana_sdk::{
+        bundle::error::BundleExecutionError,
         clock::{Epoch, Slot, UnixTimestamp},
         fee_calculator::{FeeCalculator, FeeRateGovernor},
         hash::Hash,
@@ -36,6 +37,7 @@ impl OptionalContext {
     }
 }
 
+pub type BatchRpcResult<T> = client_error::Result<Vec<BatchResponse<T>>>;
 pub type RpcResult<T> = client_error::Result<Response<T>>;
 
 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
@@ -46,6 +48,15 @@ pub struct RpcResponseContext {
     pub api_version: Option<RpcApiVersion>,
 }
 
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct BatchRpcResponseContext {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub slot: Option<Slot>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub api_version: Option<RpcApiVersion>,
+}
+
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct RpcApiVersion(semver::Version);
 
@@ -92,6 +103,12 @@ impl RpcResponseContext {
     }
 }
 
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct BatchResponse<T> {
+    pub id: u64,
+    pub result: Response<T>,
+}
+
 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub struct
Response { pub context: RpcResponseContext, @@ -359,6 +376,24 @@ pub struct RpcIdentity { #[derive(Serialize, Deserialize, Clone, Debug)] #[serde(rename_all = "camelCase")] +pub enum RpcBundleSimulationSummary { + /// error and offending transaction signature + Failed { + error: BundleExecutionError, + tx_signature: String, + }, + Succeeded, +} + +#[derive(Serialize, Deserialize, Clone, Debug)] +#[serde(rename_all = "camelCase")] +pub struct RpcSimulateBundleResult { + pub summary: RpcBundleSimulationSummary, + pub transaction_results: Vec, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] +#[serde(rename_all = "camelCase")] pub struct RpcVote { /// Vote account address, as base-58 encoded string pub vote_pubkey: String, @@ -411,7 +446,19 @@ pub struct RpcSignatureConfirmation { pub status: Result<()>, } -#[derive(Serialize, Deserialize, Clone, Debug)] +// TODO: consolidate with [RpcSimulateTransactionResult] +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] +#[serde(rename_all = "camelCase")] +pub struct RpcSimulateBundleTransactionResult { + pub err: Option, + pub logs: Option>, + pub pre_execution_accounts: Option>, + pub post_execution_accounts: Option>, + pub units_consumed: Option, + pub return_data: Option, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] #[serde(rename_all = "camelCase")] pub struct RpcSimulateTransactionResult { pub err: Option, diff --git a/client/src/rpc_sender.rs b/client/src/rpc_sender.rs index dded04dfc6..d76923af52 100644 --- a/client/src/rpc_sender.rs +++ b/client/src/rpc_sender.rs @@ -31,6 +31,10 @@ pub trait RpcSender { request: RpcRequest, params: serde_json::Value, ) -> Result; + async fn send_batch( + &self, + requests_and_params: Vec<(RpcRequest, serde_json::Value)>, + ) -> Result; fn get_transport_stats(&self) -> RpcTransportStats; fn url(&self) -> String; } diff --git a/core/Cargo.toml b/core/Cargo.toml index 70d6c82eb8..adca124b01 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -15,22 +15,32 @@ codecov = { repository = "solana-labs/solana", branch = "master", service = "git [dependencies] ahash = "0.7.6" +anchor-lang = { path = "../anchor/lang" } base64 = "0.13.0" bincode = "1.3.3" bs58 = "0.4.0" +bytes = "1.1.0" chrono = { version = "0.4.11", features = ["serde"] } +clap = { version = "3.1.15", features = ["derive"] } crossbeam-channel = "0.5" dashmap = { version = "4.0.2", features = ["rayon", "raw-api"] } eager = "0.1.0" etcd-client = { version = "0.8.1", features = ["tls"] } fs_extra = "1.2.0" +futures = "0.3" +futures-util = "0.3" +h2 = "0.3.18" # CVE-2023-26964 histogram = "0.6.9" +indexmap = "1.8.1" itertools = "0.10.3" +jito-protos = { path = "../jito-protos", version = "=1.14.24" } lazy_static = "1.4.0" log = "0.4.17" lru = "0.7.7" min-max-heap = "1.3.0" num_enum = "0.5.7" +prost = "0.8.0" +prost-types = "0.8.0" rand = "0.7.0" rand_chacha = "0.2.2" rayon = "1.5.3" @@ -63,7 +73,11 @@ solana-vote-program = { path = "../programs/vote", version = "=1.14.24" } sys-info = "0.9.1" tempfile = "3.4.0" thiserror = "1.0" +tip-distribution = { path = "../jito-programs/mev-programs/programs/tip-distribution", features = ["no-entrypoint"] } +tip-payment = { path = "../jito-programs/mev-programs/programs/tip-payment", features = ["no-entrypoint"] } tokio = { version = "~1.14.1", features = ["full"] } +tokio-stream = "0.1.8" +tonic = { version = "0.5.2", features = ["tls"] } trees = "0.4.2" [dev-dependencies] @@ -83,6 +97,7 @@ sysctl = "0.4.4" [build-dependencies] rustc_version = "0.4" 
+tonic-build = "0.5.2" [[bench]] name = "banking_stage" diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs index 9905172a6c..aab5a93859 100644 --- a/core/benches/banking_stage.rs +++ b/core/benches/banking_stage.rs @@ -11,6 +11,7 @@ use { solana_client::connection_cache::ConnectionCache, solana_core::{ banking_stage::{BankingStage, BankingStageStats}, + bundle_account_locker::BundleAccountLocker, leader_slot_banking_stage_metrics::LeaderSlotMetricsTracker, qos_service::QosService, unprocessed_packet_batches::*, @@ -38,6 +39,7 @@ use { }, solana_streamer::socket::SocketAddrSpace, std::{ + collections::HashSet, sync::{atomic::Ordering, Arc, RwLock}, time::{Duration, Instant}, }, @@ -48,8 +50,15 @@ fn check_txs(receiver: &Arc>, ref_tx_count: usize) { let mut total = 0; let now = Instant::now(); loop { - if let Ok((_bank, (entry, _tick_height))) = receiver.recv_timeout(Duration::new(1, 0)) { - total += entry.transactions.len(); + if let Ok(WorkingBankEntry { + bank: _, + entries_ticks, + }) = receiver.recv_timeout(Duration::new(1, 0)) + { + total += entries_ticks + .iter() + .map(|e| e.0.transactions.len()) + .sum::(); } if total >= ref_tx_count { break; @@ -99,6 +108,8 @@ fn bench_consume_buffered(bencher: &mut Bencher) { &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), &mut LeaderSlotMetricsTracker::new(0), None, + &HashSet::default(), + &BundleAccountLocker::default(), ); }); @@ -235,6 +246,8 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) { None, Arc::new(ConnectionCache::default()), bank_forks, + HashSet::new(), + BundleAccountLocker::default(), ); poh_recorder.write().unwrap().set_bank(&bank, false); diff --git a/core/benches/cluster_info.rs b/core/benches/cluster_info.rs index 2c6df51cca..0002884cce 100644 --- a/core/benches/cluster_info.rs +++ b/core/benches/cluster_info.rs @@ -79,6 +79,7 @@ fn broadcast_shreds_bench(bencher: &mut Bencher) { &cluster_info, &bank_forks, &SocketAddrSpace::Unspecified, + &None, ) .unwrap(); }); diff --git a/core/benches/proto_to_packet.rs b/core/benches/proto_to_packet.rs new file mode 100644 index 0000000000..87f85f9c7f --- /dev/null +++ b/core/benches/proto_to_packet.rs @@ -0,0 +1,56 @@ +#![feature(test)] + +extern crate test; + +use { + jito_protos::proto::packet::{ + Meta as PbMeta, Packet as PbPacket, PacketBatch, PacketFlags as PbFlags, + }, + solana_core::proto_packet_to_packet, + solana_sdk::packet::{Packet, PACKET_DATA_SIZE}, + std::iter::repeat, + test::{black_box, Bencher}, +}; + +fn get_proto_packet(i: u8) -> PbPacket { + PbPacket { + data: repeat(i).take(PACKET_DATA_SIZE).collect(), + meta: Some(PbMeta { + size: PACKET_DATA_SIZE as u64, + addr: "255.255.255.255:65535".to_string(), + port: 65535, + flags: Some(PbFlags { + discard: false, + forwarded: false, + repair: false, + simple_vote_tx: false, + tracer_packet: false, + }), + sender_stake: 0, + }), + } +} + +#[bench] +fn bench_proto_to_packet(bencher: &mut Bencher) { + bencher.iter(|| { + black_box(proto_packet_to_packet(get_proto_packet(1))); + }); +} + +#[bench] +fn bench_batch_list_to_packets(bencher: &mut Bencher) { + let packet_batch = PacketBatch { + packets: (0..128).map(get_proto_packet).collect(), + }; + + bencher.iter(|| { + black_box( + packet_batch + .packets + .iter() + .map(|p| proto_packet_to_packet(p.clone())) + .collect::>(), + ); + }); +} diff --git a/core/benches/retransmit_stage.rs b/core/benches/retransmit_stage.rs index ff647525df..7fe1bf949e 100644 --- a/core/benches/retransmit_stage.rs +++ 
b/core/benches/retransmit_stage.rs @@ -121,6 +121,7 @@ fn bench_retransmitter(bencher: &mut Bencher) { shreds_receiver, Arc::default(), // solana_rpc::max_slots::MaxSlots None, + Arc::new(RwLock::new(None)), ); let mut index = 0; diff --git a/core/src/admin_rpc_post_init.rs b/core/src/admin_rpc_post_init.rs index 71d88f2b75..6fcb6783a8 100644 --- a/core/src/admin_rpc_post_init.rs +++ b/core/src/admin_rpc_post_init.rs @@ -1,8 +1,12 @@ use { + crate::proxy::{block_engine_stage::BlockEngineConfig, relayer_stage::RelayerConfig}, solana_gossip::cluster_info::ClusterInfo, solana_runtime::bank_forks::BankForks, solana_sdk::pubkey::Pubkey, - std::sync::{Arc, RwLock}, + std::{ + net::SocketAddr, + sync::{Arc, Mutex, RwLock}, + }, }; #[derive(Clone)] @@ -10,4 +14,7 @@ pub struct AdminRpcRequestMetadataPostInit { pub cluster_info: Arc, pub bank_forks: Arc>, pub vote_account: Pubkey, + pub block_engine_config: Arc>, + pub relayer_config: Arc>, + pub shred_receiver_address: Arc>>, } diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index fb865b7ac8..98f21333b2 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -4,6 +4,7 @@ use { crate::{ + bundle_account_locker::BundleAccountLocker, forward_packet_batches_by_accounts::ForwardPacketBatchesByAccounts, leader_slot_banking_stage_metrics::{LeaderSlotMetricsTracker, ProcessTransactionsSummary}, leader_slot_banking_stage_timing_metrics::{ @@ -412,6 +413,8 @@ impl BankingStage { log_messages_bytes_limit: Option, connection_cache: Arc, bank_forks: Arc>, + blacklisted_accounts: HashSet, + bundle_account_locker: BundleAccountLocker, ) -> Self { Self::new_num_threads( cluster_info, @@ -426,6 +429,8 @@ impl BankingStage { log_messages_bytes_limit, connection_cache, bank_forks, + blacklisted_accounts, + bundle_account_locker, ) } @@ -443,6 +448,8 @@ impl BankingStage { log_messages_bytes_limit: Option, connection_cache: Arc, bank_forks: Arc>, + blacklisted_accounts: HashSet, + bundle_account_locker: BundleAccountLocker, ) -> Self { assert!(num_threads >= MIN_TOTAL_THREADS); // Single thread to generate entries from many banks. 
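For orientation, the gist of the `banking_stage.rs` hunks above: `BankingStage::new` now threads a `HashSet` of blacklisted account keys and a shared `BundleAccountLocker` down to every worker thread. Below is a minimal, self-contained sketch of the packet-filtering rule this enables; the types here are simplified stand-ins (the real code operates on `solana_sdk::pubkey::Pubkey` inside `consume_scan_should_process_packet`, and additionally evicts a dropped packet from `message_hash_to_transaction`).

```rust
use std::collections::HashSet;

// Simplified stand-in for solana_sdk::pubkey::Pubkey.
type Pubkey = [u8; 32];

enum ProcessingDecision {
    Now,   // process in this pass
    Later, // requeue: write-conflicts with an account another thread holds
    Never, // drop: mentions a blacklisted account
}

// Sketch of the check the patch adds: defer on write-lock conflicts,
// drop outright on any blacklisted key (writable or not).
fn decide(
    static_keys: &[Pubkey],
    is_maybe_writable: impl Fn(usize) -> bool,
    write_accounts: &HashSet<Pubkey>,
    blacklisted_accounts: &HashSet<Pubkey>,
) -> ProcessingDecision {
    for (idx, key) in static_keys.iter().enumerate() {
        if is_maybe_writable(idx) && write_accounts.contains(key) {
            return ProcessingDecision::Later;
        }
        if blacklisted_accounts.contains(key) {
            return ProcessingDecision::Never;
        }
    }
    ProcessingDecision::Now
}

fn main() {
    let blacklist: HashSet<Pubkey> = [[1u8; 32]].into_iter().collect();
    let keys = [[1u8; 32], [2u8; 32]];
    match decide(&keys, |_| false, &HashSet::new(), &blacklist) {
        ProcessingDecision::Never => println!("dropped: blacklisted account"),
        _ => println!("processed"),
    }
}
```

Note that the benches and tests in this patch pass `HashSet::default()` and `BundleAccountLocker::default()`, which reproduces the old behavior: nothing is blacklisted and no bundle locks are held.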
@@ -475,6 +482,9 @@ impl BankingStage { let data_budget = data_budget.clone(); let cost_model = cost_model.clone(); let connection_cache = connection_cache.clone(); + let blacklisted_accounts = blacklisted_accounts.clone(); + let bundle_account_locker = bundle_account_locker.clone(); + let bank_forks = bank_forks.clone(); Builder::new() .name(format!("solBanknStgTx{:02}", i)) @@ -494,6 +504,8 @@ impl BankingStage { log_messages_bytes_limit, connection_cache, &bank_forks, + blacklisted_accounts, + bundle_account_locker, ); }) .unwrap() @@ -671,6 +683,8 @@ impl BankingStage { qos_service: &QosService, slot_metrics_tracker: &mut LeaderSlotMetricsTracker, log_messages_bytes_limit: Option, + blacklisted_accounts: &HashSet, + bundle_account_locker: &BundleAccountLocker, ) { let mut rebuffered_packet_count = 0; let mut consumed_buffered_packets_count = 0; @@ -719,6 +733,7 @@ impl BankingStage { banking_stage_stats, packet, payload, + blacklisted_accounts, ) }; @@ -755,7 +770,8 @@ impl BankingStage { banking_stage_stats, qos_service, payload.slot_metrics_tracker, - log_messages_bytes_limit + log_messages_bytes_limit, + bundle_account_locker )); payload .slot_metrics_tracker @@ -886,6 +902,7 @@ impl BankingStage { banking_stage_stats: &BankingStageStats, packet: &ImmutableDeserializedPacket, payload: &mut ConsumeScannerPayload, + blacklisted_accounts: &HashSet, ) -> ProcessingDecision { // If end of the slot, return should process (quick loop after reached end of slot) if payload.reached_end_of_slot { @@ -895,16 +912,18 @@ impl BankingStage { // Before sanitization, let's quickly check the static keys (performance optimization) let message = &packet.transaction().get_message().message; let static_keys = message.static_account_keys(); - for key in static_keys.iter().enumerate().filter_map(|(idx, key)| { - if message.is_maybe_writable(idx) { - Some(key) - } else { - None - } - }) { - if payload.write_accounts.contains(key) { + for (idx, key) in static_keys.iter().enumerate() { + if message.is_maybe_writable(idx) && payload.write_accounts.contains(key) { return ProcessingDecision::Later; } + // throw away transactions that mention blacklisted accounts + if blacklisted_accounts.contains(key) { + payload + .buffered_packet_batches + .message_hash_to_transaction + .remove(packet.message_hash()); + return ProcessingDecision::Never; + } } // Try to deserialize the packet @@ -1015,6 +1034,8 @@ impl BankingStage { connection_cache: &ConnectionCache, tracer_packet_stats: &mut TracerPacketStats, bank_forks: &Arc>, + blacklisted_accounts: &HashSet, + bundle_account_locker: &BundleAccountLocker, ) { let ((metrics_action, decision), make_decision_time) = measure!( { @@ -1073,7 +1094,9 @@ impl BankingStage { recorder, qos_service, slot_metrics_tracker, - log_messages_bytes_limit + log_messages_bytes_limit, + blacklisted_accounts, + bundle_account_locker ), "consume_buffered_packets", ); @@ -1233,6 +1256,8 @@ impl BankingStage { log_messages_bytes_limit: Option, connection_cache: Arc, bank_forks: &Arc>, + blacklisted_accounts: HashSet, + bundle_account_locker: BundleAccountLocker, ) { let recorder = poh_recorder.read().unwrap().recorder(); let socket = UdpSocket::bind("0.0.0.0:0").unwrap(); @@ -1268,6 +1293,8 @@ impl BankingStage { &connection_cache, &mut tracer_packet_stats, bank_forks, + &blacklisted_accounts, + &bundle_account_locker, ), "process_buffered_packets", ); @@ -1339,8 +1366,10 @@ impl BankingStage { let (hash, hash_time) = measure!(hash_transactions(&transactions), "hash"); 
record_transactions_timings.hash_us = hash_time.as_us(); - let (res, poh_record_time) = - measure!(recorder.record(bank_slot, hash, transactions), "hash"); + let (res, poh_record_time) = measure!( + recorder.record(bank_slot, vec![(hash, transactions)]), + "hash" + ); record_transactions_timings.poh_record_us = poh_record_time.as_us(); match res { @@ -1394,7 +1423,7 @@ impl BankingStage { }; let pre_token_balances = if transaction_status_sender.is_some() { - collect_token_balances(bank, batch, &mut mint_decimals) + collect_token_balances(bank, batch, &mut mint_decimals, None) } else { vec![] }; @@ -1539,7 +1568,7 @@ impl BankingStage { let txs = batch.sanitized_transactions().to_vec(); let post_balances = bank.collect_balances(batch); let post_token_balances = - collect_token_balances(bank, batch, &mut mint_decimals); + collect_token_balances(bank, batch, &mut mint_decimals, None); let mut transaction_index = starting_transaction_index.unwrap_or_default(); let batch_transaction_indexes: Vec<_> = tx_results .execution_results @@ -1620,13 +1649,18 @@ impl BankingStage { gossip_vote_sender: &ReplayVoteSender, qos_service: &QosService, log_messages_bytes_limit: Option, + bundle_account_locker: &BundleAccountLocker, ) -> ProcessTransactionBatchOutput { let mut cost_model_time = Measure::start("cost_model"); let transaction_costs = qos_service.compute_transaction_costs(txs.iter()); - let (transactions_qos_results, num_included) = - qos_service.select_transactions_per_cost(txs.iter(), transaction_costs.iter(), bank); + let (transactions_qos_results, num_included) = qos_service.select_transactions_per_cost( + txs.iter(), + transaction_costs.iter(), + bank.slot(), + &mut bank.write_cost_tracker().unwrap(), + ); let cost_model_throttled_transactions_count = txs.len().saturating_sub(num_included); @@ -1640,9 +1674,20 @@ impl BankingStage { // Only lock accounts for those transactions are selected for the block; // Once accounts are locked, other threads cannot encode transactions that will modify the - // same account state + // same account state. 
let mut lock_time = Measure::start("lock_time"); - let batch = bank.prepare_sanitized_batch_with_results(txs, transactions_qos_results.iter()); + + let batch = { + // BundleStage locks ALL accounts in ALL transactions in a bundle to avoid race + // conditions with BankingStage + let account_locks = bundle_account_locker.account_locks(); + bank.prepare_sanitized_batch_with_results( + txs, + transactions_qos_results.iter(), + &account_locks.read_locks(), + &account_locks.write_locks(), + ) + }; lock_time.stop(); // retryable_txs includes AccountInUse, WouldExceedMaxBlockCostLimit @@ -1813,6 +1858,7 @@ impl BankingStage { gossip_vote_sender: &ReplayVoteSender, qos_service: &QosService, log_messages_bytes_limit: Option, + bundle_account_locker: &BundleAccountLocker, ) -> ProcessTransactionsSummary { let mut chunk_start = 0; let mut all_retryable_tx_indexes = vec![]; @@ -1845,6 +1891,7 @@ impl BankingStage { gossip_vote_sender, qos_service, log_messages_bytes_limit, + bundle_account_locker, ); let ProcessTransactionBatchOutput { @@ -2025,6 +2072,7 @@ impl BankingStage { qos_service: &'a QosService, slot_metrics_tracker: &'a mut LeaderSlotMetricsTracker, log_messages_bytes_limit: Option, + bundle_account_locker: &BundleAccountLocker, ) -> ProcessTransactionsSummary { // Process transactions let (mut process_transactions_summary, process_transactions_time) = measure!( @@ -2037,6 +2085,7 @@ impl BankingStage { gossip_vote_sender, qos_service, log_messages_bytes_limit, + bundle_account_locker ), "process_transaction_time", ); @@ -2309,7 +2358,7 @@ mod tests { super::*, crossbeam_channel::{unbounded, Receiver}, solana_address_lookup_table_program::state::{AddressLookupTable, LookupTableMeta}, - solana_entry::entry::{next_entry, next_versioned_entry, Entry, EntrySlice}, + solana_entry::entry::{next_entry, next_versioned_entry, EntrySlice}, solana_gossip::cluster_info::Node, solana_ledger::{ blockstore::{entries_to_test_shreds, Blockstore}, @@ -2378,6 +2427,8 @@ mod tests { let cluster_info = Arc::new(cluster_info); let (gossip_vote_sender, _gossip_vote_receiver) = unbounded(); + let bundle_locker = BundleAccountLocker::default(); + let banking_stage = BankingStage::new( &cluster_info, &poh_recorder, @@ -2390,6 +2441,8 @@ mod tests { None, Arc::new(ConnectionCache::default()), bank_forks, + HashSet::default(), + bundle_locker, ); drop(verified_sender); drop(gossip_verified_vote_sender); @@ -2404,6 +2457,7 @@ mod tests { #[test] fn test_banking_stage_tick() { solana_logger::setup(); + let GenesisConfigInfo { mut genesis_config, .. 
} = create_genesis_config(2); @@ -2432,6 +2486,8 @@ mod tests { let (verified_gossip_vote_sender, verified_gossip_vote_receiver) = unbounded(); let (gossip_vote_sender, _gossip_vote_receiver) = unbounded(); + let bundle_locker = BundleAccountLocker::default(); + let banking_stage = BankingStage::new( &cluster_info, &poh_recorder, @@ -2444,6 +2500,8 @@ mod tests { None, Arc::new(ConnectionCache::default()), bank_forks, + HashSet::default(), + bundle_locker, ); trace!("sending bank"); drop(verified_sender); @@ -2456,7 +2514,12 @@ mod tests { trace!("getting entries"); let entries: Vec<_> = entry_receiver .iter() - .map(|(_bank, (entry, _tick_height))| entry) + .flat_map( + |WorkingBankEntry { + bank: _, + entries_ticks, + }| entries_ticks.into_iter().map(|e| e.0), + ) .collect(); trace!("done"); assert_eq!(entries.len(), genesis_config.ticks_per_slot as usize); @@ -2481,6 +2544,7 @@ mod tests { #[test] fn test_banking_stage_entries_only() { solana_logger::setup(); + let GenesisConfigInfo { genesis_config, mint_keypair, @@ -2511,6 +2575,7 @@ mod tests { let cluster_info = Arc::new(cluster_info); let (gossip_vote_sender, _gossip_vote_receiver) = unbounded(); + let bundle_locker = BundleAccountLocker::default(); let banking_stage = BankingStage::new( &cluster_info, &poh_recorder, @@ -2523,6 +2588,8 @@ mod tests { None, Arc::new(ConnectionCache::default()), bank_forks, + HashSet::default(), + bundle_locker, ); // fund another account so we can send 2 good transactions in a single batch. @@ -2574,9 +2641,14 @@ mod tests { bank.process_transaction(&fund_tx).unwrap(); //receive entries + ticks loop { - let entries: Vec = entry_receiver + let entries: Vec<_> = entry_receiver .iter() - .map(|(_bank, (entry, _tick_height))| entry) + .flat_map( + |WorkingBankEntry { + bank: _, + entries_ticks, + }| entries_ticks.into_iter().map(|e| e.0), + ) .collect(); assert!(entries.verify(&blockhash)); @@ -2607,6 +2679,7 @@ mod tests { #[test] fn test_banking_stage_entryfication() { solana_logger::setup(); + // In this attack we'll demonstrate that a verifier can interpret the ledger // differently if either the server doesn't signal the ledger to add an // Entry OR if the verifier tries to parallelize across multiple Entries. @@ -2666,6 +2739,9 @@ mod tests { create_test_recorder(&bank, &blockstore, Some(poh_config), None); let cluster_info = new_test_cluster_info(Node::new_localhost().info); let cluster_info = Arc::new(cluster_info); + + let bundle_locker = BundleAccountLocker::default(); + let _banking_stage = BankingStage::new_num_threads( &cluster_info, &poh_recorder, @@ -2679,6 +2755,8 @@ mod tests { None, Arc::new(ConnectionCache::default()), bank_forks, + HashSet::default(), + bundle_locker, ); // wait for banking_stage to eat the packets @@ -2697,7 +2775,12 @@ mod tests { // check that the balance is what we expect. 
let entries: Vec<_> = entry_receiver .iter() - .map(|(_bank, (entry, _tick_height))| entry) + .flat_map( + |WorkingBankEntry { + bank: _, + entries_ticks, + }| entries_ticks.into_iter().map(|e| e.0), + ) .collect(); let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); @@ -2765,7 +2848,12 @@ mod tests { ]; let _ = BankingStage::record_transactions(bank.slot(), txs.clone(), &recorder); - let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap(); + let WorkingBankEntry { + bank: _, + entries_ticks, + } = entry_receiver.recv().unwrap(); + assert_eq!(entries_ticks.len(), 1); + let entry = entries_ticks.get(0).unwrap().0.clone(); assert_eq!(entry.transactions, txs); // Once bank is set to a new bank (setting bank.slot() + 1 in record_transactions), @@ -2797,7 +2885,7 @@ mod tests { Ok(()), Err(TransactionError::BlockhashNotFound), Ok(()), - Ok(()) + Ok(()), ] ); @@ -2933,6 +3021,7 @@ mod tests { #[test] fn test_bank_process_and_record_transactions() { solana_logger::setup(); + let GenesisConfigInfo { genesis_config, mint_keypair, @@ -2972,6 +3061,8 @@ mod tests { poh_recorder.write().unwrap().set_bank(&bank, false); let (gossip_vote_sender, _gossip_vote_receiver) = unbounded(); + let bundle_locker = BundleAccountLocker::default(); + let process_transactions_batch_output = BankingStage::process_and_record_transactions( &bank, &transactions, @@ -2981,6 +3072,7 @@ mod tests { &gossip_vote_sender, &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), None, + &bundle_locker, ); let ExecuteAndCommitTransactionsOutput { @@ -3003,7 +3095,13 @@ mod tests { let mut done = false; // read entries until I find mine, might be ticks... - while let Ok((_bank, (entry, _tick_height))) = entry_receiver.recv() { + while let Ok(WorkingBankEntry { + bank: _, + entries_ticks, + }) = entry_receiver.recv() + { + assert_eq!(entries_ticks.len(), 1); + let entry = entries_ticks.get(0).unwrap().0.clone(); if !entry.is_tick() { trace!("got entry"); assert_eq!(entry.transactions.len(), transactions.len()); @@ -3025,6 +3123,8 @@ mod tests { genesis_config.hash(), )]); + let bundle_locker = BundleAccountLocker::default(); + let process_transactions_batch_output = BankingStage::process_and_record_transactions( &bank, &transactions, @@ -3034,6 +3134,7 @@ mod tests { &gossip_vote_sender, &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), None, + &bundle_locker, ); let ExecuteAndCommitTransactionsOutput { @@ -3069,6 +3170,7 @@ mod tests { #[test] fn test_bank_process_and_record_transactions_all_unexecuted() { solana_logger::setup(); + let GenesisConfigInfo { genesis_config, mint_keypair, @@ -3109,6 +3211,8 @@ mod tests { poh_recorder.write().unwrap().set_bank(&bank, false); let (gossip_vote_sender, _gossip_vote_receiver) = unbounded(); + let bundle_locker = BundleAccountLocker::default(); + let process_transactions_batch_output = BankingStage::process_and_record_transactions( &bank, &transactions, @@ -3118,6 +3222,7 @@ mod tests { &gossip_vote_sender, &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), None, + &bundle_locker, ); let ExecuteAndCommitTransactionsOutput { @@ -3151,6 +3256,7 @@ mod tests { #[test] fn test_bank_process_and_record_transactions_cost_tracker() { solana_logger::setup(); + let GenesisConfigInfo { genesis_config, mint_keypair, @@ -3201,6 +3307,8 @@ mod tests { genesis_config.hash(), )]); + let bundle_locker = BundleAccountLocker::default(); + let process_transactions_batch_output = BankingStage::process_and_record_transactions( &bank, 
&transactions, @@ -3210,6 +3318,7 @@ mod tests { &gossip_vote_sender, &qos_service, None, + &bundle_locker, ); let ExecuteAndCommitTransactionsOutput { @@ -3250,6 +3359,7 @@ mod tests { &gossip_vote_sender, &qos_service, None, + &bundle_locker, ); let ExecuteAndCommitTransactionsOutput { @@ -3299,6 +3409,7 @@ mod tests { #[test] fn test_bank_process_and_record_transactions_account_in_use() { solana_logger::setup(); + let GenesisConfigInfo { genesis_config, mint_keypair, @@ -3337,6 +3448,7 @@ mod tests { let poh_simulator = simulate_poh(record_receiver, &poh_recorder); let (gossip_vote_sender, _gossip_vote_receiver) = unbounded(); + let bundle_locker = BundleAccountLocker::default(); let process_transactions_batch_output = BankingStage::process_and_record_transactions( &bank, @@ -3347,6 +3459,7 @@ mod tests { &gossip_vote_sender, &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), None, + &bundle_locker, ); poh_recorder @@ -3463,6 +3576,7 @@ mod tests { #[test] fn test_process_transactions_returns_unprocessed_txs() { solana_logger::setup(); + let GenesisConfigInfo { genesis_config, mint_keypair, @@ -3504,6 +3618,8 @@ mod tests { let (gossip_vote_sender, _gossip_vote_receiver) = unbounded(); + let bundle_locker = BundleAccountLocker::default(); + let process_transactions_summary = BankingStage::process_transactions( &bank, &Instant::now(), @@ -3513,6 +3629,7 @@ mod tests { &gossip_vote_sender, &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), None, + &bundle_locker, ); let ProcessTransactionsSummary { @@ -3571,6 +3688,8 @@ mod tests { let (gossip_vote_sender, _gossip_vote_receiver) = unbounded(); + let bundle_locker = BundleAccountLocker::default(); + let process_transactions_summary = BankingStage::process_transactions( &bank, &Instant::now(), @@ -3580,6 +3699,7 @@ mod tests { &gossip_vote_sender, &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), None, + &bundle_locker, ); poh_recorder @@ -3653,6 +3773,7 @@ mod tests { (1..transactions_count - 1).collect::>() ); } + #[test] fn test_process_transactions_account_in_use() { solana_logger::setup(); @@ -3673,7 +3794,7 @@ mod tests { &mint_keypair, &Pubkey::new_unique(), 1, - genesis_config.hash() + genesis_config.hash(), ); MAX_NUM_TRANSACTIONS_PER_BATCH ]; @@ -3715,6 +3836,7 @@ mod tests { #[test] fn test_write_persist_transaction_status() { solana_logger::setup(); + let GenesisConfigInfo { mut genesis_config, mint_keypair, @@ -3799,6 +3921,8 @@ mod tests { let (gossip_vote_sender, _gossip_vote_receiver) = unbounded(); + let bundle_locker = BundleAccountLocker::default(); + let _ = BankingStage::process_and_record_transactions( &bank, &transactions, @@ -3810,6 +3934,7 @@ mod tests { &gossip_vote_sender, &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), None, + &bundle_locker, ); transaction_status_service.join().unwrap(); @@ -3876,6 +4001,7 @@ mod tests { #[test] fn test_write_persist_loaded_addresses() { solana_logger::setup(); + let GenesisConfigInfo { genesis_config, mint_keypair, @@ -3968,6 +4094,8 @@ mod tests { let (gossip_vote_sender, _gossip_vote_receiver) = unbounded(); + let bundle_locker = BundleAccountLocker::default(); + let _ = BankingStage::process_and_record_transactions( &bank, &[sanitized_tx.clone()], @@ -3979,6 +4107,7 @@ mod tests { &gossip_vote_sender, &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), None, + &bundle_locker, ); transaction_status_service.join().unwrap(); @@ -4086,6 +4215,8 @@ mod tests { let (gossip_vote_sender, 
_gossip_vote_receiver) = unbounded(); + let bundle_locker = BundleAccountLocker::default(); + // When the working bank in poh_recorder is None, no packets should be processed assert!(!poh_recorder.read().unwrap().has_bank()); let max_tx_processing_ns = std::u128::MAX; @@ -4102,6 +4233,8 @@ mod tests { &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), &mut LeaderSlotMetricsTracker::new(0), None, + &HashSet::default(), + &bundle_locker, ); assert_eq!(buffered_packet_batches.len(), num_conflicting_transactions); // When the working bank in poh_recorder is Some, all packets should be processed. @@ -4120,6 +4253,8 @@ mod tests { &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), &mut LeaderSlotMetricsTracker::new(0), None, + &HashSet::default(), + &bundle_locker, ); assert!(buffered_packet_batches.is_empty()); poh_recorder @@ -4167,6 +4302,8 @@ mod tests { deserialized_packets.into_iter(), num_conflicting_transactions, ); + let bundle_locker = BundleAccountLocker::default(); + BankingStage::consume_buffered_packets( &Pubkey::default(), std::u128::MAX, @@ -4180,6 +4317,8 @@ mod tests { &QosService::new(Arc::new(RwLock::new(CostModel::default())), 1), &mut LeaderSlotMetricsTracker::new(0), None, + &HashSet::default(), + &bundle_locker, ); // Check everything is correct. All valid packets should be processed. @@ -4572,4 +4711,7 @@ mod tests { BankingStage::filter_processed_packets(retryable_indexes.iter(), f); assert_eq!(non_retryable_indexes, vec![(0, 1), (4, 5), (6, 8)]); } + + // TODO (LB): test that banking stage doesn't process packets that contain accounts + // in BundleAccountLocker } diff --git a/core/src/broadcast_stage.rs b/core/src/broadcast_stage.rs index 3609569b7c..b966db1ce0 100644 --- a/core/src/broadcast_stage.rs +++ b/core/src/broadcast_stage.rs @@ -35,7 +35,7 @@ use { }, std::{ collections::{HashMap, HashSet}, - net::UdpSocket, + net::{SocketAddr, UdpSocket}, sync::{ atomic::{AtomicBool, Ordering}, Arc, Mutex, RwLock, @@ -86,6 +86,7 @@ impl BroadcastStageType { blockstore: Arc, bank_forks: Arc>, shred_version: u16, + shred_receiver_address: Arc>>, ) -> BroadcastStage { match self { BroadcastStageType::Standard => BroadcastStage::new( @@ -97,6 +98,7 @@ impl BroadcastStageType { blockstore, bank_forks, StandardBroadcastRun::new(shred_version), + shred_receiver_address, ), BroadcastStageType::FailEntryVerification => BroadcastStage::new( @@ -108,6 +110,7 @@ impl BroadcastStageType { blockstore, bank_forks, FailEntryVerificationBroadcastRun::new(shred_version), + Arc::new(RwLock::new(None)), ), BroadcastStageType::BroadcastFakeShreds => BroadcastStage::new( @@ -119,6 +122,7 @@ impl BroadcastStageType { blockstore, bank_forks, BroadcastFakeShredsRun::new(0, shred_version), + Arc::new(RwLock::new(None)), ), BroadcastStageType::BroadcastDuplicates(config) => BroadcastStage::new( @@ -130,6 +134,7 @@ impl BroadcastStageType { blockstore, bank_forks, BroadcastDuplicatesRun::new(shred_version, config.clone()), + Arc::new(RwLock::new(None)), ), } } @@ -150,6 +155,7 @@ trait BroadcastRun { cluster_info: &ClusterInfo, sock: &UdpSocket, bank_forks: &RwLock, + shred_receiver_address: &Arc>>, ) -> Result<()>; fn record(&mut self, receiver: &Mutex, blockstore: &Blockstore) -> Result<()>; } @@ -245,6 +251,7 @@ impl BroadcastStage { blockstore: Arc, bank_forks: Arc>, broadcast_stage_run: impl BroadcastRun + Send + 'static + Clone, + shred_receiver_address: Arc>>, ) -> Self { let (socket_sender, socket_receiver) = unbounded(); let (blockstore_sender, 
blockstore_receiver) = unbounded(); @@ -276,11 +283,17 @@ impl BroadcastStage { let mut bs_transmit = broadcast_stage_run.clone(); let cluster_info = cluster_info.clone(); let bank_forks = bank_forks.clone(); + let shred_receiver_address = shred_receiver_address.clone(); let t = Builder::new() .name("solBroadcastTx".to_string()) .spawn(move || loop { - let res = - bs_transmit.transmit(&socket_receiver, &cluster_info, &sock, &bank_forks); + let res = bs_transmit.transmit( + &socket_receiver, + &cluster_info, + &sock, + &bank_forks, + &shred_receiver_address, + ); let res = Self::handle_error(res, "solana-broadcaster-transmit"); if let Some(res) = res { return res; @@ -396,6 +409,7 @@ pub fn broadcast_shreds( cluster_info: &ClusterInfo, bank_forks: &RwLock, socket_addr_space: &SocketAddrSpace, + shred_receiver_address: &Option, ) -> Result<()> { let mut result = Ok(()); let mut shred_select = Measure::start("shred_select"); @@ -405,18 +419,23 @@ pub fn broadcast_shreds( }; let packets: Vec<_> = shreds .iter() - .group_by(|shred| shred.slot()) - .into_iter() - .flat_map(|(slot, shreds)| { - let cluster_nodes = - cluster_nodes_cache.get(slot, &root_bank, &working_bank, cluster_info); - update_peer_stats(&cluster_nodes, last_datapoint_submit); - shreds.flat_map(move |shred| { - let node = cluster_nodes.get_broadcast_peer(&shred.id())?; - ContactInfo::is_valid_address(&node.tvu, socket_addr_space) - .then(|| (shred.payload(), node.tvu)) - }) - }) + .filter_map(|s| Some((s.payload(), (*shred_receiver_address)?))) + .chain( + shreds + .iter() + .group_by(|shred| shred.slot()) + .into_iter() + .flat_map(|(slot, shreds)| { + let cluster_nodes = + cluster_nodes_cache.get(slot, &root_bank, &working_bank, cluster_info); + update_peer_stats(&cluster_nodes, last_datapoint_submit); + shreds.flat_map(move |shred| { + let node = cluster_nodes.get_broadcast_peer(&shred.id())?; + ContactInfo::is_valid_address(&node.tvu, socket_addr_space) + .then(|| (shred.payload(), node.tvu)) + }) + }), + ) .collect(); shred_select.stop(); transmit_stats.shred_select += shred_select.as_us(); @@ -617,6 +636,7 @@ pub mod test { blockstore.clone(), bank_forks, StandardBroadcastRun::new(0), + Arc::new(RwLock::new(None)), ); MockBroadcastStage { @@ -656,7 +676,10 @@ pub mod test { let ticks = create_ticks(max_tick_height - start_tick_height, 0, Hash::default()); for (i, tick) in ticks.into_iter().enumerate() { entry_sender - .send((bank.clone(), (tick, i as u64 + 1))) + .send(WorkingBankEntry { + bank: bank.clone(), + entries_ticks: vec![(tick, i as u64 + 1)], + }) .expect("Expect successful send to broadcast service"); } } diff --git a/core/src/broadcast_stage/broadcast_duplicates_run.rs b/core/src/broadcast_stage/broadcast_duplicates_run.rs index 29f901c3ad..c23efff3ca 100644 --- a/core/src/broadcast_stage/broadcast_duplicates_run.rs +++ b/core/src/broadcast_stage/broadcast_duplicates_run.rs @@ -266,6 +266,7 @@ impl BroadcastRun for BroadcastDuplicatesRun { cluster_info: &ClusterInfo, sock: &UdpSocket, bank_forks: &RwLock, + _shred_receiver_addr: &Arc>>, ) -> Result<()> { let (shreds, _) = receiver.lock().unwrap().recv()?; if shreds.is_empty() { diff --git a/core/src/broadcast_stage/broadcast_fake_shreds_run.rs b/core/src/broadcast_stage/broadcast_fake_shreds_run.rs index f035abf13b..c5c611a1eb 100644 --- a/core/src/broadcast_stage/broadcast_fake_shreds_run.rs +++ b/core/src/broadcast_stage/broadcast_fake_shreds_run.rs @@ -132,6 +132,7 @@ impl BroadcastRun for BroadcastFakeShredsRun { cluster_info: &ClusterInfo, sock: 
&UdpSocket, _bank_forks: &RwLock, + _shred_receiver_addr: &Arc>>, ) -> Result<()> { for (data_shreds, batch_info) in receiver.lock().unwrap().iter() { let fake = batch_info.is_some(); diff --git a/core/src/broadcast_stage/broadcast_utils.rs b/core/src/broadcast_stage/broadcast_utils.rs index f9485d59a9..6150bf4fec 100644 --- a/core/src/broadcast_stage/broadcast_utils.rs +++ b/core/src/broadcast_stage/broadcast_utils.rs @@ -36,13 +36,22 @@ pub(super) fn recv_slot_entries(receiver: &Receiver) -> Result 32 * ShredData::capacity(/*merkle_proof_size*/ None).unwrap() as u64; let timer = Duration::new(1, 0); let recv_start = Instant::now(); - let (mut bank, (entry, mut last_tick_height)) = receiver.recv_timeout(timer)?; - let mut entries = vec![entry]; + + let WorkingBankEntry { + mut bank, + entries_ticks, + } = receiver.recv_timeout(timer)?; + let mut last_tick_height = entries_ticks.iter().last().unwrap().1; + let mut entries: Vec = entries_ticks.into_iter().map(|(e, _)| e).collect(); + assert!(last_tick_height <= bank.max_tick_height()); // Drain channel while last_tick_height != bank.max_tick_height() { - let (try_bank, (entry, tick_height)) = match receiver.try_recv() { + let WorkingBankEntry { + bank: try_bank, + entries_ticks: new_entries_ticks, + } = match receiver.try_recv() { Ok(working_bank_entry) => working_bank_entry, Err(_) => break, }; @@ -53,8 +62,8 @@ pub(super) fn recv_slot_entries(receiver: &Receiver) -> Result entries.clear(); bank = try_bank; } - last_tick_height = tick_height; - entries.push(entry); + last_tick_height = new_entries_ticks.iter().last().unwrap().1; + entries.extend(new_entries_ticks.into_iter().map(|(entry, _)| entry)); assert!(last_tick_height <= bank.max_tick_height()); } @@ -65,11 +74,13 @@ pub(super) fn recv_slot_entries(receiver: &Receiver) -> Result while last_tick_height != bank.max_tick_height() && serialized_batch_byte_count < target_serialized_batch_byte_count { - let (try_bank, (entry, tick_height)) = - match receiver.recv_deadline(coalesce_start + ENTRY_COALESCE_DURATION) { - Ok(working_bank_entry) => working_bank_entry, - Err(_) => break, - }; + let WorkingBankEntry { + bank: try_bank, + entries_ticks: new_entries_ticks, + } = match receiver.recv_deadline(coalesce_start + ENTRY_COALESCE_DURATION) { + Ok(working_bank_entry) => working_bank_entry, + Err(_) => break, + }; // If the bank changed, that implies the previous slot was interrupted and we do not have to // broadcast its entries. 
if try_bank.slot() != bank.slot() { @@ -79,10 +90,12 @@ pub(super) fn recv_slot_entries(receiver: &Receiver) -> Result bank = try_bank; coalesce_start = Instant::now(); } - last_tick_height = tick_height; - let entry_bytes = serialized_size(&entry)?; - serialized_batch_byte_count += entry_bytes; - entries.push(entry); + last_tick_height = new_entries_ticks.iter().last().unwrap().1; + + for (entry, _) in &new_entries_ticks { + serialized_batch_byte_count += serialized_size(entry)?; + } + entries.extend(new_entries_ticks.into_iter().map(|(entry, _)| entry)); assert!(last_tick_height <= bank.max_tick_height()); } let time_coalesced = coalesce_start.elapsed(); @@ -139,7 +152,11 @@ mod tests { .map(|i| { let entry = Entry::new(&last_hash, 1, vec![tx.clone()]); last_hash = entry.hash; - s.send((bank1.clone(), (entry.clone(), i))).unwrap(); + s.send(WorkingBankEntry { + bank: bank1.clone(), + entries_ticks: vec![(entry.clone(), i)], + }) + .unwrap(); entry }) .collect(); @@ -173,11 +190,18 @@ mod tests { last_hash = entry.hash; // Interrupt slot 1 right before the last tick if tick_height == expected_last_height { - s.send((bank2.clone(), (entry.clone(), tick_height))) - .unwrap(); + s.send(WorkingBankEntry { + bank: bank2.clone(), + entries_ticks: vec![(entry.clone(), tick_height)], + }) + .unwrap(); Some(entry) } else { - s.send((bank1.clone(), (entry, tick_height))).unwrap(); + s.send(WorkingBankEntry { + bank: bank1.clone(), + entries_ticks: vec![(entry, tick_height)], + }) + .unwrap(); None } }) diff --git a/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs b/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs index e7b899ab0f..61ea979402 100644 --- a/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs +++ b/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs @@ -3,7 +3,7 @@ use { crate::cluster_nodes::ClusterNodesCache, solana_ledger::shred::{ProcessShredsStats, ReedSolomonCache, Shredder}, solana_sdk::{hash::Hash, signature::Keypair}, - std::{thread::sleep, time::Duration}, + std::{net::SocketAddr, thread::sleep, time::Duration}, }; pub const NUM_BAD_SLOTS: u64 = 10; @@ -162,6 +162,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { cluster_info: &ClusterInfo, sock: &UdpSocket, bank_forks: &RwLock, + shred_receiver_address: &Arc>>, ) -> Result<()> { let (shreds, _) = receiver.lock().unwrap().recv()?; broadcast_shreds( @@ -173,6 +174,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { cluster_info, bank_forks, cluster_info.socket_addr_space(), + &shred_receiver_address.read().unwrap(), ) } fn record(&mut self, receiver: &Mutex, blockstore: &Blockstore) -> Result<()> { diff --git a/core/src/broadcast_stage/standard_broadcast_run.rs b/core/src/broadcast_stage/standard_broadcast_run.rs index fceaa86d8b..d680bedf74 100644 --- a/core/src/broadcast_stage/standard_broadcast_run.rs +++ b/core/src/broadcast_stage/standard_broadcast_run.rs @@ -18,7 +18,7 @@ use { signature::Keypair, timing::{duration_as_us, AtomicInterval}, }, - std::{sync::RwLock, time::Duration}, + std::{net::SocketAddr, sync::RwLock, time::Duration}, }; #[derive(Clone)] @@ -200,10 +200,22 @@ impl StandardBroadcastRun { let brecv = Arc::new(Mutex::new(brecv)); //data - let _ = self.transmit(&srecv, cluster_info, sock, bank_forks); + let _ = self.transmit( + &srecv, + cluster_info, + sock, + bank_forks, + &Arc::new(RwLock::new(None)), + ); let _ = self.record(&brecv, blockstore); //coding - let _ = self.transmit(&srecv, cluster_info, sock, bank_forks); + 
let _ = self.transmit( + &srecv, + cluster_info, + sock, + bank_forks, + &Arc::new(RwLock::new(None)), + ); let _ = self.record(&brecv, blockstore); Ok(()) } @@ -402,6 +414,7 @@ impl StandardBroadcastRun { shreds: Arc>, broadcast_shred_batch_info: Option, bank_forks: &RwLock, + shred_receiver_addr: &Option, ) -> Result<()> { trace!("Broadcasting {:?} shreds", shreds.len()); let mut transmit_stats = TransmitShredsStats::default(); @@ -417,6 +430,7 @@ impl StandardBroadcastRun { cluster_info, bank_forks, cluster_info.socket_addr_space(), + shred_receiver_addr, )?; transmit_time.stop(); @@ -486,9 +500,17 @@ impl BroadcastRun for StandardBroadcastRun { cluster_info: &ClusterInfo, sock: &UdpSocket, bank_forks: &RwLock, + shred_receiver_address: &Arc>>, ) -> Result<()> { let (shreds, batch_info) = receiver.lock().unwrap().recv()?; - self.broadcast(sock, cluster_info, shreds, batch_info, bank_forks) + self.broadcast( + sock, + cluster_info, + shreds, + batch_info, + bank_forks, + &shred_receiver_address.read().unwrap(), + ) } fn record(&mut self, receiver: &Mutex, blockstore: &Blockstore) -> Result<()> { let (shreds, slot_start_ts) = receiver.lock().unwrap().recv()?; diff --git a/core/src/bundle_account_locker.rs b/core/src/bundle_account_locker.rs new file mode 100644 index 0000000000..166ca5dd4c --- /dev/null +++ b/core/src/bundle_account_locker.rs @@ -0,0 +1,333 @@ +//! Handles pre-locking bundle accounts so that the accounts a bundle touches can be reserved ahead +//! of time for execution. Also ensures that ALL accounts mentioned across a bundle are locked +//! to avoid race conditions between BundleStage and BankingStage. +//! +//! For instance, imagine a bundle with three transactions where the set of accounts for each transaction +//! is: {{A, B}, {B, C}, {C, D}}. We need to lock A, B, C, and D even though only one transaction is executed at a time. +//! Imagine BundleStage is in the middle of processing {C, D} and we didn't have a lock on accounts {A, B, C}. +//! In this situation, there's a chance that BankingStage can process a transaction containing A or B +//! and commit the results before the bundle completes. By the time the bundle commits the new account +//! state for {A, B, C}, A and B would be incorrect and the entries containing the bundle would be +//! replayed improperly and that leader would have produced an invalid block.
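The invariant described in that comment is, at its core, reference counting over the union of a bundle's accounts. The sketch below restates the idea outside the validator; the names (RefCountedLocks, lock_all, unlock_all) are illustrative stand-ins, not the patch's API:

    use std::collections::HashMap;

    /// Counts how many in-flight bundles hold each account. Mirrors the
    /// union-of-accounts idea above: every account a bundle mentions stays
    /// locked until the whole bundle commits.
    #[derive(Default)]
    struct RefCountedLocks {
        held: HashMap<&'static str, u64>,
    }

    impl RefCountedLocks {
        fn lock_all(&mut self, accounts: &[&'static str]) {
            for &acc in accounts {
                *self.held.entry(acc).or_insert(0) += 1;
            }
        }
        fn unlock_all(&mut self, accounts: &[&'static str]) {
            for &acc in accounts {
                if let Some(n) = self.held.get_mut(acc) {
                    *n = n.saturating_sub(1);
                    if *n == 0 {
                        self.held.remove(acc);
                    }
                }
            }
        }
        fn is_held(&self, acc: &str) -> bool {
            self.held.contains_key(acc)
        }
    }

    fn main() {
        // Bundle {{A, B}, {B, C}, {C, D}}: the union {A, B, C, D} is taken up
        // front, so a concurrent stage must treat all four accounts as in use
        // even while only {C, D} is executing.
        let mut locks = RefCountedLocks::default();
        locks.lock_all(&["A", "B", "C", "D"]);
        assert!(locks.is_held("A") && locks.is_held("D"));
        locks.unlock_all(&["A", "B", "C", "D"]); // Drop does this in LockedBundle
        assert!(!locks.is_held("A"));
    }

Counting rather than set membership is what lets two overlapping bundles be locked at once: a shared account's count rises to two and falls back down as each LockedBundle drops.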
+use { + solana_runtime::bank::Bank, + solana_sdk::{ + bundle::sanitized::SanitizedBundle, pubkey::Pubkey, transaction::TransactionAccountLocks, + }, + std::collections::{hash_map::Entry, HashMap, HashSet}, + std::sync::{Arc, Mutex, MutexGuard}, +}; + +#[derive(Debug)] +pub enum BundleAccountLockerError { + LockingError, +} + +pub type BundleAccountLockerResult = Result; + +pub struct LockedBundle<'a, 'b> { + bundle_account_locker: &'a BundleAccountLocker, + sanitized_bundle: &'b SanitizedBundle, + bank: Arc, +} + +impl<'a, 'b> LockedBundle<'a, 'b> { + pub fn new( + bundle_account_locker: &'a BundleAccountLocker, + sanitized_bundle: &'b SanitizedBundle, + bank: &Arc, + ) -> Self { + Self { + bundle_account_locker, + sanitized_bundle, + bank: bank.clone(), + } + } + + pub fn sanitized_bundle(&self) -> &SanitizedBundle { + self.sanitized_bundle + } +} + +// Automatically unlock bundle accounts when destructed +impl<'a, 'b> Drop for LockedBundle<'a, 'b> { + fn drop(&mut self) { + let _ = self + .bundle_account_locker + .unlock_bundle_accounts(self.sanitized_bundle, &self.bank); + } +} + +#[derive(Default, Clone)] +pub struct BundleAccountLocks { + read_locks: HashMap, + write_locks: HashMap, +} + +impl BundleAccountLocks { + pub fn read_locks(&self) -> HashSet { + self.read_locks.keys().cloned().collect() + } + + pub fn write_locks(&self) -> HashSet { + self.write_locks.keys().cloned().collect() + } + + pub fn lock_accounts( + &mut self, + read_locks: HashMap, + write_locks: HashMap, + ) { + for (acc, count) in read_locks { + *self.read_locks.entry(acc).or_insert(0) += count; + } + for (acc, count) in write_locks { + *self.write_locks.entry(acc).or_insert(0) += count; + } + } + + pub fn unlock_accounts( + &mut self, + read_locks: HashMap, + write_locks: HashMap, + ) { + for (acc, count) in read_locks { + if let Entry::Occupied(mut entry) = self.read_locks.entry(acc) { + let val = entry.get_mut(); + *val = val.saturating_sub(count); + if entry.get() == &0 { + let _ = entry.remove(); + } + } else { + warn!("error unlocking read-locked account, account: {:?}", acc); + } + } + for (acc, count) in write_locks { + if let Entry::Occupied(mut entry) = self.write_locks.entry(acc) { + let val = entry.get_mut(); + *val = val.saturating_sub(count); + if entry.get() == &0 { + let _ = entry.remove(); + } + } else { + warn!("error unlocking write-locked account, account: {:?}", acc); + } + } + } +} + +#[derive(Clone, Default)] +pub struct BundleAccountLocker { + account_locks: Arc>, +} + +impl BundleAccountLocker { + /// used in BankingStage during TransactionBatch construction to ensure that BankingStage + /// doesn't lock anything currently locked in the BundleAccountLocker + pub fn read_locks(&self) -> HashSet { + self.account_locks.lock().unwrap().read_locks() + } + + /// used in BankingStage during TransactionBatch construction to ensure that BankingStage + /// doesn't lock anything currently locked in the BundleAccountLocker + pub fn write_locks(&self) -> HashSet { + self.account_locks.lock().unwrap().write_locks() + } + + /// used in BankingStage during TransactionBatch construction to ensure that BankingStage + /// doesn't lock anything currently locked in the BundleAccountLocker + pub fn account_locks(&self) -> MutexGuard { + self.account_locks.lock().unwrap() + } + + /// Prepares a locked bundle and returns a LockedBundle containing locked accounts. 
+ /// When a LockedBundle is dropped, the accounts are automatically unlocked + pub fn prepare_locked_bundle<'a, 'b>( + &'a self, + sanitized_bundle: &'b SanitizedBundle, + bank: &Arc, + ) -> BundleAccountLockerResult> { + let (read_locks, write_locks) = Self::get_read_write_locks(sanitized_bundle, bank)?; + + self.account_locks + .lock() + .unwrap() + .lock_accounts(read_locks, write_locks); + Ok(LockedBundle::new(self, sanitized_bundle, bank)) + } + + /// Unlocks bundle accounts. Note that LockedBundle::drop will auto-drop the bundle account locks + fn unlock_bundle_accounts( + &self, + sanitized_bundle: &SanitizedBundle, + bank: &Bank, + ) -> BundleAccountLockerResult<()> { + let (read_locks, write_locks) = Self::get_read_write_locks(sanitized_bundle, bank)?; + + self.account_locks + .lock() + .unwrap() + .unlock_accounts(read_locks, write_locks); + Ok(()) + } + + /// Returns the read and write locks for this bundle + /// Each lock type contains a HashMap which maps Pubkey to number of locks held + fn get_read_write_locks( + bundle: &SanitizedBundle, + bank: &Bank, + ) -> BundleAccountLockerResult<(HashMap, HashMap)> { + let transaction_locks: Vec = bundle + .transactions + .iter() + .filter_map(|tx| { + tx.get_account_locks(bank.get_transaction_account_lock_limit()) + .ok() + }) + .collect(); + + if transaction_locks.len() != bundle.transactions.len() { + return Err(BundleAccountLockerError::LockingError); + } + + let bundle_read_locks = transaction_locks + .iter() + .flat_map(|tx| tx.readonly.iter().map(|a| **a)); + let bundle_read_locks = + bundle_read_locks + .into_iter() + .fold(HashMap::new(), |mut map, acc| { + *map.entry(acc).or_insert(0) += 1; + map + }); + + let bundle_write_locks = transaction_locks + .iter() + .flat_map(|tx| tx.writable.iter().map(|a| **a)); + let bundle_write_locks = + bundle_write_locks + .into_iter() + .fold(HashMap::new(), |mut map, acc| { + *map.entry(acc).or_insert(0) += 1; + map + }); + + Ok((bundle_read_locks, bundle_write_locks)) + } +} + +#[cfg(test)] +mod tests { + use { + crate::{ + bundle_account_locker::BundleAccountLocker, bundle_sanitizer::get_sanitized_bundle, + packet_bundle::PacketBundle, + }, + solana_ledger::genesis_utils::create_genesis_config, + solana_perf::packet::PacketBatch, + solana_runtime::{ + bank::Bank, genesis_utils::GenesisConfigInfo, + transaction_error_metrics::TransactionErrorMetrics, + }, + solana_sdk::{ + packet::Packet, signature::Signer, signer::keypair::Keypair, system_program, + system_transaction::transfer, transaction::VersionedTransaction, + }, + std::{collections::HashSet, sync::Arc}, + }; + + #[test] + fn test_simple_lock_bundles() { + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. 
+ } = create_genesis_config(2); + let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let bundle_account_locker = BundleAccountLocker::default(); + + let kp0 = Keypair::new(); + let kp1 = Keypair::new(); + + let tx0 = VersionedTransaction::from(transfer( + &mint_keypair, + &kp0.pubkey(), + 1, + genesis_config.hash(), + )); + let tx1 = VersionedTransaction::from(transfer( + &mint_keypair, + &kp1.pubkey(), + 1, + genesis_config.hash(), + )); + + let packet_bundle0 = PacketBundle { + batch: PacketBatch::new(vec![Packet::from_data(None, &tx0).unwrap()]), + bundle_id: tx0.signatures[0].to_string(), + }; + let packet_bundle1 = PacketBundle { + batch: PacketBatch::new(vec![Packet::from_data(None, &tx1).unwrap()]), + bundle_id: tx1.signatures[0].to_string(), + }; + + let mut transaction_errors = TransactionErrorMetrics::default(); + + let sanitized_bundle0 = get_sanitized_bundle( + &packet_bundle0, + &bank, + &HashSet::default(), + &HashSet::default(), + &mut transaction_errors, + ) + .expect("sanitize bundle 0"); + let sanitized_bundle1 = get_sanitized_bundle( + &packet_bundle1, + &bank, + &HashSet::default(), + &HashSet::default(), + &mut transaction_errors, + ) + .expect("sanitize bundle 1"); + + let locked_bundle0 = bundle_account_locker + .prepare_locked_bundle(&sanitized_bundle0, &bank) + .unwrap(); + + assert_eq!( + bundle_account_locker.write_locks(), + HashSet::from_iter([mint_keypair.pubkey(), kp0.pubkey()]) + ); + assert_eq!( + bundle_account_locker.read_locks(), + HashSet::from_iter([system_program::id()]) + ); + + let locked_bundle1 = bundle_account_locker + .prepare_locked_bundle(&sanitized_bundle1, &bank) + .unwrap(); + assert_eq!( + bundle_account_locker.write_locks(), + HashSet::from_iter([mint_keypair.pubkey(), kp0.pubkey(), kp1.pubkey()]) + ); + assert_eq!( + bundle_account_locker.read_locks(), + HashSet::from_iter([system_program::id()]) + ); + + drop(locked_bundle0); + assert_eq!( + bundle_account_locker.write_locks(), + HashSet::from_iter([mint_keypair.pubkey(), kp1.pubkey()]) + ); + assert_eq!( + bundle_account_locker.read_locks(), + HashSet::from_iter([system_program::id()]) + ); + + drop(locked_bundle1); + assert!(bundle_account_locker.write_locks().is_empty()); + assert!(bundle_account_locker.read_locks().is_empty()); + } +} diff --git a/core/src/bundle_sanitizer.rs b/core/src/bundle_sanitizer.rs new file mode 100644 index 0000000000..c561146a34 --- /dev/null +++ b/core/src/bundle_sanitizer.rs @@ -0,0 +1,617 @@ +use crate::unprocessed_packet_batches::ImmutableDeserializedPacket; +///! 
Turns packets into SanitizedTransactions and ensures they pass sanity checks +use { + crate::packet_bundle::PacketBundle, + crate::unprocessed_packet_batches::deserialize_packets, + solana_perf::sigverify::verify_packet, + solana_runtime::{bank::Bank, transaction_error_metrics::TransactionErrorMetrics}, + solana_sdk::{ + bundle::sanitized::SanitizedBundle, + clock::MAX_PROCESSING_AGE, + feature_set::FeatureSet, + pubkey::Pubkey, + signature::Signature, + transaction::{AddressLoader, SanitizedTransaction}, + }, + std::{ + collections::{hash_map::RandomState, HashSet}, + iter::repeat, + sync::Arc, + }, + thiserror::Error, +}; + +pub const MAX_PACKETS_PER_BUNDLE: usize = 5; + +#[derive(Error, Debug, PartialEq, Eq, Clone)] +pub enum BundleSanitizerError { + #[error("Bank is in vote-only mode")] + VoteOnlyMode, + #[error("Bundle packet batch failed pre-check")] + FailedPacketBatchPreCheck, + #[error("Bundle mentions blacklisted account")] + BlacklistedAccount, + #[error("Bundle contains a transaction that failed to serialize")] + FailedToSerializeTransaction, + #[error("Bundle contains a duplicate transaction")] + DuplicateTransaction, + #[error("Bundle failed check_transactions")] + FailedCheckTransactions, +} + +pub type BundleSanitizationResult = Result; + +/// An invalid bundle contains one of the following: +/// No packets. +/// Too many packets. +/// Packets marked for discard (not sure why someone would do this). +/// One of the packets fails signature verification. +/// Mentions an account in consensus or blacklisted accounts. +/// Contains a packet that failed to serialize to a transaction. +/// Contains duplicate transactions within the same bundle. +/// Contains a transaction that was already processed or one with an invalid blockhash. +/// NOTE: bundles need to be sanitized for a given bank.
For instance, a bundle sanitized +/// on bank n-1 will be valid for all of bank n-1, and may or may not be valid for bank n +pub fn get_sanitized_bundle( + packet_bundle: &PacketBundle, + bank: &Arc, + consensus_accounts_cache: &HashSet, + blacklisted_accounts: &HashSet, + transaction_error_metrics: &mut TransactionErrorMetrics, +) -> BundleSanitizationResult { + if bank.vote_only_bank() { + return Err(BundleSanitizerError::VoteOnlyMode); + } + + if packet_bundle.batch.is_empty() + || packet_bundle.batch.len() > MAX_PACKETS_PER_BUNDLE + || packet_bundle.batch.iter().any(|p| p.meta.discard()) + || packet_bundle + .batch + .iter() + .any(|p| !verify_packet(&mut p.clone(), false)) + { + return Err(BundleSanitizerError::FailedPacketBatchPreCheck); + } + + let packet_indexes = (0..packet_bundle.batch.len()).collect::>(); + let deserialized_packets = deserialize_packets(&packet_bundle.batch, &packet_indexes); + let transactions: Vec = deserialized_packets + .filter_map(|p| { + let immutable_packet = p.immutable_section().clone(); + transaction_from_deserialized_packet( + &immutable_packet, + &bank.feature_set, + bank.as_ref(), + ) + }) + .collect(); + + let unique_signatures: HashSet<&Signature, RandomState> = + HashSet::from_iter(transactions.iter().map(|tx| tx.signature())); + let contains_blacklisted_account = transactions.iter().any(|tx| { + let accounts = tx.message().account_keys(); + accounts + .iter() + .any(|acc| blacklisted_accounts.contains(acc) || consensus_accounts_cache.contains(acc)) + }); + + if contains_blacklisted_account { + return Err(BundleSanitizerError::BlacklistedAccount); + } + + if transactions.is_empty() || packet_bundle.batch.len() != transactions.len() { + return Err(BundleSanitizerError::FailedToSerializeTransaction); + } + + if unique_signatures.len() != transactions.len() { + return Err(BundleSanitizerError::DuplicateTransaction); + } + + // assume everything locks okay to check for already-processed transaction or expired/invalid blockhash + let lock_results: Vec<_> = repeat(Ok(())).take(transactions.len()).collect(); + let check_results = bank.check_transactions( + &transactions, + &lock_results, + MAX_PROCESSING_AGE, + transaction_error_metrics, + ); + if check_results.iter().any(|r| r.0.is_err()) { + return Err(BundleSanitizerError::FailedCheckTransactions); + } + + Ok(SanitizedBundle { + transactions, + bundle_id: packet_bundle.bundle_id.clone(), + }) +} + +// This function deserializes packets into transactions, computes the blake3 hash of transaction +// messages, and verifies secp256k1 instructions. A list of sanitized transactions are returned +// with their packet indexes. 
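Condensed, the gatekeeping in get_sanitized_bundle above is a short-circuiting cascade: cheap shape checks first, then the per-account blacklist scan, then duplicate detection, and finally the bank-level replay/blockhash check. A minimal sketch of that ordering, with hypothetical Tx and SanitizeError types standing in for the real packet and error types (the bank-level step is omitted):

    use std::collections::HashSet;

    // Hypothetical stand-ins; only the ordering of the checks matters here.
    struct Tx {
        signature: u64,
        accounts: Vec<&'static str>,
    }

    #[derive(Debug, PartialEq)]
    enum SanitizeError {
        Empty,
        TooMany,
        Blacklisted,
        Duplicate,
    }

    const MAX_PER_BUNDLE: usize = 5; // mirrors MAX_PACKETS_PER_BUNDLE

    fn pre_check(txs: &[Tx], blacklist: &HashSet<&'static str>) -> Result<(), SanitizeError> {
        // Shape checks come first and are the cheapest to evaluate.
        if txs.is_empty() {
            return Err(SanitizeError::Empty);
        }
        if txs.len() > MAX_PER_BUNDLE {
            return Err(SanitizeError::TooMany);
        }
        // Any tx touching a blacklisted (consensus / tip-program) account sinks the bundle.
        if txs
            .iter()
            .any(|t| t.accounts.iter().any(|a| blacklist.contains(a)))
        {
            return Err(SanitizeError::Blacklisted);
        }
        // Duplicate signatures would make the bundle conflict with itself.
        let unique: HashSet<u64> = txs.iter().map(|t| t.signature).collect();
        if unique.len() != txs.len() {
            return Err(SanitizeError::Duplicate);
        }
        Ok(())
    }

    fn main() {
        let blacklist = HashSet::from(["tip_program"]);
        let txs = vec![Tx { signature: 1, accounts: vec!["alice"] }];
        assert_eq!(pre_check(&txs, &blacklist), Ok(()));
    }

Failing the whole bundle on the first bad transaction, rather than filtering it out, matches the all-or-nothing execution model: a bundle with any invalid member can never commit anyway.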
+// NOTES on tx v2: +// - tx v2 can only load addresses set in previous slots +// - tx v2 can't reorg indices in a lookup table +// - tx v2 transaction loading fails if it tries to access an invalid index (either doesn't exist +// or exists but was set in the current slot +#[allow(clippy::needless_collect)] +fn transaction_from_deserialized_packet( + deserialized_packet: &ImmutableDeserializedPacket, + feature_set: &Arc, + address_loader: impl AddressLoader, +) -> Option { + let tx = SanitizedTransaction::try_new( + deserialized_packet.transaction().clone(), + *deserialized_packet.message_hash(), + deserialized_packet.is_simple_vote(), + address_loader, + ) + .ok()?; + tx.verify_precompiles(feature_set).ok()?; + Some(tx) +} + +#[cfg(test)] +mod tests { + use { + crate::{ + bundle_sanitizer::{get_sanitized_bundle, MAX_PACKETS_PER_BUNDLE}, + packet_bundle::PacketBundle, + tip_manager::{TipDistributionAccountConfig, TipManager, TipManagerConfig}, + }, + solana_address_lookup_table_program::instruction::create_lookup_table, + solana_ledger::genesis_utils::create_genesis_config, + solana_perf::packet::PacketBatch, + solana_runtime::{ + bank::Bank, genesis_utils::GenesisConfigInfo, + transaction_error_metrics::TransactionErrorMetrics, + }, + solana_sdk::{ + bundle::sanitized::derive_bundle_id, + hash::Hash, + instruction::Instruction, + packet::Packet, + pubkey::Pubkey, + signature::{Keypair, Signer}, + system_transaction::transfer, + transaction::{Transaction, VersionedTransaction}, + }, + std::{collections::HashSet, sync::Arc}, + }; + + #[test] + fn test_simple_get_sanitized_bundle() { + solana_logger::setup(); + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(2); + let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let kp = Keypair::new(); + + let tx = VersionedTransaction::from(transfer( + &mint_keypair, + &kp.pubkey(), + 1, + genesis_config.hash(), + )); + let packet = Packet::from_data(None, &tx).unwrap(); + let tx_signature = tx.signatures[0]; + let bundle_id = derive_bundle_id(&[tx]); + + let packet_bundle = PacketBundle { + batch: PacketBatch::new(vec![packet]), + bundle_id, + }; + + let mut transaction_errors = TransactionErrorMetrics::default(); + let sanitized_bundle = get_sanitized_bundle( + &packet_bundle, + &bank, + &HashSet::default(), + &HashSet::default(), + &mut transaction_errors, + ) + .unwrap(); + assert_eq!(sanitized_bundle.transactions.len(), 1); + assert_eq!(sanitized_bundle.transactions[0].signature(), &tx_signature); + } + + #[test] + fn test_fail_to_sanitize_consensus_account() { + solana_logger::setup(); + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. 
+ } = create_genesis_config(2); + let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let kp = Keypair::new(); + + let tx = VersionedTransaction::from(transfer( + &mint_keypair, + &kp.pubkey(), + 1, + genesis_config.hash(), + )); + let packet = Packet::from_data(None, &tx).unwrap(); + let bundle_id = derive_bundle_id(&[tx]); + + let packet_bundle = PacketBundle { + batch: PacketBatch::new(vec![packet]), + bundle_id, + }; + + let consensus_accounts_cache = HashSet::from([kp.pubkey()]); + let mut transaction_errors = TransactionErrorMetrics::default(); + assert!(get_sanitized_bundle( + &packet_bundle, + &bank, + &consensus_accounts_cache, + &HashSet::default(), + &mut transaction_errors + ) + .is_err()); + } + + #[test] + fn test_fail_to_sanitize_duplicate_transaction() { + solana_logger::setup(); + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(2); + let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let kp = Keypair::new(); + + let tx = VersionedTransaction::from(transfer( + &mint_keypair, + &kp.pubkey(), + 1, + genesis_config.hash(), + )); + let packet = Packet::from_data(None, &tx).unwrap(); + let bundle_id = derive_bundle_id(&[tx]); + + // bundle with a duplicate transaction + let packet_bundle = PacketBundle { + batch: PacketBatch::new(vec![packet.clone(), packet]), + bundle_id, + }; + + // fails to pop because bundle it locks the same transaction twice + let mut transaction_errors = TransactionErrorMetrics::default(); + assert!(get_sanitized_bundle( + &packet_bundle, + &bank, + &HashSet::default(), + &HashSet::default(), + &mut transaction_errors + ) + .is_err()); + } + + #[test] + fn test_fails_to_sanitize_bad_blockhash() { + solana_logger::setup(); + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(2); + let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let kp = Keypair::new(); + + let tx = + VersionedTransaction::from(transfer(&mint_keypair, &kp.pubkey(), 1, Hash::default())); + let packet = Packet::from_data(None, &tx).unwrap(); + let bundle_id = derive_bundle_id(&[tx]); + + let packet_bundle = PacketBundle { + batch: PacketBatch::new(vec![packet.clone(), packet]), + bundle_id, + }; + + // fails to pop because bundle has bad blockhash + let mut transaction_errors = TransactionErrorMetrics::default(); + assert!(get_sanitized_bundle( + &packet_bundle, + &bank, + &HashSet::default(), + &HashSet::default(), + &mut transaction_errors + ) + .is_err()); + } + + #[test] + fn test_fails_to_sanitize_already_processed() { + solana_logger::setup(); + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. 
+ } = create_genesis_config(2); + let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let kp = Keypair::new(); + + let tx = VersionedTransaction::from(transfer( + &mint_keypair, + &kp.pubkey(), + 1, + genesis_config.hash(), + )); + let packet = Packet::from_data(None, &tx).unwrap(); + let bundle_id = derive_bundle_id(&[tx]); + + let packet_bundle = PacketBundle { + batch: PacketBatch::new(vec![packet.clone()]), + bundle_id: bundle_id.clone(), + }; + + let mut transaction_errors = TransactionErrorMetrics::default(); + let sanitized_bundle = get_sanitized_bundle( + &packet_bundle, + &bank, + &HashSet::default(), + &HashSet::default(), + &mut transaction_errors, + ) + .unwrap(); + + let results = bank.process_entry_transactions( + sanitized_bundle + .transactions + .into_iter() + .map(|tx| tx.to_versioned_transaction()) + .collect(), + ); + assert_eq!(results.len(), 1); + assert_eq!(results[0], Ok(())); + + // try to process the same one again shall fail + let packet_bundle = PacketBundle { + batch: PacketBatch::new(vec![packet]), + bundle_id, + }; + + assert!(get_sanitized_bundle( + &packet_bundle, + &bank, + &HashSet::default(), + &HashSet::default(), + &mut transaction_errors + ) + .is_err()); + } + + #[test] + fn test_fails_to_sanitize_bundle_tip_program() { + solana_logger::setup(); + let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); + let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let tip_manager = TipManager::new(TipManagerConfig { + tip_payment_program_id: Pubkey::new_unique(), + tip_distribution_program_id: Pubkey::new_unique(), + tip_distribution_account_config: TipDistributionAccountConfig { + merkle_root_upload_authority: Pubkey::new_unique(), + vote_account: Pubkey::new_unique(), + commission_bps: 0, + }, + }); + + let kp = Keypair::new(); + let tx = VersionedTransaction::from(Transaction::new_signed_with_payer( + &[Instruction::new_with_bytes( + tip_manager.tip_payment_program_id(), + &[0], + vec![], + )], + Some(&kp.pubkey()), + &[&kp], + genesis_config.hash(), + )); + tx.sanitize(false).unwrap(); + let packet = Packet::from_data(None, &tx).unwrap(); + let bundle_id = derive_bundle_id(&[tx]); + + let packet_bundle = PacketBundle { + batch: PacketBatch::new(vec![packet]), + bundle_id, + }; + + // fails to pop because bundle mentions tip program + let mut transaction_errors = TransactionErrorMetrics::default(); + assert!(get_sanitized_bundle( + &packet_bundle, + &bank, + &HashSet::default(), + &HashSet::from_iter([tip_manager.tip_payment_program_id()]), + &mut transaction_errors + ) + .is_err()); + } + + #[test] + fn test_txv2_sanitized_bundle_ok() { + solana_logger::setup(); + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(2); + let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let kp = Keypair::new(); + let tx = VersionedTransaction::from(Transaction::new_signed_with_payer( + &[create_lookup_table(kp.pubkey(), kp.pubkey(), bank.slot()).0], + Some(&kp.pubkey()), + &[&kp], + genesis_config.hash(), + )); + tx.sanitize(false).unwrap(); + let packet = Packet::from_data(None, &tx).unwrap(); + let bundle_id = derive_bundle_id(&[tx]); + + let packet_bundle = PacketBundle { + batch: PacketBatch::new(vec![packet]), + bundle_id, + }; + + let mut transaction_errors = TransactionErrorMetrics::default(); + assert!(get_sanitized_bundle( + &packet_bundle, + &bank, + &HashSet::default(), + &HashSet::default(), + &mut transaction_errors + ) + .is_ok()); + } + + #[test] + fn test_fails_to_sanitize_empty_bundle() { + solana_logger::setup(); + let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); + let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let packet_bundle = PacketBundle { + batch: PacketBatch::new(vec![]), + bundle_id: String::default(), + }; + // fails to pop because empty bundle + let mut transaction_errors = TransactionErrorMetrics::default(); + assert!(get_sanitized_bundle( + &packet_bundle, + &bank, + &HashSet::default(), + &HashSet::default(), + &mut transaction_errors + ) + .is_err()); + } + + #[test] + fn test_fails_to_sanitize_too_many_packets() { + solana_logger::setup(); + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(2); + let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let kp = Keypair::new(); + + let txs = (0..MAX_PACKETS_PER_BUNDLE + 1) + .map(|i| { + VersionedTransaction::from(transfer( + &mint_keypair, + &kp.pubkey(), + i as u64, + genesis_config.hash(), + )) + }) + .collect::>(); + let packets = txs.iter().map(|tx| Packet::from_data(None, tx).unwrap()); + let packet_bundle = PacketBundle { + batch: PacketBatch::new(packets.collect()), + bundle_id: derive_bundle_id(&txs), + }; + // fails to pop because too many packets in a bundle + let mut transaction_errors = TransactionErrorMetrics::default(); + assert!(get_sanitized_bundle( + &packet_bundle, + &bank, + &HashSet::default(), + &HashSet::default(), + &mut transaction_errors + ) + .is_err()); + } + + #[test] + fn test_fails_to_sanitize_discarded() { + solana_logger::setup(); + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(2); + let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let kp = Keypair::new(); + + let tx = VersionedTransaction::from(transfer( + &mint_keypair, + &kp.pubkey(), + 1, + genesis_config.hash(), + )); + let mut packet = Packet::from_data(None, &tx).unwrap(); + packet.meta.set_discard(true); + + let packet_bundle = PacketBundle { + batch: PacketBatch::new(vec![packet]), + bundle_id: derive_bundle_id(&[tx]), + }; + + // fails to pop because one of the packets is marked as discard + let mut transaction_errors = TransactionErrorMetrics::default(); + assert!(get_sanitized_bundle( + &packet_bundle, + &bank, + &HashSet::default(), + &HashSet::default(), + &mut transaction_errors + ) + .is_err()); + } + + #[test] + fn test_fails_to_sanitize_bad_sigverify() { + solana_logger::setup(); + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. 
} = create_genesis_config(2); + let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + + let kp = Keypair::new(); + + let mut tx = VersionedTransaction::from(transfer( + &mint_keypair, + &kp.pubkey(), + 1, + genesis_config.hash(), + )); + + let _ = tx.signatures.pop(); + + let bad_kp = Keypair::new(); + let serialized = tx.message.serialize(); + let bad_sig = bad_kp.sign_message(&serialized); + tx.signatures.push(bad_sig); + + let packet = Packet::from_data(None, &tx).unwrap(); + + let packet_bundle = PacketBundle { + batch: PacketBatch::new(vec![packet]), + bundle_id: derive_bundle_id(&[tx]), + }; + let mut transaction_errors = TransactionErrorMetrics::default(); + assert!(get_sanitized_bundle( + &packet_bundle, + &bank, + &HashSet::default(), + &HashSet::default(), + &mut transaction_errors + ) + .is_err()); + } +} diff --git a/core/src/bundle_stage.rs b/core/src/bundle_stage.rs new file mode 100644 index 0000000000..3b709984bc --- /dev/null +++ b/core/src/bundle_stage.rs @@ -0,0 +1,2145 @@ +//! The `bundle_stage` processes bundles of transactions. It is intended to be used +//! to construct a software pipeline. The stage uses all available CPU cores and +//! can do its processing in parallel with signature verification on the GPU. +use { + crate::{ + banking_stage::{BatchedTransactionDetails, CommitTransactionDetails}, + bundle_account_locker::{BundleAccountLocker, BundleAccountLockerResult, LockedBundle}, + bundle_sanitizer::{get_sanitized_bundle, BundleSanitizerError}, + bundle_stage_leader_stats::{BundleStageLeaderSlotTrackingMetrics, BundleStageLeaderStats}, + consensus_cache_updater::ConsensusCacheUpdater, + leader_slot_banking_stage_timing_metrics::RecordTransactionsTimings, + packet_bundle::PacketBundle, + proxy::block_engine_stage::BlockBuilderFeeInfo, + qos_service::QosService, + tip_manager::TipManager, + }, + crossbeam_channel::{Receiver, RecvTimeoutError}, + solana_entry::entry::hash_transactions, + solana_gossip::cluster_info::ClusterInfo, + solana_ledger::{ + blockstore_processor::TransactionStatusSender, token_balances::collect_token_balances, + }, + solana_measure::measure, + solana_poh::poh_recorder::{ + BankStart, PohRecorder, + PohRecorderError::{self}, + TransactionRecorder, + }, + solana_program_runtime::timings::ExecuteTimings, + solana_runtime::{ + account_overrides::AccountOverrides, + accounts::TransactionLoadResult, + bank::{ + Bank, CommitTransactionCounts, LoadAndExecuteTransactionsOutput, TransactionBalances, + TransactionBalancesSet, TransactionExecutionResult, + }, + bank_utils, + block_cost_limits::MAX_BLOCK_UNITS, + cost_model::{CostModel, TransactionCost}, + transaction_batch::TransactionBatch, + vote_sender_types::ReplayVoteSender, + }, + solana_sdk::{ + bundle::{ + error::BundleExecutionError, sanitized::SanitizedBundle, + utils::check_bundle_lock_results, + }, + clock::{Slot, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE}, + hash::Hash, + pubkey::Pubkey, + saturating_add_assign, + transaction::{self, SanitizedTransaction, TransactionError, VersionedTransaction}, + }, + solana_transaction_status::token_balances::{ + TransactionTokenBalances, TransactionTokenBalancesSet, + }, + std::{ + collections::{HashMap, HashSet, VecDeque}, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, Mutex, RwLock, + }, + thread::{self, Builder, JoinHandle}, + time::{Duration, Instant}, + }, +}; + +const MAX_BUNDLE_RETRY_DURATION: Duration = Duration::from_millis(10); +const SLOT_BOUNDARY_CHECK_PERIOD: Duration = Duration::from_millis(10); + +type
BundleStageResult = Result; + +// Stats emitted periodically +struct BundleStageLoopStats { + last_report: Instant, + + num_bundles_received: u64, + num_bundles_dropped: u64, + receive_and_buffer_bundles_elapsed_us: u64, + process_buffered_bundles_elapsed_us: u64, +} + +impl Default for BundleStageLoopStats { + fn default() -> Self { + BundleStageLoopStats { + last_report: Instant::now(), + num_bundles_received: 0, + num_bundles_dropped: 0, + receive_and_buffer_bundles_elapsed_us: 0, + process_buffered_bundles_elapsed_us: 0, + } + } +} + +impl BundleStageLoopStats { + fn maybe_report(&mut self, id: u32, period: Duration) { + if self.last_report.elapsed() > period { + datapoint_info!( + "bundle_stage-loop_stats", + ("id", id, i64), + ("num_bundles_received", self.num_bundles_received, i64), + ("num_bundles_dropped", self.num_bundles_dropped, i64), + ( + "receive_and_buffer_bundles_elapsed_us", + self.receive_and_buffer_bundles_elapsed_us, + i64 + ), + ( + "process_buffered_bundles_elapsed_us", + self.process_buffered_bundles_elapsed_us, + i64 + ), + ); + *self = BundleStageLoopStats::default(); + } + } +} + +struct AllExecutionResults { + pub load_and_execute_tx_output: LoadAndExecuteTransactionsOutput, + pub sanitized_txs: Vec, + pub pre_balances: (TransactionBalances, TransactionTokenBalances), + pub post_balances: (TransactionBalances, TransactionTokenBalances), +} + +struct BundleReservedSpace { + current_tx_block_limit: u64, + current_bundle_block_limit: u64, + initial_allocated_cost: u64, + unreserved_ticks: u64, +} + +impl BundleReservedSpace { + fn reset_reserved_cost(&mut self, working_bank: &Arc) { + self.current_tx_block_limit = self + .current_bundle_block_limit + .saturating_sub(self.initial_allocated_cost); + + working_bank + .write_cost_tracker() + .unwrap() + .set_block_cost_limit(self.current_tx_block_limit); + + debug!( + "slot: {}. cost limits reset. bundle: {}, txn: {}", + working_bank.slot(), + self.current_bundle_block_limit, + self.current_tx_block_limit, + ); + } + + fn bundle_block_limit(&self) -> u64 { + self.current_bundle_block_limit + } + + fn tx_block_limit(&self) -> u64 { + self.current_tx_block_limit + } + + fn update_reserved_cost(&mut self, working_bank: &Arc) { + if self.current_tx_block_limit != self.current_bundle_block_limit + && working_bank + .max_tick_height() + .saturating_sub(working_bank.tick_height()) + < self.unreserved_ticks + { + self.current_tx_block_limit = self.current_bundle_block_limit; + working_bank + .write_cost_tracker() + .unwrap() + .set_block_cost_limit(self.current_tx_block_limit); + debug!( + "slot: {}. 
increased tx cost limit to {}", + working_bank.slot(), + self.current_tx_block_limit + ); + } + } +} + +pub struct BundleStage { + bundle_thread: JoinHandle<()>, +} + +impl BundleStage { + #[allow(clippy::new_ret_no_self)] + #[allow(clippy::too_many_arguments)] + pub fn new( + cluster_info: &Arc, + poh_recorder: &Arc>, + transaction_status_sender: Option, + gossip_vote_sender: ReplayVoteSender, + cost_model: Arc>, + bundle_receiver: Receiver>, + exit: Arc, + tip_manager: TipManager, + bundle_account_locker: BundleAccountLocker, + block_builder_fee_info: &Arc>, + preallocated_bundle_cost: u64, + ) -> Self { + Self::start_bundle_thread( + cluster_info, + poh_recorder, + transaction_status_sender, + gossip_vote_sender, + cost_model, + bundle_receiver, + exit, + tip_manager, + bundle_account_locker, + MAX_BUNDLE_RETRY_DURATION, + block_builder_fee_info, + preallocated_bundle_cost, + ) + } + + #[allow(clippy::too_many_arguments)] + fn start_bundle_thread( + cluster_info: &Arc, + poh_recorder: &Arc>, + transaction_status_sender: Option, + gossip_vote_sender: ReplayVoteSender, + cost_model: Arc>, + bundle_receiver: Receiver>, + exit: Arc, + tip_manager: TipManager, + bundle_account_locker: BundleAccountLocker, + max_bundle_retry_duration: Duration, + block_builder_fee_info: &Arc>, + preallocated_bundle_cost: u64, + ) -> Self { + const BUNDLE_STAGE_ID: u32 = 10_000; + let poh_recorder = poh_recorder.clone(); + let cluster_info = cluster_info.clone(); + let block_builder_fee_info = block_builder_fee_info.clone(); + + let bundle_thread = Builder::new() + .name("solana-bundle-stage".to_string()) + .spawn(move || { + Self::process_loop( + cluster_info, + &poh_recorder, + transaction_status_sender, + bundle_receiver, + gossip_vote_sender, + BUNDLE_STAGE_ID, + cost_model, + exit, + tip_manager, + bundle_account_locker, + max_bundle_retry_duration, + block_builder_fee_info, + preallocated_bundle_cost, + ); + }) + .unwrap(); + + Self { bundle_thread } + } + + // rollup transaction cost details, eg signature_cost, write_lock_cost, data_bytes_cost and + // execution_cost from the batch of transactions selected for block. 
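In essence, the rollup just described folds per-transaction costs into one running total, counting only the transactions that passed QoS selection and using saturating arithmetic throughout. The function that follows implements this over the real TransactionCost and QoS result types; here is a simplified restatement (field names abbreviated, and the error-bucketing that the real BatchedTransactionDetails also performs is omitted):

    // Simplified cost rollup; TxCost and Rollup are illustrative reductions
    // of the real TransactionCost / BatchedTransactionDetails types.
    #[derive(Default, Debug)]
    struct Rollup {
        signature: u64,
        write_lock: u64,
        data_bytes: u64,
        execute: u64,
    }

    struct TxCost {
        signature: u64,
        write_lock: u64,
        data_bytes: u64,
        execute: u64,
    }

    fn rollup<'a>(
        costs: impl Iterator<Item = &'a TxCost>,
        results: impl Iterator<Item = &'a Result<(), String>>,
    ) -> Rollup {
        let mut total = Rollup::default();
        for (cost, result) in costs.zip(results) {
            // Only transactions selected by QoS contribute to the totals.
            if result.is_ok() {
                // Saturating adds mirror saturating_add_assign! in the patch.
                total.signature = total.signature.saturating_add(cost.signature);
                total.write_lock = total.write_lock.saturating_add(cost.write_lock);
                total.data_bytes = total.data_bytes.saturating_add(cost.data_bytes);
                total.execute = total.execute.saturating_add(cost.execute);
            }
        }
        total
    }

    fn main() {
        let costs = [TxCost { signature: 1, write_lock: 2, data_bytes: 3, execute: 4 }];
        let results = [Ok(())];
        let total = rollup(costs.iter(), results.iter());
        assert_eq!(total.signature, 1);
    }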
+ fn accumulate_batched_transaction_costs<'a>( + transactions_costs: impl Iterator, + transaction_results: impl Iterator>, + ) -> BatchedTransactionDetails { + let mut batched_transaction_details = BatchedTransactionDetails::default(); + transactions_costs + .zip(transaction_results) + .for_each(|(cost, result)| match result { + Ok(_) => { + saturating_add_assign!( + batched_transaction_details.costs.batched_signature_cost, + cost.signature_cost + ); + saturating_add_assign!( + batched_transaction_details.costs.batched_write_lock_cost, + cost.write_lock_cost + ); + saturating_add_assign!( + batched_transaction_details.costs.batched_data_bytes_cost, + cost.data_bytes_cost + ); + saturating_add_assign!( + batched_transaction_details + .costs + .batched_builtins_execute_cost, + cost.builtins_execution_cost + ); + saturating_add_assign!( + batched_transaction_details.costs.batched_bpf_execute_cost, + cost.bpf_execution_cost + ); + } + Err(transaction_error) => match transaction_error { + TransactionError::WouldExceedMaxBlockCostLimit => { + saturating_add_assign!( + batched_transaction_details + .errors + .batched_retried_txs_per_block_limit_count, + 1 + ); + } + TransactionError::WouldExceedMaxVoteCostLimit => { + saturating_add_assign!( + batched_transaction_details + .errors + .batched_retried_txs_per_vote_limit_count, + 1 + ); + } + TransactionError::WouldExceedMaxAccountCostLimit => { + saturating_add_assign!( + batched_transaction_details + .errors + .batched_retried_txs_per_account_limit_count, + 1 + ); + } + TransactionError::WouldExceedAccountDataBlockLimit => { + saturating_add_assign!( + batched_transaction_details + .errors + .batched_retried_txs_per_account_data_block_limit_count, + 1 + ); + } + TransactionError::WouldExceedAccountDataTotalLimit => { + saturating_add_assign!( + batched_transaction_details + .errors + .batched_dropped_txs_per_account_data_total_limit_count, + 1 + ); + } + _ => {} + }, + }); + batched_transaction_details + } + + /// Calculates QoS and reserves compute space for the bundle. If the bundle succeeds, commits + /// the results to the cost tracker. If the bundle fails, rolls back any QoS changes made. 
+ /// Ensure that SanitizedBundle was returned by BundleAccountLocker to avoid parallelism issues + /// with banking stage + fn update_qos_and_execute_record_commit_bundle( + sanitized_bundle: &SanitizedBundle, + recorder: &TransactionRecorder, + transaction_status_sender: &Option, + gossip_vote_sender: &ReplayVoteSender, + qos_service: &QosService, + bank_start: &BankStart, + bundle_stage_leader_stats: &mut BundleStageLeaderStats, + max_bundle_retry_duration: &Duration, + reserved_space: &mut BundleReservedSpace, + ) -> BundleStageResult<()> { + if sanitized_bundle.transactions.is_empty() { + return Ok(()); + } + + // Try to fit bundle into block using modified limits (scoped to guarantee cost_tracker is dropped) + let (tx_costs, transactions_qos_results, num_included) = { + let tx_costs = + qos_service.compute_transaction_costs(sanitized_bundle.transactions.iter()); + + let mut cost_tracker = bank_start.working_bank.write_cost_tracker().unwrap(); + + // Increase block cost limit for bundles + debug!( + "increasing cost limit for bundles: {}", + reserved_space.bundle_block_limit() + ); + cost_tracker.set_block_cost_limit(reserved_space.bundle_block_limit()); + let (transactions_qos_results, num_included) = qos_service + .select_transactions_per_cost( + sanitized_bundle.transactions.iter(), + tx_costs.iter(), + bank_start.working_bank.slot(), + &mut cost_tracker, + ); + debug!( + "resetting cost limit for normal transactions: {}", + reserved_space.tx_block_limit() + ); + + // Reset block cost limit for normal txs + cost_tracker.set_block_cost_limit(reserved_space.tx_block_limit()); + + (tx_costs, transactions_qos_results, num_included) + }; + + // accumulates QoS to metrics + qos_service.accumulate_estimated_transaction_costs( + &Self::accumulate_batched_transaction_costs( + tx_costs.iter(), + transactions_qos_results.iter(), + ), + ); + + // qos rate-limited a tx in here, drop the bundle + if sanitized_bundle.transactions.len() != num_included { + QosService::remove_transaction_costs( + tx_costs.iter(), + transactions_qos_results.iter(), + &bank_start.working_bank, + ); + warn!( + "bundle dropped, qos rate limit. bundle_id: {} bundle_cost: {}, block_cost: {}", + sanitized_bundle.bundle_id, + tx_costs.iter().map(|c| c.sum()).sum::(), + &bank_start + .working_bank + .read_cost_tracker() + .unwrap() + .block_cost() + ); + return Err(BundleExecutionError::ExceedsCostModel); + } + + match Self::execute_record_commit_bundle( + sanitized_bundle, + recorder, + transaction_status_sender, + gossip_vote_sender, + bank_start, + bundle_stage_leader_stats, + max_bundle_retry_duration, + ) { + Ok(commit_transaction_details) => { + // NOTE: Assumptions made on the QoS transaction costs: + // - commit_transaction_details are returned in the same ordering as the transactions + // in the sanitized_bundle, which is the same ordering as tx_costs. + // - all contents in the bundle are committed (it's executed all or nothing). + // When fancier execution algorithms are made that may execute transactions out of + // order (but resulting in same result as if they were executed sequentially), or + // allow failures in bundles, one should revisit this and the code that returns + // commit_transaction_details. 
+ QosService::update_or_remove_transaction_costs( + tx_costs.iter(), + transactions_qos_results.iter(), + Some(&commit_transaction_details), + &bank_start.working_bank, + ); + let (cu, us) = Self::accumulate_execute_units_and_time( + &bundle_stage_leader_stats + .execute_and_commit_timings() + .execute_timings, + ); + qos_service.accumulate_actual_execute_cu(cu); + qos_service.accumulate_actual_execute_time(us); + qos_service.report_metrics(bank_start.working_bank.clone()); + Ok(()) + } + Err(e) => { + QosService::remove_transaction_costs( + tx_costs.iter(), + transactions_qos_results.iter(), + &bank_start.working_bank, + ); + qos_service.report_metrics(bank_start.working_bank.clone()); + Err(e) + } + } + } + + fn execute_bundle( + sanitized_bundle: &SanitizedBundle, + transaction_status_sender: &Option, + bank_start: &BankStart, + bundle_stage_leader_stats: &mut BundleStageLeaderStats, + max_bundle_retry_duration: &Duration, + ) -> BundleStageResult> { + let mut account_overrides = AccountOverrides::default(); + + let mut execution_results = Vec::new(); + let mut mint_decimals: HashMap = HashMap::new(); + + let BankStart { + working_bank: bank, + bank_creation_time, + } = bank_start; + + let mut chunk_start = 0; + let start_time = Instant::now(); + while chunk_start != sanitized_bundle.transactions.len() { + if !Bank::should_bank_still_be_processing_txs(bank_creation_time, bank.ns_per_slot) { + return Err(BundleExecutionError::PohMaxHeightError); + } + + // ************************************************************************ + // Build a TransactionBatch that ensures transactions in the bundle + // are executed sequentially. + // NOTE: The TransactionBatch is dropped before the results are committed, which + // would normally open up race conditions between this stage and BankingStage where + // a transaction here could read and execute state on a transaction and BankingStage + // could read-execute-store, invalidating the state produced by the bundle. + // Assuming the SanitizedBundle was locked with the BundleAccountLocker, that race + // condition shall be prevented as it holds an extra set of locks until the entire + // bundle is processed. + // ************************************************************************ + let chunk_end = std::cmp::min(sanitized_bundle.transactions.len(), chunk_start + 128); + let chunk = &sanitized_bundle.transactions[chunk_start..chunk_end]; + let batch = bank.prepare_sequential_sanitized_batch_with_results(chunk, None); + + // Ensures that bundle lock results only return either: + // Ok(()) + // Err(TransactionError::AccountInUse) + // Err(TransactionError::BundleNotContinuous) + // In any unexpected failure case, the bundle can't be executed. + // NOTE: previous logging around batch here caused issues with + // unit tests failing due to PoH hitting max height. Unknown why. Be advised.
+ if let Some((e, _)) = check_bundle_lock_results(batch.lock_results()) { + return Err(e.into()); + } + + let ((pre_balances, pre_token_balances), collect_balances_elapsed) = measure!( + Self::collect_balances( + bank, + &batch, + &account_overrides, + transaction_status_sender, + &mut mint_decimals, + ), + "collect_balances", + ); + saturating_add_assign!( + bundle_stage_leader_stats + .execute_and_commit_timings() + .collect_balances_us, + collect_balances_elapsed.as_us() + ); + + let (mut load_and_execute_transactions_output, load_execute_time) = measure!( + bank.load_and_execute_transactions( + &batch, + MAX_PROCESSING_AGE, + transaction_status_sender.is_some(), + transaction_status_sender.is_some(), + transaction_status_sender.is_some(), + &mut bundle_stage_leader_stats + .execute_and_commit_timings() + .execute_timings, + Some(&account_overrides), + None, + ), + "load_execute", + ); + + saturating_add_assign!( + bundle_stage_leader_stats + .execute_and_commit_timings() + .load_execute_us, + load_execute_time.as_us() + ); + bundle_stage_leader_stats + .transaction_errors() + .accumulate(&load_and_execute_transactions_output.error_counters); + + debug!( + "execution results: {:?}", + load_and_execute_transactions_output.execution_results + ); + // Return error if executed and failed or didn't execute because of an unexpected reason. + // The only acceptable reasons for not executing would be failure to lock errors from: + // Ok(()) + // Err(TransactionError::AccountInUse) + // Err(TransactionError::BundleNotContinuous) + // If there's another error (AlreadyProcessed, InsufficientFundsForFee, etc.), bail out + if let Err((e, _)) = TransactionExecutionResult::check_bundle_execution_results( + load_and_execute_transactions_output + .execution_results + .as_slice(), + batch.sanitized_transactions(), + ) { + debug!("execution error"); + bundle_stage_leader_stats + .bundle_stage_stats() + .increment_num_execution_failures(1); + + return Err(e); + } + + // The errors have been checked above, now check to see if any were executed at all + // If none were executed, check to see if the bundle timed out and if so, return timeout + // error + if !load_and_execute_transactions_output + .execution_results + .iter() + .any(|r| r.was_executed()) + { + debug!("retrying bundle"); + + let bundle_execution_elapsed = start_time.elapsed(); + if bundle_execution_elapsed >= *max_bundle_retry_duration { + warn!("bundle timed out: {:?}", sanitized_bundle); + bundle_stage_leader_stats + .bundle_stage_stats() + .increment_num_execution_timeouts(1); + return Err(BundleExecutionError::MaxRetriesExceeded( + bundle_execution_elapsed, + )); + } + + bundle_stage_leader_stats + .bundle_stage_stats() + .increment_num_execution_retries(1); + continue; + } + + // ********************************************************************************* + // Cache results so next iterations of bundle execution can load cached state + // instead of using AccountsDB which contains stale execution data. 
+ // ********************************************************************************* + Self::cache_accounts( + bank, + batch.sanitized_transactions(), + &load_and_execute_transactions_output.execution_results, + &mut load_and_execute_transactions_output.loaded_transactions, + &mut account_overrides, + ); + + let ((post_balances, post_token_balances), collect_balances_elapsed) = measure!( + Self::collect_balances( + bank, + &batch, + &account_overrides, + transaction_status_sender, + &mut mint_decimals, + ), + "collect_balances", + ); + + saturating_add_assign!( + bundle_stage_leader_stats + .execute_and_commit_timings() + .collect_balances_us, + collect_balances_elapsed.as_us() + ); + + execution_results.push(AllExecutionResults { + load_and_execute_tx_output: load_and_execute_transactions_output, + sanitized_txs: batch.sanitized_transactions().to_vec(), + pre_balances: (pre_balances, pre_token_balances), + post_balances: (post_balances, post_token_balances), + }); + + // start at the next available transaction in the batch that threw an error + let processing_end = batch.lock_results().iter().position(|lr| lr.is_err()); + if let Some(end) = processing_end { + chunk_start += end; + } else { + chunk_start = chunk_end; + } + + drop(batch); + } + Ok(execution_results) + } + + /// Executes a bundle, where all transactions in the bundle are executed all-or-nothing. + /// Executes all transactions until the end or the first failure. The account state between + /// iterations is cached to a temporary HashMap to be used on successive runs. + #[allow(clippy::too_many_arguments)] + fn execute_record_commit_bundle( + sanitized_bundle: &SanitizedBundle, + recorder: &TransactionRecorder, + transaction_status_sender: &Option, + gossip_vote_sender: &ReplayVoteSender, + bank_start: &BankStart, + bundle_stage_leader_stats: &mut BundleStageLeaderStats, + max_bundle_retry_duration: &Duration, + ) -> BundleStageResult> { + let execution_results = Self::execute_bundle( + sanitized_bundle, + transaction_status_sender, + bank_start, + bundle_stage_leader_stats, + max_bundle_retry_duration, + )?; + // in order for the bundle to succeed, it must have something to record + commit + assert!(!execution_results.is_empty()); + + Self::record_commit_bundle( + execution_results, + &bank_start.working_bank, + recorder, + bundle_stage_leader_stats, + transaction_status_sender, + gossip_vote_sender, + ) + } + + /// Records the entire bundle to PoH and if successful, commits all transactions to the Bank + /// Note that the BundleAccountLocker still has a lock on these accounts in the bank + fn record_commit_bundle( + execution_results: Vec, + bank: &Arc, + recorder: &TransactionRecorder, + bundle_stage_leader_stats: &mut BundleStageLeaderStats, + transaction_status_sender: &Option, + gossip_vote_sender: &ReplayVoteSender, + ) -> BundleStageResult> { + // ********************************************************************************* + // All transactions are executed in the bundle. + // Record to PoH and send the saved execution results to the Bank.
+ // Note: Ensure that bank.commit_transactions is called on a per-batch basis and + // not all together + // ********************************************************************************* + debug!("grabbing freeze lock"); + let (_freeze_lock, freeze_lock_time) = measure!(bank.freeze_lock(), "freeze_lock"); + saturating_add_assign!( + bundle_stage_leader_stats + .execute_and_commit_timings() + .freeze_lock_us, + freeze_lock_time.as_us() + ); + + let (slot, mixins) = Self::prepare_poh_record_bundle( + &bank.slot(), + &execution_results, + &mut bundle_stage_leader_stats + .execute_and_commit_timings() + .record_transactions_timings, + ); + + debug!("recording bundle"); + let (mut transaction_index, record_elapsed) = measure!( + Self::try_record(recorder, slot, mixins) + .map_err(|e| { + error!("error recording bundle: {:?}", e); + e + })? + .unwrap_or_default(), + "record_elapsed" + ); + debug!("bundle recorded"); + + saturating_add_assign!( + bundle_stage_leader_stats + .execute_and_commit_timings() + .record_us, + record_elapsed.as_us() + ); + bundle_stage_leader_stats + .execute_and_commit_timings() + .record_transactions_timings + .accumulate(&RecordTransactionsTimings { + execution_results_to_transactions_us: 0, + hash_us: 0, + poh_record_us: record_elapsed.as_us(), + }); + + let mut commit_transaction_details = Vec::new(); + for r in execution_results { + let mut output = r.load_and_execute_tx_output; + let sanitized_txs = r.sanitized_txs; + + let (last_blockhash, lamports_per_signature) = + bank.last_blockhash_and_lamports_per_signature(); + + let (transaction_results, commit_elapsed) = measure!( + bank.commit_transactions( + &sanitized_txs, + &mut output.loaded_transactions, + output.execution_results.clone(), + last_blockhash, + lamports_per_signature, + CommitTransactionCounts { + committed_transactions_count: output.executed_transactions_count as u64, + committed_with_failure_result_count: output + .executed_transactions_count + .saturating_sub(output.executed_with_successful_result_count) + as u64, + signature_count: output.signature_count, + }, + &mut bundle_stage_leader_stats + .execute_and_commit_timings() + .execute_timings, + ), + "commit_elapsed" + ); + saturating_add_assign!( + bundle_stage_leader_stats + .execute_and_commit_timings() + .commit_us, + commit_elapsed.as_us() + ); + + let (_, find_and_send_votes_elapsed) = measure!( + { + bank_utils::find_and_send_votes( + &sanitized_txs, + &transaction_results, + Some(gossip_vote_sender), + ); + if let Some(transaction_status_sender) = transaction_status_sender { + let batch_transaction_indexes: Vec<_> = transaction_results + .execution_results + .iter() + .map(|result| { + if result.was_executed() { + let this_transaction_index = transaction_index; + saturating_add_assign!(transaction_index, 1); + this_transaction_index + } else { + 0 + } + }) + .collect(); + transaction_status_sender.send_transaction_status_batch( + bank.clone(), + sanitized_txs, + output.execution_results, + TransactionBalancesSet::new(r.pre_balances.0, r.post_balances.0), + TransactionTokenBalancesSet::new(r.pre_balances.1, r.post_balances.1), + transaction_results.rent_debits.clone(), + batch_transaction_indexes, + ); + } + }, + "find_and_send_votes", + ); + + saturating_add_assign!( + bundle_stage_leader_stats + .execute_and_commit_timings() + .find_and_send_votes_us, + find_and_send_votes_elapsed.as_us() + ); + + for tx_results in transaction_results.execution_results { + if let Some(details) = tx_results.details() { + 
commit_transaction_details.push(CommitTransactionDetails::Committed {
+                        compute_units: details.executed_units,
+                    });
+                }
+            }
+        }
+
+        Ok(commit_transaction_details)
+    }
+
+    /// Returns true if any of the transactions in a bundle mention one of the tip PDAs
+    fn bundle_touches_tip_pdas(
+        transactions: &[SanitizedTransaction],
+        tip_pdas: &HashSet<Pubkey>,
+    ) -> bool {
+        transactions.iter().any(|tx| {
+            tx.message()
+                .account_keys()
+                .iter()
+                .any(|a| tip_pdas.contains(a))
+        })
+    }
+
+    fn accumulate_execute_units_and_time(execute_timings: &ExecuteTimings) -> (u64, u64) {
+        let (units, times): (Vec<_>, Vec<_>) = execute_timings
+            .details
+            .per_program_timings
+            .iter()
+            .map(|(_program_id, program_timings)| {
+                (
+                    program_timings.accumulated_units,
+                    program_timings.accumulated_us,
+                )
+            })
+            .unzip();
+        (units.iter().sum(), times.iter().sum())
+    }
+
+    /// Stores execution output in the AccountOverrides cache so later batches in the
+    /// bundle load intra-bundle state instead of stale AccountsDB state.
+    fn cache_accounts(
+        bank: &Arc<Bank>,
+        txs: &[SanitizedTransaction],
+        res: &[TransactionExecutionResult],
+        loaded: &mut [TransactionLoadResult],
+        cached_accounts: &mut AccountOverrides,
+    ) {
+        let accounts = bank.collect_accounts_to_store(txs, res, loaded);
+        for (pubkey, data) in accounts {
+            cached_accounts.set_account(pubkey, Some(data.clone()));
+        }
+    }
+
+    fn collect_balances(
+        bank: &Arc<Bank>,
+        batch: &TransactionBatch,
+        cached_accounts: &AccountOverrides,
+        transaction_status_sender: &Option<TransactionStatusSender>,
+        mint_decimals: &mut HashMap<Pubkey, u8>,
+    ) -> (TransactionBalances, TransactionTokenBalances) {
+        if transaction_status_sender.is_some() {
+            let balances = bank.collect_balances_with_cache(batch, Some(cached_accounts));
+            let token_balances =
+                collect_token_balances(bank, batch, mint_decimals, Some(cached_accounts));
+            (balances, token_balances)
+        } else {
+            (vec![], vec![])
+        }
+    }
+
+    /// When executed the first time, there are some accounts that need to be initialized.
+    /// This is only helpful for local testing; on testnet and mainnet these will never be executed.
+    /// TODO (LB): consider removing this for mainnet/testnet and move to program deployment?
+    fn get_initialize_tip_accounts_transactions(
+        bank: &Bank,
+        tip_manager: &TipManager,
+        cluster_info: &Arc<ClusterInfo>,
+    ) -> BundleStageResult<Vec<SanitizedTransaction>> {
+        let maybe_init_tip_payment_config_tx =
+            if tip_manager.should_initialize_tip_payment_program(bank) {
+                info!("building initialize_tip_payment_program_tx");
+                Some(tip_manager.initialize_tip_payment_program_tx(
+                    bank.last_blockhash(),
+                    &cluster_info.keypair(),
+                ))
+            } else {
+                None
+            };
+
+        let maybe_init_tip_distro_config_tx =
+            if tip_manager.should_initialize_tip_distribution_config(bank) {
+                info!("building initialize_tip_distribution_config_tx");
+                Some(
+                    tip_manager
+                        .initialize_tip_distribution_config_tx(bank.last_blockhash(), cluster_info),
+                )
+            } else {
+                None
+            };
+
+        let maybe_init_tip_distro_account_tx =
+            if tip_manager.should_init_tip_distribution_account(bank) {
+                info!("building initialize_tip_distribution_account tx");
+                Some(tip_manager.initialize_tip_distribution_account_tx(
+                    bank.last_blockhash(),
+                    bank.epoch(),
+                    cluster_info,
+                ))
+            } else {
+                None
+            };
+
+        let transactions = [
+            maybe_init_tip_payment_config_tx,
+            maybe_init_tip_distro_config_tx,
+            maybe_init_tip_distro_account_tx,
+        ]
+        .into_iter()
+        .flatten()
+        .collect::<Vec<SanitizedTransaction>>();
+
+        Ok(transactions)
+    }
+
+    /// Execute all unprocessed bundles until none are left or the POH max tick height is reached.
+    /// For any bundles that didn't execute because the POH max tick height was reached,
+    /// push them back onto unprocessed_bundles so they can be retried.
+    #[allow(clippy::too_many_arguments)]
+    fn execute_bundles_until_empty_or_end_of_slot(
+        bundle_account_locker: &BundleAccountLocker,
+        unprocessed_bundles: &mut VecDeque<PacketBundle>,
+        cost_model_failed_bundles: &mut VecDeque<PacketBundle>,
+        blacklisted_accounts: &HashSet<Pubkey>,
+        bank_start: &BankStart,
+        consensus_accounts_cache: &HashSet<Pubkey>,
+        cluster_info: &Arc<ClusterInfo>,
+        recorder: &TransactionRecorder,
+        transaction_status_sender: &Option<TransactionStatusSender>,
+        gossip_vote_sender: &ReplayVoteSender,
+        qos_service: &QosService,
+        tip_manager: &TipManager,
+        max_bundle_retry_duration: &Duration,
+        last_tip_update_slot: &mut Slot,
+        bundle_stage_leader_stats: &mut BundleStageLeaderStats,
+        block_builder_fee_info: &Arc<Mutex<BlockBuilderFeeInfo>>,
+        reserved_space: &mut BundleReservedSpace,
+    ) {
+        let (sanitized_bundles, sanitized_bundle_elapsed) = measure!(
+            unprocessed_bundles
+                .drain(..)
+                .filter_map(|packet_bundle| {
+                    match get_sanitized_bundle(
+                        &packet_bundle,
+                        &bank_start.working_bank,
+                        consensus_accounts_cache,
+                        blacklisted_accounts,
+                        bundle_stage_leader_stats.transaction_errors(),
+                    ) {
+                        Ok(sanitized_bundle) => {
+                            bundle_stage_leader_stats
+                                .bundle_stage_stats()
+                                .increment_sanitize_transaction_ok(1);
+                            Some((packet_bundle, sanitized_bundle))
+                        }
+                        Err(BundleSanitizerError::VoteOnlyMode) => {
+                            bundle_stage_leader_stats
+                                .bundle_stage_stats()
+                                .increment_sanitize_transaction_vote_only_mode(1);
+                            None
+                        }
+                        Err(BundleSanitizerError::FailedPacketBatchPreCheck) => {
+                            bundle_stage_leader_stats
+                                .bundle_stage_stats()
+                                .increment_sanitize_transaction_failed_precheck(1);
+                            None
+                        }
+                        Err(BundleSanitizerError::BlacklistedAccount) => {
+                            bundle_stage_leader_stats
+                                .bundle_stage_stats()
+                                .increment_sanitize_transaction_blacklisted_account(1);
+                            None
+                        }
+                        Err(BundleSanitizerError::FailedToSerializeTransaction) => {
+                            bundle_stage_leader_stats
+                                .bundle_stage_stats()
+                                .increment_sanitize_transaction_failed_to_serialize(1);
+                            None
+                        }
+                        Err(BundleSanitizerError::DuplicateTransaction) => {
+                            bundle_stage_leader_stats
+                                .bundle_stage_stats()
+                                .increment_sanitize_transaction_duplicate_transaction(1);
+                            None
+                        }
+                        Err(BundleSanitizerError::FailedCheckTransactions) => {
+                            bundle_stage_leader_stats
+                                .bundle_stage_stats()
+                                .increment_sanitize_transaction_failed_check(1);
+                            None
+                        }
+                    }
+                })
+                .collect::<Vec<(PacketBundle, SanitizedBundle)>>(),
+            "sanitized_bundle_elapsed"
+        );
+        bundle_stage_leader_stats
+            .bundle_stage_stats()
+            .increment_sanitize_bundle_elapsed_us(sanitized_bundle_elapsed.as_us());
+
+        // Prepare locked bundles, which will RW lock accounts in sanitized_bundles so
+        // BankingStage can't lock them. This adds a layer of protection since a transaction in a bundle
+        // will not hold the AccountLocks through TransactionBatch across the load-execute-commit cycle.
+ // We collect here to ensure that all of the bundles are locked ahead of time for priority over + // BankingStage + #[allow(clippy::needless_collect)] + let (locked_bundles, locked_bundles_elapsed) = measure!( + sanitized_bundles + .iter() + .map(|(_, sanitized_bundle)| { + bundle_account_locker + .prepare_locked_bundle(sanitized_bundle, &bank_start.working_bank) + }) + .collect::>>(), + "locked_bundles_elapsed" + ); + bundle_stage_leader_stats + .bundle_stage_stats() + .increment_locked_bundle_elapsed_us(locked_bundles_elapsed.as_us()); + + let (execution_results, execute_locked_bundles_elapsed) = measure!( + Self::execute_locked_bundles( + bundle_account_locker, + locked_bundles, + bank_start, + cluster_info, + recorder, + transaction_status_sender, + gossip_vote_sender, + qos_service, + tip_manager, + max_bundle_retry_duration, + last_tip_update_slot, + bundle_stage_leader_stats, + block_builder_fee_info, + reserved_space, + ), + "execute_locked_bundles_elapsed" + ); + + bundle_stage_leader_stats + .bundle_stage_stats() + .increment_execute_locked_bundles_elapsed_us(execute_locked_bundles_elapsed.as_us()); + + execution_results + .into_iter() + .zip(sanitized_bundles.into_iter()) + .for_each( + |(bundle_execution_result, (packet_bundle, _))| match bundle_execution_result { + Ok(_) => { + bundle_stage_leader_stats + .bundle_stage_stats() + .increment_execution_results_ok(1); + } + Err(BundleExecutionError::PohMaxHeightError) => { + bundle_stage_leader_stats + .bundle_stage_stats() + .increment_execution_results_poh_max_height(1); + // retry the bundle + unprocessed_bundles.push_back(packet_bundle); + } + Err(BundleExecutionError::TransactionFailure(_)) => { + bundle_stage_leader_stats + .bundle_stage_stats() + .increment_execution_results_transaction_failures(1); + } + Err(BundleExecutionError::ExceedsCostModel) => { + bundle_stage_leader_stats + .bundle_stage_stats() + .increment_execution_results_exceeds_cost_model(1); + // retry the bundle + cost_model_failed_bundles.push_back(packet_bundle); + } + Err(BundleExecutionError::TipError(_)) => { + bundle_stage_leader_stats + .bundle_stage_stats() + .increment_execution_results_tip_errors(1); + } + Err(BundleExecutionError::Shutdown) => {} + Err(BundleExecutionError::MaxRetriesExceeded(_)) => { + bundle_stage_leader_stats + .bundle_stage_stats() + .increment_execution_results_max_retries(1); + } + Err(BundleExecutionError::LockError) => { + bundle_stage_leader_stats + .bundle_stage_stats() + .increment_execution_results_lock_errors(1); + } + }, + ); + } + + /// This only needs to be done once on program initialization + /// TODO (LB): may make sense to remove this and move to program deployment instead, but helpful + /// during development + #[allow(clippy::too_many_arguments)] + fn maybe_initialize_tip_accounts( + bundle_account_locker: &BundleAccountLocker, + bank_start: &BankStart, + cluster_info: &Arc, + recorder: &TransactionRecorder, + transaction_status_sender: &Option, + gossip_vote_sender: &ReplayVoteSender, + qos_service: &QosService, + tip_manager: &TipManager, + max_bundle_retry_duration: &Duration, + bundle_stage_leader_stats: &mut BundleStageLeaderStats, + reserved_space: &mut BundleReservedSpace, + ) -> BundleStageResult<()> { + let initialize_tip_accounts_bundle = SanitizedBundle { + transactions: Self::get_initialize_tip_accounts_transactions( + &bank_start.working_bank, + tip_manager, + cluster_info, + )?, + bundle_id: String::default(), + }; + if !initialize_tip_accounts_bundle.transactions.is_empty() { + 
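+            // NOTE (review): per the doc comment on get_initialize_tip_accounts_transactions,
+            // this branch should only be taken on fresh local/test clusters; on testnet and
+            // mainnet the tip accounts already exist, so the bundle above is empty.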
debug!("initialize tip account"); + + let locked_init_tip_bundle = bundle_account_locker + .prepare_locked_bundle(&initialize_tip_accounts_bundle, &bank_start.working_bank) + .map_err(|_| BundleExecutionError::LockError)?; + let result = Self::update_qos_and_execute_record_commit_bundle( + locked_init_tip_bundle.sanitized_bundle(), + recorder, + transaction_status_sender, + gossip_vote_sender, + qos_service, + bank_start, + bundle_stage_leader_stats, + max_bundle_retry_duration, + reserved_space, + ); + + match &result { + Ok(_) => { + debug!("initialize tip account: success"); + bundle_stage_leader_stats + .bundle_stage_stats() + .increment_num_init_tip_account_ok(1); + } + Err(e) => { + error!("initialize tip account error: {:?}", e); + bundle_stage_leader_stats + .bundle_stage_stats() + .increment_num_init_tip_account_errors(1); + } + } + result + } else { + Ok(()) + } + } + + /// change tip receiver, draining tips to the previous tip_receiver in the process + /// note that this needs to happen after the above tip-related bundle initializes + /// config accounts because get_configured_tip_receiver relies on an account + /// existing in the bank + #[allow(clippy::too_many_arguments)] + fn maybe_change_tip_receiver( + bundle_account_locker: &BundleAccountLocker, + bank_start: &BankStart, + cluster_info: &Arc, + recorder: &TransactionRecorder, + transaction_status_sender: &Option, + gossip_vote_sender: &ReplayVoteSender, + qos_service: &QosService, + tip_manager: &TipManager, + max_bundle_retry_duration: &Duration, + bundle_stage_leader_stats: &mut BundleStageLeaderStats, + block_builder_fee_info: &Arc>, + reserved_space: &mut BundleReservedSpace, + ) -> BundleStageResult<()> { + let start_handle_tips = Instant::now(); + + let configured_tip_receiver = + tip_manager.get_configured_tip_receiver(&bank_start.working_bank)?; + let my_tip_distribution_pda = + tip_manager.get_my_tip_distribution_pda(bank_start.working_bank.epoch()); + if configured_tip_receiver != my_tip_distribution_pda { + info!( + "changing tip receiver from {} to {}", + configured_tip_receiver, my_tip_distribution_pda + ); + + let bb_info = block_builder_fee_info.lock().unwrap(); + let change_tip_receiver_tx = tip_manager.change_tip_receiver_and_block_builder_tx( + &my_tip_distribution_pda, + &bank_start.working_bank, + &cluster_info.keypair(), + &bb_info.block_builder, + bb_info.block_builder_commission, + )?; + + let change_tip_receiver_bundle = SanitizedBundle { + transactions: vec![change_tip_receiver_tx], + bundle_id: String::default(), + }; + let locked_change_tip_receiver_bundle = bundle_account_locker + .prepare_locked_bundle(&change_tip_receiver_bundle, &bank_start.working_bank) + .map_err(|_| BundleExecutionError::LockError)?; + let result = Self::update_qos_and_execute_record_commit_bundle( + locked_change_tip_receiver_bundle.sanitized_bundle(), + recorder, + transaction_status_sender, + gossip_vote_sender, + qos_service, + bank_start, + bundle_stage_leader_stats, + max_bundle_retry_duration, + reserved_space, + ); + + bundle_stage_leader_stats + .bundle_stage_stats() + .increment_change_tip_receiver_elapsed_us( + start_handle_tips.elapsed().as_micros() as u64 + ); + + match &result { + Ok(_) => { + debug!("change tip receiver: success"); + bundle_stage_leader_stats + .bundle_stage_stats() + .increment_num_change_tip_receiver_ok(1); + } + Err(e) => { + error!("change tip receiver: error {:?}", e); + bundle_stage_leader_stats + .bundle_stage_stats() + .increment_num_change_tip_receiver_errors(1); + } + } + result + 
} else {
+            Ok(())
+        }
+    }
+
+    #[allow(clippy::too_many_arguments)]
+    fn execute_locked_bundles(
+        bundle_account_locker: &BundleAccountLocker,
+        locked_bundles: Vec<BundleAccountLockerResult<LockedBundle>>,
+        bank_start: &BankStart,
+        cluster_info: &Arc<ClusterInfo>,
+        recorder: &TransactionRecorder,
+        transaction_status_sender: &Option<TransactionStatusSender>,
+        gossip_vote_sender: &ReplayVoteSender,
+        qos_service: &QosService,
+        tip_manager: &TipManager,
+        max_bundle_retry_duration: &Duration,
+        last_tip_update_slot: &mut Slot,
+        bundle_stage_leader_stats: &mut BundleStageLeaderStats,
+        block_builder_fee_info: &Arc<Mutex<BlockBuilderFeeInfo>>,
+        reserved_space: &mut BundleReservedSpace,
+    ) -> Vec<BundleStageResult<()>> {
+        let tip_pdas = tip_manager.get_tip_accounts();
+
+        // make sure each locked_bundle is dropped after processing to unlock BankingStage
+        locked_bundles
+            .into_iter()
+            .map(|maybe_locked_bundle| {
+                let locked_bundle = maybe_locked_bundle.as_ref().map_err(|_| {
+                    bundle_stage_leader_stats
+                        .bundle_stage_stats()
+                        .increment_num_lock_errors(1);
+
+                    BundleExecutionError::LockError
+                })?;
+
+                if !Bank::should_bank_still_be_processing_txs(
+                    &bank_start.bank_creation_time,
+                    bank_start.working_bank.ns_per_slot,
+                ) {
+                    Err(BundleExecutionError::PohMaxHeightError)
+                } else {
+                    let sanitized_bundle = locked_bundle.sanitized_bundle();
+
+                    // tip-related housekeeping runs at most once per slot, and only when a
+                    // bundle actually touches the tip PDAs
+                    if Self::bundle_touches_tip_pdas(&sanitized_bundle.transactions, &tip_pdas)
+                        && bank_start.working_bank.slot() != *last_tip_update_slot
+                    {
+                        Self::maybe_initialize_tip_accounts(
+                            bundle_account_locker,
+                            bank_start,
+                            cluster_info,
+                            recorder,
+                            transaction_status_sender,
+                            gossip_vote_sender,
+                            qos_service,
+                            tip_manager,
+                            max_bundle_retry_duration,
+                            bundle_stage_leader_stats,
+                            reserved_space,
+                        )?;
+
+                        Self::maybe_change_tip_receiver(
+                            bundle_account_locker,
+                            bank_start,
+                            cluster_info,
+                            recorder,
+                            transaction_status_sender,
+                            gossip_vote_sender,
+                            qos_service,
+                            tip_manager,
+                            max_bundle_retry_duration,
+                            bundle_stage_leader_stats,
+                            block_builder_fee_info,
+                            reserved_space,
+                        )?;
+
+                        *last_tip_update_slot = bank_start.working_bank.slot();
+                    }
+
+                    Self::update_qos_and_execute_record_commit_bundle(
+                        sanitized_bundle,
+                        recorder,
+                        transaction_status_sender,
+                        gossip_vote_sender,
+                        qos_service,
+                        bank_start,
+                        bundle_stage_leader_stats,
+                        max_bundle_retry_duration,
+                        reserved_space,
+                    )
+                }
+            })
+            .collect()
+    }
+
+    fn receive_and_buffer_bundles(
+        bundle_receiver: &Receiver<Vec<PacketBundle>>,
+        unprocessed_bundles: &mut VecDeque<PacketBundle>,
+        timeout: Duration,
+    ) -> Result<usize, RecvTimeoutError> {
+        let bundles = bundle_receiver.recv_timeout(timeout)?;
+        let num_bundles_before = unprocessed_bundles.len();
+        unprocessed_bundles.extend(bundles);
+        unprocessed_bundles.extend(bundle_receiver.try_iter().flatten());
+        let num_bundles_after = unprocessed_bundles.len();
+        Ok(num_bundles_after - num_bundles_before)
+    }
+
+    #[allow(clippy::too_many_arguments)]
+    fn process_buffered_bundles(
+        bundle_account_locker: &BundleAccountLocker,
+        unprocessed_bundles: &mut VecDeque<PacketBundle>,
+        cost_model_failed_bundles: &mut VecDeque<PacketBundle>,
+        blacklisted_accounts: &HashSet<Pubkey>,
+        consensus_cache_updater: &mut ConsensusCacheUpdater,
+        cluster_info: &Arc<ClusterInfo>,
+        recorder: &TransactionRecorder,
+        poh_recorder: &Arc<RwLock<PohRecorder>>,
+        transaction_status_sender: &Option<TransactionStatusSender>,
+        gossip_vote_sender: &ReplayVoteSender,
+        qos_service: &QosService,
+        tip_manager: &TipManager,
+        max_bundle_retry_duration: &Duration,
+        last_tip_update_slot: &mut u64,
+        bundle_stage_leader_stats: &mut BundleStageLeaderSlotTrackingMetrics,
+        bundle_stage_stats: &mut BundleStageLoopStats,
+        id: u32,
+        block_builder_fee_info: &Arc<Mutex<BlockBuilderFeeInfo>>,
+        reserved_space: &mut BundleReservedSpace,
+    ) {
+        const
DROP_BUNDLE_SLOT_OFFSET: u64 = 4; + + let r_poh_recorder = poh_recorder.read().unwrap(); + let poh_recorder_bank = r_poh_recorder.get_poh_recorder_bank(); + let working_bank_start = poh_recorder_bank.working_bank_start(); + let would_be_leader_soon = + r_poh_recorder.would_be_leader(DROP_BUNDLE_SLOT_OFFSET * DEFAULT_TICKS_PER_SLOT); + drop(r_poh_recorder); + + let last_slot = bundle_stage_leader_stats.current_slot; + bundle_stage_leader_stats.maybe_report(id, &working_bank_start); + + if !would_be_leader_soon { + saturating_add_assign!( + bundle_stage_stats.num_bundles_dropped, + unprocessed_bundles.len() as u64 + cost_model_failed_bundles.len() as u64 + ); + + unprocessed_bundles.clear(); + cost_model_failed_bundles.clear(); + return; + } + + // leader now, insert new read bundles + as many as can read then return bank + if let Some(bank_start) = working_bank_start { + consensus_cache_updater.maybe_update(&bank_start.working_bank); + + let is_new_slot = match (last_slot, bundle_stage_leader_stats.current_slot) { + (Some(last_slot), Some(current_slot)) => last_slot != current_slot, + (None, Some(_)) => true, + (_, _) => false, + }; + + if is_new_slot { + reserved_space.reset_reserved_cost(&bank_start.working_bank); + // Re-Buffer any bundles that didn't fit into last block + if !cost_model_failed_bundles.is_empty() { + info!( + "slot {}: re-buffering {} bundles that failed cost model.", + &bank_start.working_bank.slot(), + cost_model_failed_bundles.len() + ); + unprocessed_bundles.extend(cost_model_failed_bundles.drain(..)); + } + } else { + reserved_space.update_reserved_cost(&bank_start.working_bank); + } + + Self::execute_bundles_until_empty_or_end_of_slot( + bundle_account_locker, + unprocessed_bundles, + cost_model_failed_bundles, + blacklisted_accounts, + bank_start, + consensus_cache_updater.consensus_accounts_cache(), + cluster_info, + recorder, + transaction_status_sender, + gossip_vote_sender, + qos_service, + tip_manager, + max_bundle_retry_duration, + last_tip_update_slot, + bundle_stage_leader_stats.bundle_stage_leader_stats(), + block_builder_fee_info, + reserved_space, + ); + } + } + + #[allow(clippy::too_many_arguments)] + fn process_loop( + cluster_info: Arc, + poh_recorder: &Arc>, + transaction_status_sender: Option, + bundle_receiver: Receiver>, + gossip_vote_sender: ReplayVoteSender, + id: u32, + cost_model: Arc>, + exit: Arc, + tip_manager: TipManager, + bundle_account_locker: BundleAccountLocker, + max_bundle_retry_duration: Duration, + block_builder_fee_info: Arc>, + preallocated_bundle_cost: u64, + ) { + const LOOP_STATS_METRICS_PERIOD: Duration = Duration::from_secs(1); + + let ticks_per_slot = poh_recorder.read().unwrap().ticks_per_slot(); + let recorder = poh_recorder.read().unwrap().recorder(); + let qos_service = QosService::new(cost_model, id); + + // Bundles can't mention any accounts related to consensus + let mut consensus_cache_updater = ConsensusCacheUpdater::default(); + let mut last_tip_update_slot = Slot::default(); + + let mut last_leader_slots_update_time = Instant::now(); + let mut bundle_stage_leader_stats = BundleStageLeaderSlotTrackingMetrics::default(); + let mut bundle_stage_stats = BundleStageLoopStats::default(); + + // Bundles can't mention the tip payment program to ensure that a malicious entity doesn't + // steal tips mid-slot + let blacklisted_accounts = HashSet::from_iter([tip_manager.tip_payment_program_id()]); + + let mut unprocessed_bundles: VecDeque = VecDeque::with_capacity(1000); + let mut cost_model_failed_bundles: VecDeque = 
VecDeque::with_capacity(1000); + // Initialize block limits and open up last 20% of ticks to non-bundle transactions + let mut reserved_space = BundleReservedSpace { + current_bundle_block_limit: MAX_BLOCK_UNITS, + current_tx_block_limit: MAX_BLOCK_UNITS.saturating_sub(preallocated_bundle_cost), + initial_allocated_cost: preallocated_bundle_cost, + unreserved_ticks: ticks_per_slot.saturating_div(5), // 20% for non-bundles + }; + debug!( + "initialize bundled reserved space: {preallocated_bundle_cost} cu for {} ticks", + ticks_per_slot.saturating_sub(reserved_space.unreserved_ticks) + ); + + while !exit.load(Ordering::Relaxed) { + if !unprocessed_bundles.is_empty() + || last_leader_slots_update_time.elapsed() >= SLOT_BOUNDARY_CHECK_PERIOD + { + let (_, process_buffered_bundles_elapsed) = measure!( + Self::process_buffered_bundles( + &bundle_account_locker, + &mut unprocessed_bundles, + &mut cost_model_failed_bundles, + &blacklisted_accounts, + &mut consensus_cache_updater, + &cluster_info, + &recorder, + poh_recorder, + &transaction_status_sender, + &gossip_vote_sender, + &qos_service, + &tip_manager, + &max_bundle_retry_duration, + &mut last_tip_update_slot, + &mut bundle_stage_leader_stats, + &mut bundle_stage_stats, + id, + &block_builder_fee_info, + &mut reserved_space, + ), + "process_buffered_bundles_elapsed" + ); + + saturating_add_assign!( + bundle_stage_stats.process_buffered_bundles_elapsed_us, + process_buffered_bundles_elapsed.as_us() + ); + last_leader_slots_update_time = Instant::now(); + } + + bundle_stage_stats.maybe_report(id, LOOP_STATS_METRICS_PERIOD); + + // ensure bundle stage can run immediately if bundles to process, otherwise okay + // chilling for a few + let sleep_time = if !unprocessed_bundles.is_empty() { + Duration::from_millis(0) + } else { + Duration::from_millis(10) + }; + + let (res, receive_and_buffer_elapsed) = measure!( + Self::receive_and_buffer_bundles( + &bundle_receiver, + &mut unprocessed_bundles, + sleep_time, + ), + "receive_and_buffer_elapsed" + ); + saturating_add_assign!( + bundle_stage_stats.receive_and_buffer_bundles_elapsed_us, + receive_and_buffer_elapsed.as_us() + ); + + match res { + Ok(num_bundles_received) => { + saturating_add_assign!( + bundle_stage_stats.num_bundles_received, + num_bundles_received as u64 + ); + } + Err(RecvTimeoutError::Timeout) => {} + Err(RecvTimeoutError::Disconnected) => { + break; + } + } + } + } + + fn prepare_poh_record_bundle( + bank_slot: &Slot, + execution_results_txs: &[AllExecutionResults], + record_transactions_timings: &mut RecordTransactionsTimings, + ) -> (Slot, Vec<(Hash, Vec)>) { + let mut new_record_transaction_timings = RecordTransactionsTimings::default(); + + let mixins_txs = execution_results_txs + .iter() + .map(|r| { + let (processed_transactions, results_to_transactions_elapsed) = measure!( + { + r.load_and_execute_tx_output + .execution_results + .iter() + .zip(r.sanitized_txs.iter()) + .filter_map(|(execution_result, tx)| { + if execution_result.was_executed() { + Some(tx.to_versioned_transaction()) + } else { + None + } + }) + .collect::>() + }, + "results_to_transactions_elapsed" + ); + + let (hash, hash_elapsed) = measure!( + hash_transactions(&processed_transactions[..]), + "hash_elapsed" + ); + + saturating_add_assign!( + new_record_transaction_timings.execution_results_to_transactions_us, + results_to_transactions_elapsed.as_us() + ); + saturating_add_assign!( + new_record_transaction_timings.hash_us, + hash_elapsed.as_us() + ); + + (hash, processed_transactions) + }) + .collect(); + 
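+        // NOTE (review): each executed batch contributes one (entry hash, transactions)
+        // mixin; all mixins are handed to the recorder in a single record() call below so
+        // the bundle's entries are recorded together for the slot.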
+ record_transactions_timings.accumulate(&new_record_transaction_timings); + + (*bank_slot, mixins_txs) + } + + pub fn join(self) -> thread::Result<()> { + self.bundle_thread.join() + } + + fn try_record( + recorder: &TransactionRecorder, + bank_slot: Slot, + mixins_txs: Vec<(Hash, Vec)>, + ) -> BundleStageResult> { + match recorder.record(bank_slot, mixins_txs) { + Ok(maybe_tx_index) => Ok(maybe_tx_index), + Err(PohRecorderError::MaxHeightReached) => Err(BundleExecutionError::PohMaxHeightError), + Err(e) => panic!("Poh recorder returned unexpected error: {:?}", e), + } + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + crate::bundle_stage::tests::TestOption::{ + AssertDuplicateInBundleDropped, AssertNonZeroCostModel, AssertZeroedCostModel, + LowComputeBudget, + }, + crossbeam_channel::unbounded, + solana_ledger::{ + blockstore::Blockstore, + genesis_utils::{create_genesis_config, GenesisConfigInfo}, + get_tmp_ledger_path_auto_delete, + }, + solana_perf::packet::PacketBatch, + solana_poh::poh_recorder::create_test_recorder, + solana_sdk::{ + bundle::{ + error::BundleExecutionError::{ + ExceedsCostModel, PohMaxHeightError, TransactionFailure, + }, + sanitized::derive_bundle_id, + }, + compute_budget::ComputeBudgetInstruction, + genesis_config::GenesisConfig, + instruction::InstructionError, + message::Message, + packet::Packet, + poh_config::PohConfig, + signature::{Keypair, Signer}, + system_instruction, + system_transaction::{self, transfer}, + transaction::{ + Transaction, + TransactionError::{self, AccountNotFound}, + }, + }, + std::{collections::HashSet, sync::atomic::Ordering}, + }; + + const TEST_MAX_RETRY_DURATION: Duration = Duration::from_millis(500); + + enum TestOption { + LowComputeBudget, + AssertZeroedCostModel, + AssertNonZeroCostModel, + AssertDuplicateInBundleDropped, + } + + #[cfg(test)] + fn test_single_bundle( + genesis_config: GenesisConfig, + bundle: PacketBundle, + options: Option>, + ) -> Result<(), BundleExecutionError> { + solana_logger::setup(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let (gossip_vote_sender, _gossip_vote_receiver) = unbounded(); + // start a banking_stage to eat verified receiver + let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + if options.is_some() + && options + .as_ref() + .unwrap() + .iter() + .any(|option| matches!(option, LowComputeBudget)) + { + bank.write_cost_tracker().unwrap().set_limits(1, 1, 1); + } + let current_block_cost_limit = bank.read_cost_tracker().unwrap().block_cost_limit(); + debug!("current block cost limit: {current_block_cost_limit}"); + let blockstore = Arc::new( + Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"), + ); + let poh_config = PohConfig { + // limit tick count to avoid clearing working_bank at + // PohRecord then PohRecorderError(MaxHeightReached) at BankingStage + target_tick_count: Some(bank.max_tick_height() - 1), // == 1, only enough for ticks, not txs + ..PohConfig::default() + }; + let (exit, poh_recorder, poh_service, _entry_receiver) = + create_test_recorder(&bank, &blockstore, Some(poh_config), None); + let ticks_per_slot = poh_recorder.read().unwrap().ticks_per_slot(); + let recorder = poh_recorder.read().unwrap().recorder(); + let cost_model = Arc::new(RwLock::new(CostModel::default())); + let qos_service = QosService::new(cost_model, 0); + let mut bundle_stage_leader_stats = BundleStageLeaderStats::default(); + let bank_start = poh_recorder.read().unwrap().bank_start().unwrap(); + let 
sanitized_bundle = get_sanitized_bundle( + &bundle, + &bank, + &HashSet::default(), + &HashSet::default(), + bundle_stage_leader_stats.transaction_errors(), + ) + .unwrap(); + + let results = BundleStage::update_qos_and_execute_record_commit_bundle( + &sanitized_bundle, + &recorder, + &None, + &gossip_vote_sender, + &qos_service, + &bank_start, + &mut bundle_stage_leader_stats, + &TEST_MAX_RETRY_DURATION, + &mut BundleReservedSpace { + current_tx_block_limit: current_block_cost_limit, + current_bundle_block_limit: current_block_cost_limit, + initial_allocated_cost: 0, + unreserved_ticks: ticks_per_slot, + }, + ); + + // This is ugly, not really an option for testing but a test itself. + // Still preferable to duplicating the entirety of this method + // just to test duplicate txs are dropped. + if options.is_some() + && options + .as_ref() + .unwrap() + .iter() + .any(|option| matches!(option, AssertDuplicateInBundleDropped)) + { + assert_eq!(results, Ok(())); + assert!(get_sanitized_bundle( + &bundle, + &bank, + &HashSet::default(), + &HashSet::default(), + bundle_stage_leader_stats.transaction_errors(), + ) + .is_err()); + } + + // Transaction rolled back successfully if + // cost tracker has 0 transaction count + // cost tracker as 0 block cost + if options.is_some() + && options + .as_ref() + .unwrap() + .iter() + .any(|option| matches!(option, AssertZeroedCostModel)) + { + assert_eq!(bank.read_cost_tracker().unwrap().transaction_count(), 0); + assert_eq!(bank.read_cost_tracker().unwrap().block_cost(), 0); + } + + if options.is_some() + && options + .as_ref() + .unwrap() + .iter() + .any(|option| matches!(option, AssertNonZeroCostModel)) + { + assert_ne!(bank.read_cost_tracker().unwrap().transaction_count(), 0); + assert_ne!(bank.read_cost_tracker().unwrap().block_cost(), 0); + } + + exit.store(true, Ordering::Relaxed); + poh_service.join().unwrap(); + results + } + + #[test] + fn test_successful_bundle() { + let (genesis_config, bundle) = setup_successful_tx(); + assert_eq!( + test_single_bundle(genesis_config, bundle, Some(vec![AssertNonZeroCostModel])), + Ok(()) + ); + } + + #[test] + fn test_bundle_contains_processed_transaction() { + let (genesis_config, bundle) = setup_successful_tx(); + assert_eq!( + test_single_bundle( + genesis_config, + bundle, + Some(vec![AssertDuplicateInBundleDropped]), + ), + Ok(()) + ); + } + + #[cfg(test)] + fn setup_successful_tx() -> (GenesisConfig, PacketBundle) { + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(5); + + let kp_a = Keypair::new(); + let kp_b = Keypair::new(); + let ix_mint_a = system_instruction::transfer(&mint_keypair.pubkey(), &kp_a.pubkey(), 1); + let ix_mint_b = system_instruction::transfer(&mint_keypair.pubkey(), &kp_b.pubkey(), 1); + let message = Message::new(&[ix_mint_a, ix_mint_b], Some(&mint_keypair.pubkey())); + let tx = VersionedTransaction::from(Transaction::new( + &[&mint_keypair], + message, + genesis_config.hash(), + )); + let packet = Packet::from_data(None, &tx).unwrap(); + let bundle_id = derive_bundle_id(&[tx]); + + ( + genesis_config, + PacketBundle { + batch: PacketBatch::new(vec![packet]), + bundle_id, + }, + ) + } + + #[test] + fn test_txs_exceed_cost_model() { + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. 
+ } = create_genesis_config(5); + + let kp = Keypair::new(); + let instruction = system_instruction::transfer(&mint_keypair.pubkey(), &kp.pubkey(), 1); + let message = Message::new( + &[ + ComputeBudgetInstruction::set_compute_unit_limit(1), + instruction, + ], + Some(&mint_keypair.pubkey()), + ); + let tx = VersionedTransaction::from(Transaction::new( + &[&mint_keypair], + message, + genesis_config.hash(), + )); + let packet = Packet::from_data(None, &tx).unwrap(); + + let bundle = PacketBundle { + batch: PacketBatch::new(vec![packet]), + bundle_id: derive_bundle_id(&[tx]), + }; + assert_eq!( + test_single_bundle(genesis_config, bundle, Some(vec![LowComputeBudget])), + Err(ExceedsCostModel) + ); + } + + #[test] + fn test_nonce_tx_failure() { + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(4); + + let kp_a = Keypair::new(); + let kp_nonce = Keypair::new(); + let kp_nonce_authority = Keypair::new(); + let tx = VersionedTransaction::from(system_transaction::nonced_transfer( + &mint_keypair, + &kp_a.pubkey(), + 1, + &kp_nonce.pubkey(), + &kp_nonce_authority, + genesis_config.hash(), + )); + let packet = Packet::from_data(None, &tx).unwrap(); + let bundle = PacketBundle { + batch: PacketBatch::new(vec![packet]), + bundle_id: derive_bundle_id(&[tx]), + }; + + assert_eq!( + test_single_bundle(genesis_config, bundle, None), + Err(TransactionFailure(TransactionError::InstructionError( + 0, + InstructionError::InvalidAccountData, + ))) + ); + } + + #[test] + fn test_qos_rollback() { + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(4); + + let kp_a = Keypair::new(); + let kp_b = Keypair::new(); + + let successful_tx = VersionedTransaction::from(transfer( + &mint_keypair, + &kp_b.pubkey(), + 1, + genesis_config.hash(), + )); + let failed_tx = + VersionedTransaction::from(transfer(&kp_a, &kp_b.pubkey(), 1, genesis_config.hash())); + + let successful_packet = Packet::from_data(None, &successful_tx).unwrap(); + let failed_packet = Packet::from_data(None, &failed_tx).unwrap(); + + let bundle = PacketBundle { + batch: PacketBatch::new(vec![successful_packet, failed_packet]), + bundle_id: derive_bundle_id(&[successful_tx, failed_tx]), + }; + + assert_eq!( + test_single_bundle(genesis_config, bundle, Some(vec![AssertZeroedCostModel])), + Err(TransactionFailure(AccountNotFound)) + ); + } + + #[test] + fn test_zero_balance_account() { + let GenesisConfigInfo { + genesis_config, + mint_keypair: _, + .. + } = create_genesis_config(4); + + let kp_a = Keypair::new(); + let kp_b = Keypair::new(); + let tx = + VersionedTransaction::from(transfer(&kp_a, &kp_b.pubkey(), 1, genesis_config.hash())); + let packet = Packet::from_data(None, &tx).unwrap(); + let bundle = PacketBundle { + batch: PacketBatch::new(vec![packet]), + bundle_id: derive_bundle_id(&[tx]), + }; + + assert_eq!( + test_single_bundle(genesis_config, bundle, None), + Err(TransactionFailure(AccountNotFound)) + ); + } + + #[test] + fn test_bundle_fails_poh_record() { + solana_logger::setup(); + let GenesisConfigInfo { + mut genesis_config, + mint_keypair, + .. 
+ } = create_genesis_config(4); + genesis_config.ticks_per_slot = 1; // Reduce ticks so that POH fails + + let kp_b = Keypair::new(); + let tx = VersionedTransaction::from(transfer( + &mint_keypair, + &kp_b.pubkey(), + 1, + genesis_config.hash(), + )); + let packet = Packet::from_data(None, &tx).unwrap(); + let bundle = PacketBundle { + batch: PacketBatch::new(vec![packet]), + bundle_id: derive_bundle_id(&[tx]), + }; + assert_eq!( + test_single_bundle(genesis_config, bundle, None), + Err(PohMaxHeightError) + ); + } + + #[test] + fn test_bundle_max_retries() { + solana_logger::setup_with_default("INFO"); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(100_000_000); + let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config)); + bank.write_cost_tracker() + .unwrap() + .set_limits(u64::MAX, u64::MAX, u64::MAX); + let current_block_cost_limit = bank.read_cost_tracker().unwrap().block_cost_limit(); + debug!("current block cost limit: {current_block_cost_limit}"); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Arc::new( + Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"), + ); + let poh_config = PohConfig { + // limit tick count to avoid clearing working_bank at + // PohRecord then PohRecorderError(MaxHeightReached) at BankingStage + target_tick_count: Some(bank.max_tick_height() - 1), // == 1, only enough for ticks, not txs + ..PohConfig::default() + }; + let (exit, poh_recorder, poh_service, _entry_receiver) = + create_test_recorder(&bank, &blockstore, Some(poh_config), None); + let ticks_per_slot = poh_recorder.read().unwrap().ticks_per_slot(); + let recorder = poh_recorder.read().unwrap().recorder(); + let (gossip_vote_sender, _gossip_vote_receiver) = unbounded(); + let cost_model = Arc::new(RwLock::new(CostModel::default())); + let qos_service = QosService::new(cost_model, 0); + let mut bundle_stage_leader_stats = BundleStageLeaderStats::default(); + let bank_start = poh_recorder.read().unwrap().bank_start().unwrap(); + + // Create two transfers + // 0. mint_keypair -> keypair0 + // 1. keypair0 -> keypair 1 + // Lock the accounts through the bank for tx1 and try to process tx0. + // It should timeout because BundleStage will continue to fail to get locks on keypair0. 
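+        // NOTE (review): execute_bundle treats AccountInUse as retryable and keeps
+        // retrying until max_bundle_retry_duration elapses, so holding tx1's locks should
+        // surface MaxRetriesExceeded within TEST_MAX_RETRY_DURATION (500ms).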
+ + let keypair0 = Keypair::new(); + let keypair1 = Keypair::new(); + + let tx0 = VersionedTransaction::from(transfer( + &mint_keypair, + &keypair0.pubkey(), + 100_000, + genesis_config.hash(), + )); + + let tx1 = transfer(&keypair0, &keypair1.pubkey(), 50_000, genesis_config.hash()); + let sanitized_txs_1 = vec![SanitizedTransaction::from_transaction_for_tests(tx1)]; + + // grab lock on tx1 + let _batch = bank.prepare_sanitized_batch(&sanitized_txs_1); + + // push and pop tx0 + let bundle = PacketBundle { + batch: PacketBatch::new(vec![Packet::from_data(None, &tx0).unwrap()]), + bundle_id: derive_bundle_id(&[tx0]), + }; + info!("test_bundle_max_retries uuid: {:?}", bundle.bundle_id); + + let sanitized_bundle = get_sanitized_bundle( + &bundle, + &bank, + &HashSet::default(), + &HashSet::default(), + bundle_stage_leader_stats.transaction_errors(), + ) + .unwrap(); + + let result = BundleStage::update_qos_and_execute_record_commit_bundle( + &sanitized_bundle, + &recorder, + &None, + &gossip_vote_sender, + &qos_service, + &bank_start, + &mut bundle_stage_leader_stats, + &TEST_MAX_RETRY_DURATION, + &mut BundleReservedSpace { + current_tx_block_limit: current_block_cost_limit, + current_bundle_block_limit: current_block_cost_limit, + initial_allocated_cost: 0, + unreserved_ticks: ticks_per_slot, + }, + ); + info!("test_bundle_max_retries result: {:?}", result); + assert!(matches!( + result, + Err(BundleExecutionError::MaxRetriesExceeded(_)) + )); + + exit.store(true, Ordering::Relaxed); + poh_service.join().unwrap(); + } +} diff --git a/core/src/bundle_stage_leader_stats.rs b/core/src/bundle_stage_leader_stats.rs new file mode 100644 index 0000000000..efc771852f --- /dev/null +++ b/core/src/bundle_stage_leader_stats.rs @@ -0,0 +1,326 @@ +use { + crate::leader_slot_banking_stage_timing_metrics::LeaderExecuteAndCommitTimings, + solana_poh::poh_recorder::BankStart, + solana_runtime::transaction_error_metrics::TransactionErrorMetrics, + solana_sdk::{clock::Slot, saturating_add_assign}, +}; + +// Stats emitted only during leader slots +#[derive(Default)] +pub struct BundleStageLeaderSlotTrackingMetrics { + pub(crate) current_slot: Option, + bundle_stage_leader_stats: BundleStageLeaderStats, +} + +impl BundleStageLeaderSlotTrackingMetrics { + pub fn maybe_report(&mut self, id: u32, bank_start: &Option<&BankStart>) { + match (self.current_slot, bank_start) { + // not was leader, not is leader + (None, None) => {} + // was leader, not leader anymore + (Some(current_slot), None) => { + self.bundle_stage_leader_stats.report(id, current_slot); + self.bundle_stage_leader_stats = BundleStageLeaderStats::default(); + } + // was leader, is leader + (Some(current_slot), Some(bank_start)) => { + if current_slot != bank_start.working_bank.slot() { + self.bundle_stage_leader_stats.report(id, current_slot); + self.bundle_stage_leader_stats = BundleStageLeaderStats::default(); + } + } + // not was leader, is leader + (None, Some(_)) => { + self.bundle_stage_leader_stats = BundleStageLeaderStats::default(); + } + } + + self.current_slot = bank_start + .as_ref() + .map(|bank_start| bank_start.working_bank.slot()); + } + + pub fn bundle_stage_leader_stats(&mut self) -> &mut BundleStageLeaderStats { + &mut self.bundle_stage_leader_stats + } +} + +#[derive(Default)] +pub struct BundleStageLeaderStats { + transaction_errors: TransactionErrorMetrics, + execute_and_commit_timings: LeaderExecuteAndCommitTimings, + bundle_stage_stats: BundleStageStats, +} + +impl BundleStageLeaderStats { + pub fn transaction_errors(&mut 
self) -> &mut TransactionErrorMetrics { + &mut self.transaction_errors + } + + pub fn execute_and_commit_timings(&mut self) -> &mut LeaderExecuteAndCommitTimings { + &mut self.execute_and_commit_timings + } + + pub fn bundle_stage_stats(&mut self) -> &mut BundleStageStats { + &mut self.bundle_stage_stats + } + + pub fn report(&self, id: u32, slot: Slot) { + self.transaction_errors.report(id, slot); + self.execute_and_commit_timings.report(id, slot); + self.bundle_stage_stats.report(id, slot); + } +} + +#[derive(Default)] +pub struct BundleStageStats { + sanitize_transaction_ok: u64, + sanitize_transaction_vote_only_mode: u64, + sanitize_transaction_failed_precheck: u64, + sanitize_transaction_blacklisted_account: u64, + sanitize_transaction_failed_to_serialize: u64, + sanitize_transaction_duplicate_transaction: u64, + sanitize_transaction_failed_check: u64, + sanitize_bundle_elapsed_us: u64, + + locked_bundle_elapsed_us: u64, + + num_lock_errors: u64, + + num_init_tip_account_errors: u64, + num_init_tip_account_ok: u64, + + num_change_tip_receiver_errors: u64, + num_change_tip_receiver_ok: u64, + change_tip_receiver_elapsed_us: u64, + + num_execution_failures: u64, + num_execution_timeouts: u64, + num_execution_retries: u64, + + execute_locked_bundles_elapsed_us: u64, + + execution_results_ok: u64, + execution_results_poh_max_height: u64, + execution_results_transaction_failures: u64, + execution_results_exceeds_cost_model: u64, + execution_results_tip_errors: u64, + execution_results_max_retries: u64, + execution_results_lock_errors: u64, +} + +impl BundleStageStats { + pub fn report(&self, id: u32, slot: Slot) { + datapoint_info!( + "bundle_stage-stats", + ("id", id, i64), + ("slot", slot, i64), + ("num_sanitized_ok", self.sanitize_transaction_ok, i64), + ( + "sanitize_transaction_vote_only_mode", + self.sanitize_transaction_vote_only_mode, + i64 + ), + ( + "sanitize_transaction_failed_precheck", + self.sanitize_transaction_failed_precheck, + i64 + ), + ( + "sanitize_transaction_blacklisted_account", + self.sanitize_transaction_blacklisted_account, + i64 + ), + ( + "sanitize_transaction_failed_to_serialize", + self.sanitize_transaction_failed_to_serialize, + i64 + ), + ( + "sanitize_transaction_duplicate_transaction", + self.sanitize_transaction_duplicate_transaction, + i64 + ), + ( + "sanitize_transaction_failed_check", + self.sanitize_transaction_failed_check, + i64 + ), + ( + "sanitize_bundle_elapsed_us", + self.sanitize_bundle_elapsed_us, + i64 + ), + ( + "locked_bundle_elapsed_us", + self.locked_bundle_elapsed_us, + i64 + ), + ("num_lock_errors", self.num_lock_errors, i64), + ( + "num_init_tip_account_errors", + self.num_init_tip_account_errors, + i64 + ), + ("num_init_tip_account_ok", self.num_init_tip_account_ok, i64), + ( + "num_change_tip_receiver_errors", + self.num_change_tip_receiver_errors, + i64 + ), + ( + "num_change_tip_receiver_ok", + self.num_change_tip_receiver_ok, + i64 + ), + ( + "change_tip_receiver_elapsed_us", + self.change_tip_receiver_elapsed_us, + i64 + ), + ("num_execution_failures", self.num_execution_failures, i64), + ("num_execution_timeouts", self.num_execution_timeouts, i64), + ("num_execution_retries", self.num_execution_retries, i64), + ( + "execute_locked_bundles_elapsed_us", + self.execute_locked_bundles_elapsed_us, + i64 + ), + ("execution_results_ok", self.execution_results_ok, i64), + ( + "execution_results_poh_max_height", + self.execution_results_poh_max_height, + i64 + ), + ( + "execution_results_transaction_failures", + 
self.execution_results_transaction_failures, + i64 + ), + ( + "execution_results_exceeds_cost_model", + self.execution_results_exceeds_cost_model, + i64 + ), + ( + "execution_results_tip_errors", + self.execution_results_tip_errors, + i64 + ), + ( + "execution_results_max_retries", + self.execution_results_max_retries, + i64 + ), + ( + "execution_results_lock_errors", + self.execution_results_lock_errors, + i64 + ), + ); + } + + pub fn increment_sanitize_transaction_ok(&mut self, num: u64) { + saturating_add_assign!(self.sanitize_transaction_ok, num); + } + + pub fn increment_sanitize_transaction_vote_only_mode(&mut self, num: u64) { + saturating_add_assign!(self.sanitize_transaction_vote_only_mode, num); + } + + pub fn increment_sanitize_transaction_failed_precheck(&mut self, num: u64) { + saturating_add_assign!(self.sanitize_transaction_failed_precheck, num); + } + + pub fn increment_sanitize_transaction_blacklisted_account(&mut self, num: u64) { + saturating_add_assign!(self.sanitize_transaction_blacklisted_account, num); + } + + pub fn increment_sanitize_transaction_failed_to_serialize(&mut self, num: u64) { + saturating_add_assign!(self.sanitize_transaction_failed_to_serialize, num); + } + + pub fn increment_sanitize_transaction_duplicate_transaction(&mut self, num: u64) { + saturating_add_assign!(self.sanitize_transaction_duplicate_transaction, num); + } + + pub fn increment_sanitize_transaction_failed_check(&mut self, num: u64) { + saturating_add_assign!(self.sanitize_transaction_failed_check, num); + } + + pub fn increment_sanitize_bundle_elapsed_us(&mut self, num: u64) { + saturating_add_assign!(self.sanitize_bundle_elapsed_us, num); + } + + pub fn increment_locked_bundle_elapsed_us(&mut self, num: u64) { + saturating_add_assign!(self.locked_bundle_elapsed_us, num); + } + + pub fn increment_num_lock_errors(&mut self, num: u64) { + saturating_add_assign!(self.num_lock_errors, num); + } + + pub fn increment_num_init_tip_account_errors(&mut self, num: u64) { + saturating_add_assign!(self.num_init_tip_account_errors, num); + } + + pub fn increment_num_init_tip_account_ok(&mut self, num: u64) { + saturating_add_assign!(self.num_init_tip_account_ok, num); + } + + pub fn increment_num_change_tip_receiver_errors(&mut self, num: u64) { + saturating_add_assign!(self.num_change_tip_receiver_errors, num); + } + + pub fn increment_num_change_tip_receiver_ok(&mut self, num: u64) { + saturating_add_assign!(self.num_change_tip_receiver_ok, num); + } + + pub fn increment_change_tip_receiver_elapsed_us(&mut self, num: u64) { + saturating_add_assign!(self.change_tip_receiver_elapsed_us, num); + } + + pub fn increment_num_execution_failures(&mut self, num: u64) { + saturating_add_assign!(self.num_execution_failures, num); + } + + pub fn increment_num_execution_timeouts(&mut self, num: u64) { + saturating_add_assign!(self.num_execution_timeouts, num); + } + + pub fn increment_num_execution_retries(&mut self, num: u64) { + saturating_add_assign!(self.num_execution_retries, num); + } + + pub fn increment_execute_locked_bundles_elapsed_us(&mut self, num: u64) { + saturating_add_assign!(self.execute_locked_bundles_elapsed_us, num); + } + + pub fn increment_execution_results_ok(&mut self, num: u64) { + saturating_add_assign!(self.execution_results_ok, num); + } + + pub fn increment_execution_results_poh_max_height(&mut self, num: u64) { + saturating_add_assign!(self.execution_results_poh_max_height, num); + } + + pub fn increment_execution_results_transaction_failures(&mut self, num: u64) { + 
saturating_add_assign!(self.execution_results_transaction_failures, num); + } + + pub fn increment_execution_results_exceeds_cost_model(&mut self, num: u64) { + saturating_add_assign!(self.execution_results_exceeds_cost_model, num); + } + + pub fn increment_execution_results_tip_errors(&mut self, num: u64) { + saturating_add_assign!(self.execution_results_tip_errors, num); + } + + pub fn increment_execution_results_max_retries(&mut self, num: u64) { + saturating_add_assign!(self.execution_results_max_retries, num); + } + + pub fn increment_execution_results_lock_errors(&mut self, num: u64) { + saturating_add_assign!(self.execution_results_lock_errors, num); + } +} diff --git a/core/src/consensus_cache_updater.rs b/core/src/consensus_cache_updater.rs new file mode 100644 index 0000000000..0514f4133b --- /dev/null +++ b/core/src/consensus_cache_updater.rs @@ -0,0 +1,52 @@ +use { + solana_runtime::bank::Bank, + solana_sdk::{clock::Epoch, pubkey::Pubkey}, + std::collections::HashSet, +}; + +#[derive(Default)] +pub(crate) struct ConsensusCacheUpdater { + last_epoch_updated: Epoch, + consensus_accounts_cache: HashSet, +} + +impl ConsensusCacheUpdater { + pub(crate) fn consensus_accounts_cache(&self) -> &HashSet { + &self.consensus_accounts_cache + } + + /// Builds a HashSet of all consensus related accounts for the Bank's epoch + fn get_consensus_accounts(bank: &Bank) -> HashSet { + let mut consensus_accounts: HashSet = HashSet::new(); + if let Some(epoch_stakes) = bank.epoch_stakes(bank.epoch()) { + // votes use the following accounts: + // - vote_account pubkey: writeable + // - authorized_voter_pubkey: read-only + // - node_keypair pubkey: payer (writeable) + let node_id_vote_accounts = epoch_stakes.node_id_to_vote_accounts(); + + let vote_accounts = node_id_vote_accounts + .values() + .into_iter() + .flat_map(|v| v.vote_accounts.clone()); + + // vote_account + consensus_accounts.extend(vote_accounts.into_iter()); + // authorized_voter_pubkey + consensus_accounts.extend(epoch_stakes.epoch_authorized_voters().keys().into_iter()); + // node_keypair + consensus_accounts.extend(epoch_stakes.node_id_to_vote_accounts().keys().into_iter()); + } + consensus_accounts + } + + /// Updates consensus-related accounts on epoch boundaries + /// Bundles must not contain any consensus related accounts in order to prevent starvation + /// of voting related transactions + pub(crate) fn maybe_update(&mut self, bank: &Bank) { + if bank.epoch() > self.last_epoch_updated { + self.consensus_accounts_cache = Self::get_consensus_accounts(bank); + self.last_epoch_updated = bank.epoch(); + } + } +} diff --git a/core/src/lib.rs b/core/src/lib.rs index d6a22a5a68..090e711c3f 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -13,6 +13,10 @@ pub mod admin_rpc_post_init; pub mod ancestor_hashes_service; pub mod banking_stage; pub mod broadcast_stage; +pub mod bundle_account_locker; +pub mod bundle_sanitizer; +pub mod bundle_stage; +mod bundle_stage_leader_stats; pub mod cache_block_meta_service; pub mod cluster_info_vote_listener; pub mod cluster_nodes; @@ -22,6 +26,7 @@ pub mod cluster_slots_service; pub mod commitment_service; pub mod completed_data_sets_service; pub mod consensus; +pub mod consensus_cache_updater; pub mod cost_update_service; pub mod drop_bank_service; pub mod duplicate_repair_status; @@ -39,10 +44,12 @@ pub mod ledger_metric_report_service; pub mod multi_iterator_scanner; pub mod optimistic_confirmation_verifier; pub mod outstanding_requests; +pub mod packet_bundle; pub mod packet_threshold; pub mod 
poh_timing_report_service; pub mod poh_timing_reporter; pub mod progress_map; +pub mod proxy; pub mod qos_service; pub mod repair_generic_traversal; pub mod repair_response; @@ -65,6 +72,7 @@ pub mod snapshot_packager_service; pub mod staked_nodes_updater_service; pub mod stats_reporter_service; pub mod system_monitor_service; +pub mod tip_manager; mod tower1_7_14; pub mod tower_storage; pub mod tpu; @@ -99,3 +107,42 @@ extern crate solana_frozen_abi_macro; #[cfg(test)] #[macro_use] extern crate matches; + +use { + solana_sdk::packet::{Meta, Packet, PacketFlags, PACKET_DATA_SIZE}, + std::{ + cmp::min, + net::{IpAddr, Ipv4Addr}, + }, +}; + +const UNKNOWN_IP: IpAddr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)); + +// NOTE: last profiled at around 180ns +pub fn proto_packet_to_packet(p: jito_protos::proto::packet::Packet) -> Packet { + let mut data = [0; PACKET_DATA_SIZE]; + let copy_len = min(data.len(), p.data.len()); + data[..copy_len].copy_from_slice(&p.data[..copy_len]); + let mut packet = Packet::new(data, Meta::default()); + if let Some(meta) = p.meta { + packet.meta.size = meta.size as usize; + packet.meta.addr = meta.addr.parse().unwrap_or(UNKNOWN_IP); + packet.meta.port = meta.port as u16; + if let Some(flags) = meta.flags { + if flags.simple_vote_tx { + packet.meta.flags.insert(PacketFlags::SIMPLE_VOTE_TX); + } + if flags.forwarded { + packet.meta.flags.insert(PacketFlags::FORWARDED); + } + if flags.tracer_packet { + packet.meta.flags.insert(PacketFlags::TRACER_PACKET); + } + if flags.repair { + packet.meta.flags.insert(PacketFlags::REPAIR); + } + } + packet.meta.sender_stake = meta.sender_stake; + } + packet +} diff --git a/core/src/packet_bundle.rs b/core/src/packet_bundle.rs new file mode 100644 index 0000000000..2158f37414 --- /dev/null +++ b/core/src/packet_bundle.rs @@ -0,0 +1,7 @@ +use solana_perf::packet::PacketBatch; + +#[derive(Clone, Debug)] +pub struct PacketBundle { + pub batch: PacketBatch, + pub bundle_id: String, +} diff --git a/core/src/proxy/auth.rs b/core/src/proxy/auth.rs new file mode 100644 index 0000000000..39821e12ef --- /dev/null +++ b/core/src/proxy/auth.rs @@ -0,0 +1,185 @@ +use { + crate::proxy::ProxyError, + chrono::Utc, + jito_protos::proto::auth::{ + auth_service_client::AuthServiceClient, GenerateAuthChallengeRequest, + GenerateAuthTokensRequest, RefreshAccessTokenRequest, Role, Token, + }, + solana_gossip::cluster_info::ClusterInfo, + solana_sdk::signature::{Keypair, Signer}, + std::{ + sync::{Arc, Mutex}, + time::Duration, + }, + tokio::time::timeout, + tonic::{service::Interceptor, transport::Channel, Code, Request, Status}, +}; + +/// Interceptor responsible for adding the access token to request headers. +pub(crate) struct AuthInterceptor { + /// The token added to each request header. + access_token: Arc>, +} + +impl AuthInterceptor { + pub(crate) fn new(access_token: Arc>) -> Self { + Self { access_token } + } +} + +impl Interceptor for AuthInterceptor { + fn call(&mut self, mut request: Request<()>) -> Result, Status> { + request.metadata_mut().insert( + "authorization", + format!("Bearer {}", self.access_token.lock().unwrap().value) + .parse() + .unwrap(), + ); + + Ok(request) + } +} + +/// Generates an auth challenge then generates and returns validated auth tokens. 
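+/// The flow implemented below: request a challenge for this validator's pubkey, sign
+/// "{pubkey}-{challenge}" with the identity keypair, exchange the signed challenge for
+/// an access/refresh token pair, and validate both tokens before returning them.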
+pub async fn generate_auth_tokens(
+    auth_service_client: &mut AuthServiceClient<Channel>,
+    // used to sign challenges
+    keypair: &Keypair,
+) -> crate::proxy::Result<(
+    Token, /* access_token */
+    Token, /* refresh_token */
+)> {
+    debug!("generate_auth_challenge");
+    let challenge_response = auth_service_client
+        .generate_auth_challenge(GenerateAuthChallengeRequest {
+            role: Role::Validator as i32,
+            pubkey: keypair.pubkey().as_ref().to_vec(),
+        })
+        .await
+        .map_err(|e: Status| {
+            if e.code() == Code::PermissionDenied {
+                ProxyError::AuthenticationPermissionDenied
+            } else {
+                ProxyError::AuthenticationError(e.to_string())
+            }
+        })?;
+
+    let formatted_challenge = format!(
+        "{}-{}",
+        keypair.pubkey(),
+        challenge_response.into_inner().challenge
+    );
+
+    let signed_challenge = keypair
+        .sign_message(formatted_challenge.as_bytes())
+        .as_ref()
+        .to_vec();
+
+    debug!(
+        "formatted_challenge: {} signed_challenge: {:?}",
+        formatted_challenge, signed_challenge
+    );
+
+    debug!("generate_auth_tokens");
+    let auth_tokens = auth_service_client
+        .generate_auth_tokens(GenerateAuthTokensRequest {
+            challenge: formatted_challenge,
+            client_pubkey: keypair.pubkey().as_ref().to_vec(),
+            signed_challenge,
+        })
+        .await
+        .map_err(|e| ProxyError::AuthenticationError(e.to_string()))?;
+
+    let inner = auth_tokens.into_inner();
+    let access_token = get_validated_token(inner.access_token)?;
+    let refresh_token = get_validated_token(inner.refresh_token)?;
+
+    Ok((access_token, refresh_token))
+}
+
+/// Tries to refresh the access token, or runs a full re-auth if needed.
+pub async fn maybe_refresh_auth_tokens(
+    auth_service_client: &mut AuthServiceClient<Channel>,
+    access_token: &Arc<Mutex<Token>>,
+    refresh_token: &Token,
+    cluster_info: &Arc<ClusterInfo>,
+    connection_timeout: &Duration,
+    refresh_within_s: u64,
+) -> crate::proxy::Result<(
+    Option<Token>, // access token
+    Option<Token>, // refresh token
+)> {
+    let access_token_expiry: u64 = access_token
+        .lock()
+        .unwrap()
+        .expires_at_utc
+        .as_ref()
+        .map(|ts| ts.seconds as u64)
+        .unwrap_or_default();
+    let refresh_token_expiry: u64 = refresh_token
+        .expires_at_utc
+        .as_ref()
+        .map(|ts| ts.seconds as u64)
+        .unwrap_or_default();
+
+    let now = Utc::now().timestamp() as u64;
+
+    let should_refresh_access =
+        access_token_expiry.checked_sub(now).unwrap_or_default() <= refresh_within_s;
+    let should_generate_new_tokens =
+        refresh_token_expiry.checked_sub(now).unwrap_or_default() <= refresh_within_s;
+
+    if should_generate_new_tokens {
+        let kp = cluster_info.keypair().clone();
+
+        let (new_access_token, new_refresh_token) = timeout(
+            *connection_timeout,
+            generate_auth_tokens(auth_service_client, kp.as_ref()),
+        )
+        .await
+        .map_err(|_| ProxyError::MethodTimeout("generate_auth_tokens".to_string()))?
+        .map_err(|e| ProxyError::MethodError(e.to_string()))?;
+
+        return Ok((Some(new_access_token), Some(new_refresh_token)));
+    } else if should_refresh_access {
+        let new_access_token = timeout(
+            *connection_timeout,
+            refresh_access_token(auth_service_client, refresh_token),
+        )
+        .await
+        .map_err(|_| ProxyError::MethodTimeout("refresh_access_token".to_string()))?
+        .map_err(|e| ProxyError::MethodError(e.to_string()))?;
+
+        return Ok((Some(new_access_token), None));
+    }
+
+    Ok((None, None))
+}
+
+pub async fn refresh_access_token(
+    auth_service_client: &mut AuthServiceClient<Channel>,
+    refresh_token: &Token,
+) -> crate::proxy::Result<Token> {
+    let response = auth_service_client
+        .refresh_access_token(RefreshAccessTokenRequest {
+            refresh_token: refresh_token.value.clone(),
+        })
+        .await
+        .map_err(|e| ProxyError::AuthenticationError(e.to_string()))?;
+    get_validated_token(response.into_inner().access_token)
+}
+
+/// Performs the necessary validations on the auth tokens before returning,
+/// i.e. it is safe to call .unwrap() on the token fields from the call-site.
+/// An invalid token is one where any of its fields are None, or where the token
+/// itself is None.
+fn get_validated_token(maybe_token: Option<Token>) -> crate::proxy::Result<Token> {
+    let token = maybe_token
+        .ok_or_else(|| ProxyError::BadAuthenticationToken("received a null token".to_string()))?;
+    if token.expires_at_utc.is_none() {
+        Err(ProxyError::BadAuthenticationToken(
+            "expires_at_utc field is null".to_string(),
+        ))
+    } else {
+        Ok(token)
+    }
+}
diff --git a/core/src/proxy/block_engine_stage.rs b/core/src/proxy/block_engine_stage.rs
new file mode 100644
index 0000000000..1bea7c6ad4
--- /dev/null
+++ b/core/src/proxy/block_engine_stage.rs
@@ -0,0 +1,561 @@
+//! Maintains a connection to the Block Engine.
+//!
+//! The Block Engine is responsible for the following:
+//! - Acts as a system that sends high-profit bundles and transactions to the validator.
+use {
+    crate::{
+        packet_bundle::PacketBundle,
+        proto_packet_to_packet,
+        proxy::{
+            auth::{generate_auth_tokens, maybe_refresh_auth_tokens, AuthInterceptor},
+            ProxyError,
+        },
+        sigverify::SigverifyTracerPacketStats,
+    },
+    crossbeam_channel::Sender,
+    jito_protos::proto::{
+        auth::{auth_service_client::AuthServiceClient, Token},
+        block_engine::{
+            self, block_engine_validator_client::BlockEngineValidatorClient,
+            BlockBuilderFeeInfoRequest,
+        },
+    },
+    solana_gossip::cluster_info::ClusterInfo,
+    solana_perf::packet::PacketBatch,
+    solana_sdk::{
+        pubkey::Pubkey, saturating_add_assign, signature::Signer, signer::keypair::Keypair,
+    },
+    std::{
+        str::FromStr,
+        sync::{
+            atomic::{AtomicBool, Ordering},
+            Arc, Mutex,
+        },
+        thread::{self, Builder, JoinHandle},
+        time::Duration,
+    },
+    tokio::time::{interval, sleep, timeout},
+    tonic::{
+        codegen::InterceptedService,
+        transport::{Channel, Endpoint},
+        Status, Streaming,
+    },
+};
+
+const CONNECTION_TIMEOUT_S: u64 = 10;
+const CONNECTION_BACKOFF_S: u64 = 5;
+
+#[derive(Default)]
+struct BlockEngineStageStats {
+    num_bundles: u64,
+    num_bundle_packets: u64,
+    num_packets: u64,
+    num_empty_packets: u64,
+}
+
+impl BlockEngineStageStats {
+    pub(crate) fn report(&self) {
+        datapoint_info!(
+            "block_engine_stage-stats",
+            ("num_bundles", self.num_bundles, i64),
+            ("num_bundle_packets", self.num_bundle_packets, i64),
+            ("num_packets", self.num_packets, i64),
+            ("num_empty_packets", self.num_empty_packets, i64)
+        );
+    }
+}
+
+pub struct BlockBuilderFeeInfo {
+    pub block_builder: Pubkey,
+    pub block_builder_commission: u64,
+}
+
+#[derive(Clone, Debug, Default, PartialEq, Eq)]
+pub struct BlockEngineConfig {
+    /// Address to the external auth-service responsible for generating access tokens.
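+    /// The value must parse as a tonic `Endpoint`; an `https` scheme enables
+    /// TLS (e.g. `https://auth.example.com:443`, an illustrative address).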
+ pub auth_service_addr: String, + + /// Block Engine grpc Address + pub backend_addr: String, + + /// If set then it will be assumed the backend verified packets so signature verification will be bypassed in the validator. + pub trust_packets: bool, +} + +pub struct BlockEngineStage { + t_hdls: Vec>, +} + +impl BlockEngineStage { + pub fn new( + block_engine_config: Arc>, + // Channel that bundles get piped through. + bundle_tx: Sender>, + // The keypair stored here is used to sign auth challenges. + cluster_info: Arc, + // Channel that non-trusted packets get piped through. + packet_tx: Sender, + // Channel that trusted packets get piped through. + verified_packet_tx: Sender<(Vec, Option)>, + exit: Arc, + block_builder_fee_info: &Arc>, + ) -> Self { + let block_builder_fee_info = block_builder_fee_info.clone(); + + let thread = Builder::new() + .name("block-engine-stage".to_string()) + .spawn(move || { + let rt = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .build() + .unwrap(); + rt.block_on(Self::start( + block_engine_config, + cluster_info, + bundle_tx, + packet_tx, + verified_packet_tx, + exit, + block_builder_fee_info, + )); + }) + .unwrap(); + + Self { + t_hdls: vec![thread], + } + } + + pub fn join(self) -> thread::Result<()> { + for t in self.t_hdls { + t.join()?; + } + Ok(()) + } + + #[allow(clippy::too_many_arguments)] + async fn start( + block_engine_config: Arc>, + cluster_info: Arc, + bundle_tx: Sender>, + packet_tx: Sender, + verified_packet_tx: Sender<(Vec, Option)>, + exit: Arc, + block_builder_fee_info: Arc>, + ) { + const CONNECTION_TIMEOUT: Duration = Duration::from_secs(CONNECTION_TIMEOUT_S); + const CONNECTION_BACKOFF: Duration = Duration::from_secs(CONNECTION_BACKOFF_S); + let mut error_count: u64 = 0; + + while !exit.load(Ordering::Relaxed) { + // Wait until a valid config is supplied (either initially or by admin rpc) + // Use if!/else here to avoid extra CONNECTION_BACKOFF wait on successful termination + if !Self::is_valid_block_engine_config(&block_engine_config.lock().unwrap()) { + sleep(CONNECTION_BACKOFF).await; + } else if let Err(e) = Self::connect_auth_and_stream( + &block_engine_config, + &cluster_info, + &bundle_tx, + &packet_tx, + &verified_packet_tx, + &exit, + &block_builder_fee_info, + &CONNECTION_TIMEOUT, + ) + .await + { + match e { + // This error is frequent on hot spares, and the parsed string does not work + // with datapoints (incorrect escaping). + ProxyError::AuthenticationPermissionDenied => { + warn!("block engine permission denied. not on leader schedule. 
ignore if hot-spare.") + } + e => { + error_count += 1; + datapoint_warn!( + "block_engine_stage-proxy_error", + ("count", error_count, i64), + ("error", e.to_string(), String), + ); + } + } + sleep(CONNECTION_BACKOFF).await; + } + } + } + + async fn connect_auth_and_stream( + block_engine_config: &Arc>, + cluster_info: &Arc, + bundle_tx: &Sender>, + packet_tx: &Sender, + verified_packet_tx: &Sender<(Vec, Option)>, + exit: &Arc, + block_builder_fee_info: &Arc>, + connection_timeout: &Duration, + ) -> crate::proxy::Result<()> { + // Get a copy of configs here in case they have changed at runtime + let keypair = cluster_info.keypair().clone(); + let local_config = block_engine_config.lock().unwrap().clone(); + + let mut auth_service_endpoint = + Endpoint::from_shared(local_config.auth_service_addr.clone()).map_err(|_| { + ProxyError::AuthenticationConnectionError(format!( + "invalid block engine url value: {}", + local_config.auth_service_addr + )) + })?; + if local_config.auth_service_addr.starts_with("https") { + auth_service_endpoint = auth_service_endpoint + .tls_config(tonic::transport::ClientTlsConfig::new()) + .map_err(|_| { + ProxyError::AuthenticationConnectionError( + "failed to set tls_config for block engine auth service".to_string(), + ) + })?; + } + let mut backend_endpoint = Endpoint::from_shared(local_config.backend_addr.clone()) + .map_err(|_| { + ProxyError::BlockEngineConnectionError(format!( + "invalid block engine url value: {}", + local_config.backend_addr + )) + })? + .tcp_keepalive(Some(Duration::from_secs(60))); + + if local_config.backend_addr.starts_with("https") { + backend_endpoint = backend_endpoint + .tls_config(tonic::transport::ClientTlsConfig::new()) + .map_err(|_| { + ProxyError::BlockEngineConnectionError( + "failed to set tls_config for block engine service".to_string(), + ) + })?; + } + + debug!("connecting to auth: {}", &local_config.auth_service_addr); + let auth_channel = timeout(*connection_timeout, auth_service_endpoint.connect()) + .await + .map_err(|_| ProxyError::AuthenticationConnectionTimeout)? + .map_err(|e| ProxyError::AuthenticationConnectionError(e.to_string()))?; + + let mut auth_client = AuthServiceClient::new(auth_channel); + + debug!("generating authentication token"); + let (access_token, refresh_token) = timeout( + *connection_timeout, + generate_auth_tokens(&mut auth_client, &keypair), + ) + .await + .map_err(|_| ProxyError::AuthenticationTimeout)??; + + datapoint_info!( + "block_engine_stage-tokens_generated", + ("url", &local_config.auth_service_addr, String), + ("count", 1, i64), + ); + + debug!("connecting to block engine: {}", &local_config.backend_addr); + let block_engine_channel = timeout(*connection_timeout, backend_endpoint.connect()) + .await + .map_err(|_| ProxyError::BlockEngineConnectionTimeout)? 
+ .map_err(|e| ProxyError::BlockEngineConnectionError(e.to_string()))?; + + let access_token = Arc::new(Mutex::new(access_token)); + let block_engine_client = BlockEngineValidatorClient::with_interceptor( + block_engine_channel, + AuthInterceptor::new(access_token.clone()), + ); + + Self::start_consuming_block_engine_bundles_and_packets( + bundle_tx, + block_engine_client, + packet_tx, + &local_config, + block_engine_config, + verified_packet_tx, + exit, + block_builder_fee_info, + auth_client, + access_token, + refresh_token, + connection_timeout, + keypair, + cluster_info, + ) + .await + } + + #[allow(clippy::too_many_arguments)] + async fn start_consuming_block_engine_bundles_and_packets( + bundle_tx: &Sender>, + mut client: BlockEngineValidatorClient>, + packet_tx: &Sender, + local_config: &BlockEngineConfig, // local copy of config with current connections + global_config: &Arc>, // guarded reference for detecting run-time updates + verified_packet_tx: &Sender<(Vec, Option)>, + exit: &Arc, + block_builder_fee_info: &Arc>, + auth_client: AuthServiceClient, + access_token: Arc>, + refresh_token: Token, + connection_timeout: &Duration, + keypair: Arc, + cluster_info: &Arc, + ) -> crate::proxy::Result<()> { + let subscribe_packets_stream = timeout( + *connection_timeout, + client.subscribe_packets(block_engine::SubscribePacketsRequest {}), + ) + .await + .map_err(|_| ProxyError::MethodTimeout("block_engine_subscribe_packets".to_string()))? + .map_err(|e| ProxyError::MethodError(e.to_string()))? + .into_inner(); + + let subscribe_bundles_stream = timeout( + *connection_timeout, + client.subscribe_bundles(block_engine::SubscribeBundlesRequest {}), + ) + .await + .map_err(|_| ProxyError::MethodTimeout("subscribe_bundles".to_string()))? + .map_err(|e| ProxyError::MethodError(e.to_string()))? + .into_inner(); + + let block_builder_info = timeout( + *connection_timeout, + client.get_block_builder_fee_info(BlockBuilderFeeInfoRequest {}), + ) + .await + .map_err(|_| ProxyError::MethodTimeout("get_block_builder_fee_info".to_string()))? + .map_err(|e| ProxyError::MethodError(e.to_string()))? 
+ .into_inner(); + + { + let mut bb_fee = block_builder_fee_info.lock().unwrap(); + bb_fee.block_builder_commission = block_builder_info.commission; + bb_fee.block_builder = + Pubkey::from_str(&block_builder_info.pubkey).unwrap_or(bb_fee.block_builder); + } + + Self::consume_bundle_and_packet_stream( + client, + (subscribe_bundles_stream, subscribe_packets_stream), + bundle_tx, + packet_tx, + local_config, + global_config, + verified_packet_tx, + exit, + block_builder_fee_info, + auth_client, + access_token, + refresh_token, + keypair, + cluster_info, + connection_timeout, + ) + .await + } + + #[allow(clippy::too_many_arguments)] + async fn consume_bundle_and_packet_stream( + mut client: BlockEngineValidatorClient>, + (mut bundle_stream, mut packet_stream): ( + Streaming, + Streaming, + ), + bundle_tx: &Sender>, + packet_tx: &Sender, + local_config: &BlockEngineConfig, // local copy of config with current connections + global_config: &Arc>, // guarded reference for detecting run-time updates + verified_packet_tx: &Sender<(Vec, Option)>, + exit: &Arc, + block_builder_fee_info: &Arc>, + mut auth_client: AuthServiceClient, + access_token: Arc>, + mut refresh_token: Token, + keypair: Arc, + cluster_info: &Arc, + connection_timeout: &Duration, + ) -> crate::proxy::Result<()> { + const METRICS_TICK: Duration = Duration::from_secs(1); + const MAINTENANCE_TICK: Duration = Duration::from_secs(10 * 60); + let refresh_within_s: u64 = METRICS_TICK.as_secs().saturating_mul(3).saturating_div(2); + + let mut num_full_refreshes: u64 = 1; + let mut num_refresh_access_token: u64 = 0; + let mut block_engine_stats = BlockEngineStageStats::default(); + let mut metrics_and_auth_tick = interval(METRICS_TICK); + let mut maintenance_tick = interval(MAINTENANCE_TICK); + + info!("connected to packet and bundle stream"); + + while !exit.load(Ordering::Relaxed) { + tokio::select! 
{ + maybe_msg = packet_stream.message() => { + let resp = maybe_msg?.ok_or(ProxyError::GrpcStreamDisconnected)?; + Self::handle_block_engine_packets(resp, packet_tx, verified_packet_tx, local_config.trust_packets, &mut block_engine_stats)?; + } + maybe_bundles = bundle_stream.message() => { + Self::handle_block_engine_maybe_bundles(maybe_bundles, bundle_tx, &mut block_engine_stats)?; + } + _ = metrics_and_auth_tick.tick() => { + block_engine_stats.report(); + block_engine_stats = BlockEngineStageStats::default(); + + if cluster_info.id() != keypair.pubkey() { + return Err(ProxyError::AuthenticationConnectionError("validator identity changed".to_string())); + } + + if *global_config.lock().unwrap() != *local_config { + return Err(ProxyError::AuthenticationConnectionError("block engine config changed".to_string())); + } + + let (maybe_new_access, maybe_new_refresh) = maybe_refresh_auth_tokens(&mut auth_client, + &access_token, + &refresh_token, + cluster_info, + connection_timeout, + refresh_within_s, + ).await?; + + if let Some(new_token) = maybe_new_access { + num_refresh_access_token += 1; + datapoint_info!( + "block_engine_stage-refresh_access_token", + ("url", &local_config.auth_service_addr, String), + ("count", num_refresh_access_token, i64), + ); + *access_token.lock().unwrap() = new_token; + } + if let Some(new_token) = maybe_new_refresh { + num_full_refreshes += 1; + datapoint_info!( + "block_engine_stage-tokens_generated", + ("url", &local_config.auth_service_addr, String), + ("count", num_full_refreshes, i64), + ); + refresh_token = new_token; + } + } + _ = maintenance_tick.tick() => { + let block_builder_info = timeout( + *connection_timeout, + client.get_block_builder_fee_info(BlockBuilderFeeInfoRequest{}) + ) + .await + .map_err(|_| ProxyError::MethodTimeout("get_block_builder_fee_info".to_string()))? + .map_err(|e| ProxyError::MethodError(e.to_string()))? + .into_inner(); + + let mut bb_fee = block_builder_fee_info.lock().unwrap(); + bb_fee.block_builder_commission = block_builder_info.commission; + bb_fee.block_builder = Pubkey::from_str(&block_builder_info.pubkey).unwrap_or(bb_fee.block_builder); + } + } + } + Ok(()) + } + + fn handle_block_engine_maybe_bundles( + maybe_bundles_response: Result, Status>, + bundle_sender: &Sender>, + block_engine_stats: &mut BlockEngineStageStats, + ) -> crate::proxy::Result<()> { + let bundles_response = maybe_bundles_response?.ok_or(ProxyError::GrpcStreamDisconnected)?; + let bundles: Vec = bundles_response + .bundles + .into_iter() + .filter_map(|bundle| { + Some(PacketBundle { + batch: PacketBatch::new( + bundle + .bundle? 
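+                        // `bundle` is an `Option` in the generated protobuf type;
+                        // the `?` inside this filter_map closure silently drops
+                        // entries that carry no inner bundle.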
+ .packets + .into_iter() + .map(proto_packet_to_packet) + .collect(), + ), + bundle_id: bundle.uuid, + }) + }) + .collect(); + + saturating_add_assign!(block_engine_stats.num_bundles, bundles.len() as u64); + saturating_add_assign!( + block_engine_stats.num_bundle_packets, + bundles.iter().map(|bundle| bundle.batch.len() as u64).sum() + ); + + // NOTE: bundles are sanitized in bundle_sanitizer module + bundle_sender + .send(bundles) + .map_err(|_| ProxyError::PacketForwardError) + } + + fn handle_block_engine_packets( + resp: block_engine::SubscribePacketsResponse, + packet_tx: &Sender, + verified_packet_tx: &Sender<(Vec, Option)>, + trust_packets: bool, + block_engine_stats: &mut BlockEngineStageStats, + ) -> crate::proxy::Result<()> { + if let Some(batch) = resp.batch { + if batch.packets.is_empty() { + saturating_add_assign!(block_engine_stats.num_empty_packets, 1); + return Ok(()); + } + + let packet_batch = PacketBatch::new( + batch + .packets + .into_iter() + .map(proto_packet_to_packet) + .collect(), + ); + + saturating_add_assign!(block_engine_stats.num_packets, packet_batch.len() as u64); + + if trust_packets { + verified_packet_tx + .send((vec![packet_batch], None)) + .map_err(|_| ProxyError::PacketForwardError)?; + } else { + packet_tx + .send(packet_batch) + .map_err(|_| ProxyError::PacketForwardError)?; + } + } else { + saturating_add_assign!(block_engine_stats.num_empty_packets, 1); + } + + Ok(()) + } + + pub fn is_valid_block_engine_config(config: &BlockEngineConfig) -> bool { + if config.auth_service_addr.is_empty() { + warn!("can't connect to block_engine auth. missing url."); + return false; + } + if config.backend_addr.is_empty() { + warn!("can't connect to block engine. missing url."); + return false; + } + if let Err(e) = Endpoint::from_str(&config.auth_service_addr) { + error!( + "can't connect to block engine. error creating block engine auth endpoint - {}", + e.to_string() + ); + return false; + } + if let Err(e) = Endpoint::from_str(&config.backend_addr) { + error!( + "can't connect to block engine. error creating block engine endpoint - {}", + e.to_string() + ); + return false; + } + true + } +} diff --git a/core/src/proxy/fetch_stage_manager.rs b/core/src/proxy/fetch_stage_manager.rs new file mode 100644 index 0000000000..72299edb64 --- /dev/null +++ b/core/src/proxy/fetch_stage_manager.rs @@ -0,0 +1,161 @@ +use { + crate::proxy::{HeartbeatEvent, ProxyError}, + crossbeam_channel::{select, tick, Receiver, Sender}, + solana_gossip::cluster_info::ClusterInfo, + solana_perf::packet::PacketBatch, + std::{ + net::SocketAddr, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + thread::{self, Builder, JoinHandle}, + time::{Duration, Instant}, + }, +}; + +const HEARTBEAT_TIMEOUT: Duration = Duration::from_millis(1500); // Empirically determined from load testing +const DISCONNECT_DELAY: Duration = Duration::from_secs(60); +const METRICS_CADENCE: Duration = Duration::from_secs(1); + +/// Manages switching between the validator's tpu ports and that of the proxy's. +/// Switch-overs are triggered by late and missed heartbeats. +pub struct FetchStageManager { + t_hdl: JoinHandle<()>, +} + +impl FetchStageManager { + pub fn new( + // ClusterInfo is used to switch between advertising the proxy's TPU ports and that of this validator's. + cluster_info: Arc, + // Channel that heartbeats are received from. Entirely responsible for triggering switch-overs. + heartbeat_rx: Receiver, + // Channel that packets from FetchStage are intercepted from. 
+ packet_intercept_rx: Receiver, + // Intercepted packets get piped through here. + packet_tx: Sender, + exit: Arc, + ) -> Self { + let t_hdl = Self::start( + cluster_info, + heartbeat_rx, + packet_intercept_rx, + packet_tx, + exit, + ); + + Self { t_hdl } + } + + /// Disconnect fetch behaviour + /// Starts connected + /// When connected and a packet is received, forward it + /// When disconnected, packet is dropped + /// When receiving heartbeat while connected and not pending disconnect + /// Sets pending_disconnect to true and records time + /// When receiving heartbeat while connected, and pending for > DISCONNECT_DELAY_SEC + /// Sets fetch_connected to false, pending_disconnect to false + /// Advertises TPU ports sent in heartbeat + /// When tick is received without heartbeat_received + /// Sets fetch_connected to true, pending_disconnect to false + /// Advertises saved contact info + fn start( + cluster_info: Arc, + heartbeat_rx: Receiver, + packet_intercept_rx: Receiver, + packet_tx: Sender, + exit: Arc, + ) -> JoinHandle<()> { + Builder::new().name("fetch-stage-manager".into()).spawn(move || { + let my_fallback_contact_info = cluster_info.my_contact_info(); + + let mut fetch_connected = true; + let mut heartbeat_received = false; + let mut pending_disconnect = false; + + let mut pending_disconnect_ts = Instant::now(); + + let heartbeat_tick = tick(HEARTBEAT_TIMEOUT); + let metrics_tick = tick(METRICS_CADENCE); + let mut packets_forwarded = 0; + let mut heartbeats_received = 0; + loop { + select! { + recv(packet_intercept_rx) -> pkt => { + match pkt { + Ok(pkt) => { + if fetch_connected { + if packet_tx.send(pkt).is_err() { + error!("{:?}", ProxyError::PacketForwardError); + return; + } + packets_forwarded += 1; + } + } + Err(_) => { + warn!("packet intercept receiver disconnected, shutting down"); + return; + } + } + } + recv(heartbeat_tick) -> _ => { + if exit.load(Ordering::Relaxed) { + break; + } + if !heartbeat_received && (!fetch_connected || pending_disconnect) { + warn!("heartbeat late, reconnecting fetch stage"); + fetch_connected = true; + pending_disconnect = false; + Self::set_tpu_addresses(&cluster_info, my_fallback_contact_info.tpu, my_fallback_contact_info.tpu_forwards); + heartbeats_received = 0; + } + heartbeat_received = false; + } + recv(heartbeat_rx) -> tpu_info => { + if let Ok((tpu_addr, tpu_forward_addr)) = tpu_info { + heartbeats_received += 1; + heartbeat_received = true; + if fetch_connected && !pending_disconnect { + info!("received heartbeat while fetch stage connected, pending disconnect after delay"); + pending_disconnect_ts = Instant::now(); + pending_disconnect = true; + } + if fetch_connected && pending_disconnect && pending_disconnect_ts.elapsed() > DISCONNECT_DELAY { + info!("disconnecting fetch stage"); + fetch_connected = false; + pending_disconnect = false; + Self::set_tpu_addresses(&cluster_info, tpu_addr, tpu_forward_addr); + } + } else { + warn!("relayer heartbeat receiver disconnected, shutting down"); + return; + } + } + recv(metrics_tick) -> _ => { + datapoint_info!( + "relayer-heartbeat", + ("fetch_stage_packets_forwarded", packets_forwarded, i64), + ("heartbeats_received", heartbeats_received, i64), + ); + + } + } + } + }).unwrap() + } + + fn set_tpu_addresses( + cluster_info: &Arc, + tpu_address: SocketAddr, + tpu_forward_address: SocketAddr, + ) { + let mut new_contact_info = cluster_info.my_contact_info(); + new_contact_info.tpu = tpu_address; + new_contact_info.tpu_forwards = tpu_forward_address; + 
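        // Publishing the updated contact info lets gossip advertise the new
+        // TPU / TPU-forward sockets to the rest of the cluster.
+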
cluster_info.set_my_contact_info(new_contact_info); + } + + pub fn join(self) -> thread::Result<()> { + self.t_hdl.join() + } +} diff --git a/core/src/proxy/mod.rs b/core/src/proxy/mod.rs new file mode 100644 index 0000000000..86d48482aa --- /dev/null +++ b/core/src/proxy/mod.rs @@ -0,0 +1,100 @@ +//! This module contains logic for connecting to an external Relayer and Block Engine. +//! The Relayer acts as an external TPU and TPU Forward socket while the Block Engine +//! is tasked with streaming high value bundles to the validator. The validator can run +//! in one of 3 modes: +//! 1. Connected to Relayer and Block Engine. +//! - This is the ideal mode as it increases the probability of building the most profitable blocks. +//! 2. Connected only to Relayer. +//! - A validator may choose to run in this mode if the main concern is to offload ingress traffic deduplication and sig-verification. +//! 3. Connected only to Block Engine. +//! - Running in this mode means pending transactions are not exposed to external actors. This mode is ideal if the validator wishes +//! to accept bundles while maintaining some level of privacy for in-flight transactions. + +mod auth; +pub mod block_engine_stage; +pub mod fetch_stage_manager; +pub mod relayer_stage; + +use { + std::{ + net::{AddrParseError, SocketAddr}, + result, + }, + thiserror::Error, + tonic::Status, +}; + +type Result = result::Result; +type HeartbeatEvent = (SocketAddr, SocketAddr); + +#[derive(Error, Debug)] +pub enum ProxyError { + #[error("grpc error: {0}")] + GrpcError(#[from] Status), + + #[error("stream disconnected")] + GrpcStreamDisconnected, + + #[error("heartbeat error")] + HeartbeatChannelError, + + #[error("heartbeat expired")] + HeartbeatExpired, + + #[error("error forwarding packet to banking stage")] + PacketForwardError, + + #[error("missing tpu config: {0:?}")] + MissingTpuSocket(String), + + #[error("invalid socket address: {0:?}")] + InvalidSocketAddress(#[from] AddrParseError), + + #[error("invalid gRPC data: {0:?}")] + InvalidData(String), + + #[error("timeout: {0:?}")] + ConnectionError(#[from] tonic::transport::Error), + + #[error("AuthenticationConnectionTimeout")] + AuthenticationConnectionTimeout, + + #[error("AuthenticationTimeout")] + AuthenticationTimeout, + + #[error("AuthenticationConnectionError: {0:?}")] + AuthenticationConnectionError(String), + + #[error("BlockEngineConnectionTimeout")] + BlockEngineConnectionTimeout, + + #[error("BlockEngineTimeout")] + BlockEngineTimeout, + + #[error("BlockEngineConnectionError: {0:?}")] + BlockEngineConnectionError(String), + + #[error("RelayerConnectionTimeout")] + RelayerConnectionTimeout, + + #[error("RelayerTimeout")] + RelayerEngineTimeout, + + #[error("RelayerConnectionError: {0:?}")] + RelayerConnectionError(String), + + #[error("AuthenticationError: {0:?}")] + AuthenticationError(String), + + #[error("AuthenticationPermissionDenied")] + AuthenticationPermissionDenied, + + #[error("BadAuthenticationToken: {0:?}")] + BadAuthenticationToken(String), + + #[error("MethodTimeout: {0:?}")] + MethodTimeout(String), + + #[error("MethodError: {0:?}")] + MethodError(String), +} diff --git a/core/src/proxy/relayer_stage.rs b/core/src/proxy/relayer_stage.rs new file mode 100644 index 0000000000..0d1997e3cd --- /dev/null +++ b/core/src/proxy/relayer_stage.rs @@ -0,0 +1,524 @@ +//! Maintains a connection to the Relayer. +//! +//! The external Relayer is responsible for the following: +//! - Acts as a TPU proxy. +//! - Sends transactions to the validator. +//! 
- Does not forward bundles, to avoid a DoS vector.
+//! - When the validator connects, it changes its TPU and TPU forward addresses to the relayer's.
+//! - Expected to send heartbeats to the validator as a watchdog. If the watchdog times out, the validator
+//!   disconnects and reverts the TPU and TPU forward settings.
+
+use {
+    crate::{
+        proto_packet_to_packet,
+        proxy::{
+            auth::{generate_auth_tokens, maybe_refresh_auth_tokens, AuthInterceptor},
+            HeartbeatEvent, ProxyError,
+        },
+        sigverify::SigverifyTracerPacketStats,
+    },
+    crossbeam_channel::Sender,
+    jito_protos::proto::{
+        auth::{auth_service_client::AuthServiceClient, Token},
+        relayer::{self, relayer_client::RelayerClient},
+    },
+    solana_gossip::cluster_info::ClusterInfo,
+    solana_perf::packet::PacketBatch,
+    solana_sdk::{
+        saturating_add_assign,
+        signature::{Keypair, Signer},
+    },
+    std::{
+        net::{IpAddr, Ipv4Addr, SocketAddr},
+        str::FromStr,
+        sync::{
+            atomic::{AtomicBool, Ordering},
+            Arc, Mutex,
+        },
+        thread::{self, Builder, JoinHandle},
+        time::{Duration, Instant},
+    },
+    tokio::time::{interval, sleep, timeout},
+    tonic::{
+        codegen::InterceptedService,
+        transport::{Channel, Endpoint},
+        Streaming,
+    },
+};
+
+const CONNECTION_TIMEOUT_S: u64 = 10;
+const CONNECTION_BACKOFF_S: u64 = 5;
+
+#[derive(Default)]
+struct RelayerStageStats {
+    num_empty_messages: u64,
+    num_packets: u64,
+    num_heartbeats: u64,
+}
+
+impl RelayerStageStats {
+    pub(crate) fn report(&self) {
+        datapoint_info!(
+            "relayer_stage-stats",
+            ("num_empty_messages", self.num_empty_messages, i64),
+            ("num_packets", self.num_packets, i64),
+            ("num_heartbeats", self.num_heartbeats, i64),
+        );
+    }
+}
+
+#[derive(Clone, Debug, Default, PartialEq, Eq)]
+pub struct RelayerConfig {
+    /// Address to the external auth-service responsible for generating access tokens.
+    pub auth_service_addr: String,
+
+    /// Relayer gRPC address.
+    pub backend_addr: String,
+
+    /// Interval at which heartbeats are expected.
+    pub expected_heartbeat_interval: Duration,
+
+    /// The max tolerable age of the last heartbeat.
+    pub oldest_allowed_heartbeat: Duration,
+
+    /// If set, the backend is assumed to have verified packets, so signature verification is bypassed in the validator.
+    pub trust_packets: bool,
+}
+
+pub struct RelayerStage {
+    t_hdls: Vec<JoinHandle<()>>,
+}
+
+impl RelayerStage {
+    pub fn new(
+        relayer_config: Arc<Mutex<RelayerConfig>>,
+        // The keypair stored here is used to sign auth challenges.
+        cluster_info: Arc<ClusterInfo>,
+        // Channel that server-sent heartbeats are piped through.
+        heartbeat_tx: Sender<HeartbeatEvent>,
+        // Channel that non-trusted streamed packets are piped through.
+        packet_tx: Sender<PacketBatch>,
+        // Channel that trusted streamed packets are piped through.
+ verified_packet_tx: Sender<(Vec, Option)>, + exit: Arc, + ) -> Self { + let thread = Builder::new() + .name("relayer-stage".to_string()) + .spawn(move || { + let rt = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .build() + .unwrap(); + + rt.block_on(Self::start( + relayer_config, + cluster_info, + heartbeat_tx, + packet_tx, + verified_packet_tx, + exit, + )); + }) + .unwrap(); + + Self { + t_hdls: vec![thread], + } + } + + pub fn join(self) -> thread::Result<()> { + for t in self.t_hdls { + t.join()?; + } + Ok(()) + } + + #[allow(clippy::too_many_arguments)] + async fn start( + relayer_config: Arc>, + cluster_info: Arc, + heartbeat_tx: Sender, + packet_tx: Sender, + verified_packet_tx: Sender<(Vec, Option)>, + exit: Arc, + ) { + const CONNECTION_TIMEOUT: Duration = Duration::from_secs(CONNECTION_TIMEOUT_S); + const CONNECTION_BACKOFF: Duration = Duration::from_secs(CONNECTION_BACKOFF_S); + let mut error_count: u64 = 0; + + while !exit.load(Ordering::Relaxed) { + // Wait until a valid config is supplied (either initially or by admin rpc) + // Use if!/else here to avoid extra CONNECTION_BACKOFF wait on successful termination + if !Self::is_valid_relayer_config(&relayer_config.lock().unwrap()) { + sleep(CONNECTION_BACKOFF).await; + } else if let Err(e) = Self::connect_auth_and_stream( + &relayer_config, + &cluster_info, + &heartbeat_tx, + &packet_tx, + &verified_packet_tx, + &exit, + &CONNECTION_TIMEOUT, + ) + .await + { + match e { + // This error is frequent on hot spares, and the parsed string does not work + // with datapoints (incorrect escaping). + ProxyError::AuthenticationPermissionDenied => { + warn!("relayer permission denied. not on leader schedule. ignore if hot-spare.") + } + e => { + error_count += 1; + datapoint_warn!( + "relayer_stage-proxy_error", + ("count", error_count, i64), + ("error", e.to_string(), String), + ); + } + } + sleep(CONNECTION_BACKOFF).await; + } + } + } + + async fn connect_auth_and_stream( + relayer_config: &Arc>, + cluster_info: &Arc, + heartbeat_tx: &Sender, + packet_tx: &Sender, + verified_packet_tx: &Sender<(Vec, Option)>, + exit: &Arc, + connection_timeout: &Duration, + ) -> crate::proxy::Result<()> { + // Get a copy of configs here in case they have changed at runtime + let keypair = cluster_info.keypair().clone(); + let local_config = relayer_config.lock().unwrap().clone(); + + let mut auth_service_endpoint = + Endpoint::from_shared(local_config.auth_service_addr.clone()).map_err(|_| { + ProxyError::AuthenticationConnectionError(format!( + "invalid relayer url value: {}", + local_config.auth_service_addr + )) + })?; + if local_config.auth_service_addr.starts_with("https") { + auth_service_endpoint = auth_service_endpoint + .tls_config(tonic::transport::ClientTlsConfig::new()) + .map_err(|_| { + ProxyError::AuthenticationConnectionError( + "failed to set tls_config for relayer auth service".to_string(), + ) + })?; + } + let mut backend_endpoint = Endpoint::from_shared(local_config.backend_addr.clone()) + .map_err(|_| { + ProxyError::RelayerConnectionError(format!( + "invalid relayer url value: {}", + local_config.backend_addr + )) + })? 
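+            // The stream is long-lived; TCP keepalives prevent intermediate
+            // hops from silently dropping an idle connection.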
+ .tcp_keepalive(Some(Duration::from_secs(60))); + if local_config.backend_addr.starts_with("https") { + backend_endpoint = backend_endpoint + .tls_config(tonic::transport::ClientTlsConfig::new()) + .map_err(|_| { + ProxyError::RelayerConnectionError( + "failed to set tls_config for relayer service".to_string(), + ) + })?; + } + + debug!("connecting to auth: {}", local_config.auth_service_addr); + let auth_channel = timeout(*connection_timeout, auth_service_endpoint.connect()) + .await + .map_err(|_| ProxyError::AuthenticationConnectionTimeout)? + .map_err(|e| ProxyError::AuthenticationConnectionError(e.to_string()))?; + + let mut auth_client = AuthServiceClient::new(auth_channel); + + debug!("generating authentication token"); + let (access_token, refresh_token) = timeout( + *connection_timeout, + generate_auth_tokens(&mut auth_client, &keypair), + ) + .await + .map_err(|_| ProxyError::AuthenticationTimeout)??; + + datapoint_info!( + "relayer_stage-tokens_generated", + ("url", local_config.auth_service_addr, String), + ("count", 1, i64), + ); + + debug!("connecting to relayer: {}", local_config.backend_addr); + let relayer_channel = timeout(*connection_timeout, backend_endpoint.connect()) + .await + .map_err(|_| ProxyError::RelayerConnectionTimeout)? + .map_err(|e| ProxyError::RelayerConnectionError(e.to_string()))?; + + let access_token = Arc::new(Mutex::new(access_token)); + let relayer_client = RelayerClient::with_interceptor( + relayer_channel, + AuthInterceptor::new(access_token.clone()), + ); + + Self::start_consuming_relayer_packets( + relayer_client, + heartbeat_tx, + packet_tx, + verified_packet_tx, + &local_config, + relayer_config, + exit, + auth_client, + access_token, + refresh_token, + keypair, + cluster_info, + connection_timeout, + ) + .await + } + + #[allow(clippy::too_many_arguments)] + async fn start_consuming_relayer_packets( + mut client: RelayerClient>, + heartbeat_tx: &Sender, + packet_tx: &Sender, + verified_packet_tx: &Sender<(Vec, Option)>, + local_config: &RelayerConfig, // local copy of config with current connections + global_config: &Arc>, // guarded reference for detecting run-time updates + exit: &Arc, + auth_client: AuthServiceClient, + access_token: Arc>, + refresh_token: Token, + keypair: Arc, + cluster_info: &Arc, + connection_timeout: &Duration, + ) -> crate::proxy::Result<()> { + let heartbeat_event: HeartbeatEvent = { + let tpu_config = timeout( + *connection_timeout, + client.get_tpu_configs(relayer::GetTpuConfigsRequest {}), + ) + .await + .map_err(|_| ProxyError::MethodTimeout("relayer_get_tpu_configs".to_string()))? + .map_err(|e| ProxyError::MethodError(e.to_string()))? + .into_inner(); + + let tpu_addr = tpu_config + .tpu + .ok_or_else(|| ProxyError::MissingTpuSocket("tpu".to_string()))?; + let tpu_forward_addr = tpu_config + .tpu_forward + .ok_or_else(|| ProxyError::MissingTpuSocket("tpu_fwd".to_string()))?; + + let tpu_ip = IpAddr::from(tpu_addr.ip.parse::()?); + let tpu_forward_ip = IpAddr::from(tpu_forward_addr.ip.parse::()?); + + let tpu_socket = SocketAddr::new(tpu_ip, tpu_addr.port as u16); + let tpu_forward_socket = SocketAddr::new(tpu_forward_ip, tpu_forward_addr.port as u16); + (tpu_socket, tpu_forward_socket) + }; + + let packet_stream = timeout( + *connection_timeout, + client.subscribe_packets(relayer::SubscribePacketsRequest {}), + ) + .await + .map_err(|_| ProxyError::MethodTimeout("relayer_subscribe_packets".to_string()))? + .map_err(|e| ProxyError::MethodError(e.to_string()))? 
+ .into_inner(); + + Self::consume_packet_stream( + heartbeat_event, + heartbeat_tx, + packet_stream, + packet_tx, + local_config, + global_config, + verified_packet_tx, + exit, + auth_client, + access_token, + refresh_token, + keypair, + cluster_info, + connection_timeout, + ) + .await + } + + #[allow(clippy::too_many_arguments)] + async fn consume_packet_stream( + heartbeat_event: HeartbeatEvent, + heartbeat_tx: &Sender, + mut packet_stream: Streaming, + packet_tx: &Sender, + local_config: &RelayerConfig, // local copy of config with current connections + global_config: &Arc>, // guarded reference for detecting run-time updates + verified_packet_tx: &Sender<(Vec, Option)>, + exit: &Arc, + mut auth_client: AuthServiceClient, + access_token: Arc>, + mut refresh_token: Token, + keypair: Arc, + cluster_info: &Arc, + connection_timeout: &Duration, + ) -> crate::proxy::Result<()> { + const METRICS_TICK: Duration = Duration::from_secs(1); + let refresh_within_s: u64 = METRICS_TICK.as_secs().saturating_mul(3).saturating_div(2); + + let mut relayer_stats = RelayerStageStats::default(); + let mut metrics_and_auth_tick = interval(METRICS_TICK); + + let mut num_full_refreshes: u64 = 1; + let mut num_refresh_access_token: u64 = 0; + + let mut heartbeat_check_interval = interval(local_config.expected_heartbeat_interval); + let mut last_heartbeat_ts = Instant::now(); + + info!("connected to packet stream"); + + while !exit.load(Ordering::Relaxed) { + tokio::select! { + maybe_msg = packet_stream.message() => { + let resp = maybe_msg?.ok_or(ProxyError::GrpcStreamDisconnected)?; + Self::handle_relayer_packets(resp, heartbeat_event, heartbeat_tx, &mut last_heartbeat_ts, packet_tx, local_config.trust_packets, verified_packet_tx, &mut relayer_stats)?; + } + _ = heartbeat_check_interval.tick() => { + if last_heartbeat_ts.elapsed() > local_config.oldest_allowed_heartbeat { + return Err(ProxyError::HeartbeatExpired); + } + } + _ = metrics_and_auth_tick.tick() => { + relayer_stats.report(); + relayer_stats = RelayerStageStats::default(); + + if cluster_info.id() != keypair.pubkey() { + return Err(ProxyError::AuthenticationConnectionError("validator identity changed".to_string())); + } + + if *global_config.lock().unwrap() != *local_config { + return Err(ProxyError::AuthenticationConnectionError("relayer config changed".to_string())); + } + + let (maybe_new_access, maybe_new_refresh) = maybe_refresh_auth_tokens(&mut auth_client, + &access_token, + &refresh_token, + cluster_info, + connection_timeout, + refresh_within_s, + ).await?; + + if let Some(new_token) = maybe_new_access { + num_refresh_access_token += 1; + datapoint_info!( + "relayer_stage-refresh_access_token", + ("url", &local_config.auth_service_addr, String), + ("count", num_refresh_access_token, i64), + ); + *access_token.lock().unwrap() = new_token; + } + if let Some(new_token) = maybe_new_refresh { + num_full_refreshes += 1; + datapoint_info!( + "relayer_stage-tokens_generated", + ("url", &local_config.auth_service_addr, String), + ("count", num_full_refreshes, i64), + ); + refresh_token = new_token; + } + } + } + } + Ok(()) + } + + fn handle_relayer_packets( + subscribe_packets_resp: relayer::SubscribePacketsResponse, + heartbeat_event: HeartbeatEvent, + heartbeat_tx: &Sender, + last_heartbeat_ts: &mut Instant, + packet_tx: &Sender, + trust_packets: bool, + verified_packet_tx: &Sender<(Vec, Option)>, + relayer_stats: &mut RelayerStageStats, + ) -> crate::proxy::Result<()> { + match subscribe_packets_resp.msg { + None => { + 
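                // `msg` is a protobuf oneof, so a frame with no payload decodes
+                // as `None`; count it as an empty message.
+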
saturating_add_assign!(relayer_stats.num_empty_messages, 1); + } + Some(relayer::subscribe_packets_response::Msg::Batch(proto_batch)) => { + if proto_batch.packets.is_empty() { + saturating_add_assign!(relayer_stats.num_empty_messages, 1); + return Ok(()); + } + + let packet_batch = PacketBatch::new( + proto_batch + .packets + .into_iter() + .map(proto_packet_to_packet) + .collect(), + ); + + saturating_add_assign!(relayer_stats.num_packets, packet_batch.len() as u64); + + if trust_packets { + verified_packet_tx + .send((vec![packet_batch], None)) + .map_err(|_| ProxyError::PacketForwardError)?; + } else { + packet_tx + .send(packet_batch) + .map_err(|_| ProxyError::PacketForwardError)?; + } + } + Some(relayer::subscribe_packets_response::Msg::Heartbeat(_)) => { + saturating_add_assign!(relayer_stats.num_heartbeats, 1); + + *last_heartbeat_ts = Instant::now(); + heartbeat_tx + .send(heartbeat_event) + .map_err(|_| ProxyError::HeartbeatChannelError)?; + } + } + Ok(()) + } + + pub fn is_valid_relayer_config(config: &RelayerConfig) -> bool { + if config.auth_service_addr.is_empty() { + warn!("can't connect to relayer auth. missing url."); + return false; + } + if config.backend_addr.is_empty() { + warn!("can't connect to relayer. missing url."); + return false; + } + if config.oldest_allowed_heartbeat.is_zero() { + error!("can't connect to relayer. oldest allowed heartbeat must be greater than 0."); + return false; + } + if config.expected_heartbeat_interval.is_zero() { + error!("can't connect to relayer. expected heartbeat interval must be greater than 0."); + return false; + } + if let Err(e) = Endpoint::from_str(&config.auth_service_addr) { + error!( + "can't connect to relayer. error creating relayer auth endpoint - {}", + e.to_string() + ); + return false; + } + if let Err(e) = Endpoint::from_str(&config.backend_addr) { + error!( + "can't connect to relayer. 
error creating relayer endpoint - {}", + e.to_string() + ); + return false; + } + true + } +} diff --git a/core/src/qos_service.rs b/core/src/qos_service.rs index 9b54e2a302..1e3ad1012d 100644 --- a/core/src/qos_service.rs +++ b/core/src/qos_service.rs @@ -9,7 +9,7 @@ use { solana_runtime::{ bank::Bank, cost_model::{CostModel, TransactionCost}, - cost_tracker::CostTrackerError, + cost_tracker::{CostTracker, CostTrackerError}, }, solana_sdk::{ clock::Slot, @@ -126,22 +126,22 @@ impl QosService { &self, transactions: impl Iterator, transactions_costs: impl Iterator, - bank: &Arc, + slot: Slot, + cost_tracker: &mut CostTracker, ) -> (Vec>, usize) { let mut cost_tracking_time = Measure::start("cost_tracking_time"); - let mut cost_tracker = bank.write_cost_tracker().unwrap(); let mut num_included = 0; let select_results = transactions .zip(transactions_costs) .map(|(tx, cost)| match cost_tracker.try_add(cost) { Ok(current_block_cost) => { - debug!("slot {:?}, transaction {:?}, cost {:?}, fit into current block, current block cost {}", bank.slot(), tx, cost, current_block_cost); + debug!("slot {:?}, transaction {:?}, cost {:?}, fit into current block, current block cost {}", slot, tx, cost, current_block_cost); self.metrics.stats.selected_txs_count.fetch_add(1, Ordering::Relaxed); num_included += 1; Ok(()) }, Err(e) => { - debug!("slot {:?}, transaction {:?}, cost {:?}, not fit into current block, '{:?}'", bank.slot(), tx, cost, e); + debug!("slot {:?}, transaction {:?}, cost {:?}, not fit into current block, '{:?}'", slot, tx, cost, e); match e { CostTrackerError::WouldExceedBlockMaxLimit => { Err(TransactionError::WouldExceedMaxBlockCostLimit) @@ -219,7 +219,7 @@ impl QosService { ); } - fn remove_transaction_costs<'a>( + pub fn remove_transaction_costs<'a>( transaction_costs: impl Iterator, transaction_qos_results: impl Iterator>, bank: &Arc, @@ -640,8 +640,12 @@ mod tests { bank.write_cost_tracker() .unwrap() .set_limits(cost_limit, cost_limit, cost_limit); - let (results, num_selected) = - qos_service.select_transactions_per_cost(txs.iter(), txs_costs.iter(), &bank); + let (results, num_selected) = qos_service.select_transactions_per_cost( + txs.iter(), + txs_costs.iter(), + bank.slot(), + &mut bank.write_cost_tracker().unwrap(), + ); assert_eq!(num_selected, 2); // verify that first transfer tx and first vote are allowed @@ -675,8 +679,12 @@ mod tests { let qos_service = QosService::new(Arc::new(RwLock::new(CostModel::default())), 1); let txs_costs = qos_service.compute_transaction_costs(txs.iter()); let total_txs_cost: u64 = txs_costs.iter().map(|cost| cost.sum()).sum(); - let (qos_results, _num_included) = - qos_service.select_transactions_per_cost(txs.iter(), txs_costs.iter(), &bank); + let (qos_results, _num_included) = qos_service.select_transactions_per_cost( + txs.iter(), + txs_costs.iter(), + bank.slot(), + &mut bank.write_cost_tracker().unwrap(), + ); assert_eq!( total_txs_cost, bank.read_cost_tracker().unwrap().block_cost() @@ -728,8 +736,12 @@ mod tests { let qos_service = QosService::new(Arc::new(RwLock::new(CostModel::default())), 1); let txs_costs = qos_service.compute_transaction_costs(txs.iter()); let total_txs_cost: u64 = txs_costs.iter().map(|cost| cost.sum()).sum(); - let (qos_results, _num_included) = - qos_service.select_transactions_per_cost(txs.iter(), txs_costs.iter(), &bank); + let (qos_results, _num_included) = qos_service.select_transactions_per_cost( + txs.iter(), + txs_costs.iter(), + bank.slot(), + &mut bank.write_cost_tracker().unwrap(), + ); assert_eq!( 
total_txs_cost, bank.read_cost_tracker().unwrap().block_cost() @@ -768,8 +780,12 @@ mod tests { let qos_service = QosService::new(Arc::new(RwLock::new(CostModel::default())), 1); let txs_costs = qos_service.compute_transaction_costs(txs.iter()); let total_txs_cost: u64 = txs_costs.iter().map(|cost| cost.sum()).sum(); - let (qos_results, _num_included) = - qos_service.select_transactions_per_cost(txs.iter(), txs_costs.iter(), &bank); + let (qos_results, _num_included) = qos_service.select_transactions_per_cost( + txs.iter(), + txs_costs.iter(), + bank.slot(), + &mut bank.write_cost_tracker().unwrap(), + ); assert_eq!( total_txs_cost, bank.read_cost_tracker().unwrap().block_cost() diff --git a/core/src/retransmit_stage.rs b/core/src/retransmit_stage.rs index 3f048a2433..e9a3ebee92 100644 --- a/core/src/retransmit_stage.rs +++ b/core/src/retransmit_stage.rs @@ -29,7 +29,7 @@ use { std::{ collections::HashMap, iter::repeat, - net::UdpSocket, + net::{SocketAddr, UdpSocket}, ops::AddAssign, sync::{ atomic::{AtomicU64, AtomicUsize, Ordering}, @@ -178,6 +178,7 @@ fn retransmit( shred_deduper: &mut ShredDeduper<2>, max_slots: &MaxSlots, rpc_subscriptions: Option<&RpcSubscriptions>, + shred_receiver_address: &Arc>>, ) -> Result<(), RecvTimeoutError> { const RECV_TIMEOUT: Duration = Duration::from_secs(1); let mut shreds = shreds_receiver.recv_timeout(RECV_TIMEOUT)?; @@ -260,6 +261,7 @@ fn retransmit( socket_addr_space, &sockets[index % sockets.len()], stats, + &shred_receiver_address.read().unwrap(), ) .map_err(|err| { stats.record_error(&err); @@ -284,6 +286,7 @@ fn retransmit( socket_addr_space, &sockets[index % sockets.len()], stats, + &shred_receiver_address.read().unwrap(), ) .map_err(|err| { stats.record_error(&err); @@ -312,11 +315,17 @@ fn retransmit_shred( socket_addr_space: &SocketAddrSpace, socket: &UdpSocket, stats: &RetransmitStats, + shred_receiver_addr: &Option, ) -> Result<(/*root_distance:*/ usize, /*num_nodes:*/ usize), Error> { let mut compute_turbine_peers = Measure::start("turbine_start"); + let data_plane_fanout = cluster_nodes::get_data_plane_fanout(key.slot(), root_bank); - let (root_distance, addrs) = + let (root_distance, mut addrs) = cluster_nodes.get_retransmit_addrs(slot_leader, key, root_bank, data_plane_fanout)?; + if let Some(addr) = shred_receiver_addr { + addrs.push(*addr); + } + let addrs: Vec<_> = addrs .into_iter() .filter(|addr| ContactInfo::is_valid_address(addr, socket_addr_space)) @@ -366,6 +375,7 @@ pub fn retransmitter( shreds_receiver: Receiver>>, max_slots: Arc, rpc_subscriptions: Option>, + shred_receiver_addr: Arc>>, ) -> JoinHandle<()> { let cluster_nodes_cache = ClusterNodesCache::::new( CLUSTER_NODES_CACHE_NUM_EPOCH_CAP, @@ -395,6 +405,7 @@ pub fn retransmitter( &mut shred_deduper, &max_slots, rpc_subscriptions.as_deref(), + &shred_receiver_addr, ) { Ok(()) => (), Err(RecvTimeoutError::Timeout) => (), @@ -417,6 +428,7 @@ impl RetransmitStage { retransmit_receiver: Receiver>>, max_slots: Arc, rpc_subscriptions: Option>, + shred_receiver_addr: Arc>>, ) -> Self { let retransmit_thread_handle = retransmitter( retransmit_sockets, @@ -426,6 +438,7 @@ impl RetransmitStage { retransmit_receiver, max_slots, rpc_subscriptions, + shred_receiver_addr, ); Self { diff --git a/core/src/tip_manager.rs b/core/src/tip_manager.rs new file mode 100644 index 0000000000..fe15d1dc05 --- /dev/null +++ b/core/src/tip_manager.rs @@ -0,0 +1,472 @@ +use { + anchor_lang::{ + solana_program::hash::Hash, AccountDeserialize, InstructionData, ToAccountMetas, + }, + log::warn, + 
solana_gossip::cluster_info::ClusterInfo, + solana_runtime::bank::Bank, + solana_sdk::{ + account::ReadableAccount, + bundle::error::TipPaymentError, + instruction::Instruction, + pubkey::Pubkey, + signature::Keypair, + signer::Signer, + stake_history::Epoch, + system_program, + transaction::{SanitizedTransaction, Transaction}, + }, + std::{ + collections::HashSet, + sync::{Arc, Mutex, MutexGuard}, + }, + tip_distribution::sdk::{ + derive_config_account_address, derive_tip_distribution_account_address, + instruction::{ + initialize_ix, initialize_tip_distribution_account_ix, InitializeAccounts, + InitializeArgs, InitializeTipDistributionAccountAccounts, + InitializeTipDistributionAccountArgs, + }, + }, + tip_payment::{ + Config, InitBumps, TipPaymentAccount, CONFIG_ACCOUNT_SEED, TIP_ACCOUNT_SEED_0, + TIP_ACCOUNT_SEED_1, TIP_ACCOUNT_SEED_2, TIP_ACCOUNT_SEED_3, TIP_ACCOUNT_SEED_4, + TIP_ACCOUNT_SEED_5, TIP_ACCOUNT_SEED_6, TIP_ACCOUNT_SEED_7, + }, +}; + +pub type Result = std::result::Result; + +#[derive(Debug, Clone)] +struct TipPaymentProgramInfo { + program_id: Pubkey, + + config_pda_bump: (Pubkey, u8), + tip_pda_0: (Pubkey, u8), + tip_pda_1: (Pubkey, u8), + tip_pda_2: (Pubkey, u8), + tip_pda_3: (Pubkey, u8), + tip_pda_4: (Pubkey, u8), + tip_pda_5: (Pubkey, u8), + tip_pda_6: (Pubkey, u8), + tip_pda_7: (Pubkey, u8), +} + +/// Contains metadata regarding the tip-distribution account. +/// The PDAs contained in this struct are presumed to be owned by the program. +#[derive(Debug, Clone)] +struct TipDistributionProgramInfo { + /// The tip-distribution program_id. + program_id: Pubkey, + + /// Singleton [Config] PDA and bump tuple. + config_pda_and_bump: (Pubkey, u8), +} + +/// This config is used on each invocation to the `initialize_tip_distribution_account` instruction. +#[derive(Debug, Clone)] +pub struct TipDistributionAccountConfig { + /// The account with authority to upload merkle-roots to this validator's [TipDistributionAccount]. + pub merkle_root_upload_authority: Pubkey, + + /// This validator's vote account. + pub vote_account: Pubkey, + + /// This validator's commission rate BPS for tips in the [TipDistributionAccount]. 
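+    /// Expressed in basis points: e.g. a value of 800 is an 8% commission
+    /// (10_000 bps = 100%).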
+ pub commission_bps: u16, +} + +impl Default for TipDistributionAccountConfig { + fn default() -> Self { + Self { + merkle_root_upload_authority: Pubkey::new_unique(), + vote_account: Pubkey::new_unique(), + commission_bps: 0, + } + } +} + +#[derive(Debug, Clone)] +pub struct TipManager { + tip_payment_program_info: TipPaymentProgramInfo, + tip_distribution_program_info: TipDistributionProgramInfo, + tip_distribution_account_config: TipDistributionAccountConfig, + lock: Arc>, +} + +#[derive(Clone)] +pub struct TipManagerConfig { + pub tip_payment_program_id: Pubkey, + pub tip_distribution_program_id: Pubkey, + pub tip_distribution_account_config: TipDistributionAccountConfig, +} + +impl Default for TipManagerConfig { + fn default() -> Self { + TipManagerConfig { + tip_payment_program_id: Pubkey::new_unique(), + tip_distribution_program_id: Pubkey::new_unique(), + tip_distribution_account_config: TipDistributionAccountConfig::default(), + } + } +} + +impl TipManager { + pub fn new(config: TipManagerConfig) -> TipManager { + let TipManagerConfig { + tip_payment_program_id, + tip_distribution_program_id, + tip_distribution_account_config, + } = config; + + let config_pda_bump = + Pubkey::find_program_address(&[CONFIG_ACCOUNT_SEED], &tip_payment_program_id); + + let tip_pda_0 = + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_0], &tip_payment_program_id); + let tip_pda_1 = + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_1], &tip_payment_program_id); + let tip_pda_2 = + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_2], &tip_payment_program_id); + let tip_pda_3 = + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_3], &tip_payment_program_id); + let tip_pda_4 = + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_4], &tip_payment_program_id); + let tip_pda_5 = + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_5], &tip_payment_program_id); + let tip_pda_6 = + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_6], &tip_payment_program_id); + let tip_pda_7 = + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_7], &tip_payment_program_id); + + let config_pda_and_bump = derive_config_account_address(&tip_distribution_program_id); + + TipManager { + tip_payment_program_info: TipPaymentProgramInfo { + program_id: tip_payment_program_id, + config_pda_bump, + tip_pda_0, + tip_pda_1, + tip_pda_2, + tip_pda_3, + tip_pda_4, + tip_pda_5, + tip_pda_6, + tip_pda_7, + }, + tip_distribution_program_info: TipDistributionProgramInfo { + program_id: tip_distribution_program_id, + config_pda_and_bump, + }, + tip_distribution_account_config, + lock: Arc::new(Mutex::new(())), + } + } + + pub fn tip_payment_program_id(&self) -> Pubkey { + self.tip_payment_program_info.program_id + } + + /// Returns the [Config] account owned by the tip-payment program. + pub fn tip_payment_config_pubkey(&self) -> Pubkey { + self.tip_payment_program_info.config_pda_bump.0 + } + + /// Returns the [Config] account owned by the tip-distribution program. + pub fn tip_distribution_config_pubkey(&self) -> Pubkey { + self.tip_distribution_program_info.config_pda_and_bump.0 + } + + /// Given a bank, returns the current `tip_receiver` configured with the tip-payment program. 
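+    /// Errors with [TipPaymentError::AccountMissing] when the tip-payment [Config]
+    /// account does not exist in the given bank.
+    ///
+    /// A minimal sketch (illustrative; assumes a constructed `TipManager` and a
+    /// bank in which the tip-payment program has been initialized):
+    ///
+    /// ```ignore
+    /// let tip_receiver: Pubkey = tip_manager.get_configured_tip_receiver(&bank)?;
+    /// if tip_receiver != my_tip_distribution_pda {
+    ///     // build a change-tip-receiver transaction before executing bundles
+    /// }
+    /// ```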
+ pub fn get_configured_tip_receiver(&self, bank: &Bank) -> Result { + Ok(self.get_tip_payment_config_account(bank)?.tip_receiver) + } + + pub fn get_tip_accounts(&self) -> HashSet { + HashSet::from([ + self.tip_payment_program_info.tip_pda_0.0, + self.tip_payment_program_info.tip_pda_1.0, + self.tip_payment_program_info.tip_pda_2.0, + self.tip_payment_program_info.tip_pda_3.0, + self.tip_payment_program_info.tip_pda_4.0, + self.tip_payment_program_info.tip_pda_5.0, + self.tip_payment_program_info.tip_pda_6.0, + self.tip_payment_program_info.tip_pda_7.0, + ]) + } + + pub fn get_tip_payment_config_account(&self, bank: &Bank) -> Result { + let config_data = bank + .get_account(&self.tip_payment_program_info.config_pda_bump.0) + .ok_or(TipPaymentError::AccountMissing( + self.tip_payment_program_info.config_pda_bump.0, + ))?; + + Ok(Config::try_deserialize(&mut config_data.data())?) + } + + /// Only called once during contract creation. + pub fn initialize_tip_payment_program_tx( + &self, + recent_blockhash: Hash, + keypair: &Keypair, + ) -> SanitizedTransaction { + let init_ix = Instruction { + program_id: self.tip_payment_program_info.program_id, + data: tip_payment::instruction::Initialize { + _bumps: InitBumps { + config: self.tip_payment_program_info.config_pda_bump.1, + tip_payment_account_0: self.tip_payment_program_info.tip_pda_0.1, + tip_payment_account_1: self.tip_payment_program_info.tip_pda_1.1, + tip_payment_account_2: self.tip_payment_program_info.tip_pda_2.1, + tip_payment_account_3: self.tip_payment_program_info.tip_pda_3.1, + tip_payment_account_4: self.tip_payment_program_info.tip_pda_4.1, + tip_payment_account_5: self.tip_payment_program_info.tip_pda_5.1, + tip_payment_account_6: self.tip_payment_program_info.tip_pda_6.1, + tip_payment_account_7: self.tip_payment_program_info.tip_pda_7.1, + }, + } + .data(), + accounts: tip_payment::accounts::Initialize { + config: self.tip_payment_program_info.config_pda_bump.0, + tip_payment_account_0: self.tip_payment_program_info.tip_pda_0.0, + tip_payment_account_1: self.tip_payment_program_info.tip_pda_1.0, + tip_payment_account_2: self.tip_payment_program_info.tip_pda_2.0, + tip_payment_account_3: self.tip_payment_program_info.tip_pda_3.0, + tip_payment_account_4: self.tip_payment_program_info.tip_pda_4.0, + tip_payment_account_5: self.tip_payment_program_info.tip_pda_5.0, + tip_payment_account_6: self.tip_payment_program_info.tip_pda_6.0, + tip_payment_account_7: self.tip_payment_program_info.tip_pda_7.0, + system_program: system_program::id(), + payer: keypair.pubkey(), + } + .to_account_metas(None), + }; + SanitizedTransaction::try_from_legacy_transaction(Transaction::new_signed_with_payer( + &[init_ix], + Some(&keypair.pubkey()), + &[keypair], + recent_blockhash, + )) + .unwrap() + } + + pub fn lock(&self) -> MutexGuard<()> { + self.lock.lock().unwrap() + } + + /// Returns this validator's [TipDistributionAccount] PDA derived from the provided epoch. + pub fn get_my_tip_distribution_pda(&self, epoch: Epoch) -> Pubkey { + derive_tip_distribution_account_address( + &self.tip_distribution_program_info.program_id, + &self.tip_distribution_account_config.vote_account, + epoch, + ) + .0 + } + + /// Returns whether or not the tip-payment program should be initialized. 
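+    /// Returns true when the [Config] account is missing or not owned by the
+    /// tip-payment program; anyone can fund an arbitrary address, so existence
+    /// alone is not a sufficient check.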
+ pub fn should_initialize_tip_payment_program(&self, bank: &Bank) -> bool { + match bank.get_account(&self.tip_payment_config_pubkey()) { + None => true, + Some(account) => account.owner() != &self.tip_payment_program_info.program_id, + } + } + + /// Returns whether or not the tip-distribution program's [Config] PDA should be initialized. + pub fn should_initialize_tip_distribution_config(&self, bank: &Bank) -> bool { + match bank.get_account(&self.tip_distribution_config_pubkey()) { + None => true, + Some(account) => account.owner() != &self.tip_distribution_program_info.program_id, + } + } + + /// Returns whether or not the current [TipDistributionAccount] PDA should be initialized for this epoch. + pub fn should_init_tip_distribution_account(&self, bank: &Bank) -> bool { + let pda = derive_tip_distribution_account_address( + &self.tip_distribution_program_info.program_id, + &self.tip_distribution_account_config.vote_account, + bank.epoch(), + ) + .0; + match bank.get_account(&pda) { + None => true, + // Since anyone can derive the PDA and send it lamports we must also check the owner is the program. + Some(account) => account.owner() != &self.tip_distribution_program_info.program_id, + } + } + + /// Creates an [Initialize] transaction object. + pub fn initialize_tip_distribution_config_tx( + &self, + recent_blockhash: Hash, + cluster_info: &Arc, + ) -> SanitizedTransaction { + let ix = initialize_ix( + self.tip_distribution_program_info.program_id, + InitializeArgs { + authority: cluster_info.id(), + expired_funds_account: cluster_info.id(), + num_epochs_valid: 10, + max_validator_commission_bps: 10_000, + bump: self.tip_distribution_program_info.config_pda_and_bump.1, + }, + InitializeAccounts { + config: self.tip_distribution_program_info.config_pda_and_bump.0, + system_program: system_program::id(), + initializer: cluster_info.id(), + }, + ); + + SanitizedTransaction::try_from_legacy_transaction(Transaction::new_signed_with_payer( + &[ix], + Some(&cluster_info.id()), + &[cluster_info.keypair().as_ref()], + recent_blockhash, + )) + .unwrap() + } + + /// Creates an [InitializeTipDistributionAccount] transaction object using the provided Epoch. + pub fn initialize_tip_distribution_account_tx( + &self, + recent_blockhash: Hash, + epoch: Epoch, + cluster_info: &Arc, + ) -> SanitizedTransaction { + let (tip_distribution_account, bump) = derive_tip_distribution_account_address( + &self.tip_distribution_program_info.program_id, + &self.tip_distribution_account_config.vote_account, + epoch, + ); + + let ix = initialize_tip_distribution_account_ix( + self.tip_distribution_program_info.program_id, + InitializeTipDistributionAccountArgs { + merkle_root_upload_authority: self + .tip_distribution_account_config + .merkle_root_upload_authority, + validator_commission_bps: self.tip_distribution_account_config.commission_bps, + bump, + }, + InitializeTipDistributionAccountAccounts { + config: self.tip_distribution_program_info.config_pda_and_bump.0, + tip_distribution_account, + system_program: system_program::id(), + signer: cluster_info.id(), + validator_vote_account: self.tip_distribution_account_config.vote_account, + }, + ); + + SanitizedTransaction::try_from_legacy_transaction(Transaction::new_signed_with_payer( + &[ix], + Some(&cluster_info.id()), + &[cluster_info.keypair().as_ref()], + recent_blockhash, + )) + .unwrap() + } + + /// Builds a transaction that changes the current tip receiver to new_tip_receiver. 
+ /// The on-chain program will transfer tips sitting in the tip accounts to the tip receiver + /// before changing ownership. + pub fn change_tip_receiver_and_block_builder_tx( + &self, + new_tip_receiver: &Pubkey, + bank: &Bank, + keypair: &Keypair, + block_builder: &Pubkey, + block_builder_commission: u64, + ) -> Result { + let config = self.get_tip_payment_config_account(bank)?; + + let change_tip_ix = Instruction { + program_id: self.tip_payment_program_info.program_id, + data: tip_payment::instruction::ChangeTipReceiver {}.data(), + accounts: tip_payment::accounts::ChangeTipReceiver { + config: self.tip_payment_program_info.config_pda_bump.0, + old_tip_receiver: config.tip_receiver, + new_tip_receiver: *new_tip_receiver, + block_builder: config.block_builder, + tip_payment_account_0: self.tip_payment_program_info.tip_pda_0.0, + tip_payment_account_1: self.tip_payment_program_info.tip_pda_1.0, + tip_payment_account_2: self.tip_payment_program_info.tip_pda_2.0, + tip_payment_account_3: self.tip_payment_program_info.tip_pda_3.0, + tip_payment_account_4: self.tip_payment_program_info.tip_pda_4.0, + tip_payment_account_5: self.tip_payment_program_info.tip_pda_5.0, + tip_payment_account_6: self.tip_payment_program_info.tip_pda_6.0, + tip_payment_account_7: self.tip_payment_program_info.tip_pda_7.0, + signer: keypair.pubkey(), + } + .to_account_metas(None), + }; + let change_block_builder_ix = Instruction { + program_id: self.tip_payment_program_info.program_id, + data: tip_payment::instruction::ChangeBlockBuilder { + block_builder_commission, + } + .data(), + accounts: tip_payment::accounts::ChangeBlockBuilder { + config: self.tip_payment_program_info.config_pda_bump.0, + tip_receiver: *new_tip_receiver, // tip receiver will have just changed in previous ix + old_block_builder: config.block_builder, + new_block_builder: *block_builder, + tip_payment_account_0: self.tip_payment_program_info.tip_pda_0.0, + tip_payment_account_1: self.tip_payment_program_info.tip_pda_1.0, + tip_payment_account_2: self.tip_payment_program_info.tip_pda_2.0, + tip_payment_account_3: self.tip_payment_program_info.tip_pda_3.0, + tip_payment_account_4: self.tip_payment_program_info.tip_pda_4.0, + tip_payment_account_5: self.tip_payment_program_info.tip_pda_5.0, + tip_payment_account_6: self.tip_payment_program_info.tip_pda_6.0, + tip_payment_account_7: self.tip_payment_program_info.tip_pda_7.0, + signer: keypair.pubkey(), + } + .to_account_metas(None), + }; + Ok( + SanitizedTransaction::try_from_legacy_transaction(Transaction::new_signed_with_payer( + &[change_tip_ix, change_block_builder_ix], + Some(&keypair.pubkey()), + &[keypair], + bank.last_blockhash(), + )) + .unwrap(), + ) + } + + /// Returns the balance of all the MEV tip accounts + pub fn get_tip_account_balances(&self, bank: &Arc) -> Vec<(Pubkey, u64)> { + let accounts = self.get_tip_accounts(); + accounts + .into_iter() + .map(|account| { + let balance = bank.get_balance(&account); + (account, balance) + }) + .collect() + } + + /// Returns the balance of all the MEV tip accounts above the rent-exempt amount. 
+ /// NOTE: the on-chain program has rent_exempt = force + pub fn get_tip_account_balances_above_rent_exempt( + &self, + bank: &Arc, + ) -> Vec<(Pubkey, u64)> { + let accounts = self.get_tip_accounts(); + accounts + .into_iter() + .map(|account| { + let account_data = bank.get_account(&account).unwrap_or_default(); + let balance = bank.get_balance(&account); + let rent_exempt = + bank.get_minimum_balance_for_rent_exemption(account_data.data().len()); + // NOTE: don't unwrap here in case bug in on-chain program, don't want all validators to crash + // if program gets stuck in bad state + (account, balance.checked_sub(rent_exempt).unwrap_or_else(|| { + warn!("balance is below rent exempt amount. balance: {} rent_exempt: {} acc size: {}", balance, rent_exempt, TipPaymentAccount::SIZE); + 0 + })) + }) + .collect() + } +} diff --git a/core/src/tpu.rs b/core/src/tpu.rs index 21b633c55b..13e16b4e52 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -5,15 +5,23 @@ use { crate::{ banking_stage::BankingStage, broadcast_stage::{BroadcastStage, BroadcastStageType, RetransmitSlotsReceiver}, + bundle_account_locker::BundleAccountLocker, + bundle_stage::BundleStage, cluster_info_vote_listener::{ ClusterInfoVoteListener, GossipDuplicateConfirmedSlotsSender, GossipVerifiedVoteHashSender, VerifiedVoteSender, VoteTracker, }, fetch_stage::FetchStage, find_packet_sender_stake_stage::FindPacketSenderStakeStage, + proxy::{ + block_engine_stage::{BlockBuilderFeeInfo, BlockEngineConfig, BlockEngineStage}, + fetch_stage_manager::FetchStageManager, + relayer_stage::{RelayerConfig, RelayerStage}, + }, sigverify::TransactionSigVerifier, sigverify_stage::SigVerifyStage, staked_nodes_updater_service::StakedNodesUpdaterService, + tip_manager::{TipManager, TipManagerConfig}, }, crossbeam_channel::{unbounded, Receiver}, solana_client::connection_cache::ConnectionCache, @@ -29,14 +37,15 @@ use { cost_model::CostModel, vote_sender_types::{ReplayVoteReceiver, ReplayVoteSender}, }, - solana_sdk::signature::Keypair, + solana_sdk::signature::{Keypair, Signer}, solana_streamer::{ quic::{spawn_server, StreamStats, MAX_STAKED_CONNECTIONS, MAX_UNSTAKED_CONNECTIONS}, streamer::StakedNodes, }, std::{ - net::UdpSocket, - sync::{atomic::AtomicBool, Arc, RwLock}, + collections::HashSet, + net::{SocketAddr, UdpSocket}, + sync::{atomic::AtomicBool, Arc, Mutex, RwLock}, thread, }, }; @@ -59,6 +68,9 @@ pub struct Tpu { fetch_stage: FetchStage, sigverify_stage: SigVerifyStage, vote_sigverify_stage: SigVerifyStage, + relayer_stage: RelayerStage, + block_engine_stage: BlockEngineStage, + fetch_stage_manager: FetchStageManager, banking_stage: BankingStage, cluster_info_vote_listener: ClusterInfoVoteListener, broadcast_stage: BroadcastStage, @@ -67,6 +79,7 @@ pub struct Tpu { find_packet_sender_stake_stage: FindPacketSenderStakeStage, vote_find_packet_sender_stake_stage: FindPacketSenderStakeStage, staked_nodes_updater_service: StakedNodesUpdaterService, + bundle_stage: BundleStage, } impl Tpu { @@ -97,7 +110,12 @@ impl Tpu { keypair: &Keypair, log_messages_bytes_limit: Option, staked_nodes: &Arc>, + block_engine_config: Arc>, + relayer_config: Arc>, + tip_manager_config: TipManagerConfig, + shred_receiver_address: Arc>>, tpu_enable_udp: bool, + preallocated_bundle_cost: u64, ) -> Self { let TpuSockets { transactions: transactions_sockets, @@ -108,7 +126,11 @@ impl Tpu { transactions_forwards_quic: transactions_forwards_quic_sockets, } = sockets; + // Packets from fetch stage and quic server are intercepted and sent through fetch_stage_manager 
+ // If relayer is connected, packets are dropped. If not, packets are forwarded on to packet_sender + let (packet_intercept_sender, packet_intercept_receiver) = unbounded(); let (packet_sender, packet_receiver) = unbounded(); + let (vote_packet_sender, vote_packet_receiver) = unbounded(); let (forwarded_packet_sender, forwarded_packet_receiver) = unbounded(); let fetch_stage = FetchStage::new_with_sender( @@ -116,7 +138,7 @@ impl Tpu { tpu_forwards_sockets, tpu_vote_sockets, exit, - &packet_sender, + &packet_intercept_sender, &vote_packet_sender, &forwarded_packet_sender, forwarded_packet_receiver, @@ -159,7 +181,7 @@ impl Tpu { transactions_quic_sockets, keypair, cluster_info.my_contact_info().tpu.ip(), - packet_sender, + packet_intercept_sender, exit.clone(), MAX_QUIC_CONNECTIONS_PER_PEER, staked_nodes.clone(), @@ -184,7 +206,7 @@ impl Tpu { .unwrap(); let sigverify_stage = { - let verifier = TransactionSigVerifier::new(verified_sender); + let verifier = TransactionSigVerifier::new(verified_sender.clone()); SigVerifyStage::new(find_packet_sender_stake_receiver, verifier, "tpu-verifier") }; @@ -200,6 +222,40 @@ impl Tpu { ) }; + let block_builder_fee_info = Arc::new(Mutex::new(BlockBuilderFeeInfo { + block_builder: cluster_info.keypair().pubkey(), + block_builder_commission: 0, + })); + + let (bundle_sender, bundle_receiver) = unbounded(); + let block_engine_stage = BlockEngineStage::new( + block_engine_config, + bundle_sender, + cluster_info.clone(), + packet_sender.clone(), + verified_sender.clone(), + exit.clone(), + &block_builder_fee_info, + ); + + let (heartbeat_tx, heartbeat_rx) = unbounded(); + let fetch_stage_manager = FetchStageManager::new( + cluster_info.clone(), + heartbeat_rx, + packet_intercept_receiver, + packet_sender.clone(), + exit.clone(), + ); + + let relayer_stage = RelayerStage::new( + relayer_config, + cluster_info.clone(), + heartbeat_tx, + packet_sender, + verified_sender, + exit.clone(), + ); + let (verified_gossip_vote_packets_sender, verified_gossip_vote_packets_receiver) = unbounded(); let cluster_info_vote_listener = ClusterInfoVoteListener::new( @@ -218,18 +274,43 @@ impl Tpu { cluster_confirmed_slot_sender, ); + let tip_manager = TipManager::new(tip_manager_config); + + let bundle_account_locker = BundleAccountLocker::default(); + + // tip accounts can't be used in BankingStage to avoid someone from stealing tips mid-slot. 
+ // it also helps reduce surface area for potential account contention + let mut blacklisted_accounts = HashSet::new(); + blacklisted_accounts.insert(tip_manager.tip_payment_config_pubkey()); + blacklisted_accounts.extend(tip_manager.get_tip_accounts()); let banking_stage = BankingStage::new( cluster_info, poh_recorder, verified_receiver, verified_tpu_vote_packets_receiver, verified_gossip_vote_packets_receiver, - transaction_status_sender, - replay_vote_sender, + transaction_status_sender.clone(), + replay_vote_sender.clone(), cost_model.clone(), log_messages_bytes_limit, connection_cache.clone(), bank_forks.clone(), + blacklisted_accounts, + bundle_account_locker.clone(), + ); + + let bundle_stage = BundleStage::new( + cluster_info, + poh_recorder, + transaction_status_sender, + replay_vote_sender, + cost_model.clone(), + bundle_receiver, + exit.clone(), + tip_manager, + bundle_account_locker, + &block_builder_fee_info, + preallocated_bundle_cost, ); let broadcast_stage = broadcast_type.new_broadcast_stage( @@ -241,12 +322,16 @@ impl Tpu { blockstore.clone(), bank_forks, shred_version, + shred_receiver_address, ); Self { fetch_stage, sigverify_stage, vote_sigverify_stage, + block_engine_stage, + relayer_stage, + fetch_stage_manager, banking_stage, cluster_info_vote_listener, broadcast_stage, @@ -255,6 +340,7 @@ impl Tpu { find_packet_sender_stake_stage, vote_find_packet_sender_stake_stage, staked_nodes_updater_service, + bundle_stage, } } @@ -270,7 +356,13 @@ impl Tpu { self.staked_nodes_updater_service.join(), self.tpu_quic_t.join(), self.tpu_forwards_quic_t.join(), + self.bundle_stage.join(), ]; + + self.relayer_stage.join()?; + self.block_engine_stage.join()?; + self.fetch_stage_manager.join()?; + let broadcast_result = self.broadcast_stage.join(); for result in results { result?; diff --git a/core/src/tvu.rs b/core/src/tvu.rs index c4fa37cf25..9347a43e52 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -48,7 +48,7 @@ use { solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Keypair}, std::{ collections::HashSet, - net::UdpSocket, + net::{SocketAddr, UdpSocket}, sync::{atomic::AtomicBool, Arc, RwLock}, thread::{self, JoinHandle}, }, @@ -130,6 +130,7 @@ impl Tvu { log_messages_bytes_limit: Option, connection_cache: &Arc, prioritization_fee_cache: &Arc, + shred_receiver_addr: Arc>>, ) -> Self { let TvuSockets { repair: repair_socket, @@ -177,6 +178,7 @@ impl Tvu { retransmit_receiver, max_slots.clone(), Some(rpc_subscriptions.clone()), + shred_receiver_addr, ); let cluster_slots = Arc::new(ClusterSlots::default()); @@ -456,6 +458,7 @@ pub mod tests { None, &Arc::new(ConnectionCache::default()), &_ignored_prioritization_fee_cache, + Arc::new(RwLock::new(None)), ); exit.store(true, Ordering::Relaxed); tvu.join().unwrap(); diff --git a/core/src/validator.rs b/core/src/validator.rs index f2bdc04a56..d07c2d1623 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -12,6 +12,7 @@ use { consensus::{reconcile_blockstore_roots_with_external_source, ExternalRootSource, Tower}, ledger_metric_report_service::LedgerMetricReportService, poh_timing_report_service::PohTimingReportService, + proxy::{block_engine_stage::BlockEngineConfig, relayer_stage::RelayerConfig}, rewards_recorder_service::{RewardsRecorderSender, RewardsRecorderService}, sample_performance_service::SamplePerformanceService, serve_repair::ServeRepair, @@ -20,6 +21,7 @@ use { snapshot_packager_service::SnapshotPackagerService, stats_reporter_service::StatsReporterService, 
system_monitor_service::{verify_net_stats_access, SystemMonitorService}, + tip_manager::TipManagerConfig, tower_storage::TowerStorage, tpu::{Tpu, TpuSockets, DEFAULT_TPU_COALESCE_MS}, tvu::{Tvu, TvuConfig, TvuSockets}, @@ -109,7 +111,7 @@ use { path::{Path, PathBuf}, sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, - Arc, RwLock, + Arc, Mutex, RwLock, }, thread::{sleep, Builder, JoinHandle}, time::{Duration, Instant}, @@ -178,6 +180,12 @@ pub struct ValidatorConfig { pub wait_to_vote_slot: Option, pub ledger_column_options: LedgerColumnOptions, pub runtime_config: RuntimeConfig, + pub relayer_config: Arc>, + pub block_engine_config: Arc>, + // Using Option inside RwLock is ugly, but only convenient way to allow toggle on/off + pub shred_receiver_address: Arc>>, + pub tip_manager_config: TipManagerConfig, + pub preallocated_bundle_cost: u64, } impl Default for ValidatorConfig { @@ -241,6 +249,11 @@ impl Default for ValidatorConfig { wait_to_vote_slot: None, ledger_column_options: LedgerColumnOptions::default(), runtime_config: RuntimeConfig::default(), + relayer_config: Arc::new(Mutex::new(RelayerConfig::default())), + block_engine_config: Arc::new(Mutex::new(BlockEngineConfig::default())), + shred_receiver_address: Arc::new(RwLock::new(None)), + tip_manager_config: TipManagerConfig::default(), + preallocated_bundle_cost: u64::default(), } } } @@ -938,6 +951,9 @@ impl Validator { bank_forks: bank_forks.clone(), cluster_info: cluster_info.clone(), vote_account: *vote_account, + block_engine_config: config.block_engine_config.clone(), + relayer_config: config.relayer_config.clone(), + shred_receiver_address: config.shred_receiver_address.clone(), }); let waited_for_supermajority = if let Ok(waited) = wait_for_supermajority( @@ -1041,6 +1057,7 @@ impl Validator { config.runtime_config.log_messages_bytes_limit, &connection_cache, &prioritization_fee_cache, + config.shred_receiver_address.clone(), ); let tpu = Tpu::new( @@ -1076,13 +1093,22 @@ impl Validator { &identity_keypair, config.runtime_config.log_messages_bytes_limit, &staked_nodes, + config.block_engine_config.clone(), + config.relayer_config.clone(), + config.tip_manager_config.clone(), + config.shred_receiver_address.clone(), tpu_enable_udp, + config.preallocated_bundle_cost, ); datapoint_info!( "validator-new", ("id", id.to_string(), String), - ("version", solana_version::version!(), String) + ( + "version", + format!("jito-{}", solana_version::version!()), + String + ) ); *start_progress.write().unwrap() = ValidatorStartProgress::Running; @@ -2163,7 +2189,7 @@ mod tests { Arc::new(validator_keypair), &validator_ledger_path, &voting_keypair.pubkey(), - Arc::new(RwLock::new(vec![voting_keypair.clone()])), + Arc::new(RwLock::new(vec![voting_keypair])), vec![leader_node.info], &config, true, // should_check_duplicate_instance diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index 0e57371a37..d6fa027c92 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -847,6 +847,7 @@ fn restore_from_snapshots_and_check_banks_are_equal( false, Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), None, + None, )?; assert_eq!(bank, &deserialized_bank); @@ -1038,6 +1039,7 @@ fn test_snapshots_with_background_services( false, Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), None, + None, ) .unwrap(); diff --git a/deploy_programs b/deploy_programs new file mode 100755 index 0000000000..cbdf837e92 --- /dev/null +++ b/deploy_programs @@ -0,0 +1,17 @@ +#!/usr/bin/env sh +# Deploys the tip payment and tip distribution programs on local validator at 
predetermined address +set -eux + +WALLET_LOCATION=~/.config/solana/id.json + +# build this solana binary to ensure we're using a version compatible with the validator +cargo b --release --bin solana + +./target/release/solana airdrop -ul 1000 $WALLET_LOCATION + +(cd jito-programs/tip-payment && anchor build) + +# NOTE: make sure the declare_id! is set correctly in the programs +# Also, || true to make sure if fails the first time around, tip_payment can still be deployed +RUST_INFO=trace ./target/release/solana deploy --keypair $WALLET_LOCATION -ul ./jito-programs/tip-payment/target/deploy/tip_distribution.so ./jito-programs/tip-payment/dev/dev_tip_distribution.json || true +RUST_INFO=trace ./target/release/solana deploy --keypair $WALLET_LOCATION -ul ./jito-programs/tip-payment/target/deploy/tip_payment.so ./jito-programs/tip-payment/dev/dev_tip_payment.json diff --git a/dev/Dockerfile b/dev/Dockerfile new file mode 100644 index 0000000000..bab9a1c02f --- /dev/null +++ b/dev/Dockerfile @@ -0,0 +1,48 @@ +FROM rust:1.64-slim-bullseye as builder + +# Add Google Protocol Buffers for Libra's metrics library. +ENV PROTOC_VERSION 3.8.0 +ENV PROTOC_ZIP protoc-$PROTOC_VERSION-linux-x86_64.zip + +RUN set -x \ + && apt update \ + && apt install -y \ + clang \ + cmake \ + libudev-dev \ + make \ + unzip \ + libssl-dev \ + pkg-config \ + zlib1g-dev \ + curl \ + && rustup component add rustfmt \ + && rustup component add clippy \ + && rustc --version \ + && cargo --version \ + && curl -OL https://github.com/google/protobuf/releases/download/v$PROTOC_VERSION/$PROTOC_ZIP \ + && unzip -o $PROTOC_ZIP -d /usr/local bin/protoc \ + && unzip -o $PROTOC_ZIP -d /usr/local include/* \ + && rm -f $PROTOC_ZIP + + +WORKDIR /solana +COPY . . +RUN mkdir -p docker-output + +ARG ci_commit +# NOTE: Keep this here before build since variable is referenced during CI build step. +ENV CI_COMMIT=$ci_commit + +ARG debug + +# Uses docker buildkit to cache the image. 
+# /usr/local/cargo/git needed for crossbeam patch +RUN --mount=type=cache,mode=0777,target=/solana/target \ + --mount=type=cache,mode=0777,target=/usr/local/cargo/registry \ + --mount=type=cache,mode=0777,target=/usr/local/cargo/git \ + if [ "$debug" = "false" ] ; then \ + ./cargo stable build --release && cp target/release/solana* ./docker-output; \ + else \ + RUSTFLAGS='-g -C force-frame-pointers=yes' ./cargo stable build --release && cp target/release/solana* ./docker-output; \ + fi diff --git a/entry/src/entry.rs b/entry/src/entry.rs index c732f19ab6..b2b8cd9fac 100644 --- a/entry/src/entry.rs +++ b/entry/src/entry.rs @@ -203,7 +203,7 @@ pub fn hash_transactions(transactions: &[VersionedTransaction]) -> Hash { .iter() .flat_map(|tx| tx.signatures.iter()) .collect(); - let merkle_tree = MerkleTree::new(&signatures); + let merkle_tree = MerkleTree::new(&signatures, false); if let Some(root_hash) = merkle_tree.get_root() { *root_hash } else { diff --git a/entry/src/poh.rs b/entry/src/poh.rs index 8a27a3ac1b..f017b7e5d6 100644 --- a/entry/src/poh.rs +++ b/entry/src/poh.rs @@ -73,19 +73,30 @@ impl Poh { } pub fn record(&mut self, mixin: Hash) -> Option { - if self.remaining_hashes == 1 { + let entries = self.record_bundle(&[mixin]); + entries.unwrap_or_default().pop() + } + + pub fn record_bundle(&mut self, mixins: &[Hash]) -> Option> { + if self.remaining_hashes <= mixins.len() as u64 { return None; // Caller needs to `tick()` first } - self.hash = hashv(&[self.hash.as_ref(), mixin.as_ref()]); - let num_hashes = self.num_hashes + 1; - self.num_hashes = 0; - self.remaining_hashes -= 1; + let entries = mixins + .iter() + .map(|m| { + self.hash = hashv(&[self.hash.as_ref(), m.as_ref()]); + let num_hashes = self.num_hashes + 1; + self.num_hashes = 0; + self.remaining_hashes -= 1; + PohEntry { + num_hashes, + hash: self.hash, + } + }) + .collect(); - Some(PohEntry { - num_hashes, - hash: self.hash, - }) + Some(entries) } pub fn tick(&mut self) -> Option { diff --git a/f b/f new file mode 100755 index 0000000000..fa748c791a --- /dev/null +++ b/f @@ -0,0 +1,30 @@ +#!/usr/bin/env sh +# Builds jito-solana in a docker container. +# Useful for running on machines that might not have cargo installed but can run docker (Flatcar Linux). +# run `./f true` to compile with debug flags + +set -eux + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)" + +GIT_SHA="$(git describe --always --dirty)" + +echo $GIT_SHA + +DEBUG_FLAGS=${1-false} + +DOCKER_BUILDKIT=1 docker build \ + --build-arg debug=$DEBUG_FLAGS \ + --build-arg ci_commit=$GIT_SHA \ + -t jitolabs/build-solana \ + -f dev/Dockerfile . \ + --progress=plain + +# Creates a temporary container, copies solana-validator built inside container there and +# removes the temporary container. 
+docker rm temp || true +docker container create --name temp jitolabs/build-solana +mkdir -p $SCRIPT_DIR/docker-output +# Outputs the solana-validator binary to $SOLANA/docker-output/solana-validator +docker container cp temp:/solana/docker-output $SCRIPT_DIR/ +docker rm temp diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 0cd6d2485e..03cb5f10b3 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -538,6 +538,10 @@ impl ClusterInfo { *self.entrypoints.write().unwrap() = entrypoints; } + pub fn set_my_contact_info(&self, my_contact_info: ContactInfo) { + *self.my_contact_info.write().unwrap() = my_contact_info; + } + pub fn save_contact_info(&self) { let nodes = { let entrypoint_gossip_addrs = self diff --git a/jito-programs b/jito-programs new file mode 160000 index 0000000000..4fc368901b --- /dev/null +++ b/jito-programs @@ -0,0 +1 @@ +Subproject commit 4fc368901be002e3c49d590e789662d7b40c5423 diff --git a/jito-protos/Cargo.toml b/jito-protos/Cargo.toml new file mode 100644 index 0000000000..631b68f87f --- /dev/null +++ b/jito-protos/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "jito-protos" +version = "1.14.24" +edition = "2021" +publish = false + +[dependencies] +bytes = "1.1.0" +prost = "0.8.0" +prost-types = "0.8.0" +tonic = "0.5.2" + +[build-dependencies] +tonic-build = "0.5.2" diff --git a/jito-protos/build.rs b/jito-protos/build.rs new file mode 100644 index 0000000000..54c8b8d5e7 --- /dev/null +++ b/jito-protos/build.rs @@ -0,0 +1,17 @@ +use tonic_build::configure; + +fn main() { + configure() + .compile( + &[ + "protos/auth.proto", + "protos/block_engine.proto", + "protos/bundle.proto", + "protos/packet.proto", + "protos/relayer.proto", + "protos/shared.proto", + ], + &["protos"], + ) + .unwrap(); +} diff --git a/jito-protos/protos b/jito-protos/protos new file mode 160000 index 0000000000..05d210980f --- /dev/null +++ b/jito-protos/protos @@ -0,0 +1 @@ +Subproject commit 05d210980f34a7c974d7ed1a4dbcb2ce1fca00b3 diff --git a/jito-protos/src/lib.rs b/jito-protos/src/lib.rs new file mode 100644 index 0000000000..cf630c53d2 --- /dev/null +++ b/jito-protos/src/lib.rs @@ -0,0 +1,25 @@ +pub mod proto { + pub mod auth { + tonic::include_proto!("auth"); + } + + pub mod block_engine { + tonic::include_proto!("block_engine"); + } + + pub mod bundle { + tonic::include_proto!("bundle"); + } + + pub mod packet { + tonic::include_proto!("packet"); + } + + pub mod relayer { + tonic::include_proto!("relayer"); + } + + pub mod shared { + tonic::include_proto!("shared"); + } +} diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 008c76ed65..a7fcb0612c 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -853,13 +853,15 @@ fn load_bank_forks( snapshot_archive_path.unwrap_or_else(|| blockstore.ledger_path().to_path_buf()); let incremental_snapshot_archives_dir = incremental_snapshot_archive_path.unwrap_or_else(|| full_snapshot_archives_dir.clone()); - if let Some(full_snapshot_slot) = - snapshot_utils::get_highest_full_snapshot_archive_slot(&full_snapshot_archives_dir) - { + if let Some(full_snapshot_slot) = snapshot_utils::get_highest_full_snapshot_archive_slot( + &full_snapshot_archives_dir, + process_options.halt_at_slot, + ) { let incremental_snapshot_slot = snapshot_utils::get_highest_incremental_snapshot_archive_slot( &incremental_snapshot_archives_dir, full_snapshot_slot, + process_options.halt_at_slot, ) .unwrap_or_default(); starting_slot = std::cmp::max(full_snapshot_slot, incremental_snapshot_slot); @@ 
-880,8 +882,8 @@ fn load_bank_forks( // - This will not catch the case when loading from genesis without a full slot 0. if !blockstore.slot_range_connected(starting_slot, halt_slot) { eprintln!( - "Unable to load bank forks at slot {} due to disconnected blocks.", - halt_slot, + "Unable to load bank forks [start {} end {}] due to disconnected blocks.", + starting_slot, halt_slot, ); exit(1); } @@ -2730,6 +2732,21 @@ fn main() { "snapshot slot doesn't exist" ); + if let Ok(metas) = blockstore.slot_meta_iterator(0) { + let slots: Vec<_> = metas.map(|(slot, _)| slot).collect(); + if slots.is_empty() { + eprintln!("Ledger is empty, can't create snapshot"); + exit(1); + } else { + let first = slots.first().unwrap(); + let last = slots.last().unwrap_or(first); + if first > &snapshot_slot || &snapshot_slot > last { + eprintln!("Slot {} is out of bounds of ledger [{}, {}], cannot create snapshot", &snapshot_slot, first, last); + exit(1); + } + } + } + let ending_slot = if is_minimized { let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot); if ending_slot <= snapshot_slot { diff --git a/ledger/src/bank_forks_utils.rs b/ledger/src/bank_forks_utils.rs index d5018bc29b..1d2bfadea7 100644 --- a/ledger/src/bank_forks_utils.rs +++ b/ledger/src/bank_forks_utils.rs @@ -100,6 +100,7 @@ pub fn load_bank_forks( if snapshot_utils::get_highest_full_snapshot_archive_info( &snapshot_config.full_snapshot_archives_dir, + process_options.halt_at_slot, ) .is_some() { @@ -179,7 +180,7 @@ pub fn load_bank_forks( } #[allow(clippy::too_many_arguments)] -fn bank_forks_from_snapshot( +pub fn bank_forks_from_snapshot( genesis_config: &GenesisConfig, account_paths: Vec, shrink_paths: Option>, @@ -213,6 +214,7 @@ fn bank_forks_from_snapshot( process_options.verify_index, process_options.accounts_db_config.clone(), accounts_update_notifier, + process_options.halt_at_slot, ) .expect("Load from snapshot failed"); diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index b5eb84567c..564293b1f1 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -140,7 +140,7 @@ fn execute_batch( let mut mint_decimals: HashMap = HashMap::new(); let pre_token_balances = if record_token_balances { - collect_token_balances(bank, batch, &mut mint_decimals) + collect_token_balances(bank, batch, &mut mint_decimals, None) } else { vec![] }; @@ -174,7 +174,7 @@ fn execute_batch( if let Some(transaction_status_sender) = transaction_status_sender { let transactions = batch.sanitized_transactions().to_vec(); let post_token_balances = if record_token_balances { - collect_token_balances(bank, batch, &mut mint_decimals) + collect_token_balances(bank, batch, &mut mint_decimals, None) } else { vec![] }; diff --git a/ledger/src/token_balances.rs b/ledger/src/token_balances.rs index 673cc5556b..586bbd8526 100644 --- a/ledger/src/token_balances.rs +++ b/ledger/src/token_balances.rs @@ -5,7 +5,9 @@ use { }, solana_measure::measure::Measure, solana_metrics::datapoint_debug, - solana_runtime::{bank::Bank, transaction_batch::TransactionBatch}, + solana_runtime::{ + account_overrides::AccountOverrides, bank::Bank, transaction_batch::TransactionBatch, + }, solana_sdk::{account::ReadableAccount, pubkey::Pubkey}, solana_transaction_status::{ token_balances::TransactionTokenBalances, TransactionTokenBalance, @@ -39,6 +41,7 @@ pub fn collect_token_balances( bank: &Bank, batch: &TransactionBatch, mint_decimals: &mut HashMap, + cached_accounts: Option<&AccountOverrides>, ) -> 
TransactionTokenBalances { let mut balances: TransactionTokenBalances = vec![]; let mut collect_time = Measure::start("collect_token_balances"); @@ -59,8 +62,12 @@ pub fn collect_token_balances( ui_token_amount, owner, program_id, - }) = collect_token_balance_from_account(bank, account_id, mint_decimals) - { + }) = collect_token_balance_from_account( + bank, + account_id, + mint_decimals, + cached_accounts, + ) { transaction_balances.push(TransactionTokenBalance { account_index: index as u8, mint, @@ -93,8 +100,17 @@ fn collect_token_balance_from_account( bank: &Bank, account_id: &Pubkey, mint_decimals: &mut HashMap, + account_overrides: Option<&AccountOverrides>, ) -> Option { - let account = bank.get_account(account_id)?; + let account = { + if let Some(account_override) = + account_overrides.and_then(|overrides| overrides.get(account_id)) + { + Some(account_override.clone()) + } else { + bank.get_account(account_id) + } + }?; if !is_known_spl_token_id(account.owner()) { return None; @@ -237,13 +253,13 @@ mod test { // Account is not owned by spl_token (nor does it have TokenAccount state) assert_eq!( - collect_token_balance_from_account(&bank, &account_pubkey, &mut mint_decimals), + collect_token_balance_from_account(&bank, &account_pubkey, &mut mint_decimals, None), None ); // Mint does not have TokenAccount state assert_eq!( - collect_token_balance_from_account(&bank, &mint_pubkey, &mut mint_decimals), + collect_token_balance_from_account(&bank, &mint_pubkey, &mut mint_decimals, None), None ); @@ -252,7 +268,8 @@ mod test { collect_token_balance_from_account( &bank, &spl_token_account_pubkey, - &mut mint_decimals + &mut mint_decimals, + None ), Some(TokenBalanceData { mint: mint_pubkey.to_string(), @@ -269,7 +286,12 @@ mod test { // TokenAccount is not owned by known spl-token program_id assert_eq!( - collect_token_balance_from_account(&bank, &other_account_pubkey, &mut mint_decimals), + collect_token_balance_from_account( + &bank, + &other_account_pubkey, + &mut mint_decimals, + None + ), None ); @@ -278,7 +300,8 @@ mod test { collect_token_balance_from_account( &bank, &other_mint_account_pubkey, - &mut mint_decimals + &mut mint_decimals, + None ), None ); @@ -428,13 +451,13 @@ mod test { // Account is not owned by spl_token (nor does it have TokenAccount state) assert_eq!( - collect_token_balance_from_account(&bank, &account_pubkey, &mut mint_decimals), + collect_token_balance_from_account(&bank, &account_pubkey, &mut mint_decimals, None), None ); // Mint does not have TokenAccount state assert_eq!( - collect_token_balance_from_account(&bank, &mint_pubkey, &mut mint_decimals), + collect_token_balance_from_account(&bank, &mint_pubkey, &mut mint_decimals, None), None ); @@ -443,7 +466,8 @@ mod test { collect_token_balance_from_account( &bank, &spl_token_account_pubkey, - &mut mint_decimals + &mut mint_decimals, + None ), Some(TokenBalanceData { mint: mint_pubkey.to_string(), @@ -460,7 +484,12 @@ mod test { // TokenAccount is not owned by known spl-token program_id assert_eq!( - collect_token_balance_from_account(&bank, &other_account_pubkey, &mut mint_decimals), + collect_token_balance_from_account( + &bank, + &other_account_pubkey, + &mut mint_decimals, + None + ), None ); @@ -469,7 +498,8 @@ mod test { collect_token_balance_from_account( &bank, &other_mint_account_pubkey, - &mut mint_decimals + &mut mint_decimals, + None ), None ); diff --git a/local-cluster/src/local_cluster_snapshot_utils.rs b/local-cluster/src/local_cluster_snapshot_utils.rs index 852aa41286..6407357613 100644 
--- a/local-cluster/src/local_cluster_snapshot_utils.rs +++ b/local-cluster/src/local_cluster_snapshot_utils.rs @@ -79,7 +79,10 @@ impl LocalCluster { ); loop { if let Some(full_snapshot_archive_info) = - snapshot_utils::get_highest_full_snapshot_archive_info(&full_snapshot_archives_dir) + snapshot_utils::get_highest_full_snapshot_archive_info( + &full_snapshot_archives_dir, + None, + ) { match next_snapshot_type { NextSnapshotType::FullSnapshot => { @@ -92,6 +95,7 @@ impl LocalCluster { snapshot_utils::get_highest_incremental_snapshot_archive_info( incremental_snapshot_archives_dir.as_ref().unwrap(), full_snapshot_archive_info.slot(), + None, ) { if incremental_snapshot_archive_info.slot() >= last_slot { diff --git a/local-cluster/src/validator_configs.rs b/local-cluster/src/validator_configs.rs index 7ad91f47c2..9e62d939cf 100644 --- a/local-cluster/src/validator_configs.rs +++ b/local-cluster/src/validator_configs.rs @@ -65,6 +65,11 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig { wait_to_vote_slot: config.wait_to_vote_slot, ledger_column_options: config.ledger_column_options.clone(), runtime_config: config.runtime_config.clone(), + relayer_config: config.relayer_config.clone(), + block_engine_config: config.block_engine_config.clone(), + shred_receiver_address: config.shred_receiver_address.clone(), + tip_manager_config: config.tip_manager_config.clone(), + preallocated_bundle_cost: config.preallocated_bundle_cost, } } diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index aaf312ddd8..743d4519a7 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -837,6 +837,7 @@ fn test_incremental_snapshot_download_with_crossing_full_snapshot_interval_at_st validator_snapshot_test_config .full_snapshot_archives_dir .path(), + None, ) .unwrap(); info!( @@ -880,6 +881,7 @@ fn test_incremental_snapshot_download_with_crossing_full_snapshot_interval_at_st .incremental_snapshot_archives_dir .path(), full_snapshot_archive.slot(), + None, ) .unwrap(); info!( @@ -1022,6 +1024,7 @@ fn test_incremental_snapshot_download_with_crossing_full_snapshot_interval_at_st validator_snapshot_test_config .full_snapshot_archives_dir .path(), + None, ) .unwrap(); @@ -1083,6 +1086,7 @@ fn test_incremental_snapshot_download_with_crossing_full_snapshot_interval_at_st validator_snapshot_test_config .full_snapshot_archives_dir .path(), + None, ) .unwrap(); @@ -1112,6 +1116,7 @@ fn test_incremental_snapshot_download_with_crossing_full_snapshot_interval_at_st validator_snapshot_test_config .full_snapshot_archives_dir .path(), + None, ) { if full_snapshot_slot >= validator_next_full_snapshot_slot { if let Some(incremental_snapshot_slot) = @@ -1120,6 +1125,7 @@ fn test_incremental_snapshot_download_with_crossing_full_snapshot_interval_at_st .incremental_snapshot_archives_dir .path(), full_snapshot_slot, + None, ) { if incremental_snapshot_slot >= validator_next_incremental_snapshot_slot { @@ -1336,8 +1342,10 @@ fn test_snapshots_blockstore_floor() { trace!("Waiting for snapshot tar to be generated with slot",); let archive_info = loop { - let archive = - snapshot_utils::get_highest_full_snapshot_archive_info(full_snapshot_archives_dir); + let archive = snapshot_utils::get_highest_full_snapshot_archive_info( + full_snapshot_archives_dir, + None, + ); if archive.is_some() { trace!("snapshot exists"); break archive.unwrap(); diff --git a/merkle-tree/src/merkle_tree.rs b/merkle-tree/src/merkle_tree.rs index 70e09c27ec..9638c26812 
100644 --- a/merkle-tree/src/merkle_tree.rs +++ b/merkle-tree/src/merkle_tree.rs @@ -18,7 +18,7 @@ macro_rules! hash_intermediate { } } -#[derive(Debug)] +#[derive(Default, Debug, Eq, Hash, PartialEq)] pub struct MerkleTree { leaf_count: usize, nodes: Vec, @@ -36,6 +36,14 @@ impl<'a> ProofEntry<'a> { assert!((None == left_sibling) ^ (None == right_sibling)); Self(target, left_sibling, right_sibling) } + + pub fn get_left_sibling(&self) -> Option<&'a Hash> { + self.1 + } + + pub fn get_right_sibling(&self) -> Option<&'a Hash> { + self.2 + } } #[derive(Debug, Default, PartialEq, Eq)] @@ -60,6 +68,10 @@ impl<'a> Proof<'a> { }); matches!(result, Some(_)) } + + pub fn get_proof_entries(self) -> Vec> { + self.0 + } } impl MerkleTree { @@ -95,7 +107,7 @@ impl MerkleTree { } } - pub fn new>(items: &[T]) -> Self { + pub fn new>(items: &[T], sorted_hashes: bool) -> Self { let cap = MerkleTree::calculate_vec_capacity(items.len()); let mut mt = MerkleTree { leaf_count: items.len(), @@ -123,8 +135,20 @@ impl MerkleTree { &mt.nodes[prev_level_start + prev_level_idx] }; - let hash = hash_intermediate!(lsib, rsib); - mt.nodes.push(hash); + // tip-distribution verification uses sorted hashing + if sorted_hashes { + if lsib <= rsib { + let hash = hash_intermediate!(lsib, rsib); + mt.nodes.push(hash); + } else { + let hash = hash_intermediate!(rsib, lsib); + mt.nodes.push(hash); + } + } else { + // hashing for solana internals + let hash = hash_intermediate!(lsib, rsib); + mt.nodes.push(hash); + } } prev_level_start = level_start; prev_level_len = level_len; @@ -189,21 +213,21 @@ mod tests { #[test] fn test_tree_from_empty() { - let mt = MerkleTree::new::<[u8; 0]>(&[]); + let mt = MerkleTree::new::<[u8; 0]>(&[], false); assert_eq!(mt.get_root(), None); } #[test] fn test_tree_from_one() { let input = b"test"; - let mt = MerkleTree::new(&[input]); + let mt = MerkleTree::new(&[input], false); let expected = hash_leaf!(input); assert_eq!(mt.get_root(), Some(&expected)); } #[test] fn test_tree_from_many() { - let mt = MerkleTree::new(TEST); + let mt = MerkleTree::new(TEST, false); // This golden hash will need to be updated whenever the contents of `TEST` change in any // way, including addition, removal and reordering or any of the tree calculation algo // changes @@ -215,7 +239,7 @@ mod tests { #[test] fn test_path_creation() { - let mt = MerkleTree::new(TEST); + let mt = MerkleTree::new(TEST, false); for (i, _s) in TEST.iter().enumerate() { let _path = mt.find_path(i).unwrap(); } @@ -223,13 +247,13 @@ mod tests { #[test] fn test_path_creation_bad_index() { - let mt = MerkleTree::new(TEST); + let mt = MerkleTree::new(TEST, false); assert_eq!(mt.find_path(TEST.len()), None); } #[test] fn test_path_verify_good() { - let mt = MerkleTree::new(TEST); + let mt = MerkleTree::new(TEST, false); for (i, s) in TEST.iter().enumerate() { let hash = hash_leaf!(s); let path = mt.find_path(i).unwrap(); @@ -239,7 +263,7 @@ mod tests { #[test] fn test_path_verify_bad() { - let mt = MerkleTree::new(TEST); + let mt = MerkleTree::new(TEST, false); for (i, s) in BAD.iter().enumerate() { let hash = hash_leaf!(s); let path = mt.find_path(i).unwrap(); diff --git a/multinode-demo/bootstrap-validator.sh b/multinode-demo/bootstrap-validator.sh index fb6353f76b..e898bac83e 100755 --- a/multinode-demo/bootstrap-validator.sh +++ b/multinode-demo/bootstrap-validator.sh @@ -106,6 +106,42 @@ while [[ -n $1 ]]; do elif [[ $1 == --skip-require-tower ]]; then maybeRequireTower=false shift + elif [[ $1 == --trust-relayer-packets ]]; then + 
args+=("$1") + shift + elif [[ $1 == --trust-block-engine-packets ]]; then + args+=("$1") + shift + elif [[ $1 == --relayer-url ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --relayer-address ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --block-engine-url ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --block-engine-address ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --block-engine-auth-service-address ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --relayer-auth-service-address ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --tip-payment-program-pubkey ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --tip-distribution-program-pubkey ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --commission-bps ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --shred-receiver-address ]]; then + args+=("$1" "$2") + shift 2 elif [[ $1 = --log-messages-bytes-limit ]]; then args+=("$1" "$2") shift 2 @@ -144,6 +180,7 @@ args+=( --no-incremental-snapshots --identity "$identity" --vote-account "$vote_account" + --merkle-root-upload-authority "$identity" --rpc-faucet-address 127.0.0.1:9900 --no-poh-speed-test --no-os-network-limits-test @@ -152,6 +189,9 @@ args+=( ) default_arg --gossip-port 8001 default_arg --log - +default_arg --tip-payment-program-pubkey "DThZmRNNXh7kvTQW9hXeGoWGPKktK8pgVAyoTLjH7UrT" +default_arg --tip-distribution-program-pubkey "FjrdANjvo76aCYQ4kf9FM1R8aESUcEE6F8V7qyoVUQcM" +default_arg --commission-bps 0 pid= diff --git a/multinode-demo/validator.sh b/multinode-demo/validator.sh index 1ccfc72f9d..4789db074a 100755 --- a/multinode-demo/validator.sh +++ b/multinode-demo/validator.sh @@ -86,6 +86,36 @@ while [[ -n $1 ]]; do vote_account=$2 args+=("$1" "$2") shift 2 + elif [[ $1 == --relayer-address ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --block-engine-url ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --block-engine-address ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --block-engine-auth-service-address ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --relayer-url ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --relayer-auth-service-address ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 = --merkle-root-upload-authority ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --tip-payment-program-pubkey ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --tip-distribution-program-pubkey ]]; then + args+=("$1" "$2") + shift 2 + elif [[ $1 == --commission-bps ]]; then + args+=("$1" "$2") + shift 2 elif [[ $1 = --init-complete-file ]]; then args+=("$1" "$2") shift 2 @@ -261,6 +291,10 @@ fi default_arg --identity "$identity" default_arg --vote-account "$vote_account" +default_arg --merkle-root-upload-authority "$identity" +default_arg --tip-payment-program-pubkey "DThZmRNNXh7kvTQW9hXeGoWGPKktK8pgVAyoTLjH7UrT" +default_arg --tip-distribution-program-pubkey "FjrdANjvo76aCYQ4kf9FM1R8aESUcEE6F8V7qyoVUQcM" +default_arg --commission-bps 0 default_arg --ledger "$ledger_dir" default_arg --log - default_arg --full-rpc-api diff --git a/perf/src/sigverify.rs b/perf/src/sigverify.rs index b50ef21d16..8788772a93 100644 --- a/perf/src/sigverify.rs +++ b/perf/src/sigverify.rs @@ -120,7 +120,7 @@ pub fn init() { /// Returns true if the signatrue on the packet verifies. /// Caller must do packet.set_discard(true) if this returns false. 
#[must_use] -fn verify_packet(packet: &mut Packet, reject_non_vote: bool) -> bool { +pub fn verify_packet(packet: &mut Packet, reject_non_vote: bool) -> bool { // If this packet was already marked as discard, drop it if packet.meta.discard() { return false; diff --git a/poh/src/poh_recorder.rs b/poh/src/poh_recorder.rs index aef2d7393e..f06119f9a3 100644 --- a/poh/src/poh_recorder.rs +++ b/poh/src/poh_recorder.rs @@ -57,9 +57,14 @@ pub enum PohRecorderError { SendError(#[from] SendError<WorkingBankEntry>), } -type Result<T> = std::result::Result<T, PohRecorderError>; +pub type Result<T> = std::result::Result<T, PohRecorderError>; -pub type WorkingBankEntry = (Arc<Bank>, (Entry, u64)); +#[derive(Clone, Debug)] +pub struct WorkingBankEntry { + pub bank: Arc<Bank>, + // normal entries have len == 1, bundles have len > 1 + pub entries_ticks: Vec<(Entry, u64)>, +} #[derive(Clone)] pub struct BankStart { @@ -85,21 +90,19 @@ impl BankStart { type RecordResultSender = Sender<Result<Option<usize>>>; pub struct Record { - pub mixin: Hash, - pub transactions: Vec<VersionedTransaction>, + // non-bundles shall have mixins_txs.len() == 1, bundles shall have mixins_txs.len() > 1 + pub mixins_txs: Vec<(Hash, Vec<VersionedTransaction>)>, pub slot: Slot, pub sender: RecordResultSender, } impl Record { pub fn new( - mixin: Hash, - transactions: Vec<VersionedTransaction>, + mixins_txs: Vec<(Hash, Vec<VersionedTransaction>)>, slot: Slot, sender: RecordResultSender, ) -> Self { Self { - mixin, - transactions, + mixins_txs, slot, sender, } @@ -127,18 +130,18 @@ impl TransactionRecorder { is_exited, } } + // Returns the index of `transactions.first()` in the slot, if being tracked by WorkingBank pub fn record( &self, bank_slot: Slot, - mixin: Hash, - transactions: Vec<VersionedTransaction>, + mixins_txs: Vec<(Hash, Vec<VersionedTransaction>)>, ) -> Result<Option<usize>> { // create a new channel so that there is only 1 sender and when it goes out of scope, the receiver fails let (result_sender, result_receiver) = unbounded(); - let res = - self.record_sender - .send(Record::new(mixin, transactions, bank_slot, result_sender)); + let res = self + .record_sender + .send(Record::new(mixins_txs, bank_slot, result_sender)); if res.is_err() { // If the channel is dropped, then the validator is shutting down so return that we are hitting // the max tick height to stop transaction processing and flush any transactions in the pipeline.
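With the tuple alias replaced by the struct above, every send on the entry channel now carries one WorkingBankEntry whose entries_ticks holds a single (Entry, tick_height) pair for a normal record and one pair per mixin for a bundle. A minimal receiver-side sketch under that assumption:

    use crossbeam_channel::Receiver;
    use solana_poh::poh_recorder::WorkingBankEntry;

    fn drain_working_bank_entries(receiver: &Receiver<WorkingBankEntry>) {
        while let Ok(WorkingBankEntry { bank, entries_ticks }) = receiver.try_recv() {
            // All entries from one record call share the same bank and were
            // recorded at the same tick height, so a bundle stays contiguous.
            for (entry, tick_height) in entries_ticks {
                println!(
                    "slot {} tick {} txs {}",
                    bank.slot(),
                    tick_height,
                    entry.transactions.len()
                );
            }
        }
    }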
@@ -566,7 +569,10 @@ impl PohRecorder { for tick in &self.tick_cache[..entry_count] { working_bank.bank.register_tick(&tick.0.hash); - send_result = self.sender.send((working_bank.bank.clone(), tick.clone())); + send_result = self.sender.send(WorkingBankEntry { + bank: working_bank.bank.clone(), + entries_ticks: vec![tick.clone()], + }); if send_result.is_err() { break; } @@ -746,16 +752,23 @@ impl PohRecorder { pub fn record( &mut self, bank_slot: Slot, - mixin: Hash, - transactions: Vec<VersionedTransaction>, + mixins_txs: &[(Hash, Vec<VersionedTransaction>)], ) -> Result<Option<usize>> { // Entries without transactions are used to track real-time passing in the ledger and // cannot be generated by `record()` - assert!(!transactions.is_empty(), "No transactions provided"); + assert!(!mixins_txs.is_empty(), "No transactions provided"); + assert!( + !mixins_txs.iter().any(|(_, txs)| txs.is_empty()), + "One of mixins is missing txs" + ); let ((), report_metrics_time) = measure!(self.report_metrics(bank_slot), "report_metrics"); self.report_metrics_us += report_metrics_time.as_us(); + let mixins: Vec<Hash> = mixins_txs.iter().map(|(m, _)| *m).collect(); + let transactions: Vec<Vec<VersionedTransaction>> = + mixins_txs.iter().map(|(_, tx)| tx.clone()).collect(); + loop { let (flush_cache_res, flush_cache_time) = measure!(self.flush_cache(false), "flush_cache"); @@ -773,23 +786,36 @@ impl PohRecorder { let (mut poh_lock, poh_lock_time) = measure!(self.poh.lock().unwrap(), "poh_lock"); self.record_lock_contention_us += poh_lock_time.as_us(); - let (record_mixin_res, record_mixin_time) = - measure!(poh_lock.record(mixin), "record_mixin"); + let (maybe_entries, record_mixin_time) = + measure!(poh_lock.record_bundle(&mixins), "record_mixin"); self.record_us += record_mixin_time.as_us(); drop(poh_lock); - if let Some(poh_entry) = record_mixin_res { - let num_transactions = transactions.len(); + if let Some(entries) = maybe_entries { + assert_eq!(entries.len(), transactions.len()); + let num_transactions = transactions.iter().map(|txs| txs.len()).sum(); let (send_entry_res, send_entry_time) = measure!( { - let entry = Entry { - num_hashes: poh_entry.num_hashes, - hash: poh_entry.hash, - transactions, - }; + let entries_tick_heights: Vec<(Entry, u64)> = entries + .into_iter() + .zip(transactions.into_iter()) + .map(|(poh_entry, transactions)| { + ( + Entry { + num_hashes: poh_entry.num_hashes, + hash: poh_entry.hash, + transactions, + }, + self.tick_height, + ) + }) + .collect(); let bank_clone = working_bank.bank.clone(); - self.sender.send((bank_clone, (entry, self.tick_height))) + self.sender.send(WorkingBankEntry { + bank: bank_clone, + entries_ticks: entries_tick_heights, + }) }, "send_poh_entry", ); @@ -1167,13 +1193,17 @@ mod tests { assert_eq!(poh_recorder.tick_height, tick_height_before + 1); assert_eq!(poh_recorder.tick_cache.len(), 0); let mut num_entries = 0; - while let Ok((wbank, (_entry, _tick_height))) = entry_receiver.try_recv() { + while let Ok(WorkingBankEntry { + bank: wbank, + entries_ticks, + }) = entry_receiver.try_recv() + { assert_eq!(wbank.slot(), bank1.slot()); - num_entries += 1; + num_entries += entries_ticks.len(); } // All the cached ticks, plus the new tick above should have been flushed - assert_eq!(num_entries, num_new_ticks + 1); + assert_eq!(num_entries as u64, num_new_ticks + 1); } Blockstore::destroy(&ledger_path).unwrap(); } @@ -1262,7 +1292,7 @@ mod tests { // We haven't yet reached the minimum tick height for the working bank, // so record should fail assert_matches!(
poh_recorder.record(bank1.slot(), &[(h1, vec![tx.into()])]), Err(PohRecorderError::MinHeightNotReached) ); assert!(entry_receiver.try_recv().is_err()); @@ -1305,7 +1335,7 @@ mod tests { // However we hand over a bad slot so record fails let bad_slot = bank.slot() + 1; assert_matches!( - poh_recorder.record(bad_slot, h1, vec![tx.into()]), + poh_recorder.record(bad_slot, &[(h1, vec![tx.into()])]), Err(PohRecorderError::MaxHeightReached) ); } @@ -1352,17 +1382,27 @@ mod tests { let tx = test_tx(); let h1 = hash(b"hello world!"); assert!(poh_recorder - .record(bank1.slot(), h1, vec![tx.into()]) + .record(bank1.slot(), &[(h1, vec![tx.into()])]) .is_ok()); assert_eq!(poh_recorder.tick_cache.len(), 0); //tick in the cache + entry for _ in 0..min_tick_height { - let (_bank, (e, _tick_height)) = entry_receiver.recv().unwrap(); + let WorkingBankEntry { + bank: _, + entries_ticks, + } = entry_receiver.recv().unwrap(); + assert_eq!(entries_ticks.len(), 1); + let e = entries_ticks.get(0).unwrap().0.clone(); assert!(e.is_tick()); } - let (_bank, (e, _tick_height)) = entry_receiver.recv().unwrap(); + let WorkingBankEntry { + bank: _, + entries_ticks, + } = entry_receiver.recv().unwrap(); + assert_eq!(entries_ticks.len(), 1); + let e = entries_ticks.get(0).unwrap().0.clone(); assert!(!e.is_tick()); } Blockstore::destroy(&ledger_path).unwrap(); @@ -1398,10 +1438,16 @@ mod tests { let tx = test_tx(); let h1 = hash(b"hello world!"); assert!(poh_recorder - .record(bank.slot(), h1, vec![tx.into()]) + .record(bank.slot(), &[(h1, vec![tx.into()])]) .is_err()); + for _ in 0..num_ticks_to_max { - let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap(); + let WorkingBankEntry { + bank: _, + entries_ticks, + } = entry_receiver.recv().unwrap(); + assert_eq!(entries_ticks.len(), 1); + let entry = entries_ticks.get(0).unwrap().0.clone(); assert!(entry.is_tick()); } } @@ -1446,7 +1492,7 @@ mod tests { let tx1 = test_tx(); let h1 = hash(b"hello world!"); let record_result = poh_recorder - .record(bank.slot(), h1, vec![tx0.into(), tx1.into()]) + .record(bank.slot(), &[(h1, vec![tx0.into(), tx1.into()])]) .unwrap() .unwrap(); assert_eq!(record_result, 0); @@ -1463,7 +1509,7 @@ mod tests { let tx = test_tx(); let h2 = hash(b"foobar"); let record_result = poh_recorder - .record(bank.slot(), h2, vec![tx.into()]) + .record(bank.slot(), &[(h2, vec![tx.into()])]) .unwrap() .unwrap(); assert_eq!(record_result, 2); @@ -1730,7 +1776,7 @@ mod tests { let tx = test_tx(); let h1 = hash(b"hello world!"); assert!(poh_recorder - .record(bank.slot(), h1, vec![tx.into()]) + .record(bank.slot(), &[(h1, vec![tx.into()])]) .is_err()); assert!(poh_recorder.working_bank.is_none()); diff --git a/poh/src/poh_service.rs b/poh/src/poh_service.rs index 2b71c6ab61..d6d809f13c 100644 --- a/poh/src/poh_service.rs +++ b/poh/src/poh_service.rs @@ -194,11 +194,12 @@ impl PohService { if let Ok(record) = record { if record .sender - .send(poh_recorder.write().unwrap().record( - record.slot, - record.mixin, - record.transactions, - )) + .send( + poh_recorder + .write() + .unwrap() + .record(record.slot, &record.mixins_txs), + ) .is_err() { panic!("Error returning mixin hash"); @@ -257,11 +258,7 @@ impl PohService { timing.total_lock_time_ns += lock_time.as_ns(); let mut record_time = Measure::start("record"); loop { - let res = poh_recorder_l.record( - record.slot, - record.mixin, - std::mem::take(&mut record.transactions), - ); + let res = poh_recorder_l.record(record.slot, &record.mixins_txs); // what do we do on failure here? Ignore for now. 
let (_send_res, send_record_result_time) = measure!(record.sender.send(res), "send_record_result"); @@ -383,6 +380,7 @@ impl PohService { mod tests { use { super::*, + crate::poh_recorder::WorkingBankEntry, rand::{thread_rng, Rng}, solana_ledger::{ blockstore::Blockstore, @@ -462,11 +460,10 @@ mod tests { loop { // send some data let mut time = Measure::start("record"); - let _ = poh_recorder.write().unwrap().record( - bank_slot, - h1, - vec![tx.clone()], - ); + let _ = poh_recorder + .write() + .unwrap() + .record(bank_slot, &[(h1, vec![tx.clone()])]); time.stop(); total_us += time.as_us(); total_times += 1; @@ -511,7 +508,12 @@ mod tests { let time = Instant::now(); while run_time != 0 || need_tick || need_entry || need_partial { - let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap(); + let WorkingBankEntry { + bank: _, + entries_ticks, + } = entry_receiver.recv().unwrap(); + assert_eq!(entries_ticks.len(), 1); + let entry = entries_ticks.get(0).unwrap().0.clone(); if entry.is_tick() { num_ticks += 1; diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index 61ed30f4f8..2525820a7d 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -95,6 +95,145 @@ dependencies = [ "alloc-no-stdlib", ] +[[package]] +name = "anchor-attribute-access-control" +version = "0.24.2" +dependencies = [ + "anchor-syn", + "anyhow", + "proc-macro2 1.0.51", + "quote 1.0.18", + "regex", + "syn 1.0.109", +] + +[[package]] +name = "anchor-attribute-account" +version = "0.24.2" +dependencies = [ + "anchor-syn", + "anyhow", + "bs58 0.4.0", + "proc-macro2 1.0.51", + "quote 1.0.18", + "rustversion", + "syn 1.0.109", +] + +[[package]] +name = "anchor-attribute-constant" +version = "0.24.2" +dependencies = [ + "anchor-syn", + "proc-macro2 1.0.51", + "syn 1.0.109", +] + +[[package]] +name = "anchor-attribute-error" +version = "0.24.2" +dependencies = [ + "anchor-syn", + "proc-macro2 1.0.51", + "quote 1.0.18", + "syn 1.0.109", +] + +[[package]] +name = "anchor-attribute-event" +version = "0.24.2" +dependencies = [ + "anchor-syn", + "anyhow", + "proc-macro2 1.0.51", + "quote 1.0.18", + "syn 1.0.109", +] + +[[package]] +name = "anchor-attribute-interface" +version = "0.24.2" +dependencies = [ + "anchor-syn", + "anyhow", + "heck 0.3.3", + "proc-macro2 1.0.51", + "quote 1.0.18", + "syn 1.0.109", +] + +[[package]] +name = "anchor-attribute-program" +version = "0.24.2" +dependencies = [ + "anchor-syn", + "anyhow", + "proc-macro2 1.0.51", + "quote 1.0.18", + "syn 1.0.109", +] + +[[package]] +name = "anchor-attribute-state" +version = "0.24.2" +dependencies = [ + "anchor-syn", + "anyhow", + "proc-macro2 1.0.51", + "quote 1.0.18", + "syn 1.0.109", +] + +[[package]] +name = "anchor-derive-accounts" +version = "0.24.2" +dependencies = [ + "anchor-syn", + "anyhow", + "proc-macro2 1.0.51", + "quote 1.0.18", + "syn 1.0.109", +] + +[[package]] +name = "anchor-lang" +version = "0.24.2" +dependencies = [ + "anchor-attribute-access-control", + "anchor-attribute-account", + "anchor-attribute-constant", + "anchor-attribute-error", + "anchor-attribute-event", + "anchor-attribute-interface", + "anchor-attribute-program", + "anchor-attribute-state", + "anchor-derive-accounts", + "arrayref", + "base64 0.13.0", + "bincode", + "borsh", + "bytemuck", + "solana-program 1.14.24", + "thiserror", +] + +[[package]] +name = "anchor-syn" +version = "0.24.2" +dependencies = [ + "anyhow", + "bs58 0.3.1", + "heck 0.3.3", + "proc-macro2 1.0.51", + "proc-macro2-diagnostics", + "quote 1.0.18", + "serde", + "serde_json",
"sha2 0.9.9", + "syn 1.0.109", + "thiserror", +] + [[package]] name = "ansi_term" version = "0.11.0" @@ -506,6 +645,12 @@ dependencies = [ "alloc-stdlib", ] +[[package]] +name = "bs58" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "476e9cd489f9e121e02ffa6014a8ef220ecb15c05ed23fc34cca13925dc283fb" + [[package]] name = "bs58" version = "0.4.0" @@ -713,18 +858,41 @@ dependencies = [ [[package]] name = "clap" -version = "3.1.6" +version = "3.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8c93436c21e4698bacadf42917db28b23017027a4deccb35dbe47a7e7840123" +checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" dependencies = [ "atty", "bitflags", + "clap_derive", + "clap_lex", "indexmap", - "lazy_static", - "os_str_bytes", + "once_cell", "strsim 0.10.0", "termcolor", - "textwrap 0.15.0", + "textwrap 0.16.0", +] + +[[package]] +name = "clap_derive" +version = "3.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008" +dependencies = [ + "heck 0.4.0", + "proc-macro-error", + "proc-macro2 1.0.51", + "quote 1.0.18", + "syn 1.0.109", +] + +[[package]] +name = "clap_lex" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" +dependencies = [ + "os_str_bytes", ] [[package]] @@ -1342,6 +1510,12 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "fixedbitset" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" + [[package]] name = "fixedbitset" version = "0.4.1" @@ -1997,6 +2171,28 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" +[[package]] +name = "jito-programs-vote-state" +version = "0.1.2" +dependencies = [ + "anchor-lang", + "bincode", + "serde", + "serde_derive", + "solana-program 1.14.24", +] + +[[package]] +name = "jito-protos" +version = "1.14.24" +dependencies = [ + "bytes", + "prost 0.8.0", + "prost-types 0.8.0", + "tonic 0.5.2", + "tonic-build 0.5.2", +] + [[package]] name = "jobserver" version = "0.1.21" @@ -2819,9 +3015,6 @@ name = "os_str_bytes" version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64" -dependencies = [ - "memchr", -] [[package]] name = "ouroboros" @@ -3006,13 +3199,23 @@ dependencies = [ "sha-1 0.8.2", ] +[[package]] +name = "petgraph" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "467d164a6de56270bd7c4d070df81d07beace25012d5103ced4e9ff08d6afdb7" +dependencies = [ + "fixedbitset 0.2.0", + "indexmap", +] + [[package]] name = "petgraph" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a13a2fa9d0b63e5f22328828741e523766fff0ee9e779316902290dff3f824f" dependencies = [ - "fixedbitset", + "fixedbitset 0.4.1", "indexmap", ] @@ -3166,6 +3369,29 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "proc-macro2-diagnostics" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bf29726d67464d49fa6224a1d07936a8c08bb3fba727c7493f6cf1616fdaada" +dependencies = [ + "proc-macro2 1.0.51", + "quote 
1.0.18", + "syn 1.0.109", + "version_check", + "yansi", +] + +[[package]] +name = "prost" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de5e2533f59d08fcf364fd374ebda0692a70bd6d7e66ef97f306f45c6c5d8020" +dependencies = [ + "bytes", + "prost-derive 0.8.0", +] + [[package]] name = "prost" version = "0.9.0" @@ -3186,6 +3412,24 @@ dependencies = [ "prost-derive 0.11.0", ] +[[package]] +name = "prost-build" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "355f634b43cdd80724ee7848f95770e7e70eefa6dcf14fea676216573b8fd603" +dependencies = [ + "bytes", + "heck 0.3.3", + "itertools", + "log", + "multimap", + "petgraph 0.5.1", + "prost 0.8.0", + "prost-types 0.8.0", + "tempfile", + "which", +] + [[package]] name = "prost-build" version = "0.9.0" @@ -3198,7 +3442,7 @@ dependencies = [ "lazy_static", "log", "multimap", - "petgraph", + "petgraph 0.6.0", "prost 0.9.0", "prost-types 0.9.0", "regex", @@ -3218,7 +3462,7 @@ dependencies = [ "lazy_static", "log", "multimap", - "petgraph", + "petgraph 0.6.0", "prost 0.11.0", "prost-types 0.11.0", "regex", @@ -3226,6 +3470,19 @@ dependencies = [ "which", ] +[[package]] +name = "prost-derive" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "600d2f334aa05acb02a755e217ef1ab6dea4d51b58b7846588b747edec04efba" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2 1.0.51", + "quote 1.0.18", + "syn 1.0.109", +] + [[package]] name = "prost-derive" version = "0.9.0" @@ -3252,6 +3509,16 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "prost-types" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "603bbd6394701d13f3f25aada59c7de9d35a6a5887cfc156181234a44002771b" +dependencies = [ + "bytes", + "prost 0.8.0", +] + [[package]] name = "prost-types" version = "0.9.0" @@ -3320,7 +3587,7 @@ dependencies = [ "rand 0.8.5", "ring", "rustls 0.20.6", - "rustls-native-certs", + "rustls-native-certs 0.6.1", "rustls-pemfile 0.2.1", "slab", "thiserror", @@ -3584,7 +3851,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots", + "webpki-roots 0.22.1", "winreg", ] @@ -3708,6 +3975,18 @@ dependencies = [ "webpki 0.22.0", ] +[[package]] +name = "rustls-native-certs" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a07b7c1885bd8ed3831c289b7870b13ef46fe0e856d288c30d9cc17d75a2092" +dependencies = [ + "openssl-probe", + "rustls 0.19.1", + "schannel", + "security-framework", +] + [[package]] name = "rustls-native-certs" version = "0.6.1" @@ -4106,7 +4385,7 @@ dependencies = [ "Inflector", "base64 0.13.0", "bincode", - "bs58", + "bs58 0.4.0", "bv", "lazy_static", "serde", @@ -4174,6 +4453,7 @@ dependencies = [ "futures 0.3.21", "solana-banks-interface", "solana-client", + "solana-gossip", "solana-runtime", "solana-sdk 1.14.24", "solana-send-transaction-service", @@ -4679,7 +4959,7 @@ dependencies = [ "async-trait", "base64 0.13.0", "bincode", - "bs58", + "bs58 0.4.0", "bytes", "clap 2.33.3", "crossbeam-channel", @@ -4748,22 +5028,32 @@ name = "solana-core" version = "1.14.24" dependencies = [ "ahash", + "anchor-lang", "base64 0.13.0", "bincode", - "bs58", + "bs58 0.4.0", + "bytes", "chrono", + "clap 3.2.25", "crossbeam-channel", "dashmap", "eager", "etcd-client", "fs_extra", + "futures 0.3.21", + "futures-util", + "h2", "histogram", + "indexmap", "itertools", + "jito-protos", "lazy_static", "log", "lru", 
"min-max-heap", "num_enum", + "prost 0.8.0", + "prost-types 0.8.0", "rand 0.7.3", "rand_chacha 0.2.2", "rayon", @@ -4798,7 +5088,12 @@ dependencies = [ "sysctl", "tempfile", "thiserror", + "tip-distribution", + "tip-payment", "tokio", + "tokio-stream", + "tonic 0.5.2", + "tonic-build 0.5.2", "trees", ] @@ -4866,7 +5161,7 @@ dependencies = [ "ahash", "blake3", "block-buffer 0.9.0", - "bs58", + "bs58 0.4.0", "bv", "byteorder 1.4.3", "cc", @@ -4898,7 +5193,7 @@ dependencies = [ "ahash", "blake3", "block-buffer 0.9.0", - "bs58", + "bs58 0.4.0", "bv", "byteorder 1.4.3", "cc", @@ -4968,7 +5263,7 @@ dependencies = [ name = "solana-geyser-plugin-manager" version = "1.14.24" dependencies = [ - "bs58", + "bs58 0.4.0", "crossbeam-channel", "json5", "libloading", @@ -5139,7 +5434,7 @@ name = "solana-net-utils" version = "1.14.24" dependencies = [ "bincode", - "clap 3.1.6", + "clap 3.2.25", "crossbeam-channel", "log", "nix", @@ -5208,7 +5503,7 @@ dependencies = [ "blake3", "borsh", "borsh-derive", - "bs58", + "bs58 0.4.0", "bv", "bytemuck", "cc", @@ -5255,7 +5550,7 @@ dependencies = [ "blake3", "borsh", "borsh-derive", - "bs58", + "bs58 0.4.0", "bv", "bytemuck", "cc", @@ -5371,7 +5666,7 @@ version = "1.14.24" dependencies = [ "base64 0.13.0", "bincode", - "bs58", + "bs58 0.4.0", "crossbeam-channel", "dashmap", "itertools", @@ -5486,7 +5781,7 @@ dependencies = [ "bincode", "bitflags", "borsh", - "bs58", + "bs58 0.4.0", "bytemuck", "byteorder 1.4.3", "chrono", @@ -5530,12 +5825,13 @@ dependencies = [ name = "solana-sdk" version = "1.14.24" dependencies = [ + "anchor-lang", "assert_matches", "base64 0.13.0", "bincode", "bitflags", "borsh", - "bs58", + "bs58 0.4.0", "bytemuck", "byteorder 1.4.3", "chrono", @@ -5581,7 +5877,7 @@ version = "1.14.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d41a09b9cecd0a4df63c78a192adee99ebf2d3757c19713a68246e1d9789c7c" dependencies = [ - "bs58", + "bs58 0.4.0", "proc-macro2 1.0.51", "quote 1.0.18", "rustversion", @@ -5592,7 +5888,7 @@ dependencies = [ name = "solana-sdk-macro" version = "1.14.24" dependencies = [ - "bs58", + "bs58 0.4.0", "proc-macro2 1.0.51", "quote 1.0.18", "rustversion", @@ -5606,6 +5902,7 @@ dependencies = [ "crossbeam-channel", "log", "solana-client", + "solana-gossip", "solana-measure", "solana-metrics", "solana-runtime", @@ -5670,7 +5967,7 @@ name = "solana-storage-proto" version = "1.14.24" dependencies = [ "bincode", - "bs58", + "bs58 0.4.0", "prost 0.11.0", "protobuf-src", "serde", @@ -5754,7 +6051,7 @@ dependencies = [ "base64 0.13.0", "bincode", "borsh", - "bs58", + "bs58 0.4.0", "lazy_static", "log", "serde", @@ -5822,6 +6119,7 @@ dependencies = [ "solana-vote-program", "symlink", "tikv-jemallocator", + "tonic 0.5.2", ] [[package]] @@ -6249,9 +6547,9 @@ dependencies = [ [[package]] name = "textwrap" -version = "0.15.0" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" +checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" @@ -6365,6 +6663,22 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +[[package]] +name = "tip-distribution" +version = "0.1.0" +dependencies = [ + "anchor-lang", + "jito-programs-vote-state", + "solana-program 1.14.24", +] + +[[package]] +name = "tip-payment" +version = "0.1.0" +dependencies = [ + 
"anchor-lang", +] + [[package]] name = "tokio" version = "1.14.1" @@ -6478,7 +6792,7 @@ dependencies = [ "tokio-rustls 0.23.2", "tungstenite", "webpki 0.22.0", - "webpki-roots", + "webpki-roots 0.22.1", ] [[package]] @@ -6520,6 +6834,40 @@ dependencies = [ "serde", ] +[[package]] +name = "tonic" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "796c5e1cd49905e65dd8e700d4cb1dffcbfdb4fc9d017de08c1a537afd83627c" +dependencies = [ + "async-stream", + "async-trait", + "base64 0.13.0", + "bytes", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-timeout", + "percent-encoding 2.1.0", + "pin-project", + "prost 0.8.0", + "prost-derive 0.8.0", + "rustls-native-certs 0.5.0", + "tokio", + "tokio-rustls 0.22.0", + "tokio-stream", + "tokio-util 0.6.9", + "tower", + "tower-layer", + "tower-service", + "tracing", + "tracing-futures", + "webpki-roots 0.21.1", +] + [[package]] name = "tonic" version = "0.6.2" @@ -6586,6 +6934,18 @@ dependencies = [ "tracing-futures", ] +[[package]] +name = "tonic-build" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12b52d07035516c2b74337d2ac7746075e7dcae7643816c1b12c5ff8a7484c08" +dependencies = [ + "proc-macro2 1.0.51", + "prost-build 0.8.0", + "quote 1.0.18", + "syn 1.0.109", +] + [[package]] name = "tonic-build" version = "0.6.2" @@ -6760,7 +7120,7 @@ dependencies = [ "url 2.2.2", "utf-8", "webpki 0.22.0", - "webpki-roots", + "webpki-roots 0.22.1", ] [[package]] @@ -7074,6 +7434,15 @@ dependencies = [ "untrusted", ] +[[package]] +name = "webpki-roots" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aabe153544e473b775453675851ecc86863d2a81d786d741f6b76778f2a48940" +dependencies = [ + "webpki 0.21.4", +] + [[package]] name = "webpki-roots" version = "0.22.1" @@ -7334,6 +7703,12 @@ dependencies = [ "linked-hash-map", ] +[[package]] +name = "yansi" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" + [[package]] name = "yasna" version = "0.5.0" diff --git a/programs/bpf/tests/programs.rs b/programs/bpf/tests/programs.rs index bf3b78845e..836e71defa 100644 --- a/programs/bpf/tests/programs.rs +++ b/programs/bpf/tests/programs.rs @@ -262,7 +262,7 @@ fn execute_transactions( let batch = bank.prepare_batch_for_tests(txs.clone()); let mut timings = ExecuteTimings::default(); let mut mint_decimals = HashMap::new(); - let tx_pre_token_balances = collect_token_balances(&bank, &batch, &mut mint_decimals); + let tx_pre_token_balances = collect_token_balances(&bank, &batch, &mut mint_decimals, None); let ( TransactionResults { execution_results, .. 
@@ -282,7 +282,7 @@ fn execute_transactions(
         &mut timings,
         None,
     );
-    let tx_post_token_balances = collect_token_balances(&bank, &batch, &mut mint_decimals);
+    let tx_post_token_balances = collect_token_balances(&bank, &batch, &mut mint_decimals, None);

     izip!(
         txs.iter(),
diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs
index 6f282c039e..1682077571 100644
--- a/rpc/src/rpc.rs
+++ b/rpc/src/rpc.rs
@@ -16,7 +16,7 @@ use {
     solana_client::{
         connection_cache::ConnectionCache,
         rpc_cache::LargestAccountsCache,
-        rpc_config::*,
+        rpc_config::{RpcSimulateBundleConfig, *},
         rpc_custom_error::RpcCustomError,
         rpc_deprecated_config::*,
         rpc_filter::{Memcmp, MemcmpEncodedBytes, RpcFilterType},
@@ -27,7 +27,7 @@
             MAX_GET_SIGNATURE_STATUSES_QUERY_ITEMS, MAX_GET_SLOT_LEADERS, MAX_MULTIPLE_ACCOUNTS,
             NUM_LARGEST_ACCOUNTS,
         },
-        rpc_response::{Response as RpcResponse, *},
+        rpc_response::{Response as RpcResponse, RpcSimulateBundleResult, *},
     },
     solana_entry::entry::Entry,
     solana_faucet::faucet::request_airdrop_transaction,
@@ -111,7 +111,6 @@
         time::Duration,
     },
 };
-
 type RpcCustomResult<T> = std::result::Result<T, RpcCustomError>;

 pub const MAX_REQUEST_PAYLOAD_SIZE: usize = 50 * (1 << 10); // 50kB
@@ -226,6 +225,13 @@ impl JsonRpcRequestProcessor {
         Ok(bank)
     }

+    fn bank_from_slot(&self, slot: Slot) -> Option<Arc<Bank>> {
+        debug!("Slot: {:?}", slot);
+
+        let r_bank_forks = self.bank_forks.read().unwrap();
+        r_bank_forks.get(slot)
+    }
+
     #[allow(deprecated)]
     fn bank(&self, commitment: Option<CommitmentConfig>) -> Arc<Bank> {
         debug!("RPC commitment_config: {:?}", commitment);
@@ -357,10 +363,9 @@
             Arc::new(Keypair::new()),
             socket_addr_space,
         ));
-        let tpu_address = cluster_info.my_contact_info().tpu;
         let (sender, receiver) = unbounded();
         SendTransactionService::new::<NullTpuInfo>(
-            tpu_address,
+            cluster_info.clone(),
             &bank_forks,
             None,
             receiver,
@@ -2681,13 +2686,16 @@
                 })
                 .unwrap();

-            let full_snapshot_slot =
-                snapshot_utils::get_highest_full_snapshot_archive_slot(&full_snapshot_archives_dir)
-                    .ok_or(RpcCustomError::NoSnapshot)?;
+            let full_snapshot_slot = snapshot_utils::get_highest_full_snapshot_archive_slot(
+                &full_snapshot_archives_dir,
+                None,
+            )
+            .ok_or(RpcCustomError::NoSnapshot)?;
             let incremental_snapshot_slot =
                 snapshot_utils::get_highest_incremental_snapshot_archive_slot(
                     &incremental_snapshot_archives_dir,
                     full_snapshot_slot,
+                    None,
                 );

             Ok(RpcSnapshotSlotInfo {
@@ -3278,13 +3286,167 @@ pub mod rpc_accounts {
     }
 }

+pub mod utils {
+    use {
+        crate::rpc::{encode_account, sanitize_transaction, verify_pubkey},
+        jsonrpc_core::Error,
+        solana_account_decoder::{UiAccount, UiAccountEncoding},
+        solana_client::{
+            rpc_config::{RpcSimulateBundleConfig, RpcSimulateTransactionAccountsConfig},
+            rpc_response::{
+                RpcBundleSimulationSummary, RpcSimulateBundleResult,
+                RpcSimulateBundleTransactionResult,
+            },
+        },
+        solana_runtime::bank::{
+            AccountData, Bank, BundleSimulationResult, BundleSimulationSummary,
+        },
+        solana_sdk::{
+            pubkey::Pubkey,
+            transaction::{SanitizedTransaction, VersionedTransaction},
+        },
+    };
+
+    pub type BundleSimulationParams = (
+        Vec<SanitizedTransaction>,
+        Vec<Option<Vec<Pubkey>>>,
+        Vec<Option<Vec<Pubkey>>>,
+    );
+
+    fn try_build_pubkeys_from_config(
+        maybe_config: &Option<RpcSimulateTransactionAccountsConfig>,
+        sanitized_tx: &SanitizedTransaction,
+    ) -> Result<Option<Vec<Pubkey>>, Error> {
+        if let Some(config) = maybe_config {
+            if config.addresses.len() > sanitized_tx.message().account_keys().len() {
+                return Err(Error::invalid_params(
+                    "too many pre execution addresses requested",
+                ));
+            }
+
+            Ok(Some(
+                config
+                    .addresses
+                    .iter()
+                    .map(|address_string| verify_pubkey(address_string))
.collect::, Error>>()?, + )) + } else { + Ok(None) + } + } + + pub fn build_simulate_bundle_params( + txs_and_configs: Vec<( + VersionedTransaction, + Option, + Option, + )>, + bank: &Bank, + ) -> Result { + let mut sanitized_txs = Vec::with_capacity(txs_and_configs.len()); + let mut pre_accounts = Vec::with_capacity(txs_and_configs.len()); + let mut post_accounts = Vec::with_capacity(txs_and_configs.len()); + + for (tx, pre_cfg, post_cfg) in txs_and_configs { + let sanitized_tx = sanitize_transaction(tx, bank)?; + pre_accounts.push(try_build_pubkeys_from_config(&pre_cfg, &sanitized_tx)?); + post_accounts.push(try_build_pubkeys_from_config(&post_cfg, &sanitized_tx)?); + sanitized_txs.push(sanitized_tx); + } + + Ok((sanitized_txs, pre_accounts, post_accounts)) + } + + fn try_encode_accounts( + accounts: Option>, + encoding: Option, + ) -> Result>, Error> { + if let Some(accounts) = accounts { + let encoding = encoding.unwrap_or(UiAccountEncoding::Base64); + Ok(Some( + accounts + .iter() + .map(|a| encode_account(&a.data, &a.pubkey, encoding, None)) + .collect::, Error>>()?, + )) + } else { + Ok(None) + } + } + + /// create a [RpcSimulateBundleResult] from a given bank [BundleSimulationResult] + pub fn rpc_bundle_result_from_bank_result( + bank_result: BundleSimulationResult, + rpc_config: RpcSimulateBundleConfig, + ) -> Result { + let BundleSimulationResult { + ref summary, + ref transaction_results, + } = bank_result; + + let summary = match summary.clone() { + BundleSimulationSummary::Failed { + error, + tx_signature, + } => RpcBundleSimulationSummary::Failed { + error, + tx_signature: tx_signature.to_string(), + }, + BundleSimulationSummary::Succeeded => RpcBundleSimulationSummary::Succeeded, + }; + + let mut transaction_results = Vec::with_capacity(transaction_results.len()); + for (i, res) in bank_result.transaction_results.into_iter().enumerate() { + let logs = if res.logs.is_empty() { + None + } else { + Some(res.logs) + }; + + transaction_results.push(RpcSimulateBundleTransactionResult { + err: res.result.err(), + logs, + pre_execution_accounts: try_encode_accounts( + res.pre_execution_accounts, + rpc_config + .pre_execution_accounts_configs + .get(i) + .cloned() + .unwrap_or_default() + .and_then(|c| c.encoding), + )?, + post_execution_accounts: try_encode_accounts( + res.post_execution_accounts, + rpc_config + .post_execution_accounts_configs + .get(i) + .cloned() + .unwrap_or_default() + .and_then(|c| c.encoding), + )?, + units_consumed: Some(res.units_consumed), + return_data: res.return_data.map(|d| d.into()), + }); + } + + Ok(RpcSimulateBundleResult { + summary, + transaction_results, + }) + } +} // Full RPC interface that an API node is expected to provide // (rpc_minimal should also be provided by an API node) pub mod rpc_full { use { super::*, + crate::rpc::utils::{build_simulate_bundle_params, rpc_bundle_result_from_bank_result}, + itertools::izip, + solana_runtime::bank::SimulateBundleError, solana_sdk::message::{SanitizedVersionedMessage, VersionedMessage}, }; + #[rpc] pub trait Full { type Metadata; @@ -3346,6 +3508,14 @@ pub mod rpc_full { config: Option, ) -> Result>; + #[rpc(meta, name = "simulateBundle")] + fn simulate_bundle( + &self, + meta: Self::Metadata, + rpc_bundle_request: RpcBundleRequest, + config: Option, + ) -> Result>; + #[rpc(meta, name = "minimumLedgerSlot")] fn minimum_ledger_slot(&self, meta: Self::Metadata) -> Result; @@ -3439,6 +3609,14 @@ pub mod rpc_full { ) -> Result>; } + fn jsonrpc_error_from_simulate_bundle_error(e: SimulateBundleError) -> 
Error { + match e { + SimulateBundleError::AccountNotFoundInBank(pubkey) => { + Error::invalid_params(format!("account {:?} not found in bank", pubkey)) + } + } + } + pub struct FullImpl; impl Full for FullImpl { type Metadata = JsonRpcRequestProcessor; @@ -3645,7 +3823,6 @@ pub mod rpc_full { commitment: preflight_commitment, min_context_slot, })?; - let transaction = sanitize_transaction(unsanitized_tx, preflight_bank)?; let signature = *transaction.signature(); @@ -3836,6 +4013,97 @@ pub mod rpc_full { )) } + fn simulate_bundle( + &self, + meta: Self::Metadata, + rpc_bundle_request: RpcBundleRequest, + config: Option, + ) -> Result> { + debug!("simulate_bundle rpc request received"); + + let config = config.unwrap_or_else(|| RpcSimulateBundleConfig { + pre_execution_accounts_configs: vec![ + None; + rpc_bundle_request.encoded_transactions.len() + ], + post_execution_accounts_configs: vec![ + None; + rpc_bundle_request.encoded_transactions.len() + ], + ..RpcSimulateBundleConfig::default() + }); + + // Run some request validations + if !(config.pre_execution_accounts_configs.len() + == rpc_bundle_request.encoded_transactions.len() + && config.post_execution_accounts_configs.len() + == rpc_bundle_request.encoded_transactions.len()) + { + return Err(Error::invalid_params( + "pre/post_execution_accounts_configs must be equal in length to the number of transactions", + )); + } + + let bank = match config.simulation_bank.unwrap_or_default() { + SimulationSlotConfig::Commitment(commitment) => Ok(meta.bank(Some(commitment))), + SimulationSlotConfig::Slot(slot) => meta.bank_from_slot(slot).ok_or_else(|| { + Error::invalid_params(format!("bank not found for the provided slot: {}", slot)) + }), + SimulationSlotConfig::Tip => Ok(meta.bank_forks.read().unwrap().working_bank()), + }?; + + let tx_encoding = config + .transaction_encoding + .unwrap_or(UiTransactionEncoding::Base64); + let binary_encoding = tx_encoding.into_binary_encoding().ok_or_else(|| { + Error::invalid_params(format!( + "Unsupported encoding: {}. 
Supported encodings are: base58 & base64", + tx_encoding + )) + })?; + let mut decoded_transactions = rpc_bundle_request + .encoded_transactions + .into_iter() + .map(|encoded_tx| { + decode_and_deserialize::(encoded_tx, binary_encoding) + .map(|de| de.1) + }) + .collect::>>()?; + + if config.replace_recent_blockhash { + if !config.skip_sig_verify { + return Err(Error::invalid_params( + "sigVerify may not be used with replaceRecentBlockhash", + )); + } + decoded_transactions.iter_mut().for_each(|tx| { + tx.message.set_recent_blockhash(bank.last_blockhash()); + }); + } + + let zipped = izip!( + decoded_transactions, + config.pre_execution_accounts_configs.clone(), + config.post_execution_accounts_configs.clone(), + ); + let (sanitized_txs, pre_execution_pks, post_execution_pks) = + build_simulate_bundle_params(zipped.collect(), &*bank)?; + + if !config.skip_sig_verify { + for tx in &sanitized_txs { + verify_transaction(tx, &bank.feature_set)?; + } + } + + let bank_result = bank + .simulate_bundle(sanitized_txs, pre_execution_pks, post_execution_pks) + .map_err(jsonrpc_error_from_simulate_bundle_error)?; + + let rpc_bundle_result = rpc_bundle_result_from_bank_result(bank_result, config)?; + + Ok(new_response(&*bank, rpc_bundle_result)) + } + fn minimum_ledger_slot(&self, meta: Self::Metadata) -> Result { debug!("minimum_ledger_slot rpc request received"); meta.minimum_ledger_slot() @@ -4139,6 +4407,7 @@ pub mod rpc_deprecated_v1_9 { .and_then(|snapshot_config| { snapshot_utils::get_highest_full_snapshot_archive_slot( &snapshot_config.full_snapshot_archives_dir, + None, ) }) .ok_or_else(|| RpcCustomError::NoSnapshot.into()) @@ -5786,6 +6055,145 @@ pub mod tests { assert_eq!(result.len(), 0); } + #[test] + fn test_rpc_simulate_bundle_happy_path() { + // 1. setup + let rpc = RpcHandler::start(); + let bank = rpc.working_bank(); + + let recent_blockhash = bank.confirmed_last_blockhash(); + let RpcHandler { + ref meta, ref io, .. + } = rpc; + + let data_len = 100; + let lamports = bank.get_minimum_balance_for_rent_exemption(data_len); + let leader_pubkey = solana_sdk::pubkey::new_rand(); + let leader_account_data = AccountSharedData::new(lamports, data_len, &system_program::id()); + bank.store_account(&leader_pubkey, &leader_account_data); + bank.freeze(); + + // 2. build bundle + + // let's pretend the RPC keypair is a searcher + let searcher_keypair = rpc.mint_keypair; + + // create tip tx + let tip_amount = 10000; + let tip_tx = VersionedTransaction::from(system_transaction::transfer( + &searcher_keypair, + &leader_pubkey, + tip_amount, + recent_blockhash, + )); + + // some random mev tx + let mev_amount = 20000; + let goku_pubkey = solana_sdk::pubkey::new_rand(); + let mev_tx = VersionedTransaction::from(system_transaction::transfer( + &searcher_keypair, + &goku_pubkey, + mev_amount, + recent_blockhash, + )); + + let encoded_mev_tx = base64::encode(serialize(&mev_tx).unwrap()); + let encoded_tip_tx = base64::encode(serialize(&tip_tx).unwrap()); + + let b64_data = base64::encode(leader_account_data.data()); + + // 3. 
test and assert + let skip_sig_verify = true; + let replace_recent_blockhash = false; + let expected_response = json!({ + "jsonrpc": "2.0", + "result": { + "context": {"slot": bank.slot(), "apiVersion": RpcApiVersion::default()}, + "value":{ + "summary": "succeeded", + "transactionResults": [ + { + "err": null, + "logs": ["Program 11111111111111111111111111111111 invoke [1]", "Program 11111111111111111111111111111111 success"], + "returnData": null, + "unitsConsumed": 0, + "postExecutionAccounts": [], + "preExecutionAccounts": [ + { + "data": [b64_data, "base64"], + "executable": false, + "lamports": leader_account_data.lamports(), + "owner": "11111111111111111111111111111111", + "rentEpoch": 0, + } + ], + }, + { + "err": null, + "logs": ["Program 11111111111111111111111111111111 invoke [1]", "Program 11111111111111111111111111111111 success"], + "returnData": null, + "unitsConsumed": 0, + "preExecutionAccounts": [], + "postExecutionAccounts": [ + { + "data": [b64_data, "base64"], + "executable": false, + "lamports": leader_account_data.lamports() + tip_amount, + "owner": "11111111111111111111111111111111", + "rentEpoch": 0, + } + ], + }, + ], + } + }, + "id": 1, + }); + + let request = format!( + r#"{{"jsonrpc":"2.0", + "id":1, + "method":"simulateBundle", + "params":[ + {{ + "encodedTransactions": ["{}", "{}"] + }}, + {{ + "skipSigVerify": {}, + "replaceRecentBlockhash": {}, + "slot": {}, + "preExecutionAccountsConfigs": [ + {{ "encoding": "base64", "addresses": ["{}"] }}, + {{ "encoding": "base64", "addresses": [] }} + ], + "postExecutionAccountsConfigs": [ + {{ "encoding": "base64", "addresses": [] }}, + {{ "encoding": "base64", "addresses": ["{}"] }} + ] + }} + ] + }}"#, + encoded_mev_tx, + encoded_tip_tx, + skip_sig_verify, + replace_recent_blockhash, + bank.slot(), + leader_pubkey, + leader_pubkey, + ); + + let actual_response = io + .handle_request_sync(&request, meta.clone()) + .expect("response"); + + let expected_response = serde_json::from_value::(expected_response) + .expect("expected_response deserialization"); + let actual_response = serde_json::from_str::(&actual_response) + .expect("actual_response deserialization"); + + assert_eq!(expected_response, actual_response); + } + #[test] fn test_rpc_simulate_transaction() { let rpc = RpcHandler::start(); @@ -6370,7 +6778,6 @@ pub mod tests { Arc::new(Keypair::new()), SocketAddrSpace::Unspecified, )); - let tpu_address = cluster_info.my_contact_info().tpu; let (meta, receiver) = JsonRpcRequestProcessor::new( JsonRpcConfig::default(), None, @@ -6379,7 +6786,7 @@ pub mod tests { blockstore, validator_exit, health.clone(), - cluster_info, + cluster_info.clone(), Hash::default(), None, OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), @@ -6392,7 +6799,7 @@ pub mod tests { ); let connection_cache = Arc::new(ConnectionCache::default()); SendTransactionService::new::( - tpu_address, + cluster_info, &bank_forks, None, receiver, @@ -6643,7 +7050,6 @@ pub mod tests { Arc::new(Keypair::new()), SocketAddrSpace::Unspecified, )); - let tpu_address = cluster_info.my_contact_info().tpu; let (request_processor, receiver) = JsonRpcRequestProcessor::new( JsonRpcConfig::default(), None, @@ -6652,7 +7058,7 @@ pub mod tests { blockstore, validator_exit, RpcHealth::stub(), - cluster_info, + cluster_info.clone(), Hash::default(), None, OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), @@ -6665,7 +7071,7 @@ pub mod tests { ); let connection_cache = Arc::new(ConnectionCache::default()); 
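+        // Review note: SendTransactionService::new now receives the ClusterInfo
+        // handle itself rather than a tpu_address resolved once at startup,
+        // presumably because the advertised TPU can change at runtime under this
+        // patch; re-resolving from cluster_info on use avoids pinning a stale
+        // address.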
SendTransactionService::new::( - tpu_address, + cluster_info, &bank_forks, None, receiver, diff --git a/rpc/src/rpc_service.rs b/rpc/src/rpc_service.rs index abf30c83fe..3b7406b971 100644 --- a/rpc/src/rpc_service.rs +++ b/rpc/src/rpc_service.rs @@ -256,6 +256,7 @@ impl RequestMiddleware for RpcRequestMiddleware { let full_snapshot_archive_info = snapshot_utils::get_highest_full_snapshot_archive_info( &snapshot_config.full_snapshot_archives_dir, + None, ); let snapshot_archive_info = if let Some(full_snapshot_archive_info) = full_snapshot_archive_info { @@ -265,6 +266,7 @@ impl RequestMiddleware for RpcRequestMiddleware { snapshot_utils::get_highest_incremental_snapshot_archive_info( &snapshot_config.incremental_snapshot_archives_dir, full_snapshot_archive_info.slot(), + None, ) .map(|incremental_snapshot_archive_info| { incremental_snapshot_archive_info @@ -379,8 +381,6 @@ impl JsonRpcService { LARGEST_ACCOUNTS_CACHE_DURATION, ))); - let tpu_address = cluster_info.my_contact_info().tpu; - // sadly, some parts of our current rpc implemention block the jsonrpc's // _socket-listening_ event loop for too long, due to (blocking) long IO or intesive CPU, // causing no further processing of incoming requests and ultimatily innocent clients timing-out. @@ -474,7 +474,7 @@ impl JsonRpcService { let leader_info = poh_recorder.map(|recorder| ClusterTpuInfo::new(cluster_info.clone(), recorder)); let _send_transaction_service = Arc::new(SendTransactionService::new_with_config( - tpu_address, + cluster_info, &bank_forks, leader_info, receiver, diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index 0ec9549b01..05eb7d0e2b 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -1096,15 +1096,21 @@ impl Accounts { account_locks: &mut AccountLocks, writable_keys: Vec<&Pubkey>, readonly_keys: Vec<&Pubkey>, + additional_read_locks: &HashSet, + additional_write_locks: &HashSet, ) -> Result<()> { for k in writable_keys.iter() { - if account_locks.is_locked_write(k) || account_locks.is_locked_readonly(k) { + if account_locks.is_locked_write(k) + || account_locks.is_locked_readonly(k) + || additional_write_locks.contains(k) + || additional_read_locks.contains(k) + { debug!("Writable account in use: {:?}", k); return Err(TransactionError::AccountInUse); } } for k in readonly_keys.iter() { - if account_locks.is_locked_write(k) { + if account_locks.is_locked_write(k) || additional_write_locks.contains(k) { debug!("Read-only account in use: {:?}", k); return Err(TransactionError::AccountInUse); } @@ -1166,7 +1172,23 @@ impl Accounts { let tx_account_locks_results: Vec> = txs .map(|tx| tx.get_account_locks(tx_account_lock_limit)) .collect(); - self.lock_accounts_inner(tx_account_locks_results) + self.lock_accounts_inner( + tx_account_locks_results, + &HashSet::default(), + &HashSet::default(), + ) + } + + pub fn lock_accounts_sequential_with_results<'a>( + &self, + txs: impl Iterator, + tx_account_lock_limit: usize, + account_locks_override: Option>, + ) -> Vec> { + let tx_account_locks_results: Vec> = txs + .map(|tx| tx.get_account_locks(tx_account_lock_limit)) + .collect(); + self.lock_accounts_sequential_inner(tx_account_locks_results, account_locks_override) } #[must_use] @@ -1176,6 +1198,8 @@ impl Accounts { txs: impl Iterator, results: impl Iterator>, tx_account_lock_limit: usize, + additional_read_locks: &HashSet, + additional_write_locks: &HashSet, ) -> Vec> { let tx_account_locks_results: Vec> = txs .zip(results) @@ -1184,13 +1208,19 @@ impl Accounts { Err(err) => 
Err(err.clone()), }) .collect(); - self.lock_accounts_inner(tx_account_locks_results) + self.lock_accounts_inner( + tx_account_locks_results, + additional_read_locks, + additional_write_locks, + ) } #[must_use] fn lock_accounts_inner( &self, tx_account_locks_results: Vec>, + additional_read_locks: &HashSet, + additional_write_locks: &HashSet, ) -> Vec> { let account_locks = &mut self.account_locks.lock().unwrap(); tx_account_locks_results @@ -1200,12 +1230,51 @@ impl Accounts { account_locks, tx_account_locks.writable, tx_account_locks.readonly, + additional_read_locks, + additional_write_locks, ), Err(err) => Err(err), }) .collect() } + #[must_use] + fn lock_accounts_sequential_inner( + &self, + tx_account_locks_results: Vec>, + account_locks_override: Option>, + ) -> Vec> { + let mut l_account_locks = if let Some(ref account_locks) = account_locks_override { + account_locks.lock().unwrap() + } else { + self.account_locks.lock().unwrap() + }; + + let mut account_in_use_set = false; + tx_account_locks_results + .into_iter() + .map(|tx_account_locks_result| match tx_account_locks_result { + Ok(tx_account_locks) => match account_in_use_set { + true => Err(TransactionError::BundleNotContinuous), + false => { + let locked = self.lock_account( + &mut l_account_locks, + tx_account_locks.writable, + tx_account_locks.readonly, + &HashSet::default(), + &HashSet::default(), + ); + if matches!(locked, Err(TransactionError::AccountInUse)) { + account_in_use_set = true; + } + locked + } + }, + Err(err) => Err(err), + }) + .collect() + } + /// Once accounts are unlocked, new transactions that modify that state can enter the pipeline #[allow(clippy::needless_collect)] pub fn unlock_accounts<'a>( @@ -1218,6 +1287,7 @@ impl Accounts { .filter_map(|(tx, res)| match res { Err(TransactionError::AccountLoadedTwice) | Err(TransactionError::AccountInUse) + | Err(TransactionError::BundleNotContinuous) | Err(TransactionError::SanitizeFailure) | Err(TransactionError::TooManyAccountLocks) | Err(TransactionError::WouldExceedMaxBlockCostLimit) @@ -1249,7 +1319,7 @@ impl Accounts { lamports_per_signature: u64, preserve_rent_epoch_for_rent_exempt_accounts: bool, ) { - let (accounts_to_store, txn_signatures) = self.collect_accounts_to_store( + let (accounts_to_store, txn_signatures) = Self::collect_accounts_to_store( txs, res, loaded, @@ -1275,8 +1345,7 @@ impl Accounts { } #[allow(clippy::too_many_arguments)] - fn collect_accounts_to_store<'a>( - &self, + pub fn collect_accounts_to_store<'a>( txs: &'a [SanitizedTransaction], execution_results: &'a [TransactionExecutionResult], load_results: &'a mut [TransactionLoadResult], @@ -1481,6 +1550,7 @@ mod tests { sync::atomic::{AtomicBool, AtomicU64, Ordering}, thread, time, }, + Accounts, }; fn new_sanitized_tx( @@ -2878,6 +2948,8 @@ mod tests { txs.iter(), qos_results.iter(), MAX_TX_ACCOUNT_LOCKS, + &HashSet::default(), + &HashSet::default(), ); assert!(results[0].is_ok()); // Read-only account (keypair0) can be referenced multiple times @@ -3002,7 +3074,7 @@ mod tests { } let txs = vec![tx0, tx1]; let execution_results = vec![new_execution_result(Ok(()), None); 2]; - let (collected_accounts, txn_signatures) = accounts.collect_accounts_to_store( + let (collected_accounts, txn_signatures) = Accounts::collect_accounts_to_store( &txs, &execution_results, loaded.as_mut_slice(), @@ -3471,7 +3543,7 @@ mod tests { let mut loaded = vec![loaded]; let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); - let accounts = Accounts::new_with_config_for_tests( + let 
_accounts = Accounts::new_with_config_for_tests( Vec::new(), &ClusterType::Development, AccountSecondaryIndexes::default(), @@ -3486,7 +3558,7 @@ mod tests { )), nonce.as_ref(), )]; - let (collected_accounts, _) = accounts.collect_accounts_to_store( + let (collected_accounts, _) = Accounts::collect_accounts_to_store( &txs, &execution_results, loaded.as_mut_slice(), @@ -3586,7 +3658,7 @@ mod tests { let mut loaded = vec![loaded]; let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); - let accounts = Accounts::new_with_config_for_tests( + let _accounts = Accounts::new_with_config_for_tests( Vec::new(), &ClusterType::Development, AccountSecondaryIndexes::default(), @@ -3601,7 +3673,7 @@ mod tests { )), nonce.as_ref(), )]; - let (collected_accounts, _) = accounts.collect_accounts_to_store( + let (collected_accounts, _) = Accounts::collect_accounts_to_store( &txs, &execution_results, loaded.as_mut_slice(), diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 6938379547..e2d565a0dc 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -40,7 +40,7 @@ use { crate::{ account_overrides::AccountOverrides, accounts::{ - AccountAddressFilter, Accounts, LoadedTransaction, PubkeyAccountSlot, + AccountAddressFilter, AccountLocks, Accounts, LoadedTransaction, PubkeyAccountSlot, TransactionLoadResult, }, accounts_db::{ @@ -96,6 +96,7 @@ use { AccountSharedData, InheritableAccountFields, ReadableAccount, WritableAccount, }, account_utils::StateMut, + bundle::{error::BundleExecutionError, utils::check_bundle_lock_results}, clock::{ BankId, Epoch, Slot, SlotCount, SlotIndex, UnixTimestamp, DEFAULT_TICKS_PER_SECOND, INITIAL_RENT_EPOCH, MAX_PROCESSING_AGE, MAX_TRANSACTION_FORWARDING_DELAY, @@ -151,22 +152,25 @@ use { std::{ borrow::Cow, cell::RefCell, + cmp::min, collections::{HashMap, HashSet}, convert::{TryFrom, TryInto}, fmt, mem, ops::{Deref, RangeInclusive}, path::PathBuf, rc::Rc, + result, sync::{ atomic::{ AtomicBool, AtomicI64, AtomicU64, AtomicUsize, Ordering::{AcqRel, Acquire, Relaxed}, }, - Arc, LockResult, RwLock, RwLockReadGuard, RwLockWriteGuard, + Arc, LockResult, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard, }, thread::Builder, time::{Duration, Instant}, }, + thiserror::Error, }; /// params to `verify_bank_hash` @@ -282,7 +286,7 @@ impl RentDebits { } pub type BankStatusCache = StatusCache>; -#[frozen_abi(digest = "HEJXoycXvGT2pwMuKcUKzzbeemnqbfrUC4jHZx1ncaWv")] +#[frozen_abi(digest = "HbqqQzXjZZmiYmUzuTtuLpSkQmxZu5ugjHunda9sJk8t")] pub type BankSlotDelta = SlotDelta>; // Eager rent collection repeats in cyclic manner. 
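Reviewer note on the sequential locking added to accounts.rs above: unlike the all-or-nothing parallel path, lock_accounts_sequential_inner grants locks to the longest continuous prefix of the bundle and marks every transaction after the first conflict as BundleNotContinuous. A self-contained toy model of that decision sequence (illustrative only, not the real Accounts API; the real path also honors additional read/write locks and per-transaction sanitization errors):

    use std::collections::HashSet;

    #[derive(Debug, PartialEq)]
    enum ToyLockResult {
        Ok,
        AccountInUse,
        BundleNotContinuous,
    }

    /// `batch` holds each transaction's (writable, readonly) account sets in bundle order.
    fn sequential_lock(batch: &[(HashSet<&str>, HashSet<&str>)]) -> Vec<ToyLockResult> {
        let mut write_locks: HashSet<&str> = HashSet::new();
        let mut read_locks: HashSet<&str> = HashSet::new();
        let mut poisoned = false;
        batch
            .iter()
            .map(|(writes, reads)| {
                if poisoned {
                    // everything after the first failure is not continuous
                    return ToyLockResult::BundleNotContinuous;
                }
                // a write conflicts with any prior lock; a read only with prior writes
                let conflict = writes
                    .iter()
                    .any(|k| write_locks.contains(k) || read_locks.contains(k))
                    || reads.iter().any(|k| write_locks.contains(k));
                if conflict {
                    poisoned = true;
                    return ToyLockResult::AccountInUse;
                }
                write_locks.extend(writes);
                read_locks.extend(reads);
                ToyLockResult::Ok
            })
            .collect()
    }

With a self-conflicting pattern like the bank tests below (T0 writes {a, b}, T1 writes {a}, T2 writes {d, e}), this yields [Ok, AccountInUse, BundleNotContinuous]: only the T0 prefix holds locks, and the caller retries the remainder as the next chunk.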
@@ -421,8 +425,40 @@ impl TransactionExecutionResult { Self::NotExecuted(err) => Err(err.clone()), } } + + /// Return an Error if a transaction was executed and reverted + /// NOTE: `execution_results` are zipped with `sanitized_txs` so it's expected a sanitized tx at + /// position i has a corresponding execution result at position i within the `execution_results` + /// slice + pub fn check_bundle_execution_results<'a>( + execution_results: &[TransactionExecutionResult], + sanitized_txs: &'a [SanitizedTransaction], + ) -> result::Result<(), (BundleExecutionError, &'a Signature)> { + for (exec_results, sanitized_tx) in execution_results.iter().zip(sanitized_txs) { + match exec_results { + TransactionExecutionResult::Executed { + details, + executors: _, + } => { + if let Err(e) = &details.status { + return Err((e.clone().into(), sanitized_tx.signature())); + } + } + TransactionExecutionResult::NotExecuted(e) => { + if !matches!( + e, + TransactionError::AccountInUse | TransactionError::BundleNotContinuous + ) { + return Err((e.clone().into(), sanitized_tx.signature())); + } + } + } + } + Ok(()) + } } +#[derive(Debug)] pub struct LoadAndExecuteTransactionsOutput { pub loaded_transactions: Vec, // Vector of results indicating whether a transaction was executed or could not @@ -462,6 +498,45 @@ impl DurableNonceFee { } } +#[derive(Debug, Clone, PartialEq)] +pub enum BundleSimulationSummary { + // error and transaction signature responsible + Failed { + error: BundleExecutionError, + tx_signature: Signature, + }, + Succeeded, +} + +#[derive(Clone, Debug, PartialEq)] +pub struct AccountData { + pub pubkey: Pubkey, + pub data: AccountSharedData, +} + +#[derive(Clone)] +pub struct BundleSimulationResult { + /// Gives high level summary of bundle. + pub summary: BundleSimulationSummary, + pub transaction_results: Vec, +} + +#[derive(Error, Debug)] +pub enum SimulateBundleError { + #[error("account missing from bank: {0}")] + AccountNotFoundInBank(Pubkey), +} + +#[derive(Clone)] +pub struct BundleTransactionSimulationResult { + pub result: Result<()>, + pub logs: TransactionLogMessages, + pub pre_execution_accounts: Option>, + pub post_execution_accounts: Option>, + pub return_data: Option, + pub units_consumed: u64, +} + pub struct TransactionSimulationResult { pub result: Result<()>, pub logs: TransactionLogMessages, @@ -469,6 +544,7 @@ pub struct TransactionSimulationResult { pub units_consumed: u64, pub return_data: Option, } + pub struct TransactionBalancesSet { pub pre_balances: TransactionBalances, pub post_balances: TransactionBalances, @@ -1022,7 +1098,7 @@ pub struct Bank { inflation: Arc>, /// cache of vote_account and stake_account state for this fork - stakes_cache: StakesCache, + pub stakes_cache: StakesCache, /// staked nodes on epoch boundaries, saved off when a bank.slot() is at /// a leader schedule calculation boundary @@ -3717,12 +3793,32 @@ impl Bank { &'a self, transactions: &'b [SanitizedTransaction], transaction_results: impl Iterator>, + additional_read_locks: &HashSet, + additional_write_locks: &HashSet, ) -> TransactionBatch<'a, 'b> { - // this lock_results could be: Ok, AccountInUse, WouldExceedBlockMaxLimit or WouldExceedAccountMaxLimit let lock_results = self.rc.accounts.lock_accounts_with_results( transactions.iter(), transaction_results, self.get_transaction_account_lock_limit(), + additional_read_locks, + additional_write_locks, + ); + TransactionBatch::new(lock_results, self, Cow::Borrowed(transactions)) + } + + /// Prepare a locked transaction batch from a list of 
sanitized transactions, and their cost
+    /// limited packing status, where transactions will be locked sequentially until the first failure
+    pub fn prepare_sequential_sanitized_batch_with_results<'a, 'b>(
+        &'a self,
+        transactions: &'b [SanitizedTransaction],
+        // For use cases where you don't want to actually lock the accounts, for example when simulating.
+        account_locks_override: Option<Mutex<AccountLocks>>,
+    ) -> TransactionBatch<'a, 'b> {
+        // this lock_results could be: Ok, AccountInUse, BundleNotContinuous, AccountLoadedTwice, or TooManyAccountLocks
+        let lock_results = self.rc.accounts.lock_accounts_sequential_with_results(
+            transactions.iter(),
+            self.get_transaction_account_lock_limit(),
+            account_locks_override,
         );
         TransactionBatch::new(lock_results, self, Cow::Borrowed(transactions))
     }
@@ -3741,6 +3837,238 @@ impl Bank {
         batch
     }

+    /// Run bundles against a frozen bank without committing the results and return [BundleSimulationResult].
+    /// Client has the option to request pre/post execution results on a per-transaction basis.
+    ///
+    /// For example given:
+    ///
+    /// Bundle: [T0{A, B, C}, T1{D}, T2{E, A, C}, T3{D, F}]
+    /// Requested Pre-Execution Accounts: [None, [A, D], [B], [A, C, F]]
+    /// Requested Post-Execution Accounts: [None, [D], None, [A, B, F]]
+    ///
+    /// It is expected that the following is returned:
+    /// Returned Pre-Execution Accounts: [None, [T0(A), D], [T0(B)], [T0(T2(A)), T0(T2(C)), F]]
+    /// Returned Post-Execution Accounts: [None, [T1(D)], None, [T0(T2(A)), T0(B), T3(F)]]
+    pub fn simulate_bundle(
+        &self,
+        bundle: Vec<SanitizedTransaction>,
+        pre_execution_accounts_requested: Vec<Option<Vec<Pubkey>>>,
+        post_execution_accounts_requested: Vec<Option<Vec<Pubkey>>>,
+    ) -> result::Result<BundleSimulationResult, SimulateBundleError> {
+        assert_eq!(pre_execution_accounts_requested.len(), bundle.len());
+        assert_eq!(post_execution_accounts_requested.len(), bundle.len());
+
+        // Used to cache account data in between batch execution iterations
+        let mut account_overrides = AccountOverrides::default();
+
+        let mut pre_execution_accounts_return_data =
+            Vec::with_capacity(pre_execution_accounts_requested.len());
+        let mut post_execution_accounts_return_data =
+            Vec::with_capacity(post_execution_accounts_requested.len());
+        let mut transaction_results = Vec::with_capacity(bundle.len());
+
+        let mut timings = ExecuteTimings::default();
+        let mut chunk_start = 0;
+        while chunk_start != bundle.len() {
+            let chunk_end = min(bundle.len(), chunk_start + 128);
+            let chunk = &bundle[chunk_start..chunk_end];
+
+            let account_locks_override = Mutex::new(AccountLocks::default());
+            let batch = self.prepare_sequential_sanitized_batch_with_results(
+                chunk,
+                Some(account_locks_override),
+            );
+
+            // check if any error
+            if let Some((error, failed_tx_idx)) = check_bundle_lock_results(batch.lock_results()) {
+                transaction_results.extend(vec![
+                    BundleTransactionSimulationResult {
+                        result: Err(TransactionError::SkippedExecution),
+                        logs: vec![],
+                        pre_execution_accounts: None,
+                        post_execution_accounts: None,
+                        return_data: None,
+                        units_consumed: 0,
+                    };
+                    bundle.len() - chunk_start
+                ]);
+
+                let mut res = transaction_results
+                    .get_mut(failed_tx_idx + chunk_start)
+                    .unwrap();
+                res.result = Err(error.clone());
+
+                let failed_tx = &batch.sanitized_transactions()[failed_tx_idx];
+                return Ok(BundleSimulationResult {
+                    summary: BundleSimulationSummary::Failed {
+                        error: error.into(),
+                        tx_signature: *failed_tx.signature(),
+                    },
+                    transaction_results,
+                });
+            }
+
+            // Set chunk_end to its true value i.e. the first occurrence of an acceptable lock error.
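+            // For example, if the sequential locks above came back as
+            //     [Ok, Ok, Err(AccountInUse), Err(BundleNotContinuous)]
+            // with chunk_start == 0, chunk_end tightens from 4 to 2: txs 0 and 1
+            // execute in this iteration, and txs 2 and 3 are retried on the next
+            // pass against a fresh AccountLocks, reading this chunk's output from
+            // the account_overrides cache. (Illustrative values only.)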
+ let chunk_end = match batch.lock_results().iter().position(|res| res.is_err()) { + Some(err_idx) => chunk_start + err_idx, + None => chunk_end, + }; + // Load the accounts requested by caller for current chunk of transactions prior to executing. + let pre_execution_accounts = &pre_execution_accounts_requested[chunk_start..chunk_end]; + for maybe_accounts in pre_execution_accounts { + if let Some(accounts) = maybe_accounts { + let mut pre_accounts = Vec::with_capacity(accounts.len()); + + for pubkey in accounts { + let data = if let Some(data) = account_overrides.get(pubkey).cloned() { + Ok(data) + } else { + self.get_account(pubkey) + .ok_or(SimulateBundleError::AccountNotFoundInBank(*pubkey)) + }?; + pre_accounts.push(AccountData { + pubkey: *pubkey, + data, + }); + } + + pre_execution_accounts_return_data.push(Some(pre_accounts)) + } else { + pre_execution_accounts_return_data.push(None); + } + } + + // Execute the transaction! + let LoadAndExecuteTransactionsOutput { + mut loaded_transactions, + execution_results, + .. + } = self.load_and_execute_transactions( + &batch, + // After simulation, transactions will need to be forwarded to the leader + // for processing. During forwarding, the transaction could expire if the + // delay is not accounted for. + MAX_PROCESSING_AGE - MAX_TRANSACTION_FORWARDING_DELAY, + false, + true, + true, + &mut timings, + Some(&account_overrides), + None, + ); + + // Load account data for successful txs in current batch and store them to the overrides/cache. + let post_loaded_accounts = self + .collect_accounts_to_store( + batch.sanitized_transactions(), + &execution_results, + &mut loaded_transactions, + ) + .into_iter() + .map(|(pubkey, data)| { + account_overrides.set_account(pubkey, Some(data.clone())); + (pubkey, data) + }) + .collect::>(); + + // We know `transactions[chunk_start..chunk_end]` succeeded, so fetch the corresponding requested pubkeys. + // e.g. 
given Bundle: [T0{A, B}, T1{B, C}, T2{E, F}] and Post Execution Accounts: [None, [A, B], [E]]
+            // where current chunk is (1..3) then we load up [[A, B], [E]]
+            let post_execution_accounts =
+                &post_execution_accounts_requested[chunk_start..chunk_end];
+            for maybe_accounts in post_execution_accounts {
+                if let Some(accounts) = maybe_accounts {
+                    let mut post_accounts = Vec::with_capacity(accounts.len());
+                    for pubkey in accounts {
+                        let maybe_data =
+                            if let Some(data) = post_loaded_accounts.get(pubkey).cloned() {
+                                Some(data.clone())
+                            } else {
+                                account_overrides.get(pubkey).cloned()
+                            };
+                        if let Some(data) = maybe_data {
+                            post_accounts.push(AccountData {
+                                pubkey: *pubkey,
+                                data: data.clone(),
+                            });
+                        }
+                    }
+
+                    post_execution_accounts_return_data.push(Some(post_accounts))
+                } else {
+                    post_execution_accounts_return_data.push(None);
+                }
+            }
+
+            let simulation_results = loaded_transactions.iter().zip(&execution_results[..]).map(
+                |(loaded_tx_result, exec_result)| {
+                    Self::build_transaction_simulation_result(loaded_tx_result, exec_result)
+                },
+            );
+
+            // save the transaction results
+            for (offset, tx_result) in simulation_results.enumerate() {
+                let position = offset + chunk_start;
+                if position == chunk_end {
+                    break;
+                }
+
+                transaction_results.push(BundleTransactionSimulationResult {
+                    result: tx_result.result,
+                    logs: tx_result.logs,
+                    pre_execution_accounts: pre_execution_accounts_return_data
+                        .get(position)
+                        .cloned()
+                        .unwrap_or_default(),
+                    post_execution_accounts: post_execution_accounts_return_data
+                        .get(position)
+                        .cloned()
+                        .unwrap_or_default(),
+                    return_data: tx_result.return_data,
+                    units_consumed: tx_result.units_consumed,
+                });
+            }
+
+            if let Err((error, tx_signature)) =
+                TransactionExecutionResult::check_bundle_execution_results(
+                    &execution_results[..],
+                    batch.sanitized_transactions(),
+                )
+            {
+                // fill the rest of the vector with [SkippedExecution] if any txs are left over
+                transaction_results.extend(vec![
+                    BundleTransactionSimulationResult {
+                        result: Err(TransactionError::SkippedExecution),
+                        logs: vec![],
+                        pre_execution_accounts: None,
+                        post_execution_accounts: None,
+                        return_data: None,
+                        units_consumed: 0,
+                    };
+                    bundle.len() - chunk_end
+                ]);
+
+                return Ok(BundleSimulationResult {
+                    summary: BundleSimulationSummary::Failed {
+                        error,
+                        tx_signature: *tx_signature,
+                    },
+                    transaction_results,
+                });
+            }
+
+            // Welcome to Rust & Solana, where we optimize for performance over readability!
+            // Remember that chunk_end was updated above based on whether or not the batch
+            // was continuous, so execution resumes at the first transaction that failed to lock.
+            chunk_start = chunk_end;
+        }
+
+        Ok(BundleSimulationResult {
+            summary: BundleSimulationSummary::Succeeded,
+            transaction_results,
+        })
+    }
+
     /// Run transactions against a frozen bank without committing the results
     pub fn simulate_transaction(
         &self,
@@ -3758,14 +4086,13 @@
         transaction: SanitizedTransaction,
     ) -> TransactionSimulationResult {
         let account_keys = transaction.message().account_keys();
-        let number_of_accounts = account_keys.len();
         let account_overrides = self.get_account_overrides_for_simulation(&account_keys);
         let batch = self.prepare_simulation_batch(transaction);
         let mut timings = ExecuteTimings::default();

         let LoadAndExecuteTransactionsOutput {
             loaded_transactions,
-            mut execution_results,
+            execution_results,
             ..
} = self.load_and_execute_transactions( &batch, @@ -3781,43 +4108,42 @@ impl Bank { None, ); - let post_simulation_accounts = loaded_transactions - .into_iter() - .next() - .unwrap() - .0 - .ok() - .map(|loaded_transaction| { - loaded_transaction - .accounts - .into_iter() - .take(number_of_accounts) - .collect::>() - }) - .unwrap_or_default(); - - let units_consumed = timings - .details - .per_program_timings - .iter() - .fold(0, |acc: u64, (_, program_timing)| { - acc.saturating_add(program_timing.accumulated_units) - }); - - debug!("simulate_transaction: {:?}", timings); + Self::build_transaction_simulation_result(&loaded_transactions[0], &execution_results[0]) + } - let execution_result = execution_results.pop().unwrap(); - let flattened_result = execution_result.flattened_result(); - let (logs, return_data) = match execution_result { + fn build_transaction_simulation_result( + loaded_transaction_result: &TransactionLoadResult, + execution_result: &TransactionExecutionResult, + ) -> TransactionSimulationResult { + let (logs, return_data, units_consumed, result) = match execution_result { TransactionExecutionResult::Executed { details, .. } => { - (details.log_messages, details.return_data) + let log_messages = if let Some(ref log_messages) = details.log_messages { + log_messages.clone() + } else { + vec![] + }; + + ( + log_messages, + details.return_data.as_ref().cloned(), + details.executed_units, + execution_result.flattened_result(), + ) + } + TransactionExecutionResult::NotExecuted(_) => { + (vec![], None, 0, execution_result.flattened_result()) } - TransactionExecutionResult::NotExecuted(_) => (None, None), }; - let logs = logs.unwrap_or_default(); + + let post_simulation_accounts = loaded_transaction_result + .0 + .as_ref() + .ok() + .map(|tx| tx.accounts.clone()) + .unwrap_or_default(); TransactionSimulationResult { - result: flattened_result, + result, logs, post_simulation_accounts, units_consumed, @@ -3993,6 +4319,29 @@ impl Bank { balances } + pub fn collect_balances_with_cache( + &self, + batch: &TransactionBatch, + account_overrides: Option<&AccountOverrides>, + ) -> TransactionBalances { + let mut balances: TransactionBalances = vec![]; + for transaction in batch.sanitized_transactions() { + let mut transaction_balances: Vec = vec![]; + for account_key in transaction.message().account_keys().iter() { + let balance = match account_overrides { + None => self.get_balance(account_key), + Some(overrides) => match overrides.get(account_key) { + None => self.get_balance(account_key), + Some(account_data) => account_data.lamports(), + }, + }; + transaction_balances.push(balance); + } + balances.push(transaction_balances); + } + balances + } + /// Get any cached executors needed by the transaction fn get_executors(&self, accounts: &[TransactionAccount]) -> Rc> { let executable_keys: Vec<_> = accounts @@ -4891,6 +5240,27 @@ impl Bank { } } + pub fn collect_accounts_to_store<'a>( + &self, + txs: &'a [SanitizedTransaction], + res: &'a [TransactionExecutionResult], + loaded: &'a mut [TransactionLoadResult], + ) -> Vec<(&'a Pubkey, &'a AccountSharedData)> { + let (last_blockhash, lamports_per_signature) = + self.last_blockhash_and_lamports_per_signature(); + let durable_nonce = DurableNonce::from_blockhash(&last_blockhash); + Accounts::collect_accounts_to_store( + txs, + res, + loaded, + &self.rent_collector, + &durable_nonce, + lamports_per_signature, + self.preserve_rent_epoch_for_rent_exempt_accounts(), + ) + .0 + } + // Distribute collected rent fees for this slot to staked 
validators (excluding stakers) // according to stake. // @@ -7917,6 +8287,127 @@ pub(crate) mod tests { } } + fn tx_factory( + readonly_accounts: Vec, + mut writeable_accounts: Vec, + signer_key_pair: Keypair, + ) -> Transaction { + if !writeable_accounts.contains(&signer_key_pair.pubkey()) { + writeable_accounts.insert(0, signer_key_pair.pubkey()); + } + let num_readonly_unsigned_accounts = readonly_accounts.len() as u8; + writeable_accounts.extend(readonly_accounts); + + let message = Message { + header: MessageHeader { + num_required_signatures: 1, + num_readonly_signed_accounts: 0, + num_readonly_unsigned_accounts, + }, + account_keys: writeable_accounts, + recent_blockhash: Hash::default(), + instructions: vec![], + }; + let signature = signer_key_pair.sign_message(&message.serialize()[..]); + + Transaction { + signatures: vec![signature], + message, + } + } + + #[test] + fn test_prepare_sequential_sanitized_batch_with_results_happy_path() { + let (genesis_config, _mint_keypair) = create_genesis_config(10); + let bank = Bank::new_for_tests(&genesis_config); + + // 1. create a bundle of self-conflicting account accesses + // e.g. T0{write-a, write-b}, T1{write-a, read-b, read-c}, T2{read-c, write-d, write-e}, T3{read-e, write-f} + let a = Keypair::new(); + let b = Keypair::new(); + let c = Keypair::new(); + let d = Keypair::new(); + let e = Keypair::new(); + let f = Keypair::new(); + + let tx_0 = tx_factory( + vec![], + vec![a.pubkey(), b.pubkey()], + Keypair::from_base58_string(&*a.to_base58_string()), + ); + let tx_0 = SanitizedTransaction::from_transaction_for_tests(tx_0); + + let tx_1 = tx_factory(vec![b.pubkey(), c.pubkey()], vec![a.pubkey()], a); + let tx_1 = SanitizedTransaction::from_transaction_for_tests(tx_1); + + let tx_2 = tx_factory(vec![c.pubkey()], vec![d.pubkey(), e.pubkey()], d); + let tx_2 = SanitizedTransaction::from_transaction_for_tests(tx_2); + + let tx_3 = tx_factory(vec![e.pubkey()], vec![f.pubkey()], f); + let tx_3 = SanitizedTransaction::from_transaction_for_tests(tx_3); + + // 2. 
test batches are chunked correctly + let sanitized_txs = vec![tx_0, tx_1, tx_2, tx_3]; + + let expected_next_start = 1; + _test_prepare_sequential_sanitized_batch_with_results( + &bank, + &sanitized_txs, + 0, + sanitized_txs.len(), + Some(expected_next_start), + 1, + ); + + let new_start = expected_next_start; + let expected_next_start = 3; + _test_prepare_sequential_sanitized_batch_with_results( + &bank, + &sanitized_txs, + new_start, + sanitized_txs.len(), + Some(expected_next_start), + 2, + ); + + let new_start = expected_next_start; + _test_prepare_sequential_sanitized_batch_with_results( + &bank, + &sanitized_txs, + new_start, + sanitized_txs.len(), + None, + 1, + ); + } + + fn _test_prepare_sequential_sanitized_batch_with_results( + bank: &Bank, + sanitized_txs: &[SanitizedTransaction], + chunk_start: usize, + chunk_end: usize, + expected_next_start: Option, + expected_okays: usize, + ) { + let account_locks_override = Mutex::new(AccountLocks::default()); + let chunk = &sanitized_txs[chunk_start..chunk_end]; + let batch = bank + .prepare_sequential_sanitized_batch_with_results(chunk, Some(account_locks_override)); + + assert_eq!( + batch + .lock_results() + .iter() + .filter(|res| res.is_ok()) + .count(), + expected_okays + ); + + let first_err_idx = batch.lock_results().iter().position(|res| res.is_err()); + let actual_next_start = first_err_idx.map(|first_err_idx| first_err_idx + chunk_start); + assert_eq!(actual_next_start, expected_next_start); + } + #[test] fn test_nonce_info() { let lamports_per_signature = 42; @@ -8492,6 +8983,585 @@ pub(crate) mod tests { assert_eq!(account, bank.get_account(&pubkey).unwrap()); } + #[test] + #[should_panic] + fn test_simulate_bundle_mismatched_lengths() { + let (genesis_config, _mint_keypair) = create_genesis_config(1_000_000); + let bank = Bank::new_for_tests(&genesis_config); + bank.freeze(); + + let _ = bank.simulate_bundle(vec![], vec![None], vec![None]); + } + + fn setup_system_accounts( + pubkeys: Vec, + lamports: u64, + bank: &Bank, + ) -> Vec { + pubkeys + .iter() + .map(|pk| { + let data = AccountSharedData::new(lamports, 0, &system_program::id()); + bank.store_account(pk, &data); + data + }) + .collect::>() + } + + fn assert_transaction_results( + actual_results: Vec>, + expected_results: Vec>, + ) { + assert_eq!(actual_results.len(), expected_results.len()); + for (i, (actual, expected)) in actual_results.iter().zip(expected_results).enumerate() { + assert_eq!( + actual, + &expected, + "{}", + format_args!("result at index {} did not match", i) + ); + } + } + + fn assert_simulate_bundle_correct_lamports( + actual_account_data: Vec>>, + expected_lamports: Vec>>, + ) { + assert_eq!(actual_account_data.len(), expected_lamports.len()); + + for (i, maybe_actual) in actual_account_data.iter().enumerate() { + if let Some(expected) = expected_lamports[i].clone() { + assert!(maybe_actual.is_some()); + let actual = maybe_actual.clone().unwrap(); + assert_eq!(actual.len(), expected.keys().len()); + + for (pk, lamports) in expected { + let account = actual.iter().find(|acc| acc.pubkey == pk).unwrap(); + assert_eq!(account.data.lamports(), lamports) + } + } else { + assert!(maybe_actual.is_none()); + } + } + } + + /// Tests with a bundle expected to fail due to `check_bundle_lock_results`. None of the transactions + /// should execute due to bad locking behaviour! 
+ /// + /// Bundle: [T0{Faucet, A, B}, T1{Z, C}, T2{Z, C}, T3{Faucet, A, A, B}, T4{Faucet, C}] + /// Requested Pre-Execution Accounts: [[A, B], None, None, [A, B], [C]] + /// Requested Post-Execution Accounts: [[A], [C], None, [A, B], [C]] + /// + /// Expect the following: + /// Returned Pre-Execution Accounts: [None, None, None, None, None] + /// Returned Post-Execution Accounts: [None, None, None, None, None] + #[test] + fn test_simulate_bundle_with_bad_locks() { + let (genesis_config, faucet_keypair) = create_genesis_config(1_000_000); + let bank = Bank::new_for_tests(&genesis_config); + let recent_blockhash = bank.confirmed_last_blockhash(); + + // Setup + let a = solana_sdk::pubkey::new_rand(); + let b = solana_sdk::pubkey::new_rand(); + let c = solana_sdk::pubkey::new_rand(); + let z = Keypair::new(); + let initial_lamports = 100_000; + let _ = setup_system_accounts(vec![a, b, c, z.pubkey()], initial_lamports, &bank); + + bank.freeze(); + + let mut expected_pre_lamports_returned = vec![]; + let mut expected_post_lamports_returned = vec![]; + + // Create the transactions + let to_lamports = &[(a, 100), (b, 1000)]; + let ixs = system_instruction::transfer_many(&faucet_keypair.pubkey(), to_lamports); + let message = Message::new(&ixs[..], Some(&faucet_keypair.pubkey())); + let tx_0 = SanitizedTransaction::try_from_legacy_transaction(Transaction::new( + &[&faucet_keypair], + message, + recent_blockhash, + )) + .unwrap(); + expected_pre_lamports_returned.push(None); + expected_post_lamports_returned.push(None); + + let to_lamports = &[(c, 1)]; + let ixs = system_instruction::transfer_many(&z.pubkey(), to_lamports); + let message = Message::new(&ixs[..], Some(&z.pubkey())); + let tx_1 = SanitizedTransaction::try_from_legacy_transaction(Transaction::new( + &[&z], + message, + recent_blockhash, + )) + .unwrap(); + expected_pre_lamports_returned.push(None); + expected_post_lamports_returned.push(None); + + let to_lamports = &[(c, 42069)]; + let ixs = system_instruction::transfer_many(&z.pubkey(), to_lamports); + let message = Message::new(&ixs[..], Some(&z.pubkey())); + let tx_2 = SanitizedTransaction::try_from_legacy_transaction(Transaction::new( + &[&z], + message, + recent_blockhash, + )) + .unwrap(); + expected_pre_lamports_returned.push(None); + expected_post_lamports_returned.push(None); + + let message = Message { + header: MessageHeader { + num_required_signatures: 1, + ..MessageHeader::default() + }, + account_keys: vec![faucet_keypair.pubkey(), a, a], + ..Message::default() + }; + let tx_3 = SanitizedTransaction::try_from_legacy_transaction(Transaction::new( + &[&faucet_keypair], + message, + recent_blockhash, + )) + .unwrap(); + expected_pre_lamports_returned.push(None); + expected_post_lamports_returned.push(None); + + let to_lamports = &[(c, 3433)]; + let ixs = system_instruction::transfer_many(&faucet_keypair.pubkey(), to_lamports); + let message = Message::new(&ixs[..], Some(&faucet_keypair.pubkey())); + let tx_4 = SanitizedTransaction::try_from_legacy_transaction(Transaction::new( + &[&faucet_keypair], + message, + recent_blockhash, + )) + .unwrap(); + expected_pre_lamports_returned.push(None); + expected_post_lamports_returned.push(None); + + // Create params + let pre_execution_accounts = vec![ + Some(vec![a, b]), + None, + None, + Some(vec![a, b]), + Some(vec![c]), + ]; + let post_execution_accounts = vec![ + Some(vec![a]), + Some(vec![c]), + None, + Some(vec![a, b]), + Some(vec![c]), + ]; + let bundle = vec![tx_0, tx_1, tx_2, tx_3.clone(), tx_4]; + + // Do it! 
+ let result = bank .simulate_bundle( + bundle.clone(), + pre_execution_accounts, + post_execution_accounts, + ) .unwrap(); + + // Basic assertions + assert_eq!( + result.summary, + BundleSimulationSummary::Failed { + error: BundleExecutionError::TransactionFailure( + TransactionError::AccountLoadedTwice + ), + tx_signature: *tx_3.signature() + } + ); + assert_eq!(result.transaction_results.len(), bundle.len()); + + let expected_results = vec![ + Err(TransactionError::SkippedExecution), + Err(TransactionError::SkippedExecution), + Err(TransactionError::SkippedExecution), + Err(TransactionError::AccountLoadedTwice), + Err(TransactionError::SkippedExecution), + ]; + let actual_results = result + .transaction_results + .clone() + .into_iter() + .map(|res| res.result) + .collect::<Vec<_>>(); + assert_transaction_results(actual_results, expected_results); + + let actual_pre_lamports = result + .transaction_results + .clone() + .into_iter() + .map(|res| res.pre_execution_accounts) + .collect::<Vec<_>>(); + assert_simulate_bundle_correct_lamports( + actual_pre_lamports, + expected_pre_lamports_returned, + ); + + let actual_post_lamports = result + .transaction_results + .into_iter() + .map(|res| res.post_execution_accounts) + .collect::<Vec<_>>(); + assert_simulate_bundle_correct_lamports( + actual_post_lamports, + expected_post_lamports_returned, + ); + } + + /// Tests with a bundle expected to fail due to failing transaction execution. + /// The first two txs are parallelizable and both succeed. T3 fails execution, causing + /// the remaining tx (T4) to fail with [TransactionError::SkippedExecution]. + /// + /// Bundle: [T0{Faucet, A, B}, T1{Z, C}, T2{Faucet, C}, T3{Z, A, B}, T4{Faucet, C}] + /// Requested Pre-Execution Accounts: [[A, B], None, [C], [A, B], [C]] + /// Requested Post-Execution Accounts: [[A], [C], None, [A, B], [C]] + /// + /// Expect the following: + /// Returned Pre-Execution Accounts: [[A, B], None, [T1(C)], [T0(A), T0(B)], None] + /// Returned Post-Execution Accounts: [[T0(A)], [T1(C)], None, [T0(A), T0(B)], None] + #[test] + fn test_simulate_bundle_with_failing_tx() { + let (genesis_config, faucet_keypair) = create_genesis_config(1_000_000); + let bank = Bank::new_for_tests(&genesis_config); + let recent_blockhash = bank.confirmed_last_blockhash(); + + // Setup + let a = solana_sdk::pubkey::new_rand(); + let b = solana_sdk::pubkey::new_rand(); + let c = solana_sdk::pubkey::new_rand(); + let z = Keypair::new(); + let initial_lamports = 100_000; + let (pre_a_data, pre_b_data, pre_c_data, pre_z_data) = + match &setup_system_accounts(vec![a, b, c, z.pubkey()], initial_lamports, &bank)[..]
{ + [pre_a_data, pre_b_data, pre_c_data, pre_z_data] => ( + pre_a_data.clone(), + pre_b_data.clone(), + pre_c_data.clone(), + pre_z_data.clone(), + ), + _ => unreachable!(), + }; + + bank.freeze(); + + let mut expected_pre_lamports_returned = vec![]; + let mut expected_post_lamports_returned = vec![]; + + // Create the transactions + let to_lamports = &[(a, 100), (b, 1000)]; + let ixs = system_instruction::transfer_many(&faucet_keypair.pubkey(), to_lamports); + let message = Message::new(&ixs[..], Some(&faucet_keypair.pubkey())); + let tx_0 = SanitizedTransaction::try_from_legacy_transaction(Transaction::new( + &[&faucet_keypair], + message, + recent_blockhash, + )) + .unwrap(); + let mut m = HashMap::new(); + m.insert(a, pre_a_data.lamports()); + m.insert(b, pre_b_data.lamports()); + expected_pre_lamports_returned.push(Some(m)); + let mut m = HashMap::new(); + m.insert(a, pre_a_data.lamports() + 100); + expected_post_lamports_returned.push(Some(m)); + + let to_lamports = &[(c, 1)]; + let ixs = system_instruction::transfer_many(&z.pubkey(), to_lamports); + let message = Message::new(&ixs[..], Some(&z.pubkey())); + let tx_1 = SanitizedTransaction::try_from_legacy_transaction(Transaction::new( + &[&z], + message, + recent_blockhash, + )) + .unwrap(); + expected_pre_lamports_returned.push(None); + let mut m = HashMap::new(); + m.insert(c, pre_c_data.lamports() + 1); + expected_post_lamports_returned.push(Some(m)); + + let to_lamports = &[(c, 42069)]; + let ixs = system_instruction::transfer_many(&faucet_keypair.pubkey(), to_lamports); + let message = Message::new(&ixs[..], Some(&faucet_keypair.pubkey())); + let tx_2 = SanitizedTransaction::try_from_legacy_transaction(Transaction::new( + &[&faucet_keypair], + message, + recent_blockhash, + )) + .unwrap(); + let mut m = HashMap::new(); + m.insert(c, pre_c_data.lamports() + 1); + expected_pre_lamports_returned.push(Some(m)); + expected_post_lamports_returned.push(None); + + let to_lamports = &[(c, pre_z_data.lamports() + 1000)]; + let ixs = system_instruction::transfer_many(&z.pubkey(), to_lamports); + let message = Message::new(&ixs[..], Some(&z.pubkey())); + let tx_3 = SanitizedTransaction::try_from_legacy_transaction(Transaction::new( + &[&z], + message, + recent_blockhash, + )) + .unwrap(); + let mut m = HashMap::new(); + m.insert(a, pre_a_data.lamports() + 100); + m.insert(b, pre_b_data.lamports() + 1000); + expected_pre_lamports_returned.push(Some(m)); + let mut m = HashMap::new(); + m.insert(a, pre_a_data.lamports() + 100); + m.insert(b, pre_b_data.lamports() + 1000); + expected_post_lamports_returned.push(Some(m)); + + let to_lamports = &[(c, 3433)]; + let ixs = system_instruction::transfer_many(&faucet_keypair.pubkey(), to_lamports); + let message = Message::new(&ixs[..], Some(&faucet_keypair.pubkey())); + let tx_4 = SanitizedTransaction::try_from_legacy_transaction(Transaction::new( + &[&faucet_keypair], + message, + recent_blockhash, + )) + .unwrap(); + expected_pre_lamports_returned.push(None); + expected_post_lamports_returned.push(None); + + // Create params + let pre_execution_accounts = vec![ + Some(vec![a, b]), + None, + Some(vec![c]), + Some(vec![a, b]), + Some(vec![c]), + ]; + let post_execution_accounts = vec![ + Some(vec![a]), + Some(vec![c]), + None, + Some(vec![a, b]), + Some(vec![c]), + ]; + let bundle = vec![tx_0, tx_1, tx_2, tx_3.clone(), tx_4]; + + // Do it! 
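An aside on the expected failure below: tx_3 above transfers more than `z` holds, and that surfaces as `InstructionError::Custom(1)` because 1 is the discriminant of the system program's insufficient-funds error. A quick sanity check of that mapping, assuming the stock `SystemError` enum from `solana_sdk`:

```rust
use solana_sdk::{instruction::InstructionError, system_instruction::SystemError};

#[test]
fn overdraw_maps_to_custom_1() {
    // SystemError::ResultWithNegativeLamports is the system program's
    // "insufficient funds" case; transaction results report its discriminant
    // as InstructionError::Custom(1).
    assert_eq!(
        InstructionError::Custom(SystemError::ResultWithNegativeLamports as u32),
        InstructionError::Custom(1)
    );
}
```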
+ let result = bank .simulate_bundle( + bundle.clone(), + pre_execution_accounts, + post_execution_accounts, + ) .unwrap(); + + // Basic assertions + assert_eq!( + result.summary, + BundleSimulationSummary::Failed { + error: BundleExecutionError::TransactionFailure( + TransactionError::InstructionError(0, InstructionError::Custom(1)) + ), + tx_signature: *tx_3.signature() + } + ); + assert_eq!(result.transaction_results.len(), bundle.len()); + + let expected_results = vec![ + Ok(()), + Ok(()), + Ok(()), + Err(TransactionError::InstructionError( + 0, + InstructionError::Custom(1), + )), + Err(TransactionError::SkippedExecution), + ]; + let actual_results = result + .transaction_results + .clone() + .into_iter() + .map(|res| res.result) + .collect::<Vec<_>>(); + assert_transaction_results(actual_results, expected_results); + + let actual_pre_lamports = result + .transaction_results + .clone() + .into_iter() + .map(|res| res.pre_execution_accounts) + .collect::<Vec<_>>(); + assert_simulate_bundle_correct_lamports( + actual_pre_lamports, + expected_pre_lamports_returned, + ); + + let actual_post_lamports = result + .transaction_results + .into_iter() + .map(|res| res.post_execution_accounts) + .collect::<Vec<_>>(); + assert_simulate_bundle_correct_lamports( + actual_post_lamports, + expected_post_lamports_returned, + ); + } + + /// Tests with a bundle expected to succeed, containing no parallelizable chunks + /// + /// Bundle: [T0{Faucet, A, B, C}, T1{Faucet, D}, T2{Faucet, E, A, C}, T3{Faucet, D, F}] + /// Requested Pre-Execution Accounts: [None, [A, D], [B], [A, C, F]] + /// Requested Post-Execution Accounts: [None, [D], None, [A, B, F]] + /// + /// Expect the following: + /// Returned Pre-Execution Accounts: [None, [T0(A), D], [T0(B)], [T0(T2(A)), T0(T2(C)), F]] + /// Returned Post-Execution Accounts: [None, [T1(D)], None, [T0(T2(A)), T0(B), T3(F)]] + #[test] + fn test_simulate_bundle_happy_path() { + let (genesis_config, faucet_keypair) = create_genesis_config(1_000_000); + let bank = Bank::new_for_tests(&genesis_config); + let recent_blockhash = bank.confirmed_last_blockhash(); + + // Create some accounts and save them to the bank + let a = solana_sdk::pubkey::new_rand(); + let b = solana_sdk::pubkey::new_rand(); + let c = solana_sdk::pubkey::new_rand(); + let d = solana_sdk::pubkey::new_rand(); + let e = solana_sdk::pubkey::new_rand(); + let f = solana_sdk::pubkey::new_rand(); + let initial_lamports = 100_000; + let (pre_a_data, pre_b_data, pre_c_data, pre_d_data, pre_f_data) = + match &setup_system_accounts(vec![a, b, c, d, e, f], initial_lamports, &bank)[..]
{ + [pre_a_data, pre_b_data, pre_c_data, pre_d_data, _, pre_f_data] => ( + pre_a_data.clone(), + pre_b_data.clone(), + pre_c_data.clone(), + pre_d_data.clone(), + pre_f_data.clone(), + ), + _ => unreachable!(), + }; + + bank.freeze(); + + let mut expected_pre_lamports_returned = vec![]; + let mut expected_post_lamports_returned = vec![]; + + // Create the transactions + let to_lamports = &[(a, 100), (b, 1000), (c, 200)]; + let ixs = system_instruction::transfer_many(&faucet_keypair.pubkey(), to_lamports); + let message = Message::new(&ixs[..], Some(&faucet_keypair.pubkey())); + let tx_0 = SanitizedTransaction::try_from_legacy_transaction(Transaction::new( + &[&faucet_keypair], + message, + recent_blockhash, + )) + .unwrap(); + expected_pre_lamports_returned.push(None); + expected_post_lamports_returned.push(None); + + let to_lamports = &[(d, 343)]; + let ixs = system_instruction::transfer_many(&faucet_keypair.pubkey(), to_lamports); + let message = Message::new(&ixs[..], Some(&faucet_keypair.pubkey())); + let tx_1 = SanitizedTransaction::try_from_legacy_transaction(Transaction::new( + &[&faucet_keypair], + message, + recent_blockhash, + )) + .unwrap(); + let mut m = HashMap::new(); + m.insert(a, pre_a_data.lamports() + 100); + m.insert(d, pre_d_data.lamports()); + expected_pre_lamports_returned.push(Some(m)); + let mut m = HashMap::new(); + m.insert(d, pre_d_data.lamports() + 343); + expected_post_lamports_returned.push(Some(m)); + + let to_lamports = &[(e, 378), (a, 1002), (c, 200)]; + let ixs = system_instruction::transfer_many(&faucet_keypair.pubkey(), to_lamports); + let message = Message::new(&ixs[..], Some(&faucet_keypair.pubkey())); + let tx_2 = SanitizedTransaction::try_from_legacy_transaction(Transaction::new( + &[&faucet_keypair], + message, + recent_blockhash, + )) + .unwrap(); + let mut m = HashMap::new(); + m.insert(b, pre_b_data.lamports() + 1000); + expected_pre_lamports_returned.push(Some(m)); + expected_post_lamports_returned.push(None); + + let to_lamports = &[(d, 378), (f, 1002)]; + let ixs = system_instruction::transfer_many(&faucet_keypair.pubkey(), to_lamports); + let message = Message::new(&ixs[..], Some(&faucet_keypair.pubkey())); + let tx_3 = SanitizedTransaction::try_from_legacy_transaction(Transaction::new( + &[&faucet_keypair], + message, + recent_blockhash, + )) + .unwrap(); + let mut m = HashMap::new(); + m.insert(a, pre_a_data.lamports() + 100 + 1002); + m.insert(c, pre_c_data.lamports() + 200 + 200); + m.insert(f, pre_f_data.lamports()); + expected_pre_lamports_returned.push(Some(m)); + let mut m = HashMap::new(); + m.insert(a, pre_a_data.lamports() + 100 + 1002); + m.insert(b, pre_b_data.lamports() + 1000); + m.insert(f, pre_f_data.lamports() + 1002); + expected_post_lamports_returned.push(Some(m)); + + // Create params + let pre_execution_accounts = + vec![None, Some(vec![a, d]), Some(vec![b]), Some(vec![a, c, f])]; + let post_execution_accounts = vec![None, Some(vec![d]), None, Some(vec![a, b, f])]; + let bundle = vec![tx_0, tx_1, tx_2, tx_3]; + + // Do it! 
+ let result = bank .simulate_bundle( + bundle.clone(), + pre_execution_accounts, + post_execution_accounts, + ) .unwrap(); + + // Basic assertions + assert_eq!(result.summary, BundleSimulationSummary::Succeeded); + assert_eq!(result.transaction_results.len(), bundle.len()); + + let expected_results = vec![Ok(()), Ok(()), Ok(()), Ok(())]; + let actual_results = result + .transaction_results + .clone() + .into_iter() + .map(|res| res.result) + .collect::<Vec<_>>(); + assert_transaction_results(actual_results, expected_results); + + let actual_pre_lamports = result + .transaction_results + .clone() + .into_iter() + .map(|res| res.pre_execution_accounts) + .collect::<Vec<_>>(); + assert_simulate_bundle_correct_lamports( + actual_pre_lamports, + expected_pre_lamports_returned, + ); + + let actual_post_lamports = result + .transaction_results + .into_iter() + .map(|res| res.post_execution_accounts) + .collect::<Vec<_>>(); + assert_simulate_bundle_correct_lamports( + actual_post_lamports, + expected_post_lamports_returned, + ); + } + #[test] fn test_store_account_and_update_capitalization_increased() { let old_lamports = 400; diff --git a/runtime/src/builtins.rs b/runtime/src/builtins.rs index 0e3f98843a..d0653441f4 100644 --- a/runtime/src/builtins.rs +++ b/runtime/src/builtins.rs @@ -197,7 +197,7 @@ fn builtin_feature_transitions() -> Vec<BuiltinFeatureTransition> { ] } -pub(crate) fn get() -> Builtins { +pub fn get() -> Builtins { Builtins { genesis_builtins: genesis_builtins(), feature_transitions: builtin_feature_transitions(), diff --git a/runtime/src/cost_tracker.rs b/runtime/src/cost_tracker.rs index 518ac8fb75..c8140a7e80 100644 --- a/runtime/src/cost_tracker.rs +++ b/runtime/src/cost_tracker.rs @@ -89,6 +89,10 @@ impl CostTracker { self.vote_cost_limit = vote_cost_limit; } + pub fn set_block_cost_limit(&mut self, new_limit: u64) { + self.block_cost_limit = new_limit; + } + pub fn try_add(&mut self, tx_cost: &TransactionCost) -> Result { self.would_fit(tx_cost)?; self.add_transaction_cost(tx_cost); @@ -138,6 +142,10 @@ impl CostTracker { self.block_cost } + pub fn block_cost_limit(&self) -> u64 { + self.block_cost_limit + } + pub fn transaction_count(&self) -> u64 { self.transaction_count } diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index e11f7a65d1..7dc5959a91 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -887,12 +887,13 @@ pub fn bank_fields_from_snapshot_archives( incremental_snapshot_archives_dir: impl AsRef<Path>, ) -> Result<BankFieldsToDeserialize> { let full_snapshot_archive_info = - get_highest_full_snapshot_archive_info(&full_snapshot_archives_dir) + get_highest_full_snapshot_archive_info(&full_snapshot_archives_dir, None) .ok_or(SnapshotError::NoSnapshotArchives)?; let incremental_snapshot_archive_info = get_highest_incremental_snapshot_archive_info( &incremental_snapshot_archives_dir, full_snapshot_archive_info.slot(), + None, ); let temp_dir = tempfile::Builder::new() @@ -1022,18 +1023,20 @@ pub fn bank_from_latest_snapshot_archives( verify_index: bool, accounts_db_config: Option<AccountsDbConfig>, accounts_update_notifier: Option<AccountsUpdateNotifier>, + halt_at_slot: Option<Slot>, ) -> Result<( Bank, FullSnapshotArchiveInfo, Option<IncrementalSnapshotArchiveInfo>, )> { let full_snapshot_archive_info = - get_highest_full_snapshot_archive_info(&full_snapshot_archives_dir) + get_highest_full_snapshot_archive_info(&full_snapshot_archives_dir, halt_at_slot) .ok_or(SnapshotError::NoSnapshotArchives)?; let incremental_snapshot_archive_info = get_highest_incremental_snapshot_archive_info( &incremental_snapshot_archives_dir, full_snapshot_archive_info.slot(),
+ halt_at_slot, ); info!( @@ -1390,8 +1393,9 @@ pub fn get_incremental_snapshot_archives( /// Get the highest slot of the full snapshot archives in a directory pub fn get_highest_full_snapshot_archive_slot( full_snapshot_archives_dir: impl AsRef<Path>, + halt_at_slot: Option<Slot>, ) -> Option<Slot> { - get_highest_full_snapshot_archive_info(full_snapshot_archives_dir) + get_highest_full_snapshot_archive_info(full_snapshot_archives_dir, halt_at_slot) .map(|full_snapshot_archive_info| full_snapshot_archive_info.slot()) } @@ -1400,10 +1404,12 @@ pub fn get_highest_full_snapshot_archive_slot( pub fn get_highest_incremental_snapshot_archive_slot( incremental_snapshot_archives_dir: impl AsRef<Path>, full_snapshot_slot: Slot, + halt_at_slot: Option<Slot>, ) -> Option<Slot> { get_highest_incremental_snapshot_archive_info( incremental_snapshot_archives_dir, full_snapshot_slot, + halt_at_slot, ) .map(|incremental_snapshot_archive_info| incremental_snapshot_archive_info.slot()) } @@ -1411,8 +1417,13 @@ pub fn get_highest_incremental_snapshot_archive_slot( /// Get the path (and metadata) for the full snapshot archive with the highest slot in a directory pub fn get_highest_full_snapshot_archive_info( full_snapshot_archives_dir: impl AsRef<Path>, + halt_at_slot: Option<Slot>, ) -> Option<FullSnapshotArchiveInfo> { let mut full_snapshot_archives = get_full_snapshot_archives(full_snapshot_archives_dir); + if let Some(halt_at_slot) = halt_at_slot { + full_snapshot_archives + .retain(|archive| archive.snapshot_archive_info().slot <= halt_at_slot); + } full_snapshot_archives.sort_unstable(); full_snapshot_archives.into_iter().rev().next() } @@ -1422,6 +1433,7 @@ pub fn get_highest_full_snapshot_archive_info( pub fn get_highest_incremental_snapshot_archive_info( incremental_snapshot_archives_dir: impl AsRef<Path>, full_snapshot_slot: Slot, + halt_at_slot: Option<Slot>, ) -> Option<IncrementalSnapshotArchiveInfo> { // Since we want to filter down to only the incremental snapshot archives that have the same // full snapshot slot as the value passed in, perform the filtering before sorting to avoid + let mut incremental_snapshot_archives = ... .filter(|incremental_snapshot_archive_info| { incremental_snapshot_archive_info.base_slot() == full_snapshot_slot }) .collect::<Vec<_>>(); + if let Some(halt_at_slot) = halt_at_slot { + incremental_snapshot_archives.retain(|archive| archive.slot() <= halt_at_slot); + } incremental_snapshot_archives.sort_unstable(); incremental_snapshot_archives.into_iter().rev().next() } @@ -2858,7 +2873,7 @@ mod tests { ); assert_eq!( - get_highest_full_snapshot_archive_slot(full_snapshot_archives_dir.path()), + get_highest_full_snapshot_archive_slot(full_snapshot_archives_dir.path(), None), Some(max_slot - 1) ); } @@ -2885,7 +2900,8 @@ mod tests { assert_eq!( get_highest_incremental_snapshot_archive_slot( incremental_snapshot_archives_dir.path(), - full_snapshot_slot + full_snapshot_slot, + None, ), Some(max_incremental_snapshot_slot - 1) ); @@ -2894,7 +2910,8 @@ mod tests { assert_eq!( get_highest_incremental_snapshot_archive_slot( incremental_snapshot_archives_dir.path(), - max_full_snapshot_slot + max_full_snapshot_slot, + None, ), None ); @@ -3593,6 +3610,7 @@ mod tests { false, Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), None, + None, ) .unwrap(); diff --git a/runtime/src/stake_account.rs b/runtime/src/stake_account.rs index 8421e37832..6b3ecc8256 100644 --- a/runtime/src/stake_account.rs +++ b/runtime/src/stake_account.rs @@ -41,7 +41,7 @@ impl StakeAccount { } #[inline] - pub(crate) fn stake_state(&self) -> &StakeState { + pub fn stake_state(&self) -> &StakeState { &self.stake_state } @@ -52,7 +52,7 @@
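A usage sketch for the new `halt_at_slot` plumbing, using the signatures introduced in the hunks above: a caller that must stop replay at a given slot (e.g. ledger-tool's create-snapshot path) can now ask for the newest archive at or below that slot rather than the global newest.

```rust
use {solana_runtime::snapshot_utils, solana_sdk::clock::Slot, std::path::Path};

/// Pick the snapshot to boot from when replay must stop at `halt_at_slot`;
/// passing `None` preserves the old "highest available" behavior.
fn base_snapshot_slot(archives_dir: &Path, halt_at_slot: Option<Slot>) -> Option<Slot> {
    snapshot_utils::get_highest_full_snapshot_archive_slot(archives_dir, halt_at_slot)
}
```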
impl StakeAccount { #[inline] - pub(crate) fn delegation(&self) -> Delegation { + pub fn delegation(&self) -> Delegation { // Safe to unwrap here becasue StakeAccount will always // only wrap a stake-state which is a delegation. self.stake_state.delegation().unwrap() diff --git a/runtime/src/stakes.rs b/runtime/src/stakes.rs index f8ea7fb007..23a9870b85 100644 --- a/runtime/src/stakes.rs +++ b/runtime/src/stakes.rs @@ -50,17 +50,17 @@ pub enum InvalidCacheEntryReason { WrongOwner, } -type StakeAccount = stake_account::StakeAccount; +pub type StakeAccount = stake_account::StakeAccount; #[derive(Default, Debug, AbiExample)] -pub(crate) struct StakesCache(RwLock>); +pub struct StakesCache(RwLock>); impl StakesCache { pub(crate) fn new(stakes: Stakes) -> Self { Self(RwLock::new(stakes)) } - pub(crate) fn stakes(&self) -> RwLockReadGuard> { + pub fn stakes(&self) -> RwLockReadGuard> { self.0.read().unwrap() } @@ -171,7 +171,7 @@ pub struct Stakes { vote_accounts: VoteAccounts, /// stake_delegations - stake_delegations: ImHashMap, + pub stake_delegations: ImHashMap, /// unused unused: u64, @@ -211,7 +211,7 @@ impl Stakes { /// full account state for respective stake pubkeys. get_account function /// should return the account at the respective slot where stakes where /// cached. - pub(crate) fn new(stakes: &Stakes, get_account: F) -> Result + pub fn new(stakes: &Stakes, get_account: F) -> Result where F: Fn(&Pubkey) -> Option, { @@ -396,7 +396,7 @@ impl Stakes { } } - pub(crate) fn stake_delegations(&self) -> &ImHashMap { + pub fn stake_delegations(&self) -> &ImHashMap { &self.stake_delegations } diff --git a/rustfmt.toml b/rustfmt.toml index e26d07f0d8..c7ccd48750 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1,2 +1,7 @@ imports_granularity = "One" group_imports = "One" + +ignore = [ + "jito-programs", + "anchor" +] \ No newline at end of file diff --git a/s b/s new file mode 100755 index 0000000000..d6e0fd31b3 --- /dev/null +++ b/s @@ -0,0 +1,15 @@ +#!/usr/bin/env sh + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)" + +if [ -f .env ]; then + export $(cat .env | grep -v '#' | awk '/=/ {print $1}') +else + echo "Missing .env file" + exit 0 +fi + +echo "Syncing to host: $HOST" + +# sync to build server, ignoring local builds and local/remote dev ledger +rsync -avh --delete --exclude target --exclude docker-output "$SCRIPT_DIR" "$HOST":~/ diff --git a/scripts/coverage.sh b/scripts/coverage.sh index 059ddc2e20..7a54a4c65b 100755 --- a/scripts/coverage.sh +++ b/scripts/coverage.sh @@ -69,14 +69,15 @@ if [[ -n $CI || -z $1 ]]; then fi # limit jobs to 4gb/thread -if [[ -f "/proc/meminfo" ]]; then - JOBS=$(grep MemTotal /proc/meminfo | awk '{printf "%.0f", ($2 / (4 * 1024 * 1024))}') -else - JOBS=$(sysctl hw.memsize | awk '{printf "%.0f", ($2 / (4 * 1024**3))}') -fi - -NPROC=$(nproc) -JOBS=$((JOBS>NPROC ? NPROC : JOBS)) +#if [[ -f "/proc/meminfo" ]]; then + #JOBS=$(grep MemTotal /proc/meminfo | awk '{printf "%.0f", ($2 / (4 * 1024 * 1024))}') +#else + #JOBS=$(sysctl hw.memsize | awk '{printf "%.0f", ($2 / (4 * 1024**3))}') +#fi + +#NPROC=$(nproc) +#JOBS=$((JOBS>NPROC ? 
NPROC : JOBS)) +JOBS=32 RUST_LOG=solana=trace _ "$cargo" nightly test --jobs "$JOBS" --target-dir target/cov --no-run "${packages[@]}" if RUST_LOG=solana=trace _ "$cargo" nightly test --jobs "$JOBS" --target-dir target/cov "${packages[@]}" -- --nocapture 2> >(tee target/cov/coverage-stderr.log >&2); then diff --git a/scripts/increment-cargo-version.sh b/scripts/increment-cargo-version.sh index 7a5f489603..a4bcfa5918 100755 --- a/scripts/increment-cargo-version.sh +++ b/scripts/increment-cargo-version.sh @@ -24,6 +24,8 @@ ignores=( target web3.js/test node_modules + jito-programs + anchor ) not_paths=() diff --git a/scripts/run.sh b/scripts/run.sh index a890aa10c1..f76e472d0a 100755 --- a/scripts/run.sh +++ b/scripts/run.sh @@ -102,6 +102,10 @@ args=( --identity "$validator_identity" --vote-account "$validator_vote_account" --ledger "$ledgerDir" + --tip-payment-program-pubkey "DThZmRNNXh7kvTQW9hXeGoWGPKktK8pgVAyoTLjH7UrT" + --tip-distribution-program-pubkey "FjrdANjvo76aCYQ4kf9FM1R8aESUcEE6F8V7qyoVUQcM" + --merkle-root-upload-authority "$validator_identity" + --commission-bps 0 --gossip-port 8001 --full-rpc-api --rpc-port 8899 diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index 56b9536342..a3520febd4 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -37,6 +37,7 @@ full = [ ] [dependencies] +anchor-lang = { path = "../anchor/lang" } assert_matches = { version = "1.5.0", optional = true } base64 = "0.13" bincode = "1.3.3" diff --git a/sdk/src/bundle/error.rs b/sdk/src/bundle/error.rs new file mode 100644 index 0000000000..33bed7d356 --- /dev/null +++ b/sdk/src/bundle/error.rs @@ -0,0 +1,51 @@ +#![cfg(feature = "full")] + +use { + anchor_lang::error::Error, serde::Deserialize, solana_program::pubkey::Pubkey, + solana_sdk::transaction::TransactionError, std::time::Duration, thiserror::Error, +}; + +#[derive(Error, Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub enum BundleExecutionError { + #[error("PoH max height reached in the middle of a bundle.")] + PohMaxHeightError, + + #[error("A transaction in the bundle failed with - {0}")] + TransactionFailure(#[from] TransactionError), + + #[error("The bundle exceeds the cost model")] + ExceedsCostModel, + + #[error("Tip error {0}")] + TipError(#[from] TipPaymentError), + + #[error("Shutdown triggered")] + Shutdown, + + #[error("The time spent retrying bundles exceeded the allowed time {0:?}")] + MaxRetriesExceeded(Duration), + + #[error("Error locking bundle because the transaction is malformed")] + LockError, +} + +#[derive(Error, Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub enum TipPaymentError { + #[error("account is missing from bank: {0}")] + AccountMissing(Pubkey), + + #[error("MEV program is non-existent")] + ProgramNonExistent(Pubkey), + + #[error("Anchor error: {0}")] + AnchorError(String), +} + +impl From for TipPaymentError { + fn from(anchor_err: Error) -> Self { + match anchor_err { + Error::AnchorError(e) => Self::AnchorError(e.error_msg), + Error::ProgramError(e) => Self::AnchorError(e.to_string()), + } + } +} diff --git a/sdk/src/bundle/mod.rs b/sdk/src/bundle/mod.rs new file mode 100644 index 0000000000..0ecbaf9e33 --- /dev/null +++ b/sdk/src/bundle/mod.rs @@ -0,0 +1,12 @@ +#![cfg(feature = "full")] + +use crate::transaction::VersionedTransaction; + +pub mod error; +pub mod sanitized; +pub mod utils; + +#[derive(Debug, PartialEq, Default, Eq, Clone, Serialize, Deserialize)] +pub struct VersionedBundle { + pub transactions: Vec, +} diff --git a/sdk/src/bundle/sanitized.rs b/sdk/src/bundle/sanitized.rs new 
file mode 100644 index 0000000000..3c4ec77cef --- /dev/null +++ b/sdk/src/bundle/sanitized.rs @@ -0,0 +1,20 @@ +#![cfg(feature = "full")] + +use { + crate::transaction::VersionedTransaction, + itertools::Itertools, + sha2::{Digest, Sha256}, + solana_sdk::transaction::SanitizedTransaction, +}; + +#[derive(Clone, Debug)] +pub struct SanitizedBundle { + pub transactions: Vec, + pub bundle_id: String, +} + +pub fn derive_bundle_id(transactions: &[VersionedTransaction]) -> String { + let mut hasher = Sha256::new(); + hasher.update(transactions.iter().map(|tx| tx.signatures[0]).join(",")); + format!("{:x}", hasher.finalize()) +} diff --git a/sdk/src/bundle/utils.rs b/sdk/src/bundle/utils.rs new file mode 100644 index 0000000000..04c7446eda --- /dev/null +++ b/sdk/src/bundle/utils.rs @@ -0,0 +1,20 @@ +use {crate::bundle::error::BundleExecutionError, solana_sdk::transaction::TransactionError}; + +type LockResult = Result<(), TransactionError>; + +/// Checks that preparing a bundle gives an acceptable batch back +pub fn check_bundle_lock_results(lock_results: &[LockResult]) -> Option<(TransactionError, usize)> { + for (i, res) in lock_results.iter().enumerate() { + match res { + Ok(()) + | Err(TransactionError::AccountInUse) + | Err(TransactionError::BundleNotContinuous) => {} + Err(e) => { + return Some((e.clone(), i)); + } + } + } + None +} + +pub type BundleExecutionResult = Result; diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index 08da2ce824..3e4230a33d 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -12,6 +12,7 @@ pub use solana_program::*; pub mod account; pub mod account_utils; pub mod builtins; +pub mod bundle; pub mod client; pub mod commitment_config; pub mod compute_budget; diff --git a/sdk/src/transaction/error.rs b/sdk/src/transaction/error.rs index 3f25fa5a62..0d9fa6944c 100644 --- a/sdk/src/transaction/error.rs +++ b/sdk/src/transaction/error.rs @@ -149,6 +149,14 @@ pub enum TransactionError { "Transaction results in an account ({account_index}) without insufficient funds for rent" )] InsufficientFundsForRent { account_index: u8 }, + + /// Bundle is not continuous + #[error("Bundle is not continuous")] + BundleNotContinuous, + + /// This error type should be used when a transaction in a bundle is not executed due to an earlier tx error. 
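Taken together, a hedged sketch of how these new SDK pieces compose (module paths follow the `pub mod bundle;` addition above; the lock-result rule treats `AccountInUse` and `BundleNotContinuous` as chunk delimiters and anything else as fatal at its index):

```rust
use solana_sdk::{
    bundle::{sanitized::derive_bundle_id, utils::check_bundle_lock_results},
    transaction::{TransactionError, VersionedTransaction},
};

fn inspect_bundle(txs: &[VersionedTransaction]) {
    // bundle_id = sha256 over the comma-joined first signatures
    let bundle_id = derive_bundle_id(txs);
    println!("bundle id: {bundle_id}");

    // AccountInUse / BundleNotContinuous just delimit sequential chunks; any
    // other lock error aborts the bundle and is reported with its index.
    let lock_results = [
        Ok(()),
        Err(TransactionError::AccountInUse),
        Err(TransactionError::AccountLoadedTwice),
    ];
    assert_eq!(
        check_bundle_lock_results(&lock_results),
        Some((TransactionError::AccountLoadedTwice, 2))
    );
}
```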
+ #[error("Transaction did not execute.")] + SkippedExecution, } impl From for TransactionError { diff --git a/send-transaction-service/Cargo.toml b/send-transaction-service/Cargo.toml index 63fbd285b4..1ee8a9d34d 100644 --- a/send-transaction-service/Cargo.toml +++ b/send-transaction-service/Cargo.toml @@ -13,6 +13,7 @@ edition = "2021" crossbeam-channel = "0.5" log = "0.4.17" solana-client = { path = "../client", version = "=1.14.24" } +solana-gossip = { path = "../gossip", version = "=1.14.24" } solana-measure = { path = "../measure", version = "=1.14.24" } solana-metrics = { path = "../metrics", version = "=1.14.24" } solana-runtime = { path = "../runtime", version = "=1.14.24" } @@ -20,6 +21,7 @@ solana-sdk = { path = "../sdk", version = "=1.14.24" } [dev-dependencies] solana-logger = { path = "../logger", version = "=1.14.24" } +solana-streamer = { path = "../streamer", version = "=1.14.24" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/send-transaction-service/src/send_transaction_service.rs b/send-transaction-service/src/send_transaction_service.rs index 5419cd0bb7..fcc314c570 100644 --- a/send-transaction-service/src/send_transaction_service.rs +++ b/send-transaction-service/src/send_transaction_service.rs @@ -3,6 +3,7 @@ use { crossbeam_channel::{Receiver, RecvTimeoutError}, log::*, solana_client::{connection_cache::ConnectionCache, tpu_connection::TpuConnection}, + solana_gossip::cluster_info::ClusterInfo, solana_measure::measure::Measure, solana_metrics::datapoint_warn, solana_runtime::{bank::Bank, bank_forks::BankForks}, @@ -325,7 +326,7 @@ const SEND_TRANSACTION_METRICS_REPORT_RATE_MS: u64 = 5000; impl SendTransactionService { pub fn new( - tpu_address: SocketAddr, + cluster_info: Arc, bank_forks: &Arc>, leader_info: Option, receiver: Receiver, @@ -340,7 +341,7 @@ impl SendTransactionService { ..Config::default() }; Self::new_with_config( - tpu_address, + cluster_info, bank_forks, leader_info, receiver, @@ -351,7 +352,7 @@ impl SendTransactionService { } pub fn new_with_config( - tpu_address: SocketAddr, + cluster_info: Arc, bank_forks: &Arc>, leader_info: Option, receiver: Receiver, @@ -366,7 +367,7 @@ impl SendTransactionService { let leader_info_provider = Arc::new(Mutex::new(CurrentLeaderInfo::new(leader_info))); let receive_txn_thread = Self::receive_txn_thread( - tpu_address, + cluster_info.clone(), receiver, leader_info_provider.clone(), connection_cache.clone(), @@ -377,7 +378,7 @@ impl SendTransactionService { ); let retry_thread = Self::retry_thread( - tpu_address, + cluster_info, bank_forks.clone(), leader_info_provider, connection_cache.clone(), @@ -395,7 +396,7 @@ impl SendTransactionService { /// Thread responsible for receiving transactions from RPC clients. 
fn receive_txn_thread( - tpu_address: SocketAddr, + cluster_info: Arc, receiver: Receiver, leader_info_provider: Arc>>, connection_cache: Arc, @@ -456,6 +457,7 @@ impl SendTransactionService { stats .sent_transactions .fetch_add(transactions.len() as u64, Ordering::Relaxed); + let tpu_address = cluster_info.my_contact_info().tpu; Self::send_transactions_in_batch( &tpu_address, &mut transactions, @@ -502,7 +504,7 @@ impl SendTransactionService { /// Thread responsible for retrying transactions fn retry_thread( - tpu_address: SocketAddr, + cluster_info: Arc, bank_forks: Arc>, leader_info_provider: Arc>>, connection_cache: Arc, @@ -538,7 +540,7 @@ impl SendTransactionService { bank_forks.working_bank().clone(), ) }; - + let tpu_address = cluster_info.my_contact_info().tpu; let _result = Self::process_transactions( &working_bank, &root_bank, @@ -779,27 +781,40 @@ mod test { super::*, crate::tpu_info::NullTpuInfo, crossbeam_channel::{bounded, unbounded}, + solana_gossip::legacy_contact_info::LegacyContactInfo as ContactInfo, solana_sdk::{ account::AccountSharedData, genesis_config::create_genesis_config, nonce::{self, state::DurableNonce}, pubkey::Pubkey, - signature::Signer, + signature::{Keypair, Signer}, system_program, system_transaction, }, - std::ops::Sub, + solana_streamer::socket::SocketAddrSpace, + std::{ + net::{IpAddr, Ipv4Addr}, + ops::Sub, + }, }; #[test] fn service_exit() { - let tpu_address = "127.0.0.1:0".parse().unwrap(); let bank = Bank::default_for_tests(); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); let (sender, receiver) = unbounded(); let connection_cache = Arc::new(ConnectionCache::default()); + let contact_info = ContactInfo { + tpu: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + ..ContactInfo::default() + }; + let cluster_info: Arc = Arc::new(ClusterInfo::new( + contact_info, + Arc::new(Keypair::new()), + SocketAddrSpace::new(false), + )); let send_tranaction_service = SendTransactionService::new::( - tpu_address, + cluster_info, &bank_forks, None, receiver, @@ -815,7 +830,15 @@ mod test { #[test] fn validator_exit() { - let tpu_address = "127.0.0.1:0".parse().unwrap(); + let contact_info = ContactInfo { + tpu: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + ..ContactInfo::default() + }; + let cluster_info: Arc = Arc::new(ClusterInfo::new( + contact_info, + Arc::new(Keypair::new()), + SocketAddrSpace::new(false), + )); let bank = Bank::default_for_tests(); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); let (sender, receiver) = bounded(0); @@ -833,7 +856,7 @@ mod test { let exit = Arc::new(AtomicBool::new(false)); let connection_cache = Arc::new(ConnectionCache::default()); let _send_transaction_service = SendTransactionService::new::( - tpu_address, + cluster_info, &bank_forks, None, receiver, diff --git a/start b/start new file mode 100755 index 0000000000..be2e1a9a7e --- /dev/null +++ b/start @@ -0,0 +1,9 @@ +#!/usr/bin/env sh +solana_keygen=./target/release/solana-keygen +SOLANA_CONFIG_DIR=./config + +mkdir $SOLANA_CONFIG_DIR + +NDEBUG=1 ./multinode-demo/setup.sh +./target/release/solana-ledger-tool -l config/bootstrap-validator/ create-snapshot 0 +NDEBUG=1 ./multinode-demo/faucet.sh diff --git a/start_multi b/start_multi new file mode 100755 index 0000000000..0083cf363b --- /dev/null +++ b/start_multi @@ -0,0 +1,29 @@ +#!/usr/bin/env sh +solana_keygen=./target/release/solana-keygen +SOLANA_CONFIG_DIR=./config + +mkdir $SOLANA_CONFIG_DIR +if [ $? -eq 0 ] ; then + echo "New Config! 
Generating Identities" + $solana_keygen new --no-passphrase -so "$SOLANA_CONFIG_DIR"/a/identity.json + $solana_keygen new --no-passphrase -so "$SOLANA_CONFIG_DIR"/a/stake-account.json + $solana_keygen new --no-passphrase -so "$SOLANA_CONFIG_DIR"/a/vote-account.json + + $solana_keygen new --no-passphrase -so "$SOLANA_CONFIG_DIR"/b/identity.json + $solana_keygen new --no-passphrase -so "$SOLANA_CONFIG_DIR"/b/stake-account.json + $solana_keygen new --no-passphrase -so "$SOLANA_CONFIG_DIR"/b/vote-account.json +fi + + +NDEBUG=1 ./multinode-demo/setup.sh \ + --bootstrap-validator \ + "$SOLANA_CONFIG_DIR"/a/identity.json \ + "$SOLANA_CONFIG_DIR"/a/vote-account.json \ + "$SOLANA_CONFIG_DIR"/a/stake-account.json \ + --bootstrap-validator \ + "$SOLANA_CONFIG_DIR"/b/identity.json \ + "$SOLANA_CONFIG_DIR"/b/vote-account.json \ + "$SOLANA_CONFIG_DIR"/b/stake-account.json + +./target/release/solana-ledger-tool -l config/bootstrap-validator/ create-snapshot 0 +NDEBUG=1 ./multinode-demo/faucet.sh diff --git a/storage-bigtable/Cargo.toml b/storage-bigtable/Cargo.toml index 00d4b8ea04..be4093661e 100644 --- a/storage-bigtable/Cargo.toml +++ b/storage-bigtable/Cargo.toml @@ -40,11 +40,11 @@ zstd = "0.11.2" # declare it here as well to activate the "vendored" feature that builds OpenSSL # statically... [target."cfg(not(windows))".dependencies] -openssl = { version = "0.10", features = ["vendored"] } +openssl = { version = "0.10.55", features = ["vendored"] } # ...except on Windows to avoid having to deal with getting CI past a build-time # Perl dependency [target."cfg(windows)".dependencies] -openssl = { version = "0.10", features = [] } +openssl = { version = "0.10.55", features = [] } [lib] crate-type = ["lib"] diff --git a/storage-proto/proto/transaction_by_addr.proto b/storage-proto/proto/transaction_by_addr.proto index 4c95b18297..20cada5f81 100644 --- a/storage-proto/proto/transaction_by_addr.proto +++ b/storage-proto/proto/transaction_by_addr.proto @@ -57,6 +57,8 @@ enum TransactionErrorType { WOULD_EXCEED_ACCOUNT_DATA_TOTAL_LIMIT = 29; DUPLICATE_INSTRUCTION = 30; INSUFFICIENT_FUNDS_FOR_RENT = 31; + BUNDLE_NOT_CONTINUOUS = 32; + SKIPPED_EXECUTION = 33; } message InstructionError { diff --git a/storage-proto/src/convert.rs b/storage-proto/src/convert.rs index 0b942a2dd9..a89bec0dc9 100644 --- a/storage-proto/src/convert.rs +++ b/storage-proto/src/convert.rs @@ -774,6 +774,8 @@ impl TryFrom for TransactionError { 27 => TransactionError::InvalidRentPayingAccount, 28 => TransactionError::WouldExceedMaxVoteCostLimit, 29 => TransactionError::WouldExceedAccountDataTotalLimit, + 32 => TransactionError::BundleNotContinuous, + 33 => TransactionError::SkippedExecution, _ => return Err("Invalid TransactionError"), }) } @@ -877,6 +879,12 @@ impl From for tx_by_addr::TransactionError { TransactionError::InsufficientFundsForRent { .. 
} => { tx_by_addr::TransactionErrorType::InsufficientFundsForRent } + TransactionError::BundleNotContinuous => { + tx_by_addr::TransactionErrorType::BundleNotContinuous + } + TransactionError::SkippedExecution => { + tx_by_addr::TransactionErrorType::SkippedExecution + } } as i32, instruction_error: match transaction_error { TransactionError::InstructionError(index, ref instruction_error) => { diff --git a/tip-distributor/Cargo.toml b/tip-distributor/Cargo.toml new file mode 100644 index 0000000000..ad4539b8c8 --- /dev/null +++ b/tip-distributor/Cargo.toml @@ -0,0 +1,51 @@ +[package] +name = "solana-tip-distributor" +version = "1.14.24" +edition = "2021" +license = "Apache-2.0" +description = "Collection of binaries used to distribute MEV rewards to delegators and validators." + +[dependencies] +anchor-lang = { path = "../anchor/lang" } +clap = { version = "3.1.18", features = ["derive", "env"] } +env_logger = "0.9.0" +futures = "0.3.21" +im = "15.1.0" +itertools = "0.10.3" +log = "0.4.17" +num-traits = "0.2.15" +serde = "1.0.137" +serde_json = "1.0.81" +solana-client = { path = "../client", version = "=1.14.24" } +solana-genesis-utils = { path = "../genesis-utils", version = "=1.14.24" } +solana-ledger = { path = "../ledger", version = "=1.14.24" } +solana-merkle-tree = { path = "../merkle-tree", version = "=1.14.24" } +solana-metrics = { path = "../metrics", version = "=1.14.24" } +solana-program = { path = "../sdk/program", version = "=1.14.24" } +solana-runtime = { path = "../runtime", version = "=1.14.24" } +solana-sdk = { path = "../sdk", version = "=1.14.24" } +solana-stake-program = { path = "../programs/stake", version = "=1.14.24" } +thiserror = "1.0.31" +tip-distribution = { path = "../jito-programs/mev-programs/programs/tip-distribution", features = ["no-entrypoint"] } +tip-payment = { path = "../jito-programs/mev-programs/programs/tip-payment", features = ["no-entrypoint"] } +tokio = { version = "1.12.0", features = ["rt-multi-thread", "macros", "sync", "time", "full"] } + +[[bin]] +name = "solana-stake-meta-generator" +path = "src/bin/stake-meta-generator.rs" + +[[bin]] +name = "solana-merkle-root-generator" +path = "src/bin/merkle-root-generator.rs" + +[[bin]] +name = "solana-merkle-root-uploader" +path = "src/bin/merkle-root-uploader.rs" + +[[bin]] +name = "solana-claim-mev-tips" +path = "src/bin/claim-mev-tips.rs" + +[[bin]] +name = "solana-reclaim-rent" +path = "src/bin/reclaim-rent.rs" diff --git a/tip-distributor/README.md b/tip-distributor/README.md new file mode 100644 index 0000000000..c100843a41 --- /dev/null +++ b/tip-distributor/README.md @@ -0,0 +1,43 @@ +# Tip Distributor +This library and collection of binaries are responsible for generating and uploading merkle roots to the on-chain +tip-distribution program found [here](https://github.com/jito-foundation/jito-programs/blob/submodule/tip-payment/programs/tip-distribution/src/lib.rs). + +## Background +Each individual validator is assigned a new PDA per epoch where their share of tips, in lamports, will be stored. +At the end of the epoch it's expected that validators take a commission and then distribute the rest of the funds +to their delegators such that delegators receive rewards proportional to their respective delegations. The distribution +mechanism is via merkle proofs similar to how airdrops work. + +The merkle roots are calculated off-chain and uploaded to the validator's **TipDistributionAccount** PDA. Validators may +elect an account to upload the merkle roots on their behalf. 
Once uploaded, users can invoke the **claim** instruction +and receive the rewards they're entitled to. Once all funds are claimed by users, the validator can close the account and +be refunded the rent. + +## Scripts + +### stake-meta-generator + +This script generates a JSON file identifying individual stake delegations to a validator, along with the amount of lamports +in each validator's **TipDistributionAccount**. All validators will be contained in the JSON list, regardless of whether +the validator is a participant in the system; a participant is a validator that runs the jito-solana client to accept tips +and has initialized a **TipDistributionAccount** PDA for the epoch. + +One edge case that we've taken into account: the last validator in epoch N may receive tips that don't get transferred +out into the PDA until some slot in epoch N + 1. Because of this we cannot rely on the bank's state at epoch N for the lamport amounts +in the PDAs. We use the bank solely to take a snapshot of delegations, and an RPC node to fetch the PDA lamports for more up-to-date data. + +### merkle-root-generator +This script accepts a path to the above JSON file as one of its arguments and generates a merkle root. It can optionally upload the root +on-chain. Additionally, it writes the generated merkle trees to a JSON file (a minimal reader for that file is sketched below). + +## How it works +To use this library as the merkle root creator, follow these steps: +1. Download a ledger snapshot containing the slot of interest, i.e. the last slot in an epoch. The Solana Foundation's snapshots can be found [here](https://console.cloud.google.com/storage/browser/mainnet-beta-ledger-us-ny5). +2. Download the snapshot onto your worker machine (where this script will run). +3. Run `solana-ledger-tool -l ${PATH_TO_LEDGER} create-snapshot ${YOUR_SLOT} ${WHERE_TO_CREATE_SNAPSHOT}` + 1. The snapshot created at `${WHERE_TO_CREATE_SNAPSHOT}` will have the highest slot of `${YOUR_SLOT}`, assuming you downloaded the correct snapshot. +4. Run `stake-meta-generator --ledger-path ${WHERE_TO_CREATE_SNAPSHOT} --tip-distribution-program-id ${PUBKEY} --out-path ${JSON_OUT_PATH} --snapshot-slot ${SLOT} --rpc-url ${URL}` + 1. Note: `${WHERE_TO_CREATE_SNAPSHOT}` must be the same in steps 3 & 4. +5. Run `merkle-root-generator --path-to-my-keypair ${KEYPAIR_PATH} --stake-meta-coll-path ${STAKE_META_COLLECTION_JSON} --rpc-url ${URL} --upload-roots ${BOOL} --force-upload-root ${BOOL}` + +Voila! diff --git a/tip-distributor/src/bin/claim-mev-tips.rs b/tip-distributor/src/bin/claim-mev-tips.rs new file mode 100644 index 0000000000..45b515dbd1 --- /dev/null +++ b/tip-distributor/src/bin/claim-mev-tips.rs @@ -0,0 +1,52 @@ +//! This binary claims MEV tips. + +use { + clap::Parser, + log::*, + solana_sdk::pubkey::Pubkey, + solana_tip_distributor::claim_mev_workflow::claim_mev_tips, + std::{path::PathBuf, str::FromStr}, +}; + +#[derive(Parser, Debug)] +#[clap(author, version, about, long_about = None)] +struct Args { + /// Path to JSON file containing the [GeneratedMerkleTreeCollection] object.
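As the reader sketch promised above: a hedged example of consuming the generator's output. Field names are taken from `GeneratedMerkleTreeCollection` later in this patch, and it assumes the tree-node fields are visible to the caller as they are to the workflows in this crate.

```rust
use {
    solana_tip_distributor::GeneratedMerkleTreeCollection,
    std::{fs::File, io::BufReader, path::Path},
};

/// Count the nodes that actually have lamports to claim in a generated
/// merkle-tree JSON; most tree nodes are zero-amount entries.
fn claimable_nodes(path: &Path) -> serde_json::Result<usize> {
    let file = File::open(path).expect("open generated merkle tree JSON");
    let collection: GeneratedMerkleTreeCollection =
        serde_json::from_reader(BufReader::new(file))?;
    Ok(collection
        .generated_merkle_trees
        .iter()
        .flat_map(|tree| &tree.tree_nodes)
        .filter(|node| node.amount > 0)
        .count())
}
```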
+ #[clap(long, env)] + merkle_trees_path: PathBuf, + + /// RPC to send transactions through + #[clap(long, env)] + rpc_url: String, + + /// Tip distribution program ID + #[clap(long, env)] + tip_distribution_program_id: String, + + /// Path to keypair + #[clap(long, env)] + keypair_path: PathBuf, +} + +fn main() { + env_logger::init(); + info!("Starting to claim mev tips..."); + + let args: Args = Args::parse(); + + let tip_distribution_program_id = Pubkey::from_str(&args.tip_distribution_program_id) + .expect("valid tip_distribution_program_id"); + + if let Err(e) = claim_mev_tips( + &args.merkle_trees_path, + &args.rpc_url, + &tip_distribution_program_id, + &args.keypair_path, + ) { + panic!("error claiming mev tips: {:?}", e); + } + info!( + "done claiming mev tips from file {:?}", + args.merkle_trees_path + ); +} diff --git a/tip-distributor/src/bin/merkle-root-generator.rs b/tip-distributor/src/bin/merkle-root-generator.rs new file mode 100644 index 0000000000..bbf4105503 --- /dev/null +++ b/tip-distributor/src/bin/merkle-root-generator.rs @@ -0,0 +1,34 @@ +//! This binary generates a merkle tree for each [TipDistributionAccount]; they are derived +//! using a user-provided [StakeMetaCollection] JSON file. + +use { + clap::Parser, log::*, + solana_tip_distributor::merkle_root_generator_workflow::generate_merkle_root, + std::path::PathBuf, +}; + +#[derive(Parser, Debug)] +#[clap(author, version, about, long_about = None)] +struct Args { + /// Path to JSON file containing the [StakeMetaCollection] object. + #[clap(long, env)] + stake_meta_coll_path: PathBuf, + + /// RPC to send transactions through. Used to validate what's being claimed is equal to TDA balance minus rent. + #[clap(long, env)] + rpc_url: String, + + /// Path to JSON file to get populated with tree node data. + #[clap(long, env)] + out_path: PathBuf, +} + +fn main() { + env_logger::init(); + info!("Starting merkle-root-generator workflow..."); + + let args: Args = Args::parse(); + generate_merkle_root(&args.stake_meta_coll_path, &args.out_path, &args.rpc_url) + .expect("merkle tree produced"); + info!("saved merkle roots to {:?}", args.out_path); +} diff --git a/tip-distributor/src/bin/merkle-root-uploader.rs b/tip-distributor/src/bin/merkle-root-uploader.rs new file mode 100644 index 0000000000..f23e8f74b2 --- /dev/null +++ b/tip-distributor/src/bin/merkle-root-uploader.rs @@ -0,0 +1,50 @@ +use { + clap::Parser, + log::info, + solana_sdk::pubkey::Pubkey, + solana_tip_distributor::merkle_root_upload_workflow::upload_merkle_root, + std::{path::PathBuf, str::FromStr}, +}; + +#[derive(Parser, Debug)] +#[clap(author, version, about, long_about = None)] +struct Args { + /// Path to JSON file containing the [GeneratedMerkleTreeCollection] object. + #[clap(long, env)] + merkle_root_path: PathBuf, + + /// The path to the keypair used to sign and pay for the `upload_merkle_root` transactions. + #[clap(long, env)] + keypair_path: PathBuf, + + /// The RPC to send transactions to.
+ #[clap(long, env)] + rpc_url: String, + + /// Tip distribution program ID + #[clap(long, env)] + tip_distribution_program_id: String, +} + +fn main() { + env_logger::init(); + + let args: Args = Args::parse(); + + let tip_distribution_program_id = Pubkey::from_str(&args.tip_distribution_program_id) + .expect("valid tip_distribution_program_id"); + + info!("starting merkle root uploader..."); + if let Err(e) = upload_merkle_root( + &args.merkle_root_path, + &args.keypair_path, + &args.rpc_url, + &tip_distribution_program_id, + ) { + panic!("failed to upload merkle roots: {:?}", e); + } + info!( + "uploaded merkle roots from file {:?}", + args.merkle_root_path + ); +} diff --git a/tip-distributor/src/bin/reclaim-rent.rs b/tip-distributor/src/bin/reclaim-rent.rs new file mode 100644 index 0000000000..a86491f4e3 --- /dev/null +++ b/tip-distributor/src/bin/reclaim-rent.rs @@ -0,0 +1,62 @@ +//! Reclaims rent from TDAs and Claim Status accounts. + +use { + clap::Parser, + log::*, + solana_client::nonblocking::rpc_client::RpcClient, + solana_sdk::{ + commitment_config::CommitmentConfig, pubkey::Pubkey, signature::read_keypair_file, + }, + solana_tip_distributor::reclaim_rent_workflow::reclaim_rent, + std::{path::PathBuf, str::FromStr, time::Duration}, + tokio::runtime::Runtime, +}; + +#[derive(Parser, Debug)] +#[clap(author, version, about, long_about = None)] +struct Args { + /// RPC to send transactions through. + /// NOTE: This script uses getProgramAccounts, make sure you have added an account index + /// for the tip_distribution_program_id on the RPC node. + #[clap(long, env)] + rpc_url: String, + + /// Tip distribution program ID. + #[clap(long, env, value_parser = Pubkey::from_str)] + tip_distribution_program_id: Pubkey, + + /// The keypair signing and paying for transactions. + #[clap(long, env)] + keypair_path: PathBuf, + + /// High timeout b/c of get_program_accounts call + #[clap(long, env, default_value_t = 180)] + rpc_timeout_secs: u64, + + /// Specifies whether to reclaim rent on behalf of validators from respective TDAs. + #[clap(long, env)] + should_reclaim_tdas: bool, +} + +fn main() { + env_logger::init(); + + info!("Starting to reclaim rent..."); + let args: Args = Args::parse(); + + let runtime = Runtime::new().unwrap(); + if let Err(e) = runtime.block_on(reclaim_rent( + RpcClient::new_with_timeout_and_commitment( + args.rpc_url, + Duration::from_secs(args.rpc_timeout_secs), + CommitmentConfig::confirmed(), + ), + args.tip_distribution_program_id, + read_keypair_file(&args.keypair_path).expect("read keypair file"), + args.should_reclaim_tdas, + )) { + panic!("error reclaiming rent: {e:?}"); + } + + info!("done reclaiming all rent"); +} diff --git a/tip-distributor/src/bin/stake-meta-generator.rs b/tip-distributor/src/bin/stake-meta-generator.rs new file mode 100644 index 0000000000..4bdfa5bc3a --- /dev/null +++ b/tip-distributor/src/bin/stake-meta-generator.rs @@ -0,0 +1,67 @@ +//! This binary is responsible for generating a JSON file that contains meta-data about stake +//! & delegations given a ledger snapshot directory. The JSON file is structured as an array +//! of [StakeMeta] objects. + +use { + clap::Parser, + log::*, + solana_sdk::{clock::Slot, pubkey::Pubkey}, + solana_tip_distributor::{self, stake_meta_generator_workflow::generate_stake_meta}, + std::{ + fs::{self}, + path::PathBuf, + process::exit, + }, +}; + +#[derive(Parser, Debug)] +#[clap(author, version, about, long_about = None)] +struct Args { + /// Ledger path, where you created the snapshot.
+ #[clap(long, env, value_parser = Args::ledger_path_parser)] + ledger_path: PathBuf, + + /// The tip-distribution program id. + #[clap(long, env)] + tip_distribution_program_id: Pubkey, + + /// The tip-payment program id. + #[clap(long, env)] + tip_payment_program_id: Pubkey, + + /// Path to JSON file populated with the [StakeMetaCollection] object. + #[clap(long, env)] + out_path: String, + + /// The expected snapshot slot. + #[clap(long, env)] + snapshot_slot: Slot, +} + +impl Args { + fn ledger_path_parser(ledger_path: &str) -> Result { + Ok(fs::canonicalize(ledger_path).unwrap_or_else(|err| { + error!("Unable to access ledger path '{}': {}", ledger_path, err); + exit(1); + })) + } +} + +fn main() { + env_logger::init(); + info!("Starting stake-meta-generator..."); + + let args: Args = Args::parse(); + + if let Err(e) = generate_stake_meta( + &args.ledger_path, + &args.snapshot_slot, + &args.tip_distribution_program_id, + &args.out_path, + &args.tip_payment_program_id, + ) { + error!("error producing stake-meta: {:?}", e); + } else { + info!("produced stake meta"); + } +} diff --git a/tip-distributor/src/claim_mev_workflow.rs b/tip-distributor/src/claim_mev_workflow.rs new file mode 100644 index 0000000000..d6ba42899e --- /dev/null +++ b/tip-distributor/src/claim_mev_workflow.rs @@ -0,0 +1,151 @@ +use { + crate::{ + read_json_from_file, sign_and_send_transactions_with_retries, GeneratedMerkleTreeCollection, + }, + anchor_lang::{AccountDeserialize, InstructionData, ToAccountMetas}, + log::{debug, info, warn}, + solana_client::{client_error, nonblocking::rpc_client::RpcClient, rpc_request::RpcError}, + solana_program::{ + fee_calculator::DEFAULT_TARGET_LAMPORTS_PER_SIGNATURE, native_token::LAMPORTS_PER_SOL, + stake::state::StakeState, system_program, + }, + solana_sdk::{ + commitment_config::CommitmentConfig, + instruction::Instruction, + pubkey::Pubkey, + signature::{read_keypair_file, Signer}, + transaction::Transaction, + }, + std::{path::PathBuf, time::Duration}, + thiserror::Error, + tip_distribution::state::*, + tokio::runtime::Builder, +}; + +#[derive(Error, Debug)] +pub enum ClaimMevError { + #[error(transparent)] + IoError(#[from] std::io::Error), + + #[error(transparent)] + JsonError(#[from] serde_json::Error), +} + +pub fn claim_mev_tips( + merkle_root_path: &PathBuf, + rpc_url: &str, + tip_distribution_program_id: &Pubkey, + keypair_path: &PathBuf, +) -> Result<(), ClaimMevError> { + const MAX_RETRY_DURATION: Duration = Duration::from_secs(600); + + let merkle_trees: GeneratedMerkleTreeCollection = + read_json_from_file(merkle_root_path).expect("read GeneratedMerkleTreeCollection"); + let keypair = read_keypair_file(keypair_path).expect("read keypair file"); + + let tip_distribution_config = + Pubkey::find_program_address(&[Config::SEED], tip_distribution_program_id).0; + + let rpc_client = + RpcClient::new_with_commitment(rpc_url.to_string(), CommitmentConfig::finalized()); + + let runtime = Builder::new_multi_thread() + .worker_threads(16) + .enable_all() + .build() + .unwrap(); + + let mut instructions = Vec::new(); + + runtime.block_on(async move { + let start_balance = rpc_client.get_balance(&keypair.pubkey()).await.expect("failed to get balance"); + // heuristic to make sure we have enough funds to cover the rent costs if epoch has many validators + { + // most amounts are for 0 lamports. 
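The balance heuristic continued below boils down to ceiling arithmetic: budget rent plus a signature fee per non-zero claim, then round the shortfall up to whole SOL. A standalone restatement of the same formula with plain integers (the checked-math chain below is the authoritative version):

```rust
/// desired = nodes * (rent_per_claim + fee); shortfall rounded up to whole SOL.
fn sol_to_deposit(
    node_count: u64,
    min_rent_per_claim: u64,
    fee_per_sig: u64,
    start_balance: u64,
) -> u64 {
    const LAMPORTS_PER_SOL: u64 = 1_000_000_000;
    let desired = node_count * (min_rent_per_claim + fee_per_sig);
    let shortfall = desired.saturating_sub(start_balance);
    // (x + SOL - 1) / SOL rounds up to the nearest whole SOL
    (shortfall + LAMPORTS_PER_SOL - 1) / LAMPORTS_PER_SOL
}
```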
+            // most amounts are for 0 lamports. had 1736 non-zero claims out of 164742
+            let node_count = merkle_trees.generated_merkle_trees.iter().flat_map(|tree| &tree.tree_nodes).filter(|node| node.amount > 0).count();
+            let min_rent_per_claim = rpc_client.get_minimum_balance_for_rent_exemption(ClaimStatus::SIZE).await.expect("Failed to calculate min rent");
+            let desired_balance = (node_count as u64).checked_mul(min_rent_per_claim.checked_add(DEFAULT_TARGET_LAMPORTS_PER_SIGNATURE).unwrap()).unwrap();
+            if start_balance < desired_balance {
+                let sol_to_deposit = desired_balance.checked_sub(start_balance).unwrap().checked_add(LAMPORTS_PER_SOL).unwrap().checked_sub(1).unwrap().checked_div(LAMPORTS_PER_SOL).unwrap(); // rounds up to nearest sol
+                panic!("Expected to have at least {} lamports in {}, current balance is {} lamports, deposit {} SOL to continue.",
+                       desired_balance, &keypair.pubkey(), start_balance, sol_to_deposit)
+            }
+        }
+        let stake_acct_min_rent = rpc_client.get_minimum_balance_for_rent_exemption(StakeState::size_of()).await.expect("Failed to calculate min rent");
+        let mut below_min_rent_count: usize = 0;
+        let mut zero_lamports_count: usize = 0;
+        for tree in merkle_trees.generated_merkle_trees {
+            // only claim for ones that have merkle root on-chain
+            let account = rpc_client.get_account(&tree.tip_distribution_account).await.expect("expected to fetch tip distribution account");
+            let fetched_tip_distribution_account = TipDistributionAccount::try_deserialize(&mut account.data.as_slice()).expect("failed to deserialize tip_distribution_account state");
+            if fetched_tip_distribution_account.merkle_root.is_none() {
+                info!(
+                    "not claiming because merkle root isn't uploaded yet. skipped {} claimants for tda: {:?}",
+                    tree.tree_nodes.len(),
+                    tree.tip_distribution_account
+                );
+                continue;
+            }
+            for node in tree.tree_nodes {
+                if node.amount == 0 {
+                    zero_lamports_count = zero_lamports_count.checked_add(1).unwrap();
+                    continue;
+                }
+
+                // make sure not previously claimed
+                match rpc_client.get_account(&node.claim_status_pubkey).await {
+                    Ok(_) => {
+                        debug!("claim status account already exists, skipping pubkey {:?}.", node.claim_status_pubkey);
+                        continue;
+                    }
+                    // expected to not find ClaimStatus account, don't skip
+                    Err(client_error::ClientError { kind: client_error::ClientErrorKind::RpcError(RpcError::ForUser(err)), .. }) if err.starts_with("AccountNotFound") => {}
+                    Err(err) => panic!("Unexpected RPC Error: {}", err),
+                }
+
+                let current_balance = rpc_client.get_balance(&node.claimant).await.expect("Failed to get balance");
+                // some older accounts can be rent-paying
+                // any new transfers will need to make the account rent-exempt (runtime enforced)
+                if current_balance.checked_add(node.amount).unwrap() < stake_acct_min_rent {
+                    warn!("Current balance + tip claim amount of {} is less than required rent-exempt of {} for pubkey: {}. Skipping.",
+                          current_balance.checked_add(node.amount).unwrap(), stake_acct_min_rent, node.claimant);
+                    below_min_rent_count = below_min_rent_count.checked_add(1).unwrap();
+                    continue;
+                }
+                instructions.push(Instruction {
+                    program_id: *tip_distribution_program_id,
+                    data: tip_distribution::instruction::Claim {
+                        proof: node.proof.unwrap(),
+                        amount: node.amount,
+                        bump: node.claim_status_bump,
+                    }.data(),
+                    accounts: tip_distribution::accounts::Claim {
+                        config: tip_distribution_config,
+                        tip_distribution_account: tree.tip_distribution_account,
+                        claimant: node.claimant,
+                        claim_status: node.claim_status_pubkey,
+                        payer: keypair.pubkey(),
+                        system_program: system_program::id(),
+                    }.to_account_metas(None),
+                });
+            }
+        }
+
+        let transactions = instructions.into_iter().map(|ix| {
+            Transaction::new_with_payer(
+                &[ix],
+                Some(&keypair.pubkey()),
+            )
+        }).collect::<Vec<_>>();
+
+        info!("Sending {} tip claim transactions. {} tried sending zero lamports, {} would be below minimum rent",
+            &transactions.len(), zero_lamports_count, below_min_rent_count);
+
+        let failed_transactions = sign_and_send_transactions_with_retries(&keypair, &rpc_client, transactions, MAX_RETRY_DURATION).await;
+        if !failed_transactions.is_empty() {
+            panic!("failed to send {} transactions", failed_transactions.len());
+        }
+    });
+
+    Ok(())
+}
diff --git a/tip-distributor/src/lib.rs b/tip-distributor/src/lib.rs
new file mode 100644
index 0000000000..6dd0adbc58
--- /dev/null
+++ b/tip-distributor/src/lib.rs
@@ -0,0 +1,890 @@
+pub mod claim_mev_workflow;
+pub mod merkle_root_generator_workflow;
+pub mod merkle_root_upload_workflow;
+pub mod reclaim_rent_workflow;
+pub mod stake_meta_generator_workflow;
+
+use {
+    crate::{
+        merkle_root_generator_workflow::MerkleRootGeneratorError,
+        stake_meta_generator_workflow::StakeMetaGeneratorError::CheckedMathError,
+    },
+    anchor_lang::Id,
+    log::*,
+    serde::{de::DeserializeOwned, Deserialize, Serialize},
+    solana_client::{
+        client_error::{ClientError, ClientErrorKind},
+        nonblocking::rpc_client::RpcClient,
+        rpc_client::RpcClient as SyncRpcClient,
+        rpc_request::RpcRequest,
+    },
+    solana_merkle_tree::MerkleTree,
+    solana_metrics::{datapoint_error, datapoint_warn},
+    solana_sdk::{
+        account::{AccountSharedData, ReadableAccount},
+        clock::Slot,
+        hash::{Hash, Hasher},
+        pubkey::Pubkey,
+        signature::{Keypair, Signature},
+        stake_history::Epoch,
+        transaction::{Transaction, TransactionError::AlreadyProcessed},
+    },
+    std::{
+        collections::HashMap,
+        fs::File,
+        io::BufReader,
+        path::PathBuf,
+        sync::Arc,
+        time::{Duration, Instant},
+    },
+    tip_distribution::{
+        program::TipDistribution,
+        state::{ClaimStatus, TipDistributionAccount},
+    },
+    tip_payment::{
+        Config, CONFIG_ACCOUNT_SEED, TIP_ACCOUNT_SEED_0, TIP_ACCOUNT_SEED_1, TIP_ACCOUNT_SEED_2,
+        TIP_ACCOUNT_SEED_3, TIP_ACCOUNT_SEED_4, TIP_ACCOUNT_SEED_5, TIP_ACCOUNT_SEED_6,
+        TIP_ACCOUNT_SEED_7,
+    },
+    tokio::time::sleep,
+};
+
+#[derive(Deserialize, Serialize, Debug)]
+pub struct GeneratedMerkleTreeCollection {
+    pub generated_merkle_trees: Vec<GeneratedMerkleTree>,
+    pub bank_hash: String,
+    pub epoch: Epoch,
+    pub slot: Slot,
+}
+
+#[derive(Eq, Debug, Hash, PartialEq, Deserialize, Serialize)]
+pub struct GeneratedMerkleTree {
+    #[serde(with = "pubkey_string_conversion")]
+    pub tip_distribution_account: Pubkey,
+    #[serde(with = "pubkey_string_conversion")]
+    pub merkle_root_upload_authority: Pubkey,
+    pub merkle_root: Hash,
+    pub tree_nodes: Vec<TreeNode>,
+    pub max_total_claim: u64,
+    pub max_num_nodes: u64,
+}
+
+pub struct TipPaymentPubkeys {
+    config_pda: Pubkey,
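+    // The eight tip accounts owned by the tip-payment program. Tips land in these
+    // PDAs (derived from TIP_ACCOUNT_SEED_0..TIP_ACCOUNT_SEED_7 in
+    // derive_tip_payment_pubkeys below) before being transferred to the tip receiver.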
+    tip_pdas: Vec<Pubkey>,
+}
+
+fn emit_inconsistent_tree_node_amount_dp(
+    tree_nodes: &[TreeNode],
+    tip_distribution_account: &Pubkey,
+    rpc_client: &SyncRpcClient,
+) {
+    let actual_claims: u64 = tree_nodes.iter().map(|t| t.amount).sum();
+    let tda = rpc_client.get_account(tip_distribution_account).unwrap();
+    let min_rent = rpc_client
+        .get_minimum_balance_for_rent_exemption(tda.data.len())
+        .unwrap();
+
+    let expected_claims = tda.lamports.checked_sub(min_rent).unwrap();
+    if actual_claims == expected_claims {
+        return;
+    }
+
+    if actual_claims > expected_claims {
+        datapoint_error!(
+            "tip-distributor",
+            (
+                "actual_claims_exceeded",
+                format!("tip_distribution_account={tip_distribution_account},actual_claims={actual_claims}, expected_claims={expected_claims}"),
+                String
+            ),
+        );
+    } else {
+        datapoint_warn!(
+            "tip-distributor",
+            (
+                "actual_claims_below",
+                format!("tip_distribution_account={tip_distribution_account},actual_claims={actual_claims}, expected_claims={expected_claims}"),
+                String
+            ),
+        );
+    }
+}
+
+impl GeneratedMerkleTreeCollection {
+    pub fn new_from_stake_meta_collection(
+        stake_meta_coll: StakeMetaCollection,
+        maybe_rpc_client: Option<SyncRpcClient>,
+    ) -> Result<GeneratedMerkleTreeCollection, MerkleRootGeneratorError> {
+        let generated_merkle_trees = stake_meta_coll
+            .stake_metas
+            .into_iter()
+            .filter(|stake_meta| stake_meta.maybe_tip_distribution_meta.is_some())
+            .filter_map(|stake_meta| {
+                let mut tree_nodes = match TreeNode::vec_from_stake_meta(&stake_meta) {
+                    Err(e) => return Some(Err(e)),
+                    Ok(maybe_tree_nodes) => maybe_tree_nodes,
+                }?;
+
+                if let Some(rpc_client) = &maybe_rpc_client {
+                    if let Some(tda) = stake_meta.maybe_tip_distribution_meta.as_ref() {
+                        emit_inconsistent_tree_node_amount_dp(
+                            &tree_nodes[..],
+                            &tda.tip_distribution_pubkey,
+                            rpc_client,
+                        );
+                    }
+                }
+
+                let hashed_nodes: Vec<[u8; 32]> =
+                    tree_nodes.iter().map(|n| n.hash().to_bytes()).collect();
+
+                let tip_distribution_meta = stake_meta.maybe_tip_distribution_meta.unwrap();
+
+                let merkle_tree = MerkleTree::new(&hashed_nodes[..], true);
+                let max_num_nodes = tree_nodes.len() as u64;
+
+                for (i, tree_node) in tree_nodes.iter_mut().enumerate() {
+                    tree_node.proof = Some(get_proof(&merkle_tree, i));
+                }
+
+                Some(Ok(GeneratedMerkleTree {
+                    max_num_nodes,
+                    tip_distribution_account: tip_distribution_meta.tip_distribution_pubkey,
+                    merkle_root_upload_authority: tip_distribution_meta
+                        .merkle_root_upload_authority,
+                    merkle_root: *merkle_tree.get_root().unwrap(),
+                    tree_nodes,
+                    max_total_claim: tip_distribution_meta.total_tips,
+                }))
+            })
+            .collect::<Result<Vec<GeneratedMerkleTree>, MerkleRootGeneratorError>>()?;
+
+        Ok(GeneratedMerkleTreeCollection {
+            generated_merkle_trees,
+            bank_hash: stake_meta_coll.bank_hash,
+            epoch: stake_meta_coll.epoch,
+            slot: stake_meta_coll.slot,
+        })
+    }
+}
+
+pub fn get_proof(merkle_tree: &MerkleTree, i: usize) -> Vec<[u8; 32]> {
+    let mut proof = Vec::new();
+    let path = merkle_tree.find_path(i).expect("path to index");
+    for branch in path.get_proof_entries() {
+        if let Some(hash) = branch.get_left_sibling() {
+            proof.push(hash.to_bytes());
+        } else if let Some(hash) = branch.get_right_sibling() {
+            proof.push(hash.to_bytes());
+        } else {
+            panic!("expected some hash at each level of the tree");
+        }
+    }
+    proof
+}
+
+fn derive_tip_payment_pubkeys(program_id: &Pubkey) -> TipPaymentPubkeys {
+    let config_pda = Pubkey::find_program_address(&[CONFIG_ACCOUNT_SEED], program_id).0;
+    let tip_pda_0 = Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_0], program_id).0;
+    let tip_pda_1 = Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_1], program_id).0;
+    let tip_pda_2 = Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_2], program_id).0;
+    let tip_pda_3 = Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_3], program_id).0;
+    let tip_pda_4 = Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_4], program_id).0;
+    let tip_pda_5 = Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_5], program_id).0;
+    let tip_pda_6 = Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_6], program_id).0;
+    let tip_pda_7 = Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_7], program_id).0;
+
+    TipPaymentPubkeys {
+        config_pda,
+        tip_pdas: vec![
+            tip_pda_0, tip_pda_1, tip_pda_2, tip_pda_3, tip_pda_4, tip_pda_5, tip_pda_6, tip_pda_7,
+        ],
+    }
+}
+
+#[derive(Clone, Eq, Debug, Hash, PartialEq, Deserialize, Serialize)]
+pub struct TreeNode {
+    /// The stake account entitled to redeem.
+    #[serde(with = "pubkey_string_conversion")]
+    pub claimant: Pubkey,
+
+    /// Pubkey of the ClaimStatus PDA account, this account should be closed to reclaim rent.
+    #[serde(with = "pubkey_string_conversion")]
+    pub claim_status_pubkey: Pubkey,
+
+    /// Bump of the ClaimStatus PDA account
+    pub claim_status_bump: u8,
+
+    #[serde(with = "pubkey_string_conversion")]
+    pub staker_pubkey: Pubkey,
+
+    #[serde(with = "pubkey_string_conversion")]
+    pub withdrawer_pubkey: Pubkey,
+
+    /// The amount this account is entitled to.
+    pub amount: u64,
+
+    /// The proof associated with this TreeNode
+    pub proof: Option<Vec<[u8; 32]>>,
+}
+
+impl TreeNode {
+    fn vec_from_stake_meta(
+        stake_meta: &StakeMeta,
+    ) -> Result<Option<Vec<TreeNode>>, MerkleRootGeneratorError> {
+        if let Some(tip_distribution_meta) = stake_meta.maybe_tip_distribution_meta.as_ref() {
+            let validator_amount = (tip_distribution_meta.total_tips as u128)
+                .checked_mul(tip_distribution_meta.validator_fee_bps as u128)
+                .unwrap()
+                .checked_div(10_000)
+                .unwrap() as u64;
+            let (claim_status_pubkey, claim_status_bump) = Pubkey::find_program_address(
+                &[
+                    ClaimStatus::SEED,
+                    &stake_meta.validator_vote_account.to_bytes(),
+                    &tip_distribution_meta.tip_distribution_pubkey.to_bytes(),
+                ],
+                &TipDistribution::id(),
+            );
+            let mut tree_nodes = vec![TreeNode {
+                claimant: stake_meta.validator_vote_account,
+                claim_status_pubkey,
+                claim_status_bump,
+                staker_pubkey: Pubkey::default(),
+                withdrawer_pubkey: Pubkey::default(),
+                amount: validator_amount,
+                proof: None,
+            }];
+
+            let remaining_total_rewards = tip_distribution_meta
+                .total_tips
+                .checked_sub(validator_amount)
+                .unwrap() as u128;
+
+            let total_delegated = stake_meta.total_delegated as u128;
+            tree_nodes.extend(
+                stake_meta
+                    .delegations
+                    .iter()
+                    .map(|delegation| {
+                        let amount_delegated = delegation.lamports_delegated as u128;
+                        let reward_amount = (amount_delegated.checked_mul(remaining_total_rewards))
+                            .unwrap()
+                            .checked_div(total_delegated)
+                            .unwrap();
+                        let (claim_status_pubkey, claim_status_bump) = Pubkey::find_program_address(
+                            &[
+                                ClaimStatus::SEED,
+                                &delegation.stake_account_pubkey.to_bytes(),
+                                &tip_distribution_meta.tip_distribution_pubkey.to_bytes(),
+                            ],
+                            &TipDistribution::id(),
+                        );
+                        Ok(TreeNode {
+                            claimant: delegation.stake_account_pubkey,
+                            claim_status_pubkey,
+                            claim_status_bump,
+                            staker_pubkey: delegation.staker_pubkey,
+                            withdrawer_pubkey: delegation.withdrawer_pubkey,
+                            amount: reward_amount as u64,
+                            proof: None,
+                        })
+                    })
+                    .collect::<Result<Vec<TreeNode>, MerkleRootGeneratorError>>()?,
+            );
+
+            Ok(Some(tree_nodes))
+        } else {
+            Ok(None)
+        }
+    }
+
+    fn hash(&self) -> Hash {
+        let mut hasher = Hasher::default();
+        hasher.hash(self.claimant.as_ref());
+        hasher.hash(self.amount.to_le_bytes().as_ref());
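+        // Each leaf commits only to (claimant, amount). Verifiers hash the leaf once
+        // more with a 0u8 prefix (see the hashv calls in test_merkle_tree_verify
+        // below), which separates leaf hashes from intermediate-node hashes.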
+        hasher.result()
+    }
+}
+
+#[derive(Clone, Debug, Deserialize, Serialize)]
+pub struct StakeMetaCollection {
+    /// List of [StakeMeta].
+    pub stake_metas: Vec<StakeMeta>,
+
+    /// base58 encoded tip-distribution program id.
+    #[serde(with = "pubkey_string_conversion")]
+    pub tip_distribution_program_id: Pubkey,
+
+    /// Base58 encoded bank hash this object was generated at.
+    pub bank_hash: String,
+
+    /// Epoch for which this object was generated.
+    pub epoch: Epoch,
+
+    /// Slot at which this object was generated.
+    pub slot: Slot,
+}
+
+#[derive(Clone, Deserialize, Serialize, Debug, PartialEq, Eq)]
+pub struct StakeMeta {
+    #[serde(with = "pubkey_string_conversion")]
+    pub validator_vote_account: Pubkey,
+
+    #[serde(with = "pubkey_string_conversion")]
+    pub validator_node_pubkey: Pubkey,
+
+    /// The validator's tip-distribution meta if it exists.
+    pub maybe_tip_distribution_meta: Option<TipDistributionMeta>,
+
+    /// Delegations to this validator.
+    pub delegations: Vec<Delegation>,
+
+    /// The total amount of delegations to the validator.
+    pub total_delegated: u64,
+
+    /// The validator's delegation commission rate as a percentage between 0-100.
+    pub commission: u8,
+}
+
+impl Ord for StakeMeta {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        self.validator_vote_account
+            .cmp(&other.validator_vote_account)
+    }
+}
+
+impl PartialOrd for StakeMeta {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+#[derive(Clone, Deserialize, Serialize, Debug, PartialEq, Eq)]
+pub struct TipDistributionMeta {
+    #[serde(with = "pubkey_string_conversion")]
+    pub merkle_root_upload_authority: Pubkey,
+
+    #[serde(with = "pubkey_string_conversion")]
+    pub tip_distribution_pubkey: Pubkey,
+
+    /// The validator's total tips in the [TipDistributionAccount].
+    pub total_tips: u64,
+
+    /// The validator's cut of tips from [TipDistributionAccount], calculated from the on-chain
+    /// commission fee bps.
+    pub validator_fee_bps: u16,
+}
+
+impl TipDistributionMeta {
+    fn from_tda_wrapper(
+        tda_wrapper: TipDistributionAccountWrapper,
+        // The amount that will be left remaining in the tda to maintain rent exemption status.
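+        // Illustrative only (made-up numbers): a TDA holding 1_000_000_000 lamports
+        // with a rent-exempt minimum of 2_039_280 lamports yields
+        // total_tips = 1_000_000_000 - 2_039_280 = 997_960_720.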
+        rent_exempt_amount: u64,
+    ) -> Result<Self, crate::stake_meta_generator_workflow::StakeMetaGeneratorError> {
+        Ok(TipDistributionMeta {
+            tip_distribution_pubkey: tda_wrapper.tip_distribution_pubkey,
+            total_tips: tda_wrapper
+                .account_data
+                .lamports()
+                .checked_sub(rent_exempt_amount)
+                .ok_or(CheckedMathError)?,
+            validator_fee_bps: tda_wrapper
+                .tip_distribution_account
+                .validator_commission_bps,
+            merkle_root_upload_authority: tda_wrapper
+                .tip_distribution_account
+                .merkle_root_upload_authority,
+        })
+    }
+}
+
+#[derive(Clone, Deserialize, Serialize, Debug, PartialEq, Eq)]
+pub struct Delegation {
+    #[serde(with = "pubkey_string_conversion")]
+    pub stake_account_pubkey: Pubkey,
+
+    #[serde(with = "pubkey_string_conversion")]
+    pub staker_pubkey: Pubkey,
+
+    #[serde(with = "pubkey_string_conversion")]
+    pub withdrawer_pubkey: Pubkey,
+
+    /// Lamports delegated by the stake account
+    pub lamports_delegated: u64,
+}
+
+impl Ord for Delegation {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        (
+            self.stake_account_pubkey,
+            self.withdrawer_pubkey,
+            self.staker_pubkey,
+            self.lamports_delegated,
+        )
+            .cmp(&(
+                other.stake_account_pubkey,
+                other.withdrawer_pubkey,
+                other.staker_pubkey,
+                other.lamports_delegated,
+            ))
+    }
+}
+
+impl PartialOrd for Delegation {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+/// Convenience wrapper around [TipDistributionAccount]
+pub struct TipDistributionAccountWrapper {
+    pub tip_distribution_account: TipDistributionAccount,
+    pub account_data: AccountSharedData,
+    pub tip_distribution_pubkey: Pubkey,
+}
+
+// TODO: move to program's sdk
+pub fn derive_tip_distribution_account_address(
+    tip_distribution_program_id: &Pubkey,
+    vote_pubkey: &Pubkey,
+    epoch: Epoch,
+) -> (Pubkey, u8) {
+    Pubkey::find_program_address(
+        &[
+            TipDistributionAccount::SEED,
+            vote_pubkey.to_bytes().as_ref(),
+            epoch.to_le_bytes().as_ref(),
+        ],
+        tip_distribution_program_id,
+    )
+}
+
+pub async fn sign_and_send_transactions_with_retries(
+    signer: &Keypair,
+    rpc_client: &RpcClient,
+    transactions: Vec<Transaction>,
+    max_retry_duration: Duration,
+) -> HashMap<Signature, ClientError> {
+    use tokio::sync::Semaphore;
+    const MAX_CONCURRENT_RPC_CALLS: usize = 50;
+    let semaphore = Arc::new(Semaphore::new(MAX_CONCURRENT_RPC_CALLS));
+
+    let mut errors = HashMap::default();
+    let mut blockhash = rpc_client
+        .get_latest_blockhash()
+        .await
+        .expect("fetch latest blockhash");
+
+    let mut signatures_to_transactions = transactions
+        .into_iter()
+        .map(|mut tx| {
+            tx.sign(&[signer], blockhash);
+            (tx.signatures[0], tx)
+        })
+        .collect::<HashMap<Signature, Transaction>>();
+
+    let start = Instant::now();
+    while start.elapsed() < max_retry_duration && !signatures_to_transactions.is_empty() {
+        if start.elapsed() > Duration::from_secs(60) {
+            blockhash = rpc_client
+                .get_latest_blockhash()
+                .await
+                .expect("fetch latest blockhash");
+            signatures_to_transactions
+                .iter_mut()
+                .for_each(|(_sig, tx)| {
+                    *tx = Transaction::new_unsigned(tx.message.clone());
+                    tx.sign(&[signer], blockhash);
+                });
+        }
+
+        let futs = signatures_to_transactions.iter().map(|(sig, tx)| {
+            let semaphore = semaphore.clone();
+            async move {
+                let permit = semaphore.clone().acquire_owned().await.unwrap();
+                let res = match rpc_client.send_transaction(tx).await {
+                    Ok(_sig) => {
+                        info!("sent transaction: {_sig:?}");
+                        drop(permit);
+                        sleep(Duration::from_secs(10)).await;
+
+                        let _permit = semaphore.acquire_owned().await.unwrap();
+                        match rpc_client.confirm_transaction(sig).await {
+                            Ok(true) => Ok(()),
+                            Ok(false) => Err(ClientError::new_with_request(
+                                ClientErrorKind::Custom(
"transaction failed to confirm".to_string(), + ), + RpcRequest::SendTransaction, + )), + Err(e) => Err(e), + } + } + Err(e) => Err(e), + }; + + let res = res + .err() + .map(|e| { + if let ClientErrorKind::TransactionError(AlreadyProcessed) = e.kind { + Ok(()) + } else { + error!("error sending transaction {sig:?} error: {e:?}"); + Err(e) + } + }) + .unwrap_or(Ok(())); + + (*sig, res) + } + }); + + errors = futures::future::join_all(futs) + .await + .into_iter() + .filter(|(sig, result)| { + if result.is_err() { + true + } else { + let _ = signatures_to_transactions.remove(sig); + false + } + }) + .map(|(sig, result)| { + let e = result.err().unwrap(); + warn!("error sending transaction: [error={e}, signature={sig}]"); + (sig, e) + }) + .collect::>(); + } + + errors +} + +mod pubkey_string_conversion { + use { + serde::{self, Deserialize, Deserializer, Serializer}, + solana_sdk::pubkey::Pubkey, + std::str::FromStr, + }; + + pub(crate) fn serialize(pubkey: &Pubkey, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&pubkey.to_string()) + } + + pub(crate) fn deserialize<'de, D>(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let s = String::deserialize(deserializer)?; + Pubkey::from_str(&s).map_err(serde::de::Error::custom) + } +} + +pub(crate) fn read_json_from_file(path: &PathBuf) -> serde_json::Result +where + T: DeserializeOwned, +{ + let file = File::open(path).unwrap(); + let reader = BufReader::new(file); + serde_json::from_reader(reader) +} + +#[cfg(test)] +mod tests { + use {super::*, tip_distribution::merkle_proof}; + + #[test] + fn test_merkle_tree_verify() { + // Create the merkle tree and proofs + let tda = Pubkey::new_unique(); + let (acct_0, acct_1) = (Pubkey::new_unique(), Pubkey::new_unique()); + let claim_statuses = &[(acct_0, tda), (acct_1, tda)] + .iter() + .map(|(claimant, tda)| { + Pubkey::find_program_address( + &[ClaimStatus::SEED, &claimant.to_bytes(), &tda.to_bytes()], + &TipDistribution::id(), + ) + }) + .collect::>(); + let tree_nodes = vec![ + TreeNode { + claimant: acct_0, + claim_status_pubkey: claim_statuses[0].0, + claim_status_bump: claim_statuses[0].1, + staker_pubkey: Pubkey::default(), + withdrawer_pubkey: Pubkey::default(), + amount: 151_507, + proof: None, + }, + TreeNode { + claimant: acct_1, + claim_status_pubkey: claim_statuses[1].0, + claim_status_bump: claim_statuses[1].1, + staker_pubkey: Pubkey::default(), + withdrawer_pubkey: Pubkey::default(), + amount: 176_624, + proof: None, + }, + ]; + + // First the nodes are hashed and merkle tree constructed + let hashed_nodes: Vec<[u8; 32]> = tree_nodes.iter().map(|n| n.hash().to_bytes()).collect(); + let mk = MerkleTree::new(&hashed_nodes[..], true); + let root = mk.get_root().expect("to have valid root").to_bytes(); + + // verify first node + let node = solana_program::hash::hashv(&[&[0u8], &hashed_nodes[0]]); + let proof = get_proof(&mk, 0); + assert!(merkle_proof::verify(proof, root, node.to_bytes())); + + // verify second node + let node = solana_program::hash::hashv(&[&[0u8], &hashed_nodes[1]]); + let proof = get_proof(&mk, 1); + assert!(merkle_proof::verify(proof, root, node.to_bytes())); + } + + #[test] + fn test_new_from_stake_meta_collection_happy_path() { + let merkle_root_upload_authority = Pubkey::new_unique(); + + let (tda_0, tda_1) = (Pubkey::new_unique(), Pubkey::new_unique()); + + let stake_account_0 = Pubkey::new_unique(); + let stake_account_1 = Pubkey::new_unique(); + let stake_account_2 = Pubkey::new_unique(); + let stake_account_3 = 
Pubkey::new_unique(); + + let staker_account_0 = Pubkey::new_unique(); + let staker_account_1 = Pubkey::new_unique(); + let staker_account_2 = Pubkey::new_unique(); + let staker_account_3 = Pubkey::new_unique(); + + let validator_vote_account_0 = Pubkey::new_unique(); + let validator_vote_account_1 = Pubkey::new_unique(); + + let validator_id_0 = Pubkey::new_unique(); + let validator_id_1 = Pubkey::new_unique(); + + let stake_meta_collection = StakeMetaCollection { + stake_metas: vec![ + StakeMeta { + validator_vote_account: validator_vote_account_0, + validator_node_pubkey: validator_id_0, + maybe_tip_distribution_meta: Some(TipDistributionMeta { + merkle_root_upload_authority, + tip_distribution_pubkey: tda_0, + total_tips: 1_900_122_111_000, + validator_fee_bps: 100, + }), + delegations: vec![ + Delegation { + stake_account_pubkey: stake_account_0, + staker_pubkey: staker_account_0, + withdrawer_pubkey: staker_account_0, + lamports_delegated: 123_999_123_555, + }, + Delegation { + stake_account_pubkey: stake_account_1, + staker_pubkey: staker_account_1, + withdrawer_pubkey: staker_account_1, + lamports_delegated: 144_555_444_556, + }, + ], + total_delegated: 1_555_123_000_333_454_000, + commission: 100, + }, + StakeMeta { + validator_vote_account: validator_vote_account_1, + validator_node_pubkey: validator_id_1, + maybe_tip_distribution_meta: Some(TipDistributionMeta { + merkle_root_upload_authority, + tip_distribution_pubkey: tda_1, + total_tips: 1_900_122_111_333, + validator_fee_bps: 200, + }), + delegations: vec![ + Delegation { + stake_account_pubkey: stake_account_2, + staker_pubkey: staker_account_2, + withdrawer_pubkey: staker_account_2, + lamports_delegated: 224_555_444, + }, + Delegation { + stake_account_pubkey: stake_account_3, + staker_pubkey: staker_account_3, + withdrawer_pubkey: staker_account_3, + lamports_delegated: 700_888_944_555, + }, + ], + total_delegated: 2_565_318_909_444_123, + commission: 10, + }, + ], + tip_distribution_program_id: Pubkey::new_unique(), + bank_hash: Hash::new_unique().to_string(), + epoch: 100, + slot: 2_000_000, + }; + + let merkle_tree_collection = GeneratedMerkleTreeCollection::new_from_stake_meta_collection( + stake_meta_collection.clone(), + None, + ) + .unwrap(); + + assert_eq!(stake_meta_collection.epoch, merkle_tree_collection.epoch); + assert_eq!( + stake_meta_collection.bank_hash, + merkle_tree_collection.bank_hash + ); + assert_eq!(stake_meta_collection.slot, merkle_tree_collection.slot); + assert_eq!( + stake_meta_collection.stake_metas.len(), + merkle_tree_collection.generated_merkle_trees.len() + ); + let claim_statuses = &[ + (validator_vote_account_0, tda_0), + (stake_account_0, tda_0), + (stake_account_1, tda_0), + (validator_vote_account_1, tda_1), + (stake_account_2, tda_1), + (stake_account_3, tda_1), + ] + .iter() + .map(|(claimant, tda)| { + Pubkey::find_program_address( + &[ClaimStatus::SEED, &claimant.to_bytes(), &tda.to_bytes()], + &TipDistribution::id(), + ) + }) + .collect::>(); + let tree_nodes = vec![ + TreeNode { + claimant: validator_vote_account_0, + claim_status_pubkey: claim_statuses[0].0, + claim_status_bump: claim_statuses[0].1, + staker_pubkey: Pubkey::default(), + withdrawer_pubkey: Pubkey::default(), + amount: 19_001_221_110, + proof: None, + }, + TreeNode { + claimant: stake_account_0, + claim_status_pubkey: claim_statuses[1].0, + claim_status_bump: claim_statuses[1].1, + staker_pubkey: Pubkey::default(), + withdrawer_pubkey: Pubkey::default(), + amount: 149_992, + proof: None, + }, + TreeNode { + 
claimant: stake_account_1, + claim_status_pubkey: claim_statuses[2].0, + claim_status_bump: claim_statuses[2].1, + staker_pubkey: Pubkey::default(), + withdrawer_pubkey: Pubkey::default(), + amount: 174_858, + proof: None, + }, + ]; + let hashed_nodes: Vec<[u8; 32]> = tree_nodes.iter().map(|n| n.hash().to_bytes()).collect(); + let merkle_tree = MerkleTree::new(&hashed_nodes[..], true); + let gmt_0 = GeneratedMerkleTree { + tip_distribution_account: tda_0, + merkle_root_upload_authority, + merkle_root: *merkle_tree.get_root().unwrap(), + tree_nodes, + max_total_claim: stake_meta_collection.stake_metas[0] + .clone() + .maybe_tip_distribution_meta + .unwrap() + .total_tips, + max_num_nodes: 3, + }; + + let tree_nodes = vec![ + TreeNode { + claimant: validator_vote_account_1, + claim_status_pubkey: claim_statuses[3].0, + claim_status_bump: claim_statuses[3].1, + staker_pubkey: Pubkey::default(), + withdrawer_pubkey: Pubkey::default(), + amount: 38_002_442_226, + proof: None, + }, + TreeNode { + claimant: stake_account_2, + claim_status_pubkey: claim_statuses[4].0, + claim_status_bump: claim_statuses[4].1, + staker_pubkey: Pubkey::default(), + withdrawer_pubkey: Pubkey::default(), + amount: 163_000, + proof: None, + }, + TreeNode { + claimant: stake_account_3, + claim_status_pubkey: claim_statuses[5].0, + claim_status_bump: claim_statuses[5].1, + staker_pubkey: Pubkey::default(), + withdrawer_pubkey: Pubkey::default(), + amount: 508_762_900, + proof: None, + }, + ]; + let hashed_nodes: Vec<[u8; 32]> = tree_nodes.iter().map(|n| n.hash().to_bytes()).collect(); + let merkle_tree = MerkleTree::new(&hashed_nodes[..], true); + let gmt_1 = GeneratedMerkleTree { + tip_distribution_account: tda_1, + merkle_root_upload_authority, + merkle_root: *merkle_tree.get_root().unwrap(), + tree_nodes, + max_total_claim: stake_meta_collection.stake_metas[1] + .clone() + .maybe_tip_distribution_meta + .unwrap() + .total_tips, + max_num_nodes: 3, + }; + + let expected_generated_merkle_trees = vec![gmt_0, gmt_1]; + let actual_generated_merkle_trees = merkle_tree_collection.generated_merkle_trees; + + expected_generated_merkle_trees + .iter() + .for_each(|expected_gmt| { + let actual_gmt = actual_generated_merkle_trees + .iter() + .find(|gmt| { + gmt.tip_distribution_account == expected_gmt.tip_distribution_account + }) + .unwrap(); + + assert_eq!(expected_gmt.max_num_nodes, actual_gmt.max_num_nodes); + assert_eq!(expected_gmt.max_total_claim, actual_gmt.max_total_claim); + assert_eq!( + expected_gmt.tip_distribution_account, + actual_gmt.tip_distribution_account + ); + assert_eq!(expected_gmt.tree_nodes.len(), actual_gmt.tree_nodes.len()); + expected_gmt + .tree_nodes + .iter() + .for_each(|expected_tree_node| { + let actual_tree_node = actual_gmt + .tree_nodes + .iter() + .find(|tree_node| tree_node.claimant == expected_tree_node.claimant) + .unwrap(); + assert_eq!(expected_tree_node.amount, actual_tree_node.amount); + }); + assert_eq!(expected_gmt.merkle_root, actual_gmt.merkle_root); + }); + } +} diff --git a/tip-distributor/src/merkle_root_generator_workflow.rs b/tip-distributor/src/merkle_root_generator_workflow.rs new file mode 100644 index 0000000000..7d9c51ee99 --- /dev/null +++ b/tip-distributor/src/merkle_root_generator_workflow.rs @@ -0,0 +1,54 @@ +use { + crate::{read_json_from_file, GeneratedMerkleTreeCollection, StakeMetaCollection}, + log::*, + solana_client::rpc_client::RpcClient, + std::{ + fmt::Debug, + fs::File, + io::{BufWriter, Write}, + path::PathBuf, + }, + thiserror::Error, +}; + 
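+// Expected call pattern, as a sketch (paths and URL are placeholders):
+//
+//     let stake_meta_path = PathBuf::from("stake-meta.json");
+//     let out_path = PathBuf::from("merkle-tree.json");
+//     generate_merkle_root(&stake_meta_path, &out_path, "http://localhost:8899")?;
+//
+// This reads a StakeMetaCollection JSON file, builds one merkle tree per validator
+// that has a tip distribution account, and writes the resulting
+// GeneratedMerkleTreeCollection as JSON to `out_path`.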
+#[derive(Error, Debug)] +pub enum MerkleRootGeneratorError { + #[error(transparent)] + IoError(#[from] std::io::Error), + + #[error(transparent)] + RpcError(#[from] Box), + + #[error(transparent)] + SerdeJsonError(#[from] serde_json::Error), +} + +pub fn generate_merkle_root( + stake_meta_coll_path: &PathBuf, + out_path: &PathBuf, + rpc_url: &str, +) -> Result<(), MerkleRootGeneratorError> { + let stake_meta_coll: StakeMetaCollection = read_json_from_file(stake_meta_coll_path)?; + + let rpc_client = RpcClient::new(rpc_url); + let merkle_tree_coll = GeneratedMerkleTreeCollection::new_from_stake_meta_collection( + stake_meta_coll, + Some(rpc_client), + )?; + + write_to_json_file(&merkle_tree_coll, out_path)?; + Ok(()) +} + +fn write_to_json_file( + merkle_tree_coll: &GeneratedMerkleTreeCollection, + file_path: &PathBuf, +) -> Result<(), MerkleRootGeneratorError> { + let file = File::create(file_path)?; + let mut writer = BufWriter::new(file); + let json = serde_json::to_string_pretty(&merkle_tree_coll).unwrap(); + let _ = writer.write_all(json.as_bytes())?; + writer.flush()?; + + Ok(()) +} diff --git a/tip-distributor/src/merkle_root_upload_workflow.rs b/tip-distributor/src/merkle_root_upload_workflow.rs new file mode 100644 index 0000000000..14ce93b5cc --- /dev/null +++ b/tip-distributor/src/merkle_root_upload_workflow.rs @@ -0,0 +1,134 @@ +use { + crate::{ + read_json_from_file, sign_and_send_transactions_with_retries, GeneratedMerkleTree, + GeneratedMerkleTreeCollection, + }, + anchor_lang::AccountDeserialize, + log::{error, info}, + solana_client::nonblocking::rpc_client::RpcClient, + solana_program::{ + fee_calculator::DEFAULT_TARGET_LAMPORTS_PER_SIGNATURE, native_token::LAMPORTS_PER_SOL, + }, + solana_sdk::{ + commitment_config::CommitmentConfig, + pubkey::Pubkey, + signature::{read_keypair_file, Signer}, + transaction::Transaction, + }, + std::{path::PathBuf, time::Duration}, + thiserror::Error, + tip_distribution::{ + sdk::instruction::{upload_merkle_root_ix, UploadMerkleRootAccounts, UploadMerkleRootArgs}, + state::{Config, TipDistributionAccount}, + }, + tokio::runtime::Builder, +}; + +#[derive(Error, Debug)] +pub enum MerkleRootUploadError { + #[error(transparent)] + IoError(#[from] std::io::Error), + + #[error(transparent)] + JsonError(#[from] serde_json::Error), +} + +pub fn upload_merkle_root( + merkle_root_path: &PathBuf, + keypair_path: &PathBuf, + rpc_url: &str, + tip_distribution_program_id: &Pubkey, +) -> Result<(), MerkleRootUploadError> { + const MAX_RETRY_DURATION: Duration = Duration::from_secs(600); + + let merkle_tree: GeneratedMerkleTreeCollection = + read_json_from_file(merkle_root_path).expect("read GeneratedMerkleTreeCollection"); + let keypair = read_keypair_file(keypair_path).expect("read keypair file"); + + let tip_distribution_config = + Pubkey::find_program_address(&[Config::SEED], tip_distribution_program_id).0; + + let runtime = Builder::new_multi_thread() + .worker_threads(16) + .enable_all() + .build() + .expect("build runtime"); + + runtime.block_on(async move { + let rpc_client = + RpcClient::new_with_commitment(rpc_url.to_string(), CommitmentConfig::confirmed()); + let trees: Vec = merkle_tree + .generated_merkle_trees + .into_iter() + .filter(|tree| tree.merkle_root_upload_authority == keypair.pubkey()) + .collect(); + + info!("num trees to upload: {:?}", trees.len()); + + // heuristic to make sure we have enough funds to cover execution, assumes all trees need updating + { + let initial_balance = 
rpc_client.get_balance(&keypair.pubkey()).await.expect("failed to get balance"); + let desired_balance = (trees.len() as u64).checked_mul(DEFAULT_TARGET_LAMPORTS_PER_SIGNATURE).unwrap(); + if initial_balance < desired_balance { + let sol_to_deposit = desired_balance.checked_sub(initial_balance).unwrap().checked_add(LAMPORTS_PER_SOL).unwrap().checked_sub(1).unwrap().checked_div(LAMPORTS_PER_SOL).unwrap(); // rounds up to nearest sol + panic!("Expected to have at least {} lamports in {}, current balance is {} lamports, deposit {} SOL to continue.", + desired_balance, &keypair.pubkey(), initial_balance, sol_to_deposit) + } + } + let mut trees_needing_update: Vec = vec![]; + for tree in trees { + let account = rpc_client + .get_account(&tree.tip_distribution_account) + .await + .expect("fetch expect"); + + let mut data = account.data.as_slice(); + let fetched_tip_distribution_account = + TipDistributionAccount::try_deserialize(&mut data) + .expect("failed to deserialize tip_distribution_account state"); + + let needs_upload = match fetched_tip_distribution_account.merkle_root { + Some(merkle_root) => { + merkle_root.total_funds_claimed == 0 + && merkle_root.root != tree.merkle_root.to_bytes() + } + None => true, + }; + + if needs_upload { + trees_needing_update.push(tree); + } + } + + info!("num trees need uploading: {:?}", trees_needing_update.len()); + + let transactions: Vec = trees_needing_update + .iter() + .map(|tree| { + let ix = upload_merkle_root_ix( + *tip_distribution_program_id, + UploadMerkleRootArgs { + root: tree.merkle_root.to_bytes(), + max_total_claim: tree.max_total_claim, + max_num_nodes: tree.max_num_nodes, + }, + UploadMerkleRootAccounts { + config: tip_distribution_config, + merkle_root_upload_authority: keypair.pubkey(), + tip_distribution_account: tree.tip_distribution_account, + }, + ); + Transaction::new_with_payer( + &[ix], + Some(&keypair.pubkey()), + ) + }) + .collect(); + let failed_transactions = sign_and_send_transactions_with_retries(&keypair, &rpc_client, transactions, MAX_RETRY_DURATION).await; + if !failed_transactions.is_empty() { + panic!("failed to send {} transactions", failed_transactions.len()); + } + }); + + Ok(()) +} diff --git a/tip-distributor/src/reclaim_rent_workflow.rs b/tip-distributor/src/reclaim_rent_workflow.rs new file mode 100644 index 0000000000..4d39bf8d5a --- /dev/null +++ b/tip-distributor/src/reclaim_rent_workflow.rs @@ -0,0 +1,168 @@ +use { + crate::sign_and_send_transactions_with_retries, + anchor_lang::AccountDeserialize, + log::info, + solana_client::nonblocking::rpc_client::RpcClient, + solana_program::pubkey::Pubkey, + solana_sdk::{ + signature::{Keypair, Signer}, + transaction::Transaction, + }, + std::{ + error::Error, + time::{Duration, Instant}, + }, + tip_distribution::{ + sdk::{ + derive_config_account_address, + instruction::{ + close_claim_status_ix, close_tip_distribution_account_ix, CloseClaimStatusAccounts, + CloseClaimStatusArgs, CloseTipDistributionAccountArgs, + CloseTipDistributionAccounts, + }, + }, + state::{ClaimStatus, Config, TipDistributionAccount}, + }, +}; + +pub async fn reclaim_rent( + rpc_client: RpcClient, + tip_distribution_program_id: Pubkey, + signer: Keypair, + // Optionally reclaim TipDistributionAccount rents on behalf of validators. 
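+    // When set, TipDistributionAccounts whose expires_at epoch has passed are closed
+    // as well (see the `should_reclaim_tdas` branch below); ClaimStatus accounts paid
+    // for by this signer are reclaimed regardless of this flag.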
+ should_reclaim_tdas: bool, +) -> Result<(), Box> { + info!("fetching program accounts..."); + let now = Instant::now(); + let accounts = rpc_client + .get_program_accounts(&tip_distribution_program_id) + .await?; + info!( + "get_program_accounts took {}ms and fetched {} accounts", + now.elapsed().as_millis(), + accounts.len() + ); + + info!("fetching current_epoch..."); + let current_epoch = rpc_client.get_epoch_info().await?.epoch; + info!("current_epoch: {current_epoch}"); + + info!("fetching config_account..."); + let now = Instant::now(); + let config_pubkey = derive_config_account_address(&tip_distribution_program_id).0; + let config_account = rpc_client.get_account(&config_pubkey).await?; + let config_account: Config = Config::try_deserialize(&mut config_account.data.as_slice())?; + info!("fetch config_account took {}ms", now.elapsed().as_millis()); + + info!("filtering for claim_status accounts"); + let claim_status_accounts: Vec<(Pubkey, ClaimStatus)> = accounts + .iter() + .filter_map(|(pubkey, account)| { + let claim_status = ClaimStatus::try_deserialize(&mut account.data.as_slice()).ok()?; + Some((*pubkey, claim_status)) + }) + .filter(|(_, claim_status): &(Pubkey, ClaimStatus)| { + // Only return claim statuses that we've paid for and ones that are expired to avoid transaction failures. + claim_status.claim_status_payer == signer.pubkey() + && current_epoch > claim_status.expires_at + }) + .collect::>(); + info!( + "{} claim_status accounts eligible for rent reclaim", + claim_status_accounts.len() + ); + + info!("fetching recent_blockhash"); + let now = Instant::now(); + let recent_blockhash = rpc_client.get_latest_blockhash().await?; + info!( + "fetch recent_blockhash took {}ms, hash={recent_blockhash:?}", + now.elapsed().as_millis() + ); + + info!("creating close_claim_status_account transactions"); + let now = Instant::now(); + let mut transactions = claim_status_accounts + .into_iter() + .map(|(claim_status_pubkey, claim_status)| { + close_claim_status_ix( + tip_distribution_program_id, + CloseClaimStatusArgs, + CloseClaimStatusAccounts { + config: config_pubkey, + claim_status: claim_status_pubkey, + claim_status_payer: claim_status.claim_status_payer, + }, + ) + }) + .collect::>() + .chunks(4) + .into_iter() + .map(|instructions| { + Transaction::new_signed_with_payer( + instructions, + Some(&signer.pubkey()), + &[&signer], + recent_blockhash, + ) + }) + .collect::>(); + + info!( + "create close_claim_status_account transactions took {}us", + now.elapsed().as_micros() + ); + + if should_reclaim_tdas { + let tip_distribution_accounts = accounts + .into_iter() + .filter_map(|(pubkey, account)| { + let tda = + TipDistributionAccount::try_deserialize(&mut account.data.as_slice()).ok()?; + Some((pubkey, tda)) + }) + .filter(|(_, tda): &(Pubkey, TipDistributionAccount)| current_epoch > tda.expires_at); + + info!("creating close_tip_distribution_account transactions"); + let now = Instant::now(); + let close_tda_txs = tip_distribution_accounts + .map( + |(tip_distribution_account, tda): (Pubkey, TipDistributionAccount)| { + close_tip_distribution_account_ix( + tip_distribution_program_id, + CloseTipDistributionAccountArgs { + _epoch: tda.epoch_created_at, + }, + CloseTipDistributionAccounts { + config: config_pubkey, + tip_distribution_account, + validator_vote_account: tda.validator_vote_account, + expired_funds_account: config_account.expired_funds_account, + signer: signer.pubkey(), + }, + ) + }, + ) + .collect::>() + .chunks(4) + .map(|instructions| 
Transaction::new_with_payer(instructions, Some(&signer.pubkey()))) + .collect::>(); + info!("create close_tip_distribution_account transactions took {}us, closing {} tip distribution accounts", now.elapsed().as_micros(), close_tda_txs.len()); + + transactions.extend(close_tda_txs); + } + + info!("sending {} transactions", transactions.len()); + let failed_txs = sign_and_send_transactions_with_retries( + &signer, + &rpc_client, + transactions, + Duration::from_secs(300), + ) + .await; + if !failed_txs.is_empty() { + panic!("failed to send {} transactions", failed_txs.len()); + } + + Ok(()) +} diff --git a/tip-distributor/src/stake_meta_generator_workflow.rs b/tip-distributor/src/stake_meta_generator_workflow.rs new file mode 100644 index 0000000000..35bfaedde3 --- /dev/null +++ b/tip-distributor/src/stake_meta_generator_workflow.rs @@ -0,0 +1,951 @@ +use { + crate::{ + derive_tip_distribution_account_address, derive_tip_payment_pubkeys, Config, StakeMeta, + StakeMetaCollection, TipDistributionAccount, TipDistributionAccountWrapper, + TipDistributionMeta, + }, + anchor_lang::AccountDeserialize, + itertools::Itertools, + log::*, + solana_client::client_error::ClientError, + solana_ledger::{ + bank_forks_utils, + blockstore::BlockstoreError, + blockstore_processor::{BlockstoreProcessorError, ProcessOptions}, + }, + solana_runtime::{ + bank::Bank, + hardened_unpack::{open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE}, + snapshot_config::SnapshotConfig, + stakes::StakeAccount, + vote_account::VoteAccount, + }, + solana_sdk::{ + account::{ReadableAccount, WritableAccount}, + clock::Slot, + pubkey::Pubkey, + }, + std::{ + collections::HashMap, + fmt::{Debug, Display, Formatter}, + fs::File, + io::{BufWriter, Write}, + mem::size_of, + path::{Path, PathBuf}, + sync::Arc, + }, + thiserror::Error, +}; + +#[derive(Error, Debug)] +pub enum StakeMetaGeneratorError { + #[error(transparent)] + AnchorError(#[from] anchor_lang::error::Error), + + #[error(transparent)] + BlockstoreError(#[from] BlockstoreError), + + #[error(transparent)] + BlockstoreProcessorError(#[from] BlockstoreProcessorError), + + #[error(transparent)] + IoError(#[from] std::io::Error), + + CheckedMathError, + + #[error(transparent)] + RpcError(#[from] ClientError), + + #[error(transparent)] + SerdeJsonError(#[from] serde_json::Error), + + SnapshotSlotNotFound, +} + +impl Display for StakeMetaGeneratorError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + Debug::fmt(&self, f) + } +} + +/// Runs the entire workflow of creating a bank from a snapshot to writing stake meta-data +/// to a JSON file. 
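+// For orientation, the off-chain flow added by this patch is roughly:
+//   1. stake-meta-generator: ledger snapshot -> StakeMetaCollection JSON (this module)
+//   2. merkle-root-generator: StakeMetaCollection -> GeneratedMerkleTreeCollection JSON
+//   3. merkle-root-uploader: uploads each root to its TipDistributionAccount
+//   4. claim-mev: sends one Claim instruction per tree node
+//   5. reclaim-rent: closes expired ClaimStatus and TipDistributionAccounts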
+pub fn generate_stake_meta( + ledger_path: &Path, + snapshot_slot: &Slot, + tip_distribution_program_id: &Pubkey, + out_path: &str, + tip_payment_program_id: &Pubkey, +) -> Result<(), StakeMetaGeneratorError> { + info!("Creating bank from ledger path..."); + let bank = create_bank_from_snapshot(ledger_path, snapshot_slot)?; + + info!("Generating stake_meta_collection object..."); + let stake_meta_coll = + generate_stake_meta_collection(&bank, tip_distribution_program_id, tip_payment_program_id)?; + + info!("Writing stake_meta_collection to JSON {}...", out_path); + write_to_json_file(&stake_meta_coll, out_path)?; + + Ok(()) +} + +fn create_bank_from_snapshot( + ledger_path: &Path, + snapshot_slot: &Slot, +) -> Result, StakeMetaGeneratorError> { + let genesis_config = open_genesis_config(ledger_path, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE); + let snapshot_config = SnapshotConfig { + full_snapshot_archive_interval_slots: Slot::MAX, + incremental_snapshot_archive_interval_slots: Slot::MAX, + full_snapshot_archives_dir: PathBuf::from(ledger_path), + incremental_snapshot_archives_dir: PathBuf::from(ledger_path), + bank_snapshots_dir: PathBuf::from(ledger_path), + ..SnapshotConfig::default() + }; + let (bank_forks, _snapshot_hashes) = bank_forks_utils::bank_forks_from_snapshot( + &genesis_config, + vec![PathBuf::from(ledger_path).join(Path::new("stake-meta.accounts"))], + None, + &snapshot_config, + &ProcessOptions::default(), + None, + ); + + let working_bank = bank_forks.read().unwrap().working_bank(); + assert_eq!( + working_bank.slot(), + *snapshot_slot, + "expected working bank slot {}, found {}", + snapshot_slot, + working_bank.slot() + ); + + Ok(working_bank) +} + +fn write_to_json_file( + stake_meta_coll: &StakeMetaCollection, + out_path: &str, +) -> Result<(), StakeMetaGeneratorError> { + let file = File::create(out_path)?; + let mut writer = BufWriter::new(file); + let json = serde_json::to_string_pretty(&stake_meta_coll).unwrap(); + let _ = writer.write_all(json.as_bytes())?; + writer.flush()?; + + Ok(()) +} + +/// Creates a collection of [StakeMeta]'s from the given bank. +pub fn generate_stake_meta_collection( + bank: &Arc, + tip_distribution_program_id: &Pubkey, + tip_payment_program_id: &Pubkey, +) -> Result { + assert!(bank.is_frozen()); + + let epoch_vote_accounts = bank.epoch_vote_accounts(bank.epoch()).unwrap_or_else(|| { + panic!( + "No epoch_vote_accounts found for slot {} at epoch {}", + bank.slot(), + bank.epoch() + ) + }); + + let l_stakes = bank.stakes_cache.stakes(); + let delegations = l_stakes.stake_delegations(); + + let voter_pubkey_to_delegations = group_delegations_by_voter_pubkey(delegations, bank); + + // the last leader in an epoch may not crank the tip program before the epoch is over, which + // would result in MEV rewards for epoch N not being cranked until epoch N + 1. This means that + // the account balance in the snapshot could be incorrect. + // We assume that the rewards sitting in the tip program PDAs are cranked out by the time all of + // the rewards are claimed. 
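+    // Worked example of the split computed below (hypothetical numbers): with
+    // excess_tip_balances = 1_000_000 lamports across the eight tip PDAs and
+    // block_builder_commission_pct = 5, block_builder_tips = 1_000_000 * 5 / 100 =
+    // 50_000 and tip_receiver_fee = 1_000_000 - 50_000 = 950_000, mirroring the
+    // tip-payment program's own math.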
+ let tip_accounts = derive_tip_payment_pubkeys(tip_payment_program_id); + let account = bank + .get_account(&tip_accounts.config_pda) + .expect("config pda exists"); + + let config = Config::try_deserialize(&mut account.data()).expect("deserializes configuration"); + + let bb_commission_pct: u64 = config.block_builder_commission_pct; + let tip_receiver: Pubkey = config.tip_receiver; + + // includes the block builder fee + let excess_tip_balances: u64 = tip_accounts + .tip_pdas + .iter() + .map(|pubkey| { + let tip_account = bank.get_account(pubkey).expect("tip account exists"); + tip_account + .lamports() + .checked_sub(bank.get_minimum_balance_for_rent_exemption(tip_account.data().len())) + .expect("tip balance underflow") + }) + .sum(); + // matches math in tip payment program + let block_builder_tips = excess_tip_balances + .checked_mul(bb_commission_pct) + .expect("block_builder_tips overflow") + .checked_div(100) + .expect("block_builder_tips division error"); + let tip_receiver_fee = excess_tip_balances + .checked_sub(block_builder_tips) + .expect("tip_receiver_fee doesnt underflow"); + + let vote_pk_and_maybe_tdas: Vec<( + (Pubkey, &VoteAccount), + Option, + )> = epoch_vote_accounts + .iter() + .map(|(vote_pubkey, (_total_stake, vote_account))| { + let tip_distribution_pubkey = derive_tip_distribution_account_address( + tip_distribution_program_id, + vote_pubkey, + bank.epoch(), + ) + .0; + let tda = if let Some(mut account_data) = bank.get_account(&tip_distribution_pubkey) { + // TDAs may be funded with lamports and therefore exist in the bank, but would fail the deserialization step + // if the buffer is yet to be allocated thru the init call to the program. + if let Ok(tip_distribution_account) = + TipDistributionAccount::try_deserialize(&mut account_data.data()) + { + // this snapshot might have tips that weren't claimed by the time the epoch is over + // assume that it will eventually be cranked and credit the excess to this account + if tip_distribution_pubkey == tip_receiver { + account_data.set_lamports( + account_data + .lamports() + .checked_add(tip_receiver_fee) + .expect("tip overflow"), + ); + } + Some(TipDistributionAccountWrapper { + tip_distribution_account, + account_data, + tip_distribution_pubkey, + }) + } else { + None + } + } else { + None + }; + Ok(((*vote_pubkey, vote_account), tda)) + }) + .collect::>()?; + + let mut stake_metas = vec![]; + for ((vote_pubkey, vote_account), maybe_tda) in vote_pk_and_maybe_tdas { + if let Some(mut delegations) = voter_pubkey_to_delegations.get(&vote_pubkey).cloned() { + let total_delegated = delegations.iter().fold(0u64, |sum, delegation| { + sum.checked_add(delegation.lamports_delegated).unwrap() + }); + + let maybe_tip_distribution_meta = if let Some(tda) = maybe_tda { + let actual_len = tda.account_data.data().len(); + let expected_len: usize = + 8_usize.saturating_add(size_of::()); + if actual_len != expected_len { + warn!("len mismatch actual={actual_len}, expected={expected_len}"); + } + let rent_exempt_amount = + bank.get_minimum_balance_for_rent_exemption(tda.account_data.data().len()); + + Some(TipDistributionMeta::from_tda_wrapper( + tda, + rent_exempt_amount, + )?) 
+ } else { + None + }; + + let vote_state = vote_account.vote_state().as_ref().unwrap(); + delegations.sort(); + stake_metas.push(StakeMeta { + maybe_tip_distribution_meta, + validator_node_pubkey: vote_state.node_pubkey, + validator_vote_account: vote_pubkey, + delegations, + total_delegated, + commission: vote_state.commission, + }); + } else { + warn!( + "voter_pubkey not found in voter_pubkey_to_delegations map [validator_vote_pubkey={}]", + vote_pubkey + ); + } + } + stake_metas.sort(); + + Ok(StakeMetaCollection { + stake_metas, + tip_distribution_program_id: *tip_distribution_program_id, + bank_hash: bank.hash().to_string(), + epoch: bank.epoch(), + slot: bank.slot(), + }) +} + +/// Given an [EpochStakes] object, return delegations grouped by voter_pubkey (validator delegated to). +fn group_delegations_by_voter_pubkey( + delegations: &im::HashMap, + bank: &Bank, +) -> HashMap> { + delegations + .into_iter() + .filter(|(_stake_pubkey, stake_account)| { + stake_account.delegation().stake(bank.epoch(), None) > 0 + }) + .into_group_map_by(|(_stake_pubkey, stake_account)| stake_account.delegation().voter_pubkey) + .into_iter() + .map(|(voter_pubkey, group)| { + ( + voter_pubkey, + group + .into_iter() + .map(|(stake_pubkey, stake_account)| crate::Delegation { + stake_account_pubkey: *stake_pubkey, + staker_pubkey: stake_account + .stake_state() + .authorized() + .map(|a| a.staker) + .unwrap_or_default(), + withdrawer_pubkey: stake_account + .stake_state() + .authorized() + .map(|a| a.withdrawer) + .unwrap_or_default(), + lamports_delegated: stake_account.delegation().stake, + }) + .collect::>(), + ) + }) + .collect() +} + +#[cfg(test)] +mod tests { + use { + super::*, + crate::derive_tip_distribution_account_address, + anchor_lang::AccountSerialize, + solana_runtime::genesis_utils::{ + create_genesis_config_with_vote_accounts, GenesisConfigInfo, ValidatorVoteKeypairs, + }, + solana_sdk::{ + self, + account::{from_account, AccountSharedData}, + message::Message, + signature::{Keypair, Signer}, + stake::{ + self, + state::{Authorized, Lockup}, + }, + stake_history::StakeHistory, + sysvar, + transaction::Transaction, + }, + solana_stake_program::stake_state, + tip_distribution::state::TipDistributionAccount, + tip_payment::{ + InitBumps, TipPaymentAccount, CONFIG_ACCOUNT_SEED, TIP_ACCOUNT_SEED_0, + TIP_ACCOUNT_SEED_1, TIP_ACCOUNT_SEED_2, TIP_ACCOUNT_SEED_3, TIP_ACCOUNT_SEED_4, + TIP_ACCOUNT_SEED_5, TIP_ACCOUNT_SEED_6, TIP_ACCOUNT_SEED_7, + }, + }; + + #[test] + fn test_generate_stake_meta_collection_happy_path() { + /* 1. Create a Bank seeded with some validator stake accounts */ + let validator_keypairs_0 = ValidatorVoteKeypairs::new_rand(); + let validator_keypairs_1 = ValidatorVoteKeypairs::new_rand(); + let validator_keypairs_2 = ValidatorVoteKeypairs::new_rand(); + let validator_keypairs = vec![ + &validator_keypairs_0, + &validator_keypairs_1, + &validator_keypairs_2, + ]; + const INITIAL_VALIDATOR_STAKES: u64 = 10_000; + let GenesisConfigInfo { genesis_config, .. } = create_genesis_config_with_vote_accounts( + 1_000_000_000, + &validator_keypairs, + vec![INITIAL_VALIDATOR_STAKES; 3], + ); + + let bank = Bank::new_for_tests(&genesis_config); + + /* 2. 
Seed the Bank with [TipDistributionAccount]'s */ + let merkle_root_upload_authority = Pubkey::new_unique(); + let tip_distribution_program_id = Pubkey::new_unique(); + let tip_payment_program_id = Pubkey::new_unique(); + + let delegator_0 = Keypair::new(); + let delegator_1 = Keypair::new(); + let delegator_2 = Keypair::new(); + let delegator_3 = Keypair::new(); + let delegator_4 = Keypair::new(); + + let delegator_0_pk = delegator_0.pubkey(); + let delegator_1_pk = delegator_1.pubkey(); + let delegator_2_pk = delegator_2.pubkey(); + let delegator_3_pk = delegator_3.pubkey(); + let delegator_4_pk = delegator_4.pubkey(); + + let d_0_data = AccountSharedData::new( + 300_000_000_000_000 * 10, + 0, + &solana_sdk::system_program::id(), + ); + let d_1_data = AccountSharedData::new( + 100_000_203_000_000 * 10, + 0, + &solana_sdk::system_program::id(), + ); + let d_2_data = AccountSharedData::new( + 100_000_235_899_000 * 10, + 0, + &solana_sdk::system_program::id(), + ); + let d_3_data = AccountSharedData::new( + 200_000_000_000_000 * 10, + 0, + &solana_sdk::system_program::id(), + ); + let d_4_data = AccountSharedData::new( + 100_000_000_777_000 * 10, + 0, + &solana_sdk::system_program::id(), + ); + + bank.store_account(&delegator_0_pk, &d_0_data); + bank.store_account(&delegator_1_pk, &d_1_data); + bank.store_account(&delegator_2_pk, &d_2_data); + bank.store_account(&delegator_3_pk, &d_3_data); + bank.store_account(&delegator_4_pk, &d_4_data); + + /* 3. Delegate some stake to the initial set of validators */ + let mut validator_0_delegations = vec![crate::Delegation { + stake_account_pubkey: validator_keypairs_0.stake_keypair.pubkey(), + staker_pubkey: validator_keypairs_0.stake_keypair.pubkey(), + withdrawer_pubkey: validator_keypairs_0.stake_keypair.pubkey(), + lamports_delegated: INITIAL_VALIDATOR_STAKES, + }]; + let stake_account = delegate_stake_helper( + &bank, + &delegator_0, + &validator_keypairs_0.vote_keypair.pubkey(), + 30_000_000_000, + ); + validator_0_delegations.push(crate::Delegation { + stake_account_pubkey: stake_account, + staker_pubkey: delegator_0.pubkey(), + withdrawer_pubkey: delegator_0.pubkey(), + lamports_delegated: 30_000_000_000, + }); + let stake_account = delegate_stake_helper( + &bank, + &delegator_1, + &validator_keypairs_0.vote_keypair.pubkey(), + 3_000_000_000, + ); + validator_0_delegations.push(crate::Delegation { + stake_account_pubkey: stake_account, + staker_pubkey: delegator_1.pubkey(), + withdrawer_pubkey: delegator_1.pubkey(), + lamports_delegated: 3_000_000_000, + }); + let stake_account = delegate_stake_helper( + &bank, + &delegator_2, + &validator_keypairs_0.vote_keypair.pubkey(), + 33_000_000_000, + ); + validator_0_delegations.push(crate::Delegation { + stake_account_pubkey: stake_account, + staker_pubkey: delegator_2.pubkey(), + withdrawer_pubkey: delegator_2.pubkey(), + lamports_delegated: 33_000_000_000, + }); + + let mut validator_1_delegations = vec![crate::Delegation { + stake_account_pubkey: validator_keypairs_1.stake_keypair.pubkey(), + staker_pubkey: validator_keypairs_1.stake_keypair.pubkey(), + withdrawer_pubkey: validator_keypairs_1.stake_keypair.pubkey(), + lamports_delegated: INITIAL_VALIDATOR_STAKES, + }]; + let stake_account = delegate_stake_helper( + &bank, + &delegator_3, + &validator_keypairs_1.vote_keypair.pubkey(), + 4_222_364_000, + ); + validator_1_delegations.push(crate::Delegation { + stake_account_pubkey: stake_account, + staker_pubkey: delegator_3.pubkey(), + withdrawer_pubkey: delegator_3.pubkey(), + lamports_delegated: 
4_222_364_000, + }); + let stake_account = delegate_stake_helper( + &bank, + &delegator_4, + &validator_keypairs_1.vote_keypair.pubkey(), + 6_000_000_527, + ); + validator_1_delegations.push(crate::Delegation { + stake_account_pubkey: stake_account, + staker_pubkey: delegator_4.pubkey(), + withdrawer_pubkey: delegator_4.pubkey(), + lamports_delegated: 6_000_000_527, + }); + + let mut validator_2_delegations = vec![crate::Delegation { + stake_account_pubkey: validator_keypairs_2.stake_keypair.pubkey(), + staker_pubkey: validator_keypairs_2.stake_keypair.pubkey(), + withdrawer_pubkey: validator_keypairs_2.stake_keypair.pubkey(), + lamports_delegated: INITIAL_VALIDATOR_STAKES, + }]; + let stake_account = delegate_stake_helper( + &bank, + &delegator_0, + &validator_keypairs_2.vote_keypair.pubkey(), + 1_300_123_156, + ); + validator_2_delegations.push(crate::Delegation { + stake_account_pubkey: stake_account, + staker_pubkey: delegator_0.pubkey(), + withdrawer_pubkey: delegator_0.pubkey(), + lamports_delegated: 1_300_123_156, + }); + let stake_account = delegate_stake_helper( + &bank, + &delegator_4, + &validator_keypairs_2.vote_keypair.pubkey(), + 1_610_565_420, + ); + validator_2_delegations.push(crate::Delegation { + stake_account_pubkey: stake_account, + staker_pubkey: delegator_4.pubkey(), + withdrawer_pubkey: delegator_4.pubkey(), + lamports_delegated: 1_610_565_420, + }); + + /* 4. Run assertions */ + fn warmed_up(bank: &Bank, stake_pubkeys: &[Pubkey]) -> bool { + for stake_pubkey in stake_pubkeys { + let stake = + stake_state::stake_from(&bank.get_account(stake_pubkey).unwrap()).unwrap(); + + if stake.delegation.stake + != stake.stake( + bank.epoch(), + Some( + &from_account::( + &bank.get_account(&sysvar::stake_history::id()).unwrap(), + ) + .unwrap(), + ), + ) + { + return false; + } + } + + true + } + fn next_epoch(bank: &Arc) -> Arc { + bank.squash(); + + Arc::new(Bank::new_from_parent( + bank, + &Pubkey::default(), + bank.get_slots_in_epoch(bank.epoch()) + bank.slot(), + )) + } + + let mut bank = Arc::new(bank); + let mut stake_pubkeys = validator_0_delegations + .iter() + .map(|v| v.stake_account_pubkey) + .collect::>(); + stake_pubkeys.extend( + validator_1_delegations + .iter() + .map(|v| v.stake_account_pubkey), + ); + stake_pubkeys.extend( + validator_2_delegations + .iter() + .map(|v| v.stake_account_pubkey), + ); + loop { + if warmed_up(&bank, &stake_pubkeys[..]) { + break; + } + + // Cycle thru banks until we're fully warmed up + bank = next_epoch(&bank); + } + + let tip_distribution_account_0 = derive_tip_distribution_account_address( + &tip_distribution_program_id, + &validator_keypairs_0.vote_keypair.pubkey(), + bank.epoch(), + ); + let tip_distribution_account_1 = derive_tip_distribution_account_address( + &tip_distribution_program_id, + &validator_keypairs_1.vote_keypair.pubkey(), + bank.epoch(), + ); + let tip_distribution_account_2 = derive_tip_distribution_account_address( + &tip_distribution_program_id, + &validator_keypairs_2.vote_keypair.pubkey(), + bank.epoch(), + ); + + let expires_at = bank.epoch() + 3; + + let tda_0 = TipDistributionAccount { + validator_vote_account: validator_keypairs_0.vote_keypair.pubkey(), + merkle_root_upload_authority, + merkle_root: None, + epoch_created_at: bank.epoch(), + validator_commission_bps: 50, + expires_at, + bump: tip_distribution_account_0.1, + }; + let tda_1 = TipDistributionAccount { + validator_vote_account: validator_keypairs_1.vote_keypair.pubkey(), + merkle_root_upload_authority, + merkle_root: None, + 
epoch_created_at: bank.epoch(), + validator_commission_bps: 500, + expires_at: 0, + bump: tip_distribution_account_1.1, + }; + let tda_2 = TipDistributionAccount { + validator_vote_account: validator_keypairs_2.vote_keypair.pubkey(), + merkle_root_upload_authority, + merkle_root: None, + epoch_created_at: bank.epoch(), + validator_commission_bps: 75, + expires_at: 0, + bump: tip_distribution_account_2.1, + }; + + let tip_distro_0_tips = 1_000_000 * 10; + let tip_distro_1_tips = 69_000_420 * 10; + let tip_distro_2_tips = 789_000_111 * 10; + + let tda_0_fields = (tip_distribution_account_0.0, tda_0.validator_commission_bps); + let data_0 = + tda_to_account_shared_data(&tip_distribution_program_id, tip_distro_0_tips, tda_0); + let tda_1_fields = (tip_distribution_account_1.0, tda_1.validator_commission_bps); + let data_1 = + tda_to_account_shared_data(&tip_distribution_program_id, tip_distro_1_tips, tda_1); + let tda_2_fields = (tip_distribution_account_2.0, tda_2.validator_commission_bps); + let data_2 = + tda_to_account_shared_data(&tip_distribution_program_id, tip_distro_2_tips, tda_2); + + let accounts_data = create_config_account_data(&tip_payment_program_id, &bank); + for (pubkey, data) in accounts_data { + bank.store_account(&pubkey, &data); + } + + bank.store_account(&tip_distribution_account_0.0, &data_0); + bank.store_account(&tip_distribution_account_1.0, &data_1); + bank.store_account(&tip_distribution_account_2.0, &data_2); + + bank.freeze(); + let stake_meta_collection = generate_stake_meta_collection( + &bank, + &tip_distribution_program_id, + &tip_payment_program_id, + ) + .unwrap(); + assert_eq!( + stake_meta_collection.tip_distribution_program_id, + tip_distribution_program_id + ); + assert_eq!(stake_meta_collection.slot, bank.slot()); + assert_eq!(stake_meta_collection.epoch, bank.epoch()); + + let mut expected_stake_metas = HashMap::new(); + expected_stake_metas.insert( + validator_keypairs_0.vote_keypair.pubkey(), + StakeMeta { + validator_vote_account: validator_keypairs_0.vote_keypair.pubkey(), + delegations: validator_0_delegations.clone(), + total_delegated: validator_0_delegations + .iter() + .fold(0u64, |sum, delegation| { + sum.checked_add(delegation.lamports_delegated).unwrap() + }), + maybe_tip_distribution_meta: Some(TipDistributionMeta { + merkle_root_upload_authority, + tip_distribution_pubkey: tda_0_fields.0, + total_tips: tip_distro_0_tips + .checked_sub( + bank.get_minimum_balance_for_rent_exemption( + TipDistributionAccount::SIZE, + ), + ) + .unwrap(), + validator_fee_bps: tda_0_fields.1, + }), + commission: 0, + validator_node_pubkey: validator_keypairs_0.node_keypair.pubkey(), + }, + ); + expected_stake_metas.insert( + validator_keypairs_1.vote_keypair.pubkey(), + StakeMeta { + validator_vote_account: validator_keypairs_1.vote_keypair.pubkey(), + delegations: validator_1_delegations.clone(), + total_delegated: validator_1_delegations + .iter() + .fold(0u64, |sum, delegation| { + sum.checked_add(delegation.lamports_delegated).unwrap() + }), + maybe_tip_distribution_meta: Some(TipDistributionMeta { + merkle_root_upload_authority, + tip_distribution_pubkey: tda_1_fields.0, + total_tips: tip_distro_1_tips + .checked_sub( + bank.get_minimum_balance_for_rent_exemption( + TipDistributionAccount::SIZE, + ), + ) + .unwrap(), + validator_fee_bps: tda_1_fields.1, + }), + commission: 0, + validator_node_pubkey: validator_keypairs_1.node_keypair.pubkey(), + }, + ); + expected_stake_metas.insert( + validator_keypairs_2.vote_keypair.pubkey(), + StakeMeta { + 
validator_vote_account: validator_keypairs_2.vote_keypair.pubkey(), + delegations: validator_2_delegations.clone(), + total_delegated: validator_2_delegations + .iter() + .fold(0u64, |sum, delegation| { + sum.checked_add(delegation.lamports_delegated).unwrap() + }), + maybe_tip_distribution_meta: Some(TipDistributionMeta { + merkle_root_upload_authority, + tip_distribution_pubkey: tda_2_fields.0, + total_tips: tip_distro_2_tips + .checked_sub( + bank.get_minimum_balance_for_rent_exemption( + TipDistributionAccount::SIZE, + ), + ) + .unwrap(), + validator_fee_bps: tda_2_fields.1, + }), + commission: 0, + validator_node_pubkey: validator_keypairs_2.node_keypair.pubkey(), + }, + ); + + println!( + "validator_0 [vote_account={}, stake_account={}]", + validator_keypairs_0.vote_keypair.pubkey(), + validator_keypairs_0.stake_keypair.pubkey() + ); + println!( + "validator_1 [vote_account={}, stake_account={}]", + validator_keypairs_1.vote_keypair.pubkey(), + validator_keypairs_1.stake_keypair.pubkey() + ); + println!( + "validator_2 [vote_account={}, stake_account={}]", + validator_keypairs_2.vote_keypair.pubkey(), + validator_keypairs_2.stake_keypair.pubkey(), + ); + + assert_eq!( + expected_stake_metas.len(), + stake_meta_collection.stake_metas.len() + ); + + for actual_stake_meta in stake_meta_collection.stake_metas { + let expected_stake_meta = expected_stake_metas + .get(&actual_stake_meta.validator_vote_account) + .unwrap(); + assert_eq!( + expected_stake_meta.maybe_tip_distribution_meta, + actual_stake_meta.maybe_tip_distribution_meta + ); + assert_eq!( + expected_stake_meta.total_delegated, + actual_stake_meta.total_delegated + ); + assert_eq!(expected_stake_meta.commission, actual_stake_meta.commission); + assert_eq!( + expected_stake_meta.validator_vote_account, + actual_stake_meta.validator_vote_account + ); + + assert_eq!( + expected_stake_meta.delegations.len(), + actual_stake_meta.delegations.len() + ); + + for expected_delegation in &expected_stake_meta.delegations { + let actual_delegation = actual_stake_meta + .delegations + .iter() + .find(|d| d.stake_account_pubkey == expected_delegation.stake_account_pubkey) + .unwrap(); + + assert_eq!(expected_delegation, actual_delegation); + } + } + } + + /// Helper function that sends a delegate stake instruction to the bank. + /// Returns the created stake account pubkey. 
+ fn delegate_stake_helper( + bank: &Bank, + from_keypair: &Keypair, + vote_account: &Pubkey, + delegation_amount: u64, + ) -> Pubkey { + let minimum_delegation = solana_stake_program::get_minimum_delegation(&*bank.feature_set); + assert!( + delegation_amount >= minimum_delegation, + "received delegation_amount {}, must be at least {}", + delegation_amount, minimum_delegation + ); + if let Some(from_account) = bank.get_account(&from_keypair.pubkey()) { + assert_eq!(from_account.owner(), &solana_sdk::system_program::id()); + } else { + panic!("from_account does not exist"); + } + assert!(bank.get_account(vote_account).is_some()); + + let stake_keypair = Keypair::new(); + let instructions = stake::instruction::create_account_and_delegate_stake( + &from_keypair.pubkey(), + &stake_keypair.pubkey(), + vote_account, + &Authorized::auto(&from_keypair.pubkey()), + &Lockup::default(), + delegation_amount, + ); + + let message = Message::new(&instructions[..], Some(&from_keypair.pubkey())); + let transaction = Transaction::new( + &[from_keypair, &stake_keypair], + message, + bank.last_blockhash(), + ); + + bank.process_transaction(&transaction) + .map_err(|e| { + eprintln!("Error delegating stake [error={}]", e); + e + }) + .unwrap(); + + stake_keypair.pubkey() + } + + fn tda_to_account_shared_data( + tip_distribution_program_id: &Pubkey, + lamports: u64, + tda: TipDistributionAccount, + ) -> AccountSharedData { + let mut account_data = AccountSharedData::new( + lamports, + TipDistributionAccount::SIZE, + tip_distribution_program_id, + ); + + let mut data: [u8; TipDistributionAccount::SIZE] = [0u8; TipDistributionAccount::SIZE]; + let mut cursor = std::io::Cursor::new(&mut data[..]); + tda.try_serialize(&mut cursor).unwrap(); + + account_data.set_data(data.to_vec()); + account_data + } + + fn create_config_account_data( + tip_payment_program_id: &Pubkey, + bank: &Bank, + ) -> Vec<(Pubkey, AccountSharedData)> { + let mut account_datas = vec![]; + + let config_pda = + Pubkey::find_program_address(&[CONFIG_ACCOUNT_SEED], tip_payment_program_id); + + let tip_accounts = [ + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_0], tip_payment_program_id), + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_1], tip_payment_program_id), + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_2], tip_payment_program_id), + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_3], tip_payment_program_id), + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_4], tip_payment_program_id), + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_5], tip_payment_program_id), + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_6], tip_payment_program_id), + Pubkey::find_program_address(&[TIP_ACCOUNT_SEED_7], tip_payment_program_id), + ]; + + let config = Config { + tip_receiver: Pubkey::new_unique(), + block_builder: Pubkey::new_unique(), + block_builder_commission_pct: 10, + bumps: InitBumps { + config: config_pda.1, + tip_payment_account_0: tip_accounts[0].1, + tip_payment_account_1: tip_accounts[1].1, + tip_payment_account_2: tip_accounts[2].1, + tip_payment_account_3: tip_accounts[3].1, + tip_payment_account_4: tip_accounts[4].1, + tip_payment_account_5: tip_accounts[5].1, + tip_payment_account_6: tip_accounts[6].1, + tip_payment_account_7: tip_accounts[7].1, + }, + }; + + let mut config_account_data = AccountSharedData::new( + bank.get_minimum_balance_for_rent_exemption(Config::SIZE), + Config::SIZE, + tip_payment_program_id, + ); + + let mut config_data: [u8; Config::SIZE] = [0u8; Config::SIZE]; + let mut config_cursor =
std::io::Cursor::new(&mut config_data[..]); + config.try_serialize(&mut config_cursor).unwrap(); + config_account_data.set_data(config_data.to_vec()); + account_datas.push((config_pda.0, config_account_data)); + + account_datas.extend(tip_accounts.into_iter().map(|(pubkey, _)| { + let mut tip_account_data = AccountSharedData::new( + bank.get_minimum_balance_for_rent_exemption(TipPaymentAccount::SIZE), + TipPaymentAccount::SIZE, + tip_payment_program_id, + ); + + let mut data: [u8; TipPaymentAccount::SIZE] = [0u8; TipPaymentAccount::SIZE]; + let mut cursor = std::io::Cursor::new(&mut data[..]); + TipPaymentAccount::default() + .try_serialize(&mut cursor) + .unwrap(); + tip_account_data.set_data(data.to_vec()); + + (pubkey, tip_account_data) + })); + + account_datas + } +} diff --git a/validator/Cargo.toml b/validator/Cargo.toml index 1f8acf98e9..7f7453eb6e 100644 --- a/validator/Cargo.toml +++ b/validator/Cargo.toml @@ -53,6 +53,7 @@ solana-test-validator = { path = "../test-validator", version = "=1.14.24" } solana-version = { path = "../version", version = "=1.14.24" } solana-vote-program = { path = "../programs/vote", version = "=1.14.24" } symlink = "0.1.0" +tonic = { version = "0.5.2", features = ["tls", "tls-roots", "tls-webpki-roots"] } [target.'cfg(not(target_env = "msvc"))'.dependencies] jemallocator = { package = "tikv-jemallocator", version = "0.4.1", features = ["unprefixed_malloc_on_supported_platforms"] } diff --git a/validator/src/admin_rpc_service.rs b/validator/src/admin_rpc_service.rs index fc10f6f193..d7630535fd 100644 --- a/validator/src/admin_rpc_service.rs +++ b/validator/src/admin_rpc_service.rs @@ -7,8 +7,14 @@ use { log::*, serde::{Deserialize, Serialize}, solana_core::{ - admin_rpc_post_init::AdminRpcRequestMetadataPostInit, consensus::Tower, - tower_storage::TowerStorage, validator::ValidatorStartProgress, + admin_rpc_post_init::AdminRpcRequestMetadataPostInit, + consensus::Tower, + proxy::{ + block_engine_stage::{BlockEngineConfig, BlockEngineStage}, + relayer_stage::{RelayerConfig, RelayerStage}, + }, + tower_storage::TowerStorage, + validator::ValidatorStartProgress, }, solana_gossip::legacy_contact_info::LegacyContactInfo as ContactInfo, solana_sdk::{ @@ -19,6 +25,7 @@ use { fmt::{self, Display}, net::SocketAddr, path::{Path, PathBuf}, + str::FromStr, sync::{Arc, RwLock}, thread::{self, Builder}, time::{Duration, SystemTime}, @@ -169,6 +176,29 @@ pub trait AdminRpc { #[rpc(meta, name = "contactInfo")] fn contact_info(&self, meta: Self::Metadata) -> Result<AdminRpcContactInfo>; + + #[rpc(meta, name = "setBlockEngineConfig")] + fn set_block_engine_config( + &self, + meta: Self::Metadata, + auth_service_addr: String, + backend_addr: String, + trust_packets: bool, + ) -> Result<()>; + + #[rpc(meta, name = "setRelayerConfig")] + fn set_relayer_config( + &self, + meta: Self::Metadata, + auth_service_addr: String, + backend_addr: String, + trust_packets: bool, + expected_heartbeat_interval_ms: u64, + max_failed_heartbeats: u64, + ) -> Result<()>; + + #[rpc(meta, name = "setShredReceiverAddress")] + fn set_shred_receiver_address(&self, meta: Self::Metadata, addr: String) -> Result<()>; } pub struct AdminRpcImpl; @@ -253,6 +283,34 @@ impl AdminRpc for AdminRpcImpl { Ok(()) } + fn set_block_engine_config( + &self, + meta: Self::Metadata, + auth_service_addr: String, + backend_addr: String, + trust_packets: bool, + ) -> Result<()> { + debug!("set_block_engine_config request received"); + let config = BlockEngineConfig { + auth_service_addr, + backend_addr, + trust_packets, + }; + // Detailed
log messages are printed inside validate function + if BlockEngineStage::is_valid_block_engine_config(&config) + || (config.auth_service_addr.is_empty() && config.backend_addr.is_empty()) + { + meta.with_post_init(|post_init| { + *post_init.block_engine_config.lock().unwrap() = config; + Ok(()) + }) + } else { + Err(jsonrpc_core::error::Error::invalid_params( + "failed to set block engine config. see logs for details.", + )) + } + } + fn set_identity( &self, meta: Self::Metadata, @@ -289,6 +347,59 @@ impl AdminRpc for AdminRpcImpl { AdminRpcImpl::set_identity_keypair(meta, identity_keypair, require_tower) } + fn set_relayer_config( + &self, + meta: Self::Metadata, + auth_service_addr: String, + backend_addr: String, + trust_packets: bool, + expected_heartbeat_interval_ms: u64, + max_failed_heartbeats: u64, + ) -> Result<()> { + debug!("set_relayer_config request received"); + let expected_heartbeat_interval = Duration::from_millis(expected_heartbeat_interval_ms); + let oldest_allowed_heartbeat = + Duration::from_millis(max_failed_heartbeats * expected_heartbeat_interval_ms); + let config = RelayerConfig { + auth_service_addr, + backend_addr, + expected_heartbeat_interval, + oldest_allowed_heartbeat, + trust_packets, + }; + // Detailed log messages are printed inside validate function + if RelayerStage::is_valid_relayer_config(&config) + || (config.auth_service_addr.is_empty() && config.backend_addr.is_empty()) + { + meta.with_post_init(|post_init| { + *post_init.relayer_config.lock().unwrap() = config; + Ok(()) + }) + } else { + Err(jsonrpc_core::error::Error::invalid_params( + "failed to set relayer config. see logs for details.", + )) + } + } + + fn set_shred_receiver_address(&self, meta: Self::Metadata, addr: String) -> Result<()> { + let shred_receiver_address = if addr.is_empty() { + None + } else { + Some(SocketAddr::from_str(&addr).map_err(|_| { + jsonrpc_core::error::Error::invalid_params(format!( + "invalid shred receiver address: {}", + addr + )) + })?) 
+ }; + + meta.with_post_init(|post_init| { + *post_init.shred_receiver_address.write().unwrap() = shred_receiver_address; + Ok(()) + }) + } + fn contact_info(&self, meta: Self::Metadata) -> Result<AdminRpcContactInfo> { meta.with_post_init(|post_init| Ok(post_init.cluster_info.my_contact_info().into())) } diff --git a/validator/src/bootstrap.rs b/validator/src/bootstrap.rs index 9868346e52..1484671910 100644 --- a/validator/src/bootstrap.rs +++ b/validator/src/bootstrap.rs @@ -638,12 +638,13 @@ fn get_highest_local_snapshot_hash( incremental_snapshot_archives_dir: impl AsRef<Path>, incremental_snapshot_fetch: bool, ) -> Option<(Slot, Hash)> { - snapshot_utils::get_highest_full_snapshot_archive_info(full_snapshot_archives_dir).and_then( - |full_snapshot_info| { + snapshot_utils::get_highest_full_snapshot_archive_info(full_snapshot_archives_dir, None) + .and_then(|full_snapshot_info| { if incremental_snapshot_fetch { snapshot_utils::get_highest_incremental_snapshot_archive_info( incremental_snapshot_archives_dir, full_snapshot_info.slot(), + None, ) .map(|incremental_snapshot_info| { ( @@ -655,8 +656,7 @@ fn get_highest_local_snapshot_hash( None } .or_else(|| Some((full_snapshot_info.slot(), *full_snapshot_info.hash()))) - }, - ) + }) } /// Get peer snapshot hashes diff --git a/validator/src/dashboard.rs b/validator/src/dashboard.rs index ec94e0d5a7..684cc67008 100644 --- a/validator/src/dashboard.rs +++ b/validator/src/dashboard.rs @@ -275,6 +275,7 @@ fn get_validator_stats( Err(err) => { if let client_error::ClientErrorKind::RpcError( rpc_request::RpcError::RpcResponseError { + request_id: _, code: _, message: _, data: diff --git a/validator/src/main.rs b/validator/src/main.rs index 937c69de21..fcd890d204 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -26,7 +26,9 @@ use { }, solana_core::{ ledger_cleanup_service::{DEFAULT_MAX_LEDGER_SHREDS, DEFAULT_MIN_MAX_LEDGER_SHREDS}, + proxy::{block_engine_stage::BlockEngineConfig, relayer_stage::RelayerConfig}, system_monitor_service::SystemMonitorService, + tip_manager::{TipDistributionAccountConfig, TipManagerConfig}, tower_storage, tpu::DEFAULT_TPU_COALESCE_MS, validator::{is_snapshot_config_valid, Validator, ValidatorConfig, ValidatorStartProgress}, @@ -86,7 +88,7 @@ use { path::{Path, PathBuf}, process::exit, str::FromStr, - sync::{Arc, RwLock}, + sync::{Arc, Mutex, RwLock}, time::{Duration, SystemTime}, }, }; @@ -108,6 +110,9 @@ const DEFAULT_MIN_SNAPSHOT_DOWNLOAD_SPEED: u64 = 10485760; // The maximum times of snapshot download abort and retry const MAX_SNAPSHOT_DOWNLOAD_ABORT: u32 = 5; const MILLIS_PER_SECOND: u64 = 1000; +const DEFAULT_PREALLOCATED_BUNDLE_COST: &str = "3000000"; +const DEFAULT_RELAYER_EXPECTED_HEARTBEAT_INTERVAL_MS: &str = "500"; +const DEFAULT_RELAYER_MAX_FAILED_HEARTBEATS: &str = "3"; fn monitor_validator(ledger_path: &Path) { let dashboard = Dashboard::new(ledger_path, None, None).unwrap_or_else(|err| { @@ -1819,6 +1824,119 @@ pub fn main() { .help("Allow contacting private ip addresses") .hidden(true), ) + .arg( + Arg::with_name("block_engine_address") + .long("block-engine-address") + .value_name("block_engine_address") + .takes_value(true) + .help("Deprecated: Please use block_engine_url.") + .conflicts_with("block_engine_url") + ) + .arg( + Arg::with_name("block_engine_auth_service_address") + .long("block-engine-auth-service-address") + .value_name("block_engine_auth_service_address") + .takes_value(true) + .help("Deprecated: Please use block_engine_url.") + .conflicts_with("block_engine_url") + ) + .arg( +
Arg::with_name("relayer_auth_service_address") + .long("relayer-auth-service-address") + .value_name("relayer_auth_service_address") + .takes_value(true) + .help("Deprecated: Please use relayer_url.") + .conflicts_with("relayer_url") + ) + .arg( + Arg::with_name("relayer_address") + .long("relayer-address") + .value_name("relayer_address") + .takes_value(true) + .help("Deprecated: Please use relayer_url.") + .conflicts_with("relayer_url") + ) + .arg( + Arg::with_name("block_engine_url") + .long("block-engine-url") + .help("Block engine url") + .takes_value(true) + ) + .arg( + Arg::with_name("relayer_url") + .long("relayer-url") + .help("Relayer url") + .takes_value(true) + ) + .arg( + Arg::with_name("trust_relayer_packets") + .long("trust-relayer-packets") + .takes_value(false) + .help("Skip signature verification on relayer packets. Not recommended unless the relayer is trusted.") + ) + .arg( + Arg::with_name("relayer_expected_heartbeat_interval_ms") + .long("relayer-expected-heartbeat-interval-ms") + .takes_value(true) + .help("Interval at which the Relayer is expected to send heartbeat messages.") + .default_value(DEFAULT_RELAYER_EXPECTED_HEARTBEAT_INTERVAL_MS) + ) + .arg( + Arg::with_name("relayer_max_failed_heartbeats") + .long("relayer-max-failed-heartbeats") + .takes_value(true) + .help("Maximum number of heartbeats the Relayer can miss before falling back to the normal TPU pipeline.") + .default_value(DEFAULT_RELAYER_MAX_FAILED_HEARTBEATS) + ) + .arg( + Arg::with_name("trust_block_engine_packets") + .long("trust-block-engine-packets") + .takes_value(false) + .help("Skip signature verification on block engine packets. Not recommended unless the block engine is trusted.") + ) + .arg( + Arg::with_name("tip_payment_program_pubkey") + .long("tip-payment-program-pubkey") + .value_name("TIP_PAYMENT_PROGRAM_PUBKEY") + .takes_value(true) + .help("The public key of the tip-payment program") + ) + .arg( + Arg::with_name("tip_distribution_program_pubkey") + .long("tip-distribution-program-pubkey") + .value_name("TIP_DISTRIBUTION_PROGRAM_PUBKEY") + .takes_value(true) + .help("The public key of the tip-distribution program.") + ) + .arg( + Arg::with_name("merkle_root_upload_authority") + .long("merkle-root-upload-authority") + .value_name("MERKLE_ROOT_UPLOAD_AUTHORITY") + .takes_value(true) + .help("The public key of the authorized merkle-root uploader.") + ) + .arg( + Arg::with_name("commission_bps") + .long("commission-bps") + .value_name("COMMISSION_BPS") + .takes_value(true) + .help("The commission validator takes from tips expressed in basis points.") + ) + .arg( + Arg::with_name("preallocated_bundle_cost") + .long("preallocated-bundle-cost") + .value_name("PREALLOCATED_BUNDLE_COST") + .takes_value(true) + .default_value(DEFAULT_PREALLOCATED_BUNDLE_COST) + .help("Number of CUs to allocate for bundles at beginning of slot.") + ) + .arg( + Arg::with_name("shred_receiver_address") + .long("shred-receiver-address") + .value_name("SHRED_RECEIVER_ADDRESS") + .takes_value(true) + .help("Validator will forward all shreds to this address in addition to normal turbine operation. 
Omit or set to empty string to disable.") + ) .arg( Arg::with_name("log_messages_bytes_limit") .long("log-messages-bytes-limit") @@ -1921,6 +2039,41 @@ pub fn main() { SubCommand::with_name("run") .about("Run the validator") ) + .subcommand( + SubCommand::with_name("set-block-engine-config") + .about("Set configuration for connection to a block engine") + .arg( + Arg::with_name("block_engine_url") + .long("block-engine-url") + .help("Block engine URL. Set to empty string to disable the block engine connection.") + .takes_value(true) + .required(false) + ) + .arg( + Arg::with_name("block_engine_address") + .long("block-engine-address") + .value_name("block_engine_address") + .takes_value(true) + .help("Deprecated: Address of the block engine's grpc.") + .conflicts_with("block_engine_url") + .required(false) + ) + .arg( + Arg::with_name("block_engine_auth_service_address") + .long("block-engine-auth-service-address") + .value_name("block_engine_auth_service_address") + .takes_value(true) + .help("Deprecated: Address of the block engine's authentication service.") + .conflicts_with("block_engine_url") + .required(false) + ) + .arg( + Arg::with_name("trust_block_engine_packets") + .long("trust-block-engine-packets") + .takes_value(false) + .help("Skip signature verification on block engine packets. Not recommended unless the block engine is trusted.") + ) + ) .subcommand( SubCommand::with_name("set-identity") .about("Set the validator identity") @@ -1954,6 +2107,69 @@ pub fn main() { ) .after_help("Note: the new filter only applies to the currently running validator instance") ) + .subcommand( + SubCommand::with_name("set-relayer-config") + .about("Set configuration for connection to a relayer") + .arg( + Arg::with_name("relayer_url") + .long("relayer-url") + .help("Relayer URL. Set to empty string to disable the relayer connection.") + .takes_value(true) + .required(false) + ) + .arg( + Arg::with_name("relayer_auth_service_address") + .long("relayer-auth-service-address") + .value_name("relayer_auth_service_address") + .takes_value(true) + .help("Deprecated: Address of the relayer's authentication service.") + .conflicts_with("relayer_url") + .required(false) + ) + .arg( + Arg::with_name("relayer_address") + .long("relayer-address") + .value_name("relayer_address") + .takes_value(true) + .help("Deprecated: Address of the relayer's grpc.") + .conflicts_with("relayer_url") + .required(false) + ) + .arg( + Arg::with_name("trust_relayer_packets") + .long("trust-relayer-packets") + .takes_value(false) + .help("Skip signature verification on relayer packets.
Not recommended unless the relayer is trusted.") + ) + .arg( + Arg::with_name("relayer_expected_heartbeat_interval_ms") + .long("relayer-expected-heartbeat-interval-ms") + .takes_value(true) + .help("Interval at which the Relayer is expected to send heartbeat messages.") + .required(false) + .default_value(DEFAULT_RELAYER_EXPECTED_HEARTBEAT_INTERVAL_MS) + ) + .arg( + Arg::with_name("relayer_max_failed_heartbeats") + .long("relayer-max-failed-heartbeats") + .takes_value(true) + .help("Maximum number of heartbeats the Relayer can miss before falling back to the normal TPU pipeline.") + .required(false) + .default_value(DEFAULT_RELAYER_MAX_FAILED_HEARTBEATS) + ) + ) + .subcommand( + SubCommand::with_name("set-shred-receiver-address") + .about("Changes shred receiver address") + .arg( + Arg::with_name("shred_receiver_address") + .long("shred-receiver-address") + .value_name("SHRED_RECEIVER_ADDRESS") + .takes_value(true) + .help("Validator will forward all shreds to this address in addition to normal turbine operation. Set to empty string to disable.") + .required(true) + ) + ) .subcommand( SubCommand::with_name("wait-for-restart-window") .about("Monitor the validator for a good time to restart") @@ -2138,6 +2354,38 @@ pub fn main() { monitor_validator(&ledger_path); return; } + ("set-block-engine-config", Some(subcommand_matches)) => { + let (auth_service_addr, backend_addr) = + if subcommand_matches.is_present("block_engine_url") { + let block_engine_url = + value_t_or_exit!(subcommand_matches, "block_engine_url", String); + (block_engine_url.clone(), block_engine_url) + } else { + let auth_addr = value_t_or_exit!( + subcommand_matches, + "block_engine_auth_service_address", + String + ); + let backend_addr = + value_t_or_exit!(subcommand_matches, "block_engine_address", String); + (auth_addr, backend_addr) + }; + + let trust_packets = subcommand_matches.is_present("trust_block_engine_packets"); + let admin_client = admin_rpc_service::connect(&ledger_path); + admin_rpc_service::runtime() + .block_on(async move { + admin_client + .await? + .set_block_engine_config(auth_service_addr, backend_addr, trust_packets) + .await + }) + .unwrap_or_else(|err| { + println!("set block engine config failed: {}", err); + exit(1); + }); + return; + } ("set-identity", Some(subcommand_matches)) => { let require_tower = subcommand_matches.is_present("require_tower"); @@ -2201,6 +2449,53 @@ pub fn main() { }); return; } + ("set-relayer-config", Some(subcommand_matches)) => { + let (auth_service_addr, backend_addr) = if subcommand_matches.is_present("relayer_url") + { + let relayer_url = value_t_or_exit!(subcommand_matches, "relayer_url", String); + (relayer_url.clone(), relayer_url) + } else { + ( + value_t_or_exit!(subcommand_matches, "relayer_auth_service_address", String), + value_t_or_exit!(subcommand_matches, "relayer_address", String), + ) + }; + let trust_packets = subcommand_matches.is_present("trust_relayer_packets"); + let expected_heartbeat_interval_ms: u64 = + value_of(subcommand_matches, "relayer_expected_heartbeat_interval_ms").unwrap(); + let max_failed_heartbeats: u64 = + value_of(subcommand_matches, "relayer_max_failed_heartbeats").unwrap(); + let admin_client = admin_rpc_service::connect(&ledger_path); + admin_rpc_service::runtime() + .block_on(async move { + admin_client + .await? 
+ .set_relayer_config( + auth_service_addr, + backend_addr, + trust_packets, + expected_heartbeat_interval_ms, + max_failed_heartbeats, + ) + .await + }) + .unwrap_or_else(|err| { + println!("set relayer config failed: {}", err); + exit(1); + }); + return; + } + ("set-shred-receiver-address", Some(subcommand_matches)) => { + let addr = value_t_or_exit!(subcommand_matches, "shred_receiver_address", String); + let admin_client = admin_rpc_service::connect(&ledger_path); + admin_rpc_service::runtime() + .block_on(async move { admin_client.await?.set_shred_receiver_address(addr).await }) + .unwrap_or_else(|err| { + println!("set shred receiver address failed: {}", err); + exit(1); + }); + return; + } ("wait-for-restart-window", Some(subcommand_matches)) => { let min_idle_time = value_t_or_exit!(subcommand_matches, "min_idle_time", usize); let identity = pubkey_of(subcommand_matches, "identity"); @@ -2592,6 +2887,86 @@ pub fn main() { warn!("`--accounts-db-skip-shrink` is deprecated. please consider removing it from the validator command line argument list"); } + let voting_disabled = matches.is_present("no_voting") || restricted_repair_only_mode; + let tip_manager_config = tip_manager_config_from_matches(&matches, voting_disabled); + + let mut block_engine_config = BlockEngineConfig { + auth_service_addr: "".to_string(), + backend_addr: "".to_string(), + trust_packets: matches.is_present("trust_block_engine_packets"), + }; + if matches.is_present("block_engine_url") { + let url: String = + value_of(&matches, "block_engine_url").expect("couldn't parse block_engine_url"); + block_engine_config.auth_service_addr = url.clone(); + block_engine_config.backend_addr = url; + } else { + match ( + matches.is_present("block_engine_auth_service_address"), + matches.is_present("block_engine_address"), + ) { + (true, true) => { + block_engine_config.auth_service_addr = + value_of(&matches, "block_engine_auth_service_address") + .expect("couldn't parse block_engine_auth_service_address"); + block_engine_config.backend_addr = value_of(&matches, "block_engine_address") + .expect("couldn't parse block_engine_address"); + } + (false, false) => {} + _ => { + eprintln!("Specifying separate auth and backend addresses for the block engine is deprecated.
Use --block-engine-url instead.\ + If using block_engine_auth_service_address and block_engine_address, they must both be provided."); + exit(1); + } + } + } + + // Defaults are set in the CLI definition, so unwrap() is safe here + let expected_heartbeat_interval_ms: u64 = + value_of(&matches, "relayer_expected_heartbeat_interval_ms").unwrap(); + let max_failed_heartbeats: u64 = value_of(&matches, "relayer_max_failed_heartbeats").unwrap(); + assert!( + expected_heartbeat_interval_ms > 0, + "--relayer-expected-heartbeat-interval-ms must be greater than zero" + ); + assert!( + max_failed_heartbeats > 0, + "--relayer-max-failed-heartbeats must be greater than zero" + ); + let mut relayer_config = RelayerConfig { + auth_service_addr: "".to_string(), + backend_addr: "".to_string(), + expected_heartbeat_interval: Duration::from_millis(expected_heartbeat_interval_ms), + oldest_allowed_heartbeat: Duration::from_millis( + max_failed_heartbeats * expected_heartbeat_interval_ms, + ), + trust_packets: matches.is_present("trust_relayer_packets"), + }; + if matches.is_present("relayer_url") { + let url: String = value_of(&matches, "relayer_url").expect("couldn't parse relayer_url"); + relayer_config.auth_service_addr = url.clone(); + relayer_config.backend_addr = url; + } else { + match ( + matches.is_present("relayer_auth_service_address"), + matches.is_present("relayer_address"), + ) { + (true, true) => { + relayer_config.auth_service_addr = + value_of(&matches, "relayer_auth_service_address") + .expect("couldn't parse relayer_auth_service_address"); + relayer_config.backend_addr = + value_of(&matches, "relayer_address").expect("couldn't parse relayer_address"); + } + (false, false) => {} + _ => { + eprintln!("Specifying separate auth and backend addresses for the relayer is deprecated.
Use --relayer-url instead.\ + If using relayer_auth_service_address and relayer_address, they must both be provided."); + exit(1); + } + } + } + let mut validator_config = ValidatorConfig { require_tower: matches.is_present("require_tower"), tower_storage, @@ -2720,6 +3095,16 @@ pub fn main() { log_messages_bytes_limit: value_of(&matches, "log_messages_bytes_limit"), ..RuntimeConfig::default() }, + relayer_config: Arc::new(Mutex::new(relayer_config)), + block_engine_config: Arc::new(Mutex::new(block_engine_config)), + tip_manager_config, + shred_receiver_address: Arc::new(RwLock::new( + matches + .value_of("shred_receiver_address") + .map(|addr| SocketAddr::from_str(addr).expect("shred_receiver_address invalid")), + )), + preallocated_bundle_cost: value_of(&matches, "preallocated_bundle_cost") + .unwrap_or_else(|| DEFAULT_PREALLOCATED_BUNDLE_COST.parse().unwrap()), ..ValidatorConfig::default() }; @@ -3207,3 +3592,47 @@ fn process_account_indexes(matches: &ArgMatches) -> AccountSecondaryIndexes { indexes: account_indexes, } } + +fn tip_manager_config_from_matches( + matches: &ArgMatches, + voting_disabled: bool, +) -> TipManagerConfig { + TipManagerConfig { + tip_payment_program_id: pubkey_of(matches, "tip_payment_program_pubkey").unwrap_or_else( + || { + if !voting_disabled { + panic!("--tip-payment-program-pubkey argument required when validator is voting"); + } + Pubkey::new_unique() + }, + ), + tip_distribution_program_id: pubkey_of(matches, "tip_distribution_program_pubkey") + .unwrap_or_else(|| { + if !voting_disabled { + panic!("--tip-distribution-program-pubkey argument required when validator is voting"); + } + Pubkey::new_unique() + }), + tip_distribution_account_config: TipDistributionAccountConfig { + merkle_root_upload_authority: pubkey_of(matches, "merkle_root_upload_authority") + .unwrap_or_else(|| { + if !voting_disabled { + panic!("--merkle-root-upload-authority argument required when validator is voting"); + } + Pubkey::new_unique() + }), + vote_account: pubkey_of(matches, "vote_account").unwrap_or_else(|| { + if !voting_disabled { + panic!("--vote-account argument required when validator is voting"); + } + Pubkey::new_unique() + }), + commission_bps: value_t!(matches, "commission_bps", u16).unwrap_or_else(|_| { + if !voting_disabled { + panic!("--commission-bps argument required when validator is voting"); + } + 0 + }), + }, + } +}
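Note on the test helpers above: create_config_account_data derives the tip-payment config account as a PDA of the tip-payment program. A minimal sketch of that derivation, assuming the solana-sdk crate is available; the seed literal below is illustrative only, standing in for the program's CONFIG_ACCOUNT_SEED constant:

use solana_sdk::pubkey::Pubkey;

// Illustrative seed, standing in for the tip-payment program's
// CONFIG_ACCOUNT_SEED constant.
const CONFIG_SEED: &[u8] = b"CONFIG_ACCOUNT";

fn derive_config_pda(tip_payment_program_id: &Pubkey) -> (Pubkey, u8) {
    // Returns the PDA plus the bump that the test stores in InitBumps.config.
    Pubkey::find_program_address(&[CONFIG_SEED], tip_payment_program_id)
}

fn main() {
    let program_id = Pubkey::new_unique();
    let (config_pda, bump) = derive_config_pda(&program_id);
    println!("config PDA: {}, bump: {}", config_pda, bump);
}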
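The stake-meta assertions above expect total_tips to equal the TipDistributionAccount's lamports minus its rent-exempt minimum. A std-only sketch of that bookkeeping; the rent figure below is a placeholder, not the real minimum for TipDistributionAccount::SIZE:

// Illustrative only: tips are whatever sits above the rent-exempt floor.
fn expected_total_tips(tda_lamports: u64, rent_exempt_minimum: u64) -> u64 {
    tda_lamports
        .checked_sub(rent_exempt_minimum)
        .expect("TipDistributionAccount must stay rent exempt")
}

fn main() {
    let rent_exempt_minimum = 2_000_000; // placeholder value
    let funded = 1_000_000 * 10; // same funding figure as tip_distro_0_tips
    assert_eq!(
        expected_total_tips(funded, rent_exempt_minimum),
        funded - rent_exempt_minimum
    );
}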
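The relayer heartbeat window configured above is derived as max_failed_heartbeats * expected_heartbeat_interval_ms. A std-only sketch of that arithmetic; the function name and main() driver are illustrative, using the patch defaults of 500 ms and 3 missed heartbeats:

use std::time::Duration;

// Illustrative helper: derives the relayer heartbeat window the same way
// main() above does.
fn relayer_heartbeat_window(
    expected_heartbeat_interval_ms: u64,
    max_failed_heartbeats: u64,
) -> (Duration, Duration) {
    assert!(expected_heartbeat_interval_ms > 0);
    assert!(max_failed_heartbeats > 0);
    let expected_heartbeat_interval = Duration::from_millis(expected_heartbeat_interval_ms);
    // After this long without a heartbeat, the validator falls back to the
    // normal TPU pipeline.
    let oldest_allowed_heartbeat =
        Duration::from_millis(max_failed_heartbeats * expected_heartbeat_interval_ms);
    (expected_heartbeat_interval, oldest_allowed_heartbeat)
}

fn main() {
    let (interval, window) = relayer_heartbeat_window(500, 3);
    assert_eq!(interval, Duration::from_millis(500));
    assert_eq!(window, Duration::from_millis(1_500));
}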
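Finally, set_shred_receiver_address treats an empty string as disabling shred forwarding; anything else must parse as a SocketAddr. The same convention, detached from the admin RPC plumbing; parse_shred_receiver is a hypothetical name:

use std::net::SocketAddr;
use std::str::FromStr;

// Hypothetical helper mirroring the empty-string-disables convention above.
fn parse_shred_receiver(addr: &str) -> Result<Option<SocketAddr>, String> {
    if addr.is_empty() {
        // Empty string disables forwarding.
        return Ok(None);
    }
    SocketAddr::from_str(addr)
        .map(Some)
        .map_err(|_| format!("invalid shred receiver address: {}", addr))
}

fn main() {
    assert_eq!(parse_shred_receiver(""), Ok(None));
    assert!(parse_shred_receiver("127.0.0.1:8001").unwrap().is_some());
    assert!(parse_shred_receiver("not-an-address").is_err());
}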