From b6c86ffe0edb7de4593a348b017e36fd65d10a34 Mon Sep 17 00:00:00 2001
From: buffalu <85544055+buffalu@users.noreply.github.com>
Date: Tue, 23 Aug 2022 16:47:08 -0500
Subject: [PATCH] master merge (#113)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* chore: increase timeout limit for coverage test
* chore: install NDK r21 in runtime (#26916)
* Revert "chore: increase timeout limit for coverage test" (#26917)
  This reverts commit b6ae6c1fe17e4b64c5051c651ca2585e4f55468c.
* core: fix double-readlock in replay_stage (#26052)
* Revert "Enable QUIC client by default. Add arg to disable QUIC client… (#26913)
  Revert "Enable QUIC client by default. Add arg to disable QUIC client. (#26879)"
  This reverts commit 4c297500959368342a94e953f59e28de60195dd5.
* Make `solana-ledger-tool` run AccountsBackgroundService (#26914)
  Prior to this change, long-running commands like `solana-ledger-tool verify` would OOM due to AccountsDb cleanup not happening.
  Co-authored-by: Michael Vines
* spl: Bump token to 3.5.0 and ata to 1.1.0 (#26921)
* Remove v1.9 backport actions, add v1.11 backport actions
* restore BackportAssignee definition
* Remove runtime dependency from solana-transaction-status (#26930)
  * Move RewardType out of runtime
  * Move collect_token_balances to solana-ledger
  * Remove solana-runtime dependency
* Bench tps: refactor client creation (#26862)
  * relax Sized restriction for functions using client
  * extract function to build client
* protect access to rent paying account vec (#26919)
* fix version of spl-token to prevent conflict (#26947)
* Remove bank test_max_accounts_data_size_exceeded() (#26772)
* Remove accounts data size checks in blockstore_processor (#26776)
* Add minor version clean up tasks to release doc (#26954)
* add program account to bpf loader close instruction parser (#26926)
* Io stats v2 (#26898)
  * Use sysfs instead of procfs for disk stats
  * Filter map to filter dmcrypt and mdraid volumes
  * Unit test cover different kernel formats
* Refactor QUIC new connection handler function (#26855)
  * cleanup setup_connection
  * more cleanup
* Support jsonParsed address lookup table accounts (#26723)
  Parse address lookup table accounts
* Fix sol_get_processed_sibling_instruction on 32-bit hosts (#26522)
* Refactor: Add `RuntimeConfig` field to Bank (#26946)
  * Refactor: Simplify arguments for bank constructor methods
  * Refactor: Add RuntimeConfig to Bank fields
  * Arc wrap runtime_config
  * Arc wrap all runtime config usages
  * Remove Copy trait derivation from RuntimeConfig
  * Remove some arc wrapping
* chore: bump indicatif from 0.16.2 to 0.17.0 (#26890)
  Bumps [indicatif](https://github.com/console-rs/indicatif) from 0.16.2 to 0.17.0.
  - [Release notes](https://github.com/console-rs/indicatif/releases)
  - [Commits](https://github.com/console-rs/indicatif/compare/0.16.2...0.17.0)
  Signed-off-by: dependabot[bot]
  * [auto-commit] Update all Cargo lock files
  * Accommodate api changes
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
  Co-authored-by: dependabot-buildkite
  Co-authored-by: Tyera Eulberg
* Unpin tokio for non-rpc crates (#26957)
* fix: fixed the incorrect/unknown redirects
* bpf-loader: make syscalls pub (#26918)
* transaction-status, storage-proto: add compute_units_consumed (#26528)
  * fix bpf test
  Co-authored-by: Justin Starry
* Keypair: implement clone() (#26248)
  This was not implemented upstream in ed25519-dalek to force everyone to think twice before creating another copy of a potentially sensitive private key in memory. See https://github.com/dalek-cryptography/ed25519-dalek/issues/76
  However, there are now 9 instances of Keypair::from_bytes(&keypair.to_bytes()) in the solana codebase and it would be preferable to have a function. In particular, this also comes up when writing programs and can cause users to either start messing with lifetimes or discover the from_bytes() workaround themselves.
  This patch opts to not implement the Clone trait. This avoids automatic use in order to preserve some of the original "let developers think twice about this" intention.
  * Use Keypair::clone
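  A minimal sketch of the helper described in the entry above, assuming the byte round trip named in the commit message (the exact method name and placement in the patch may differ):

      use solana_sdk::signer::keypair::Keypair;

      // The patch deliberately adds an inherent clone() rather than deriving
      // the Clone trait, so copies of private key material stay explicit.
      // Internally it can only be the round trip already used across the
      // codebase:
      fn clone_keypair(keypair: &Keypair) -> Keypair {
          // The 64 bytes come from a valid keypair, so from_bytes cannot fail.
          Keypair::from_bytes(&keypair.to_bytes()).unwrap()
      }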
* Bump version to v1.12 (#26967)
* Bump sbf-tools version to v1.29
* Switch to cargo-build-sbf for building tests in programs/bpf
* added arg --rpc-max-request-payload-size to validator (#26377)
  * added ability to pass --rpc-max-request-payload-size to validator
  * fixed lint errors
  Co-authored-by: ultd
  Co-authored-by: Justin Starry
* A more convenient store-tool (#26796)
  * Use new_from_file_unchecked - don't sanitize input length for appendvec file
  * Exit early on completely zeroed accounts
* Fix programs/bpf bpf_c and bpf_rust features to work independently
  `cargo test --features="bpf_c"` fails to compile without this fix.
* Do not check accounts data size in InvokeContext (#26773)
* Fix windows release builds (#26986)
  * Don't try to build protobuf-src on windows
  * Set protoc envar
* chore: bump const_format from 0.2.25 to 0.2.26 (#26985)
  Bumps [const_format](https://github.com/rodrimati1992/const_format_crates) from 0.2.25 to 0.2.26.
  - [Release notes](https://github.com/rodrimati1992/const_format_crates/releases)
  - [Changelog](https://github.com/rodrimati1992/const_format_crates/blob/master/Changelog.md)
  - [Commits](https://github.com/rodrimati1992/const_format_crates/commits/0.2.26)
  Signed-off-by: dependabot[bot]
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* Delete files older than the lowest_cleanup_slot in LedgerCleanupService::cleanup_ledger (#26651)
  #### Problem
  LedgerCleanupService requires compactions to propagate & digest range-delete tombstones to eventually reclaim disk space.
  #### Summary of Changes
  This PR makes LedgerCleanupService::cleanup_ledger delete any file whose slot range is older than the lowest_cleanup_slot, allowing us to reclaim disk space more often with fewer IOps. Experimental results on mainnet validators show that the PR can effectively reduce ledger disk size by 33% to 40%.
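  A sketch of the idea behind the entry above, not the actual Blockstore code: delete whole SST files whose key range falls entirely below lowest_cleanup_slot instead of waiting for compaction. The column name and key encoding here are illustrative assumptions.

      use rocksdb::DB;

      fn delete_files_below(db: &DB, cf_name: &str, lowest_cleanup_slot: u64) {
          if let Some(cf) = db.cf_handle(cf_name) {
              let from = 0u64.to_be_bytes();
              let to = lowest_cleanup_slot.to_be_bytes(); // exclusive upper bound
              // Drops whole files contained in [from, to); obsolete data that
              // shares a file with newer slots is still left to compaction.
              db.delete_file_in_range_cf(cf, from, to)
                  .expect("delete_file_in_range_cf failed");
          }
      }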
* Bail out of execute_batches() early for empty batches slice (#26932)
  The caller of execute_batches() that assembles batches may call this function with empty batches; we know we can bail early in this scenario.
* chore: bump rustversion from 1.0.7 to 1.0.9 (#26984)
  Bumps [rustversion](https://github.com/dtolnay/rustversion) from 1.0.7 to 1.0.9.
  - [Release notes](https://github.com/dtolnay/rustversion/releases)
  - [Commits](https://github.com/dtolnay/rustversion/compare/1.0.7...1.0.9)
  Signed-off-by: dependabot[bot]
  * [auto-commit] Update all Cargo lock files
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
  Co-authored-by: dependabot-buildkite
* chore: bump pickledb from 0.4.1 to 0.5.1 (#25511)
  Bumps [pickledb](https://github.com/seladb/pickledb-rs) from 0.4.1 to 0.5.1.
  - [Release notes](https://github.com/seladb/pickledb-rs/releases)
  - [Commits](https://github.com/seladb/pickledb-rs/commits/0.5.1)
  Signed-off-by: dependabot[bot]
  * Add yaml feature declaration
  * Turn off default-features
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
  Co-authored-by: Tyera Eulberg
* docs: updated absolute routes to local routes
* Add API docs for secp256k1_instruction and secp256k1_recover (#26065)
  * Remove unused variable from secp256k1 program test
  * Bump solana_bpf_rust_secp256k1_recover ix count
  Co-authored-by: Tyera Eulberg
* Refactor cargo-build-sbf integration tests using assert_cmd crate
* chore: bump async-trait from 0.1.56 to 0.1.57 (#27003)
  Bumps [async-trait](https://github.com/dtolnay/async-trait) from 0.1.56 to 0.1.57.
  - [Release notes](https://github.com/dtolnay/async-trait/releases)
  - [Commits](https://github.com/dtolnay/async-trait/compare/0.1.56...0.1.57)
  Signed-off-by: dependabot[bot]
  * [auto-commit] Update all Cargo lock files
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
  Co-authored-by: dependabot-buildkite
* chore: bump sha3 from 0.10.1 to 0.10.2 (#27002)
  Bumps [sha3](https://github.com/RustCrypto/hashes) from 0.10.1 to 0.10.2.
  - [Release notes](https://github.com/RustCrypto/hashes/releases)
  - [Commits](https://github.com/RustCrypto/hashes/compare/sha3-v0.10.1...sha3-v0.10.2)
  Signed-off-by: dependabot[bot]
  * [auto-commit] Update all Cargo lock files
  * Revert sha3 bump in zk-token-sdk
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
  Co-authored-by: dependabot-buildkite
  Co-authored-by: Tyera Eulberg
* chore: bump bytecount from 0.6.2 to 0.6.3 (#27014)
  Bumps [bytecount](https://github.com/llogiq/bytecount) from 0.6.2 to 0.6.3.
  - [Release notes](https://github.com/llogiq/bytecount/releases)
  - [Commits](https://github.com/llogiq/bytecount/commits)
  Signed-off-by: dependabot[bot]
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* chore: bump prost-types from 0.11.0 to 0.11.1 (#27013)
  Bumps [prost-types](https://github.com/tokio-rs/prost) from 0.11.0 to 0.11.1.
  - [Release notes](https://github.com/tokio-rs/prost/releases)
  - [Commits](https://github.com/tokio-rs/prost/compare/v0.11.0...prost-types-0.11.1)
  Signed-off-by: dependabot[bot]
  * [auto-commit] Update all Cargo lock files
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
  Co-authored-by: dependabot-buildkite
* Revert "Remove resolver=2 from Cargo.toml and add it to the Windows build" (#27011)
  Revert "Remove resolver=2 from Cargo.toml and add it to the Windows build (#26706)"
  This reverts commit 2f6f5b11dae9389c92125b24b03fe07157aca8ed.
* Add `Signers` impls for `Arc` (#27000)
  * Reformat
* client: Use async TPU client in sync TPU client by sharing tokio runtime (#26996)
  * Make the sync tpu client use the async tpu client
  * Make rpc_client::get_nonblocking_client public only in the crate
  * Temporary hack to test sharing runtime between tpu_client and rpc_client
  * Refactor access to RPC client fields
  * Change `clone_inner_client` to `get_inner_client`
  Co-authored-by: Ryan Leung
* Patch crossbeam-epoch to avoid overhead (#26555)
* tracer-packet-stats reporting should not reset id (#27012)
* adds number of coding shreds to broadcast metrics (#27006)
* ledger-tool: support Geyser accounts updates (#26909)
* Set receive_window per quic connection (#26936)
  This change sets the receive_window for non-staked nodes to 1 * PACKET_DATA_SIZE, and maps staked nodes' connection receive_window between 1.2 * PACKET_DATA_SIZE and 10 * PACKET_DATA_SIZE based on their stake. The change is based on a Quinn library change that supports per-connection receive_window tweaks on the server side. quinn-rs/quinn#1393
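  A hedged sketch of the mapping just described (the constant and the exact interpolation used in the patch may differ; this only mirrors the stated bounds):

      const PACKET_DATA_SIZE: u64 = 1232; // solana_sdk::packet::PACKET_DATA_SIZE

      fn receive_window(stake: u64, total_stake: u64) -> u64 {
          if stake == 0 || total_stake == 0 {
              return PACKET_DATA_SIZE; // non-staked: 1 * PACKET_DATA_SIZE
          }
          // Scale between 1.2x and 10x with the node's share of total stake.
          let share = stake as f64 / total_stake as f64;
          let multiplier = 1.2 + (10.0 - 1.2) * share;
          (multiplier * PACKET_DATA_SIZE as f64) as u64
      }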
* chore: bump semver from 1.0.10 to 1.0.13 (#27016)
  Bumps [semver](https://github.com/dtolnay/semver) from 1.0.10 to 1.0.13.
  - [Release notes](https://github.com/dtolnay/semver/releases)
  - [Commits](https://github.com/dtolnay/semver/compare/1.0.10...1.0.13)
  Signed-off-by: dependabot[bot]
  * [auto-commit] Update all Cargo lock files
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
  Co-authored-by: dependabot-buildkite
* Fix typo in test function (#27031)
* Correct StakeInstruction::DeactivateDelinquent instruction type
* Fix VoteInstruction order (#27035)
* Feature: disable CPI setting `is_executable` and `rent_epoch` (#26987)
  * Adds the feature disable_cpi_setting_executable_and_rent_epoch.
  * Adds the feature gate for disable_cpi_setting_executable_and_rent_epoch.
  * Removes TEST_EXECUTABLE_LAMPORTS.
  * Test that is_executable and rent_epoch are ignored.
* Increase timeout to reduce the flakiness of the rpc signature receiving test (#27008)
  * Minor fmt fix
* Different staked vs unstaked chunks_received (#27033)
  * Suppress a clippy warning
* Implement nonblocking version of BlockhashQuery (#27040)
* Move vote program state and instructions to solana-program
* ancestor hashes socket ping/pong support (#26866)
* chore: bump serde from 1.0.138 to 1.0.143 (#27015)
  Bumps [serde](https://github.com/serde-rs/serde) from 1.0.138 to 1.0.143.
  - [Release notes](https://github.com/serde-rs/serde/releases)
  - [Commits](https://github.com/serde-rs/serde/compare/v1.0.138...v1.0.143)
  Signed-off-by: dependabot[bot]
  * [auto-commit] Update all Cargo lock files
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
  Co-authored-by: dependabot-buildkite
* Separate file for ImmutableDeserializedPacket type (#26951)
* Explorer: Refactor parsed transaction handling (#27050)
* Fix windows build after crossbeam-epoch patch (#27052)
* Fix quic client on TestValidator, alternative (#27046)
  Add new method to enable custom offset
* chore: bump serde_json from 1.0.81 to 1.0.83 (#27036)
  Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.81 to 1.0.83.
  - [Release notes](https://github.com/serde-rs/json/releases)
  - [Commits](https://github.com/serde-rs/json/compare/v1.0.81...v1.0.83)
  Signed-off-by: dependabot[bot]
  * [auto-commit] Update all Cargo lock files
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
  Co-authored-by: dependabot-buildkite
* docs: fix typo in return-data.md (#27056)
  langauge -> language
* `solana-validator monitor` now displays slot and gossip stake % while waiting for supermajority
* chore: bump dialoguer from 0.10.1 to 0.10.2 (#27054)
  Bumps [dialoguer](https://github.com/mitsuhiko/dialoguer) from 0.10.1 to 0.10.2.
  - [Release notes](https://github.com/mitsuhiko/dialoguer/releases)
  - [Changelog](https://github.com/mitsuhiko/dialoguer/blob/master/CHANGELOG.md)
  - [Commits](https://github.com/mitsuhiko/dialoguer/compare/v0.10.1...v0.10.2)
  Signed-off-by: dependabot[bot]
  * [auto-commit] Update all Cargo lock files
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
  Co-authored-by: dependabot-buildkite
* chore: bump once_cell from 1.12.0 to 1.13.0 (#27049)
  Bumps [once_cell](https://github.com/matklad/once_cell) from 1.12.0 to 1.13.0.
  - [Release notes](https://github.com/matklad/once_cell/releases)
  - [Changelog](https://github.com/matklad/once_cell/blob/master/CHANGELOG.md)
  - [Commits](https://github.com/matklad/once_cell/compare/v1.12.0...v1.13.0)
  Signed-off-by: dependabot[bot]
  * [auto-commit] Update all Cargo lock files
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
  Co-authored-by: dependabot-buildkite
* Fixed a cargo warning on specifying quinn dependencies (#27057)
* feat: handle `loadedAddresses` field in tx meta responses (#27065)
* chore: bump serde_bytes from 0.11.6 to 0.11.7 (#27062)
  Bumps [serde_bytes](https://github.com/serde-rs/bytes) from 0.11.6 to 0.11.7.
  - [Release notes](https://github.com/serde-rs/bytes/releases)
  - [Commits](https://github.com/serde-rs/bytes/compare/0.11.6...0.11.7)
  Signed-off-by: dependabot[bot]
  * [auto-commit] Update all Cargo lock files
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
  Co-authored-by: dependabot-buildkite
* chore: annotate more types as deprecated (#27067)
* chore: bump libc from 0.2.126 to 0.2.129 (#27063)
  Bumps [libc](https://github.com/rust-lang/libc) from 0.2.126 to 0.2.129.
  - [Release notes](https://github.com/rust-lang/libc/releases)
  - [Commits](https://github.com/rust-lang/libc/compare/0.2.126...0.2.129)
  Signed-off-by: dependabot[bot]
  * [auto-commit] Update all Cargo lock files
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
  Co-authored-by: dependabot-buildkite
* Fix duplicate / incorrect docs in solana_sdk by removing the solana_program::* import (#26588)
  * Make solana_sdk imports from solana_program explicit.
  * Adjust imports
* chore: bump goauth from 0.13.0 to 0.13.1 (#27066)
  Bumps [goauth](https://github.com/durch/rust-goauth) from 0.13.0 to 0.13.1.
  - [Release notes](https://github.com/durch/rust-goauth/releases)
  - [Commits](https://github.com/durch/rust-goauth/commits)
  Signed-off-by: dependabot[bot]
  * [auto-commit] Update all Cargo lock files
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
  Co-authored-by: dependabot-buildkite
* Fix rust flags handling in cargo-build-sbf
* chore: bump tokio-tungstenite from 0.17.1 to 0.17.2 (#27069)
  Bumps [tokio-tungstenite](https://github.com/snapview/tokio-tungstenite) from 0.17.1 to 0.17.2.
  - [Release notes](https://github.com/snapview/tokio-tungstenite/releases)
  - [Changelog](https://github.com/snapview/tokio-tungstenite/blob/master/CHANGELOG.md)
  - [Commits](https://github.com/snapview/tokio-tungstenite/compare/v0.17.1...v0.17.2)
  Signed-off-by: dependabot[bot]
  * [auto-commit] Update all Cargo lock files
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
  Co-authored-by: dependabot-buildkite
* Fix local cluster tests for QUIC usage (#27071)
* removes buffering when generating coding shreds in broadcast (#25807)
  Given the 32:32 erasure recovery schema, the current implementation requires exactly 32 data shreds to generate coding shreds for the batch (except for the final erasure batch in each slot). As a result, when serializing ledger entries to data shreds, if the number of data shreds is not a multiple of 32, the coding shreds for the last batch cannot be generated until there are more data shreds to complete the batch to 32 data shreds. This adds latency in generating and broadcasting coding shreds. In addition, with Merkle variants for shreds, data shreds cannot be signed and broadcast until coding shreds are also generated. As a result *both* code and data shreds will be delayed before broadcast if we still require exactly 32 data shreds for each batch.
  This commit instead always generates and broadcasts coding shreds as soon as any number of data shreds is available. When serializing entries to shreds:
  * if the number of resulting data shreds is less than 32, then more coding shreds will be generated so that the resulting erasure batch has the same recovery probabilities as a 32:32 batch.
  * if the number of data shreds is more than 32, then the data shreds are split uniformly into erasure batches with _at least_ 32 data shreds in each batch. Each erasure batch will have the same number of code and data shreds.
  For example:
  * If there are 19 data shreds, 27 coding shreds are generated. The resulting 19(data):27(code) erasure batch has the same recovery probabilities as a 32:32 batch.
  * If there are 107 data shreds, they are split into 3 batches of 36:36, 36:36 and 35:35 data:code shreds each.
  A consequence of this change is that code and data shred indices will no longer align, as there will be more coding shreds than data shreds (not only in the last batch in each slot but also in the intermediate ones). A batching sketch follows below.
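  A worked sketch of the batching rule above (illustrative only; the real logic lives in the shred code and derives small-batch coding counts from recovery-probability tables):

      const DATA_SHREDS_PER_FEC_BLOCK: usize = 32;

      // Split data shreds into erasure batches of at least 32 each,
      // as uniformly as possible.
      fn data_shreds_per_batch(num_data_shreds: usize) -> Vec<usize> {
          if num_data_shreds <= DATA_SHREDS_PER_FEC_BLOCK {
              // A single small batch; extra coding shreds are generated so it
              // recovers as well as a 32:32 batch (e.g. 19 data -> 27 code).
              return vec![num_data_shreds];
          }
          let num_batches = num_data_shreds / DATA_SHREDS_PER_FEC_BLOCK;
          let base = num_data_shreds / num_batches;
          let rem = num_data_shreds % num_batches;
          // The first `rem` batches take one extra data shred,
          // e.g. 107 -> [36, 36, 35], each paired 1:1 with coding shreds.
          (0..num_batches)
              .map(|i| base + usize::from(i < rem))
              .collect()
      }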
* feat: support minContextSlot in getParsedAccountInfo method (#27084)
* Bump rust-rocksdb to 0.19.0 tag (#26949)
* chore: bump chrono from 0.4.19 to 0.4.21 (#27076)
  Bumps [chrono](https://github.com/chronotope/chrono) from 0.4.19 to 0.4.21.
  - [Release notes](https://github.com/chronotope/chrono/releases)
  - [Changelog](https://github.com/chronotope/chrono/blob/main/CHANGELOG.md)
  - [Commits](https://github.com/chronotope/chrono/compare/v0.4.19...v0.4.21)
  Signed-off-by: dependabot[bot]
  * [auto-commit] Update all Cargo lock files
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
  Co-authored-by: dependabot-buildkite
* chore: bump crossbeam-channel from 0.5.5 to 0.5.6 (#27072)
  Bumps [crossbeam-channel](https://github.com/crossbeam-rs/crossbeam) from 0.5.5 to 0.5.6.
  - [Release notes](https://github.com/crossbeam-rs/crossbeam/releases)
  - [Changelog](https://github.com/crossbeam-rs/crossbeam/blob/master/CHANGELOG.md)
  - [Commits](https://github.com/crossbeam-rs/crossbeam/compare/crossbeam-channel-0.5.5...crossbeam-channel-0.5.6)
  Signed-off-by: dependabot[bot]
  * [auto-commit] Update all Cargo lock files
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
  Co-authored-by: dependabot-buildkite
* Connect to RPC nodes in parallel w/ reduced timeout (#26892)
* allow staked nodes weight override (#26870)
  Allowed staked nodes weight override (#26407), passing only a HashMap over to the core module
  Co-authored-by: Ondra Chaloupka
* add subcommand to set randomized compute-unit-price to transactions. (#26891)
  * add compute-unit-limit to limit additional cost from prioritization.
  * increase funding if use_randomized_compute_unit_price is enabled.
* Handle JsonRpcService startup failure (#27075)
* chore: bump predicates from 2.0.3 to 2.1.1 (#27087)
  Bumps [predicates](https://github.com/assert-rs/predicates-rs) from 2.0.3 to 2.1.1.
  - [Release notes](https://github.com/assert-rs/predicates-rs/releases)
  - [Changelog](https://github.com/assert-rs/predicates-rs/blob/master/CHANGELOG.md)
  - [Commits](https://github.com/assert-rs/predicates-rs/compare/v2.0.3...v2.1.1)
  Signed-off-by: dependabot[bot]
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* Fix local-cluster for QUIC more (#27096)
* chore: bump js-sys from 0.3.58 to 0.3.59 (#27088)
  Bumps [js-sys](https://github.com/rustwasm/wasm-bindgen) from 0.3.58 to 0.3.59.
  - [Release notes](https://github.com/rustwasm/wasm-bindgen/releases)
  - [Changelog](https://github.com/rustwasm/wasm-bindgen/blob/main/CHANGELOG.md)
  - [Commits](https://github.com/rustwasm/wasm-bindgen/commits)
  Signed-off-by: dependabot[bot]
  * [auto-commit] Update all Cargo lock files
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
  Co-authored-by: dependabot-buildkite
* Allow overriding the runtime transaction account lock limit (#26948)
  * Add --transaction-account-lock-limit cli arg to test-validator
  * Allow overriding the tx account lock limit in ProgramTest
* Explorer: Display address lookup table instruction type (#27106)
* chore: restructure transaction files
* chore: restructure message files
* chore: restructure program files
* chore: restructure utils code
* Fix test_accounts_data_size_and_resize_transactions (#27105)
* adjusts max coding shreds per slot (#27083)
  As a consequence of removing buffering when generating coding shreds (https://github.com/solana-labs/solana/pull/25807), more coding shreds are generated than data shreds, so MAX_CODE_SHREDS_PER_SLOT needs to be adjusted accordingly. The respective value is tied to ERASURE_BATCH_SIZE.
* Reduce quic multi write test packets count (#27074)
  Reduce counts of packets to reduce time taken to send all of them; reduce count to 3000.
* Sleep between vote refreshes (#27115)
  Sleep between vote refreshes in unit test
* cli: Require `--bypass-warning` flag to close program accounts (#27108)
  * cli: Display warning when closing program accounts
  * Fix parsing tests
* clean feature: `default_units_per_instruction` (#27101)
* Bench tps add nonce flag (#27030)
  * add durable nonce option
  * make blockhash thread optional
  * add nonce test to bench-tps
* clean feature: `request_units_deprecated` (#27102)
* don't log when there is no work to do for combining ancient slots (#26925)
* feat: add getAddressLookupTable method to Connection (#27127)
* Add fallback for ledger-tool commands to create new column families (#26565)
  RocksDB settings include an option to create_if_missing, which will create missing columns or the entire rocksdb directory when starting from scratch. However, create_if_missing only works if the session has Primary (read+write) access. Many ledger-tool commands only need Secondary (read-only) access to the database, so these commands are unable to open the Blockstore when a column must be added. This change detects when a Secondary-access open fails due to missing column(s) or files, opens the database temporarily with Primary access, and then reattempts to open the database with Secondary access.
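  A hedged sketch of that fallback (the real code lives in blockstore_db.rs and handles more cases; the paths and column names here are illustrative):

      use rocksdb::{DB, Options};

      fn open_readonly_with_fallback(
          primary: &str,
          secondary: &str,
          cf_names: &[&str],
      ) -> Result<DB, rocksdb::Error> {
          let mut opts = Options::default();
          // First attempt: Secondary (read-only) access.
          match DB::open_cf_as_secondary(&opts, primary, secondary, cf_names) {
              Ok(db) => Ok(db),
              Err(_missing_cf_or_file) => {
                  // Likely a missing column family or file: open with Primary
                  // access once so the new columns get created...
                  opts.create_if_missing(true);
                  opts.create_missing_column_families(true);
                  drop(DB::open_cf(&opts, primary, cf_names)?);
                  // ...then retry Secondary access.
                  DB::open_cf_as_secondary(&opts, primary, secondary, cf_names)
              }
          }
      }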
* explorer: Bump @solana/web3.js to v1.53.0 (#27128)
* chore:(deps): bump react-dom and @types/react-dom in /explorer (#27129)
  Bumps [react-dom](https://github.com/facebook/react/tree/HEAD/packages/react-dom) and [@types/react-dom](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/react-dom). These dependencies needed to be updated together.
  Updates `react-dom` from 18.1.0 to 18.2.0
  - [Release notes](https://github.com/facebook/react/releases)
  - [Changelog](https://github.com/facebook/react/blob/main/CHANGELOG.md)
  - [Commits](https://github.com/facebook/react/commits/v18.2.0/packages/react-dom)
  Updates `@types/react-dom` from 18.0.3 to 18.0.6
  - [Release notes](https://github.com/DefinitelyTyped/DefinitelyTyped/releases)
  - [Commits](https://github.com/DefinitelyTyped/DefinitelyTyped/commits/HEAD/types/react-dom)
  Signed-off-by: dependabot[bot]
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* chore:(deps): bump prettier from 2.5.1 to 2.7.1 in /explorer (#27130)
  Bumps [prettier](https://github.com/prettier/prettier) from 2.5.1 to 2.7.1.
  - [Release notes](https://github.com/prettier/prettier/releases)
  - [Changelog](https://github.com/prettier/prettier/blob/main/CHANGELOG.md)
  - [Commits](https://github.com/prettier/prettier/compare/2.5.1...2.7.1)
  Signed-off-by: dependabot[bot]
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* Explorer: Add details page for address lookup table accounts (#27133)
* chore:(deps): bump @testing-library/jest-dom from 5.16.1 to 5.16.5 in /explorer (#27131)
  Bumps [@testing-library/jest-dom](https://github.com/testing-library/jest-dom) from 5.16.1 to 5.16.5.
  - [Release notes](https://github.com/testing-library/jest-dom/releases)
  - [Changelog](https://github.com/testing-library/jest-dom/blob/main/CHANGELOG.md)
  - [Commits](https://github.com/testing-library/jest-dom/compare/v5.16.1...v5.16.5)
  Signed-off-by: dependabot[bot]
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* chore: add constant for pubkey byte length (#27134)
* chore: bump @babel/preset-env from 7.18.0 to 7.18.10 in /web3.js (#27138)
  Bumps [@babel/preset-env](https://github.com/babel/babel/tree/HEAD/packages/babel-preset-env) from 7.18.0 to 7.18.10.
  - [Release notes](https://github.com/babel/babel/releases)
  - [Changelog](https://github.com/babel/babel/blob/main/CHANGELOG.md)
  - [Commits](https://github.com/babel/babel/commits/v7.18.10/packages/babel-preset-env)
  Signed-off-by: dependabot[bot]
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* test-validator: improve multi-value arg help output (#26650)
* Bumps solana_rbpf to v0.2.32 (#27059)
* wait for bg hash calc to complete before 'calculate_capitalization' (#27145)
* Add more sysvar API docs (#26849)
  * Add more sysvar API docs
  * Remove println from examples
  * Apply review suggestions to sdk/program/src/clock.rs
  * Update sdk/program/src/epoch_schedule.rs
  * Update sdk/program/src/sysvar/slot_hashes.rs
  * Update sdk/program/src/sysvar/slot_history.rs
  * Update sdk/program/src/sysvar/mod.rs
  * Fix docs for ACCOUNT_STORAGE_OVERHEAD
  * Fix docs for DEFAULT_LEADER_SCHEDULE_SLOT_OFFSET
  * Fix recent_blockhash short description
  * Fix whitespace
  Co-authored-by: Tyera Eulberg
* Remove the deprecated `KeyedAccount` interface (#27147)
  * Removes the deprecated KeyedAccount interface.
  * Removes outdated example code.
* adds Shred{Code,Data}::SIZE_OF_HEADERS trait constants (#27144)
* Update quinn lib to 0.8.4 (#27119)
* Add stats for readonly cache evicts (#26938)
  * add stats for readonly cache evicts
  * bump up account cache to 400M
  * aggregate num_evicts in the loop
* chore: bump serial_test from 0.8.0 to 0.9.0 (#27097)
  Bumps [serial_test](https://github.com/palfrey/serial_test) from 0.8.0 to 0.9.0.
  - [Release notes](https://github.com/palfrey/serial_test/releases)
  - [Commits](https://github.com/palfrey/serial_test/compare/v0.8.0...v0.9.0)
  Signed-off-by: dependabot[bot]
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* [docs] Docs sidebar/navbar restructure (#27005)
  * fix: minor title changes
  * fix: updated structure of 2 pages
  * fix: proposals page - added a proposals page and made it clear how to submit one
  * fix: minor update to docs home page
  * fix: footer links - added more footer links and minor restructure
  * fix: sidebar restructure
  * fix: removed duplicate geyser link
  * fix: sidebars and navbar - final separation of sidebars and updated navbar
  * fix: formatting for the navbar icons
  * fix: changed some sidebar links to refs
  * style: changed order of footer's SPL link
  * style: removed comment and fixed whitespace check
* chore: upload test results to buildkite and datadog (#27139)
  * export test-stable result
  * export test-stable-perf result
  * export test-local-cluster result
  * export test-local-cluster-flakey result
  * export test-local-cluster-slow-1 result
  * export test-local-cluster-slow-2 result
  * export test-docs result
  * export test-stable-bpf result
  * upload test result to buildkite and datadog
* Remove `fn slot_deltas()` from StatusCache (#26931)
* renames size_of_erasure_encoded_slice to ShredCode::capacity (#27157)
  Maintain symmetry between code and data shreds:
  * ShredData::capacity -> data buffer capacity
  * ShredCode::capacity -> erasure code capacity
* store hash calc failures in a separate folder by slot (#27146)
  * Update runtime/src/accounts_db.rs
  Co-authored-by: Brooks Prumo
* remove abort() from test-validator (#27124)
* chore: bump bytes from 1.1.0 to 1.2.1 (#27172)
  Bumps [bytes](https://github.com/tokio-rs/bytes) from 1.1.0 to 1.2.1.
  - [Release notes](https://github.com/tokio-rs/bytes/releases)
  - [Changelog](https://github.com/tokio-rs/bytes/blob/master/CHANGELOG.md)
  - [Commits](https://github.com/tokio-rs/bytes/compare/v1.1.0...v1.2.1)
  Signed-off-by: dependabot[bot]
  * [auto-commit] Update all Cargo lock files
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
  Co-authored-by: dependabot-buildkite
* Share Ancestors API get with contains_key (#27161)
  consolidate similar fns
* Rename to `MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA` (#27175)
* chore: bump libc from 0.2.129 to 0.2.131 (#27162)
  Bumps [libc](https://github.com/rust-lang/libc) from 0.2.129 to 0.2.131.
  - [Release notes](https://github.com/rust-lang/libc/releases)
  - [Commits](https://github.com/rust-lang/libc/compare/0.2.129...0.2.131)
  Signed-off-by: dependabot[bot]
  * [auto-commit] Update all Cargo lock files
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
  Co-authored-by: dependabot-buildkite
* reverts wide fanout in broadcast when the root node is down (#26359)
  A change included in https://github.com/solana-labs/solana/pull/20480 was that when the root node in the turbine broadcast tree is down, the leader will broadcast the shred to all nodes in the first layer. The intention was to mitigate the impact of dead nodes on shred propagation, because if the root node is down, the entire cluster will miss out on the shred. On the other hand, if x% of stake is down, this will cause a 200*x% + 1 packets/shreds ratio at the broadcast stage, which might contribute to line-rate saturation and packet drop. To avoid this bandwidth saturation issue, this commit reverts that logic and always broadcasts shreds from the leader only to the root node. As before, we rely on erasure codes to recover shreds lost due to staked nodes being offline.
* add getTokenLargestAccounts rpc method to rust client (#26840)
  * add get token largest accounts rpc call to client
  * split to include with commitment
* Bump spl-token-2022 (#27181)
  * Bump token-2022 to 0.4.3
  * Allow cargo to bump stuff to v1.11.5
* VoteProgram.safeWithdraw function to safeguard against accidental vote account closures (#26586)
  feat: safe withdraw function
  Co-authored-by: aschonfeld
* chore: bump futures from 0.3.21 to 0.3.23 (#27182)
  Bumps [futures](https://github.com/rust-lang/futures-rs) from 0.3.21 to 0.3.23.
  - [Release notes](https://github.com/rust-lang/futures-rs/releases)
  - [Changelog](https://github.com/rust-lang/futures-rs/blob/master/CHANGELOG.md)
  - [Commits](https://github.com/rust-lang/futures-rs/compare/0.3.21...0.3.23)
  Signed-off-by: dependabot[bot]
  * [auto-commit] Update all Cargo lock files
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
  Co-authored-by: dependabot-buildkite
* chore: bump nix from 0.24.2 to 0.25.0 (#27179)
  Bumps [nix](https://github.com/nix-rust/nix) from 0.24.2 to 0.25.0.
  - [Release notes](https://github.com/nix-rust/nix/releases)
  - [Changelog](https://github.com/nix-rust/nix/blob/master/CHANGELOG.md)
  - [Commits](https://github.com/nix-rust/nix/compare/v0.24.2...v0.25.0)
  Signed-off-by: dependabot[bot]
  * [auto-commit] Update all Cargo lock files
  Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
  Co-authored-by: dependabot-buildkite
* Parse ConfidentialTransaction instructions (#26825)
  Parse ConfidentialTransfer instructions
* snapshots: serialize version file first (#27192)
* serialize incremental_snapshot_hash (#26839)
  * pr feedback
* derives Error trait for ClusterInfoError and core::result::Error (#27208)
* Add clean_accounts_for_tests() (#27200)
* Rust v1.63.0 (#27148)
  * Upgrade to Rust v1.63.0
  * Add nightly_clippy_allows
  * Resolve some new clippy nightly lints
  * Increase QUIC packets completion timeout
  Co-authored-by: Michael Vines
* docs: updated "transaction fees" page (#26861)
  * docs: transaction fees, compute units, compute budget
  * docs: added messages definition
  * Revert "docs: added messages definition"
    This reverts commit 3c56156dfaaf17158c5eafbc5877080a83607a06.
  * docs: added messages definition
  * Update docs/src/transaction_fees.md
  * fix: updates from feedback
  Co-authored-by: Jacob Creech <82475023+jacobcreech@users.noreply.github.com>
* sdk: Fix args after "--" in build-bpf and test-bpf (#27221)
* Flaky Unit Test test_rpc_subscriptions (#27214)
  Increase unit test timeout from 5 seconds to 10 seconds
* chore: only buildkite pipelines use sccache in docker-run.sh (#27204)
* clean feature: `prevent_calling_precompiles_as_programs` (#27100)
  * fix tests
  * remove comment
  * feedback
* Add get_account_with_commitment to BenchTpsClient (#27176)
* Fix a corner-case panic in get_entries_in_data_block() (#27195)
  #### Problem
  get_entries_in_data_block() panics when there's an inconsistency between slot_meta and data_shred. However, as we don't lock on reads, reading across multiple column families is not atomic (especially for older slots) and thus does not guarantee consistency, as the background cleanup service could purge the slot in the middle. Such a panic was reported in #26980 when the validator serves a high load of RPC calls.
  #### Summary of Changes
  This PR makes get_entries_in_data_block() panic only when the inconsistency between slot-meta and data-shred happens on a slot older than lowest_cleanup_slot.
* Verify snapshot slot deltas (#26666)
* store-tool: log lamports for each account (#27168)
* add an assert for a debug feature to avoid wasted time (#27210)
* remove redundant call that bumps age to future (#27215)
* Use from_secs api to create duration (#27222)
* reorder slot # in debug hash data path (#27217)
* create helper fn for clarity (#27216)
* Verifying snapshot bank must always specify the snapshot slot (#27234)
* Remove `Bank::ensure_no_storage_rewards_pool()` (#26468)
* cli: Add subcommands for address lookup tables (#27123)
  * cli: Add subcommand for creating address lookup tables
  * cli: Add additional subcommands for address lookup tables
  * short commands
* adds hash domain to ping-pong protocol (#27193)
  In order to maintain backward compatibility, for now the responding node will hash the token both with and without the domain, so that the other node will accept the response regardless of its upgrade status. Once the cluster has upgraded to the new code, we will remove the legacy domain = false case. A sketch of the dual check follows below.
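  A sketch of the dual check, with an assumed domain separator (the actual prefix bytes and gossip types differ):

      use solana_sdk::hash::{hashv, Hash};

      const PING_PONG_HASH_PREFIX: &[u8] = b"SOLANA_PING_PONG"; // assumption

      fn pong_is_valid(token: &[u8; 32], pong_hash: &Hash) -> bool {
          let with_domain = hashv(&[PING_PONG_HASH_PREFIX, token]);
          let without_domain = hashv(&[token]);
          // Accept either form until the whole cluster runs the new code;
          // the legacy (no-domain) arm is removed after the upgrade.
          pong_hash == &with_domain || pong_hash == &without_domain
      }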
* Revert "Rust v1.63.0 (#27148)" (#27245)
  This reverts commit a2e7bdf50ac5e1d4c633f64f6362028b4164c003.
* correct double negation (#27240)
* Enable QUIC client by default. Add arg to disable QUIC client. (Forward port #26927) (#27194)
  * Enable QUIC client by default. Add arg to disable QUIC client.
  * Deprecate --disable-quic-servers arg
  * Add #[ignore] annotation to failing tests
* slots_connected: check if the range is connected (>= ending_slot) (#27152)
* create-snapshot check if snapshot slot exists (#27153)
* Add Bank::clean_accounts_for_tests() (#27209)
* Call `AccountsDb::shrink_all_slots()` directly (#27235)
* add ed25519_program to built-in instruction cost list (#27199)
  * Remove unnecessary and stale comment
* simple refactorings to disk idx (#27238)
* add _inclusive for clarity (#27239)
* eliminate unnecessary ZERO_RAW_LAMPORTS_SENTINEL (#27218)
* make test code more clear (#27260)
* banking stage: actually aggregate tracer packet stats (#27118)
  * aggregated_tracer_packet_stats_option was always None
  * Actually accumulate tracer packet stats
* Refactor epoch reward 1 (#27253)
  * refactor: extract store_stake_accounts fn
  * clippy: slice
  Co-authored-by: haoran
* recovers merkle shreds from erasure codes (#27136)
  The commit:
  * Identifies Merkle shreds when recovering from erasure codes and dispatches specialized code to reconstruct shreds.
  * Adds coding shred headers to recovered erasure shards.
  * Reconstructs the Merkle tree for the erasure batch and adds it to recovered shreds.
  * Attaches the common signature (for the root of the Merkle tree) to all recovered shreds.
* Simplify `Bank::clean_accounts()` by removing params (#27254)
* Account files remove (#26910)
  * Create a new function cleanup_accounts_paths, a trivial change
  * Remove account files asynchronously
  * Update and simplify the implementation after the validator test runs
  * Fixes after testing on the dev device
  * Discard tokio; use a thread instead
  * Fix comments format
  * Fix config type to pass the github test
  * Fix failed tests; handle the case of a non-existing path
  * Final cleanup, addressing the review comments: avoided OsString and made the function more generic with "impl AsRef"
  Co-authored-by: Jeff Washington
* Refactor: Flattens `TransactionContext::instruction_trace` (#27109)
  * Flattens TransactionContext::instruction_trace.
  * Stop the search at transaction level.
  * Renames get_instruction_context_at => get_instruction_context_at_nesting_level.
  * Removes TransactionContext::get_instruction_trace(). Adds TransactionContext::get_instruction_trace_length() and TransactionContext::get_instruction_context_at_index().
  * Have TransactionContext::instruction_accounts_lamport_sum() accept an iterator instead of a slice.
  * Removes instruction_trace from ExecutionRecord.
  * make InstructionContext::new() private
* Parallel insertion of dirty store keys during clean (#27058)
* Refactor epoch reward 2 (#27257)
  * refactor: extract store_stake_accounts fn
  * refactor: extract store_vote_account fn
  * clippy: slice
  * fix merge error
  Co-authored-by: haoran
* Standardize thread names
  Tenets:
  1. Limit thread names to 15 characters
  2. Prefix all Solana-controlled threads with "sol"
  3. Use Camel case. It's more character dense than Snake or Kebab case
  (An example follows below.)
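  An illustrative application of the tenets (the specific name is an example, not necessarily one chosen in the patch):

      use std::thread;

      fn spawn_worker() -> std::io::Result<thread::JoinHandle<()>> {
          thread::Builder::new()
              // "sol" prefix, Camel case, 14 chars -- fits the 15-char limit.
              .name("solReplayStage".to_string())
              .spawn(|| {
                  // ... worker body ...
              })
      }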
* cleanup comment on filter_zero_lamport_clean_for_incremental_snapshots (#27273)
* remove inaccurate log (#27255)
* patches metrics for invalid cached vote/stake accounts (#27266)
  The invalid cached vote accounts metric is overcounting actual mismatches, and the invalid cached stake accounts metric is undercounting.
* Refactor epoch reward 3 (#27259)
  * refactor: extract store_stake_accounts fn
  * refactor: extract store_vote_account fn
  * refactor: extract reward history update fn
  * clippy: slice
  Co-authored-by: haoran
* building
* Update `solana deploy` subcommand to warn non-upgradeable (#27264)
  Update the about text for `solana deploy` to warn that this is only for non-upgradeable deploys. Fixes #27228
* Refactor epoch reward 4 (#27261)
  * refactor: extract store_stake_accounts fn
  * refactor: extract store_vote_account fn
  * refactor: extract reward history update fn
  * remove avg point value from pay_validator fn. not used
  * clippy: slice
* Refactor epoch reward 3 (#27259) * refactor: extract store_stake_accounts fn * refactor: extract store_vote_account fn * refactor: extract reward history update fn * clippy: slice * clippy: slice Co-authored-by: haoran * fix merges Signed-off-by: dependabot[bot] Co-authored-by: haoran Co-authored-by: Jeff Biseda Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite Co-authored-by: Brooks Prumo Co-authored-by: behzad nouri Co-authored-by: AJ Taylor Co-authored-by: Tyera Eulberg Co-authored-by: Andrew Schonfeld Co-authored-by: aschonfeld Co-authored-by: apfitzge Co-authored-by: Jeff Washington (jwash) Co-authored-by: Brennan Watt Co-authored-by: Michael Vines Co-authored-by: Nick Frostbutter <75431177+nickfrosty@users.noreply.github.com> Co-authored-by: Jacob Creech <82475023+jacobcreech@users.noreply.github.com> Co-authored-by: Jon Cinque Co-authored-by: Yihau Chen Co-authored-by: Justin Starry Co-authored-by: kirill lykov Co-authored-by: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Co-authored-by: leonardkulms <42893075+leonardkulms@users.noreply.github.com> Co-authored-by: Will Hickey Co-authored-by: Tao Zhu <82401714+taozhu-chicago@users.noreply.github.com> Co-authored-by: Xiang Zhu Co-authored-by: Jeff Washington Co-authored-by: Alexander Meißner * checks that cached vote accounts are consistent with accounts-db (#27286) The commit adds sanity checks that when loading a bank from snapshots: * cached vote accounts are consistent with accounts-db. * all valid vote-accounts referenced in stake delegations are already cached. * slots_per_epoch shouldn't be optional (#27256) * rename unused parameter for clarity (#27271) * release roots tracker read lock earlier (#27267) * run hash calc with index on failure (#27279) * Update delete_path thread name * update cargo.lock * sort workspace * account_iter instead of accounts() (#27252) * Call AccountsDb::clean_accounts() directly, inside Bank::verify_snapshot_bank() (#27258) * Assign custom names to the Rayon global thread pool * refactor/document calc_delete_dependencies (#27272) * wtf * add _inclusive (#27302) * add _inclusive (#27301) * add _inclusive (#27300) * fix test * log reason why oldest store_id isn't being cleaned (#27298) * Remove unused `Bank::_clean_accounts()` (#27262) * Relax authority signer check for lookup table creation (#27248) * Relax authority signer check for lookup table creation * cli: support creating lookup tables without authority signer * add another create lookup table ix function * improve help message * add _inclusive (#27310) * add _inclusive (#27308) * in ancient shrink, unref accounts that were already ancient shrunk (#27294) * add missing derives (#27311) * add missing derives * undo some impossible Eqs * derive Clone for UiConfirmedBlock * add missing clones Co-authored-by: Kevin Heavey * remove unnecessary references (#27219) * Rust v1.63 (#27303) * Upgrade to Rust v1.63.0 * Add nightly_clippy_allows * Resolve some new clippy nightly lints * Increase QUIC packets completion timeout * Update quinn-udp crate Co-authored-by: Michael Vines * comment out test * ok * Remove redundant and stale comment (#27229) * Add documentation for JSON parsing (#27268) * Add documentation about json parsing * Link jsonParsed to info section * Include version information * Fallback to synchronous rm_dir call if path moving fails (#27306) Remove some log lines, as suggested in PR #26910 * chore: bump @babel/core from 7.18.0 to 7.18.13 in 
/web3.js (#27329) Bumps [@babel/core](https://github.com/babel/babel/tree/HEAD/packages/babel-core) from 7.18.0 to 7.18.13. - [Release notes](https://github.com/babel/babel/releases) - [Changelog](https://github.com/babel/babel/blob/main/CHANGELOG.md) - [Commits](https://github.com/babel/babel/commits/v7.18.13/packages/babel-core) --- updated-dependencies: - dependency-name: "@babel/core" dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * chore: bump @babel/register from 7.17.7 to 7.18.9 in /web3.js (#27330) Bumps [@babel/register](https://github.com/babel/babel/tree/HEAD/packages/babel-register) from 7.17.7 to 7.18.9. - [Release notes](https://github.com/babel/babel/releases) - [Changelog](https://github.com/babel/babel/blob/main/CHANGELOG.md) - [Commits](https://github.com/babel/babel/commits/v7.18.9/packages/babel-register) --- updated-dependencies: - dependency-name: "@babel/register" dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * chore: bump @commitlint/travis-cli from 17.0.0 to 17.0.3 in /web3.js (#27331) Bumps [@commitlint/travis-cli](https://github.com/conventional-changelog/commitlint/tree/HEAD/@commitlint/travis-cli) from 17.0.0 to 17.0.3. - [Release notes](https://github.com/conventional-changelog/commitlint/releases) - [Changelog](https://github.com/conventional-changelog/commitlint/blob/master/@commitlint/travis-cli/CHANGELOG.md) - [Commits](https://github.com/conventional-changelog/commitlint/commits/v17.0.3/@commitlint/travis-cli) --- updated-dependencies: - dependency-name: "@commitlint/travis-cli" dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * chore: bump eslint-plugin-mocha from 10.0.4 to 10.1.0 in /web3.js (#27332) Bumps [eslint-plugin-mocha](https://github.com/lo1tuma/eslint-plugin-mocha) from 10.0.4 to 10.1.0. - [Release notes](https://github.com/lo1tuma/eslint-plugin-mocha/releases) - [Changelog](https://github.com/lo1tuma/eslint-plugin-mocha/blob/master/CHANGELOG.md) - [Commits](https://github.com/lo1tuma/eslint-plugin-mocha/compare/10.0.4...10.1.0) --- updated-dependencies: - dependency-name: eslint-plugin-mocha dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Remove `total_data_size` and `data_size_changed` from `ExecuteDetailsTimings` (#27051) * Removes total_data_size and data_size_changed from ExecuteDetailsTimings. 
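The ping-pong hash-domain change (#27193, above) amounts to accepting two token digests during the transition window. A loose sketch of that dual-digest acceptance, with `DefaultHasher` and the `b"solana-ping-pong"` tag standing in for whatever hash function and domain encoding the gossip protocol actually uses:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Illustrative stand-in for the real token hash in gossip/src/ping_pong.rs.
fn hash_token(token: &[u8], with_domain: bool) -> u64 {
    let mut hasher = DefaultHasher::new();
    if with_domain {
        // Domain separation: mix a protocol-specific tag into the digest.
        b"solana-ping-pong".hash(&mut hasher);
    }
    token.hash(&mut hasher);
    hasher.finish()
}

// While the cluster is mixed-version, a verifier accepts either flavor so
// nodes keep answering each other. Once everyone has upgraded, the
// `with_domain = false` arm can be removed, as the commit message says.
fn pong_matches(token: &[u8], response: u64) -> bool {
    response == hash_token(token, true) || response == hash_token(token, false)
}

fn main() {
    let token = b"random-ping-token";
    assert!(pong_matches(token, hash_token(token, false)));
    assert!(pong_matches(token, hash_token(token, true)));
}
```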
* Another timeout tweak to reduce rpc test flakiness (#27313) * Another timeout tweak to reduce rpc test flakiness * Increase the same rpc timeout further * don't mark ancient append vecs as dirty wrt clean (#27312) * Allow Ctrl-C when querying balances (#27314) * Update config parsing doc (#27340) * bump up number * rev * fix compile err * update lock * add ignore flag for flaky test * ci * ci * clippy Signed-off-by: dependabot[bot] Co-authored-by: Yihau Chen Co-authored-by: Boqin Qin(秦 伯钦) Co-authored-by: Will Hickey Co-authored-by: steviez Co-authored-by: Michael Vines Co-authored-by: Jon Cinque Co-authored-by: Tyera Eulberg Co-authored-by: kirill lykov Co-authored-by: Jeff Washington (jwash) Co-authored-by: Brooks Prumo Co-authored-by: Xavier59 Co-authored-by: Brennan Watt Co-authored-by: Pankaj Garg Co-authored-by: Richard Patel Co-authored-by: Justin Starry Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite Co-authored-by: nickfrosty Co-authored-by: Richard Patel Co-authored-by: Christian Kamm Co-authored-by: Dmitri Makarov Co-authored-by: Ahmad <12675427+ultd@users.noreply.github.com> Co-authored-by: apfitzge Co-authored-by: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Co-authored-by: Brian Anderson Co-authored-by: dependabot-buildkite Co-authored-by: Ryo Onodera Co-authored-by: Justin Malčić Co-authored-by: Ryan Leung Co-authored-by: behzad nouri Co-authored-by: Lijun Wang <83639177+lijunwangs@users.noreply.github.com> Co-authored-by: Alexander Meißner Co-authored-by: Xiang Zhu Co-authored-by: hana <81144685+2501babe@users.noreply.github.com> Co-authored-by: Jeff Biseda Co-authored-by: Ikko Ashimine Co-authored-by: janlegner <32453746+janlegner@users.noreply.github.com> Co-authored-by: Ondra Chaloupka Co-authored-by: Tao Zhu <82401714+taozhu-chicago@users.noreply.github.com> Co-authored-by: Trent Nelson Co-authored-by: Tyera Eulberg Co-authored-by: HaoranYi Co-authored-by: Nick Frostbutter <75431177+nickfrosty@users.noreply.github.com> Co-authored-by: Jeff Washington (jwash) Co-authored-by: Brooks Prumo Co-authored-by: AJ Taylor Co-authored-by: Andrew Schonfeld Co-authored-by: aschonfeld Co-authored-by: Jacob Creech <82475023+jacobcreech@users.noreply.github.com> Co-authored-by: leonardkulms <42893075+leonardkulms@users.noreply.github.com> Co-authored-by: haoran Co-authored-by: Chris Coudron Co-authored-by: Kevin Heavey <24635973+kevinheavey@users.noreply.github.com> Co-authored-by: Kevin Heavey --- .buildkite/hooks/post-command | 12 + CONTRIBUTING.md | 26 +- Cargo.lock | 410 +-- account-decoder/Cargo.toml | 6 +- .../src/parse_address_lookup_table.rs | 2 +- accounts-bench/src/main.rs | 2 +- banks-interface/Cargo.toml | 2 +- banks-server/src/banks_server.rs | 12 +- banks-server/src/rpc_banks_service.rs | 2 +- bench-batch-simulate-bundle/src/simulator.rs | 4 +- .../src/main.rs | 4 +- bench-tps/Cargo.toml | 5 +- bench-tps/src/bench.rs | 157 +- bench-tps/src/bench_tps_client.rs | 7 + bench-tps/src/bench_tps_client/bank_client.rs | 14 + bench-tps/src/bench_tps_client/rpc_client.rs | 17 +- bench-tps/src/bench_tps_client/thin_client.rs | 16 +- bench-tps/src/bench_tps_client/tpu_client.rs | 18 +- bench-tps/src/cli.rs | 14 + bench-tps/src/main.rs | 21 +- bench-tps/tests/bench_tps.rs | 25 +- bloom/Cargo.toml | 2 +- ci/docker-run.sh | 18 +- ci/docker-rust-nightly/Dockerfile | 2 +- ci/docker-rust/Dockerfile | 2 +- ci/publish-tarball.sh | 2 + ci/rust-version.sh | 4 +- ci/test-checks.sh | 14 + 
ci/test-stable.sh | 40 +- cli-config/Cargo.toml | 2 +- cli-output/Cargo.toml | 8 +- cli-output/src/cli_output.rs | 69 + cli/Cargo.toml | 9 +- cli/src/address_lookup_table.rs | 854 +++++ cli/src/clap_app.rs | 5 +- cli/src/cli.rs | 14 +- cli/src/lib.rs | 1 + cli/src/program.rs | 50 +- cli/tests/address_lookup_table.rs | 218 ++ cli/tests/program.rs | 22 +- client-test/Cargo.toml | 4 +- client/Cargo.toml | 17 +- client/src/nonblocking/blockhash_query.rs | 433 +++ client/src/nonblocking/mod.rs | 2 + client/src/nonblocking/nonce_utils.rs | 247 ++ client/src/nonblocking/rpc_client.rs | 25 + client/src/nonce_utils.rs | 207 +- client/src/rpc_client.rs | 27 +- client/src/rpc_request.rs | 6 + client/src/rpc_response.rs | 48 +- client/src/transaction_executor.rs | 2 +- client/tests/quic_client.rs | 6 +- core/Cargo.toml | 8 +- core/benches/shredder.rs | 22 +- core/src/accounts_hash_verifier.rs | 23 +- core/src/ancestor_hashes_service.rs | 213 +- core/src/banking_stage.rs | 32 +- core/src/broadcast_stage.rs | 35 +- .../broadcast_duplicates_run.rs | 23 +- .../broadcast_fake_shreds_run.rs | 1 + core/src/broadcast_stage/broadcast_utils.rs | 4 - .../fail_entry_verification_broadcast_run.rs | 2 +- .../broadcast_stage/standard_broadcast_run.rs | 158 +- core/src/bundle_account_locker.rs | 2 +- core/src/bundle_sanitizer.rs | 4 +- core/src/bundle_stage.rs | 13 +- core/src/cache_block_meta_service.rs | 2 +- core/src/cluster_info_vote_listener.rs | 6 +- core/src/cluster_nodes.rs | 91 +- core/src/cluster_slots_service.rs | 2 +- core/src/commitment_service.rs | 34 +- core/src/completed_data_sets_service.rs | 2 +- core/src/consensus.rs | 24 +- core/src/cost_update_service.rs | 2 +- core/src/drop_bank_service.rs | 2 +- core/src/fetch_stage.rs | 4 +- core/src/find_packet_sender_stake_stage.rs | 2 +- .../src/forward_packet_batches_by_accounts.rs | 4 +- core/src/immutable_deserialized_packet.rs | 137 + core/src/ledger_cleanup_service.rs | 6 +- core/src/ledger_metric_report_service.rs | 2 +- core/src/lib.rs | 1 + core/src/poh_timing_report_service.rs | 2 +- core/src/qos_service.rs | 2 +- core/src/repair_service.rs | 2 +- core/src/replay_stage.rs | 20 +- core/src/result.rs | 80 +- core/src/retransmit_stage.rs | 18 +- core/src/rewards_recorder_service.rs | 2 +- core/src/serve_repair.rs | 231 +- core/src/serve_repair_service.rs | 2 +- core/src/shred_fetch_stage.rs | 5 +- core/src/sigverify_shreds.rs | 4 +- core/src/sigverify_stage.rs | 2 +- core/src/snapshot_packager_service.rs | 2 +- core/src/staked_nodes_updater_service.rs | 67 +- core/src/stats_reporter_service.rs | 2 +- core/src/system_monitor_service.rs | 2 +- core/src/tower1_7_14.rs | 2 +- core/src/tpu.rs | 79 +- core/src/transaction_priority_details.rs | 1 - core/src/tvu.rs | 11 +- core/src/unprocessed_packet_batches.rs | 116 +- core/src/validator.rs | 495 +-- core/src/voting_service.rs | 2 +- core/src/warm_quic_cache_service.rs | 2 +- core/src/window_service.rs | 6 +- core/tests/snapshots.rs | 3 + docs/docusaurus.config.js | 92 +- docs/sidebars.js | 506 +-- docs/src/css/custom.css | 49 +- docs/src/developing/clients/jsonrpc-api.md | 48 +- .../developing/on-chain-programs/debugging.md | 2 +- .../developing/on-chain-programs/deploying.md | 4 +- .../developing/on-chain-programs/examples.md | 2 +- .../developing/programming-model/runtime.md | 37 +- .../implemented-proposals.md | 16 +- docs/src/pages/index.js | 6 +- docs/src/proposals.md | 49 + .../proposals/accepted-design-proposals.md | 18 +- docs/src/proposals/return-data.md | 2 +- docs/src/terminology.md | 32 +- 
docs/src/transaction_fees.md | 67 +- dos/Cargo.toml | 6 +- dos/src/cli.rs | 1 + dos/src/main.rs | 2 + entry/Cargo.toml | 2 +- entry/src/entry.rs | 74 +- explorer/package-lock.json | 200 +- explorer/package.json | 10 +- .../components/account/TokenHistoryCard.tsx | 18 +- .../AddressLookupTableAccountSection.tsx | 84 + .../LookupTableEntriesCard.tsx | 60 + .../account/address-lookup-table/types.ts | 13 + .../account/history/TokenInstructionsCard.tsx | 16 +- .../account/history/TokenTransfersCard.tsx | 12 +- .../src/components/account/history/common.tsx | 20 +- .../AddressLookupTableDetailsCard.tsx | 46 + .../instruction/address-lookup-table/types.ts | 31 + .../transaction/InstructionsSection.tsx | 14 +- .../transaction/ProgramLogSection.tsx | 10 +- .../transaction/TokenBalancesCard.tsx | 9 +- explorer/src/pages/AccountDetailsPage.tsx | 42 +- explorer/src/pages/TransactionDetailsPage.tsx | 15 +- explorer/src/providers/accounts/history.tsx | 15 +- .../src/providers/transactions/parsed.tsx | 10 +- explorer/src/utils/instruction.ts | 56 +- faucet/Cargo.toml | 2 +- frozen-abi/Cargo.toml | 4 +- frozen-abi/src/abi_example.rs | 2 +- genesis/Cargo.toml | 4 +- geyser-plugin-manager/Cargo.toml | 2 +- .../src/slot_status_observer.rs | 2 +- gossip/Cargo.toml | 4 +- gossip/src/cluster_info.rs | 69 +- gossip/src/crds_gossip_pull.rs | 2 +- gossip/src/gossip_service.rs | 2 +- gossip/src/ping_pong.rs | 40 +- gossip/tests/crds_gossip.rs | 2 +- gossip/tests/gossip.rs | 6 +- install/Cargo.toml | 8 +- ledger-tool/Cargo.toml | 4 +- ledger-tool/src/bigtable.rs | 2 + ledger-tool/src/main.rs | 128 +- ledger/Cargo.toml | 15 +- ledger/src/bigtable_upload.rs | 58 +- ledger/src/bigtable_upload_service.rs | 2 +- ledger/src/blockstore.rs | 119 +- ledger/src/blockstore_db.rs | 14 +- ledger/src/blockstore_meta.rs | 2 +- ledger/src/blockstore_processor.rs | 2 +- ledger/src/shred.rs | 67 +- ledger/src/shred/legacy.rs | 46 +- ledger/src/shred/merkle.rs | 547 +++- ledger/src/shred/shred_code.rs | 12 +- ledger/src/shred/traits.rs | 2 + ledger/src/shredder.rs | 254 +- ledger/src/sigverify_shreds.rs | 2 +- ledger/src/token_balances.rs | 34 +- ledger/tests/shred.rs | 22 +- local-cluster/Cargo.toml | 2 +- local-cluster/src/cluster_tests.rs | 32 +- local-cluster/src/local_cluster.rs | 21 +- local-cluster/src/validator_configs.rs | 2 +- local-cluster/tests/local_cluster.rs | 8 +- local-cluster/tests/local_cluster_slow_1.rs | 2 + local-cluster/tests/local_cluster_slow_2.rs | 1 + log-analyzer/Cargo.toml | 4 +- metrics/Cargo.toml | 2 +- metrics/src/metrics.rs | 6 +- net-shaper/Cargo.toml | 4 +- net-utils/Cargo.toml | 4 +- net-utils/src/lib.rs | 117 +- perf/Cargo.toml | 6 +- perf/src/sigverify.rs | 9 +- poh/src/poh_recorder.rs | 2 +- poh/src/poh_service.rs | 4 +- program-runtime/src/compute_budget.rs | 187 +- program-runtime/src/invoke_context.rs | 87 +- program-runtime/src/pre_account.rs | 3 - program-runtime/src/timings.rs | 17 - program-test/Cargo.toml | 2 +- program-test/src/lib.rs | 27 +- .../tests/create_lookup_table_ix.rs | 87 +- programs/address-lookup-table/Cargo.toml | 2 +- .../address-lookup-table/src/instruction.rs | 35 +- .../address-lookup-table/src/processor.rs | 22 +- programs/bpf/Cargo.lock | 52 +- programs/bpf/Cargo.toml | 2 +- programs/bpf/c/src/invoke/invoke.c | 47 +- programs/bpf/rust/invoke/src/instructions.rs | 13 +- programs/bpf/rust/invoke/src/processor.rs | 32 - programs/bpf/tests/programs.rs | 10 +- programs/bpf_loader/Cargo.toml | 2 +- programs/bpf_loader/src/allocator_bump.rs | 6 +- 
programs/bpf_loader/src/lib.rs | 12 +- programs/bpf_loader/src/serialization.rs | 31 +- programs/bpf_loader/src/syscalls/cpi.rs | 21 +- programs/bpf_loader/src/syscalls/mod.rs | 74 +- programs/config/Cargo.toml | 4 +- .../tests/process_transaction.rs | 25 - programs/stake/Cargo.toml | 2 +- programs/stake/src/stake_instruction.rs | 2 +- programs/stake/src/stake_state.rs | 4 +- programs/vote/Cargo.toml | 3 +- programs/vote/src/lib.rs | 8 +- programs/vote/src/vote_processor.rs | 19 +- programs/vote/src/vote_state/mod.rs | 2810 +++++------------ programs/vote/src/vote_transaction.rs | 6 +- rbpf-cli/Cargo.toml | 6 +- remote-wallet/Cargo.toml | 2 +- rpc-test/Cargo.toml | 4 +- rpc-test/tests/rpc.rs | 11 +- rpc/Cargo.toml | 10 +- .../optimistically_confirmed_bank_tracker.rs | 2 +- rpc/src/rpc.rs | 10 +- rpc/src/rpc_completed_slots_service.rs | 2 +- rpc/src/rpc_pubsub_service.rs | 4 +- rpc/src/rpc_service.rs | 20 +- rpc/src/rpc_subscriptions.rs | 9 +- rpc/src/transaction_status_service.rs | 2 +- runtime/Cargo.toml | 4 +- runtime/benches/accounts.rs | 2 +- runtime/benches/status_cache.rs | 18 +- runtime/src/account_rent_state.rs | 2 +- runtime/src/accounts.rs | 51 +- runtime/src/accounts_background_service.rs | 11 +- runtime/src/accounts_db.rs | 450 ++- runtime/src/accounts_hash.rs | 16 +- runtime/src/accounts_index.rs | 31 +- runtime/src/accounts_index_storage.rs | 2 +- runtime/src/ancestors.rs | 8 +- runtime/src/bank.rs | 671 ++-- runtime/src/bank_client.rs | 2 +- runtime/src/block_cost_limits.rs | 10 +- runtime/src/bucket_map_holder.rs | 1 + runtime/src/builtins.rs | 32 +- runtime/src/cost_tracker.rs | 6 +- runtime/src/execute_cost_table.rs | 1 - runtime/src/expected_rent_collection.rs | 8 +- runtime/src/genesis_utils.rs | 2 + runtime/src/hardened_unpack.rs | 2 +- runtime/src/in_mem_accounts_index.rs | 15 +- runtime/src/message_processor.rs | 10 +- runtime/src/read_only_accounts_cache.rs | 11 +- runtime/src/runtime_config.rs | 1 + runtime/src/serde_snapshot.rs | 20 +- runtime/src/serde_snapshot/newer.rs | 21 +- runtime/src/serde_snapshot/tests.rs | 56 +- runtime/src/shared_buffer_reader.rs | 2 +- runtime/src/snapshot_minimizer.rs | 2 +- runtime/src/snapshot_utils.rs | 311 +- runtime/src/stakes.rs | 54 +- runtime/src/status_cache.rs | 35 +- runtime/src/storable_accounts.rs | 2 +- runtime/src/system_instruction_processor.rs | 7 +- .../src/verify_accounts_hash_in_background.rs | 2 +- runtime/src/vote_account.rs | 12 +- runtime/store-tool/src/main.rs | 8 +- runtime/tests/accounts.rs | 2 +- sdk/Cargo.toml | 6 +- sdk/cargo-build-bpf/src/main.rs | 5 +- sdk/cargo-build-sbf/Cargo.toml | 1 + sdk/cargo-build-sbf/src/main.rs | 60 +- sdk/cargo-build-sbf/tests/crates.rs | 48 +- sdk/cargo-test-bpf/src/main.rs | 5 +- sdk/macro/src/lib.rs | 12 +- sdk/program/src/clock.rs | 101 +- sdk/program/src/epoch_schedule.rs | 37 +- sdk/program/src/example_mocks.rs | 4 + sdk/program/src/lib.rs | 95 +- sdk/program/src/message/compiled_keys.rs | 10 +- sdk/program/src/nonce/state/mod.rs | 2 +- sdk/program/src/rent.rs | 72 +- sdk/program/src/slot_hashes.rs | 2 +- sdk/program/src/slot_history.rs | 3 +- sdk/program/src/stake/tools.rs | 2 +- sdk/program/src/sysvar/clock.rs | 128 +- sdk/program/src/sysvar/epoch_schedule.rs | 127 +- sdk/program/src/sysvar/fees.rs | 21 +- sdk/program/src/sysvar/instructions.rs | 84 +- sdk/program/src/sysvar/mod.rs | 105 +- sdk/program/src/sysvar/recent_blockhashes.rs | 18 + sdk/program/src/sysvar/rent.rs | 128 +- sdk/program/src/sysvar/rewards.rs | 4 +- sdk/program/src/sysvar/slot_hashes.rs | 47 +- 
sdk/program/src/sysvar/slot_history.rs | 50 +- sdk/program/src/sysvar/stake_history.rs | 47 +- .../program/src/vote}/authorized_voters.rs | 5 +- .../program/src/vote/error.rs | 5 +- .../program/src/vote/instruction.rs | 44 +- sdk/program/src/vote/mod.rs | 11 + sdk/program/src/vote/state/mod.rs | 1219 +++++++ .../src/vote/state}/vote_state_0_23_5.rs | 1 + .../src/vote/state}/vote_state_versions.rs | 2 +- sdk/src/builtins.rs | 61 - sdk/src/bundle/error.rs | 4 +- sdk/src/feature_set.rs | 17 +- sdk/src/keyed_account.rs | 258 -- sdk/src/lib.rs | 17 +- sdk/src/precompiles.rs | 8 +- sdk/src/quic.rs | 12 + sdk/src/transaction/error.rs | 4 +- sdk/src/transaction/sanitized.rs | 7 +- sdk/src/transaction_context.rs | 222 +- .../src/send_transaction_service.rs | 4 +- storage-bigtable/Cargo.toml | 8 +- storage-bigtable/build-proto/Cargo.lock | 8 +- storage-proto/Cargo.toml | 2 +- streamer/Cargo.toml | 9 +- streamer/src/nonblocking/quic.rs | 152 +- streamer/src/quic.rs | 26 +- streamer/src/streamer.rs | 10 +- sys-tuner/Cargo.toml | 4 +- test-validator/Cargo.toml | 2 +- test-validator/src/lib.rs | 60 +- tokens/src/commands.rs | 10 +- tokens/src/main.rs | 2 +- transaction-status/Cargo.toml | 6 +- transaction-status/src/lib.rs | 2 +- transaction-status/src/parse_stake.rs | 2 +- transaction-status/src/parse_token.rs | 10 +- .../extension/confidential_transfer.rs | 399 +++ .../src/parse_token/extension/mod.rs | 1 + upload-perf/Cargo.toml | 2 +- validator/Cargo.toml | 10 +- validator/src/admin_rpc_service.rs | 101 +- validator/src/bin/solana-test-validator.rs | 39 +- validator/src/bootstrap.rs | 394 ++- validator/src/lib.rs | 23 +- validator/src/main.rs | 91 +- version/Cargo.toml | 4 +- web3.js/package-lock.json | 2249 ++++++------- web3.js/src/account-data.ts | 39 + web3.js/src/account.ts | 2 +- web3.js/src/connection.ts | 64 +- web3.js/src/fee-calculator.ts | 2 + web3.js/src/index.ts | 17 +- web3.js/src/loader.ts | 9 +- web3.js/src/message/index.ts | 32 + web3.js/src/{message.ts => message/legacy.ts} | 54 +- web3.js/src/nonce-account.ts | 2 +- .../address-lookup-table/index.ts} | 14 +- .../programs/address-lookup-table/state.ts | 84 + web3.js/src/{ => programs}/compute-budget.ts | 8 +- .../ed25519.ts} | 8 +- web3.js/src/programs/index.ts | 7 + .../secp256k1.ts} | 8 +- .../{stake-program.ts => programs/stake.ts} | 14 +- .../{system-program.ts => programs/system.ts} | 16 +- .../src/{vote-program.ts => programs/vote.ts} | 35 +- web3.js/src/publickey.ts | 13 +- .../constants.ts} | 0 .../expiry-custom-errors.ts} | 0 web3.js/src/transaction/index.ts | 3 + .../{transaction.ts => transaction/legacy.ts} | 23 +- .../__forks__/react-native/url-impl.ts | 0 web3.js/src/{util => utils}/assert.ts | 0 web3.js/src/{util => utils}/bigint.ts | 0 web3.js/src/{util => utils}/borsh-schema.ts | 0 web3.js/src/{util => utils}/cluster.ts | 0 web3.js/src/utils/index.ts | 4 + .../src/{util => utils}/makeWebsocketUrl.ts | 0 .../src/{util => utils}/promise-timeout.ts | 0 .../send-and-confirm-raw-transaction.ts | 0 .../send-and-confirm-transaction.ts | 0 .../src/{util => utils}/shortvec-encoding.ts | 0 web3.js/src/{util => utils}/sleep.ts | 0 web3.js/src/{util => utils}/to-buffer.ts | 0 web3.js/src/{util => utils}/url-impl.ts | 0 web3.js/src/validator-info.ts | 10 +- web3.js/src/vote-account.ts | 2 +- web3.js/test/agent-manager.test.ts | 2 +- web3.js/test/cluster.test.ts | 2 +- web3.js/test/connection.test.ts | 101 +- web3.js/test/mocks/rpc-http.ts | 2 +- .../address-lookup-table.test.ts} | 8 +- .../compute-budget.test.ts | 6 +- 
.../ed25519.test.ts} | 4 +- .../secp256k1.test.ts} | 4 +- .../stake.test.ts} | 6 +- .../system.test.ts} | 10 +- .../vote.test.ts} | 21 +- web3.js/test/shortvec-encoding.test.ts | 2 +- web3.js/test/transaction-payer.test.ts | 2 +- web3.js/test/transaction.test.ts | 7 +- web3.js/test/websocket.test.ts | 2 +- web3.js/yarn.lock | 1208 +++---- zk-token-sdk/src/instruction/close_account.rs | 2 +- zk-token-sdk/src/instruction/withdraw.rs | 2 +- 410 files changed, 14116 insertions(+), 8693 deletions(-) create mode 100644 cli/src/address_lookup_table.rs create mode 100644 cli/tests/address_lookup_table.rs create mode 100644 client/src/nonblocking/blockhash_query.rs create mode 100644 client/src/nonblocking/nonce_utils.rs create mode 100644 core/src/immutable_deserialized_packet.rs create mode 100644 docs/src/proposals.md create mode 100644 explorer/src/components/account/address-lookup-table/AddressLookupTableAccountSection.tsx create mode 100644 explorer/src/components/account/address-lookup-table/LookupTableEntriesCard.tsx create mode 100644 explorer/src/components/account/address-lookup-table/types.ts create mode 100644 explorer/src/components/instruction/AddressLookupTableDetailsCard.tsx create mode 100644 explorer/src/components/instruction/address-lookup-table/types.ts rename {programs/vote/src => sdk/program/src/vote}/authorized_voters.rs (97%) rename programs/vote/src/vote_error.rs => sdk/program/src/vote/error.rs (96%) rename programs/vote/src/vote_instruction.rs => sdk/program/src/vote/instruction.rs (98%) create mode 100644 sdk/program/src/vote/mod.rs create mode 100644 sdk/program/src/vote/state/mod.rs rename {programs/vote/src/vote_state => sdk/program/src/vote/state}/vote_state_0_23_5.rs (97%) rename {programs/vote/src/vote_state => sdk/program/src/vote/state}/vote_state_versions.rs (96%) delete mode 100644 sdk/src/keyed_account.rs create mode 100644 transaction-status/src/parse_token/extension/confidential_transfer.rs create mode 100644 web3.js/src/account-data.ts create mode 100644 web3.js/src/message/index.ts rename web3.js/src/{message.ts => message/legacy.ts} (81%) rename web3.js/src/{address-lookup-table-program.ts => programs/address-lookup-table/index.ts} (96%) create mode 100644 web3.js/src/programs/address-lookup-table/state.ts rename web3.js/src/{ => programs}/compute-budget.ts (97%) rename web3.js/src/{ed25519-program.ts => programs/ed25519.ts} (96%) create mode 100644 web3.js/src/programs/index.ts rename web3.js/src/{secp256k1-program.ts => programs/secp256k1.ts} (97%) rename web3.js/src/{stake-program.ts => programs/stake.ts} (98%) rename web3.js/src/{system-program.ts => programs/system.ts} (98%) rename web3.js/src/{vote-program.ts => programs/vote.ts} (90%) rename web3.js/src/{transaction-constants.ts => transaction/constants.ts} (100%) rename web3.js/src/{util/tx-expiry-custom-errors.ts => transaction/expiry-custom-errors.ts} (100%) create mode 100644 web3.js/src/transaction/index.ts rename web3.js/src/{transaction.ts => transaction/legacy.ts} (97%) rename web3.js/src/{util => utils}/__forks__/react-native/url-impl.ts (100%) rename web3.js/src/{util => utils}/assert.ts (100%) rename web3.js/src/{util => utils}/bigint.ts (100%) rename web3.js/src/{util => utils}/borsh-schema.ts (100%) rename web3.js/src/{util => utils}/cluster.ts (100%) create mode 100644 web3.js/src/utils/index.ts rename web3.js/src/{util => utils}/makeWebsocketUrl.ts (100%) rename web3.js/src/{util => utils}/promise-timeout.ts (100%) rename web3.js/src/{util => 
utils}/send-and-confirm-raw-transaction.ts (100%) rename web3.js/src/{util => utils}/send-and-confirm-transaction.ts (100%) rename web3.js/src/{util => utils}/shortvec-encoding.ts (100%) rename web3.js/src/{util => utils}/sleep.ts (100%) rename web3.js/src/{util => utils}/to-buffer.ts (100%) rename web3.js/src/{util => utils}/url-impl.ts (100%) rename web3.js/test/{address-lookup-table-program.test.ts => program-tests/address-lookup-table.test.ts} (98%) rename web3.js/test/{ => program-tests}/compute-budget.test.ts (98%) rename web3.js/test/{ed25519-program.test.ts => program-tests/ed25519.test.ts} (96%) rename web3.js/test/{secp256k1-program.test.ts => program-tests/secp256k1.test.ts} (98%) rename web3.js/test/{stake-program.test.ts => program-tests/stake.test.ts} (99%) rename web3.js/test/{system-program.test.ts => program-tests/system.test.ts} (98%) rename web3.js/test/{vote-program.test.ts => program-tests/vote.test.ts} (93%) diff --git a/.buildkite/hooks/post-command b/.buildkite/hooks/post-command index 513b1b1f89..6423f7ee17 100644 --- a/.buildkite/hooks/post-command +++ b/.buildkite/hooks/post-command @@ -20,6 +20,18 @@ else SUCCESS=false fi + if [[ "$BUILDKITE_BRANCH" == 'master' && -f "results.json" ]]; then + # prepare result file + awk '/{ "type": .* }/' results.json > sanitized-results.json + + # upload to buildkite + buildkite-test-collector < sanitized-results.json + + # upload to datadog + cargo2junit > results.xml < sanitized-results.json + datadog-ci junit upload --service solana results.xml + fi + point_tags="pipeline=$BUILDKITE_PIPELINE_SLUG,job=$CI_LABEL,pr=$PR,success=$SUCCESS" point_tags="${point_tags// /\\ }" # Escape spaces diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3f4918fd7f..4a64afe79e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -250,20 +250,12 @@ confused with 3-letter acronyms. ## Design Proposals -Solana's architecture is described by docs generated from markdown files in -the `docs/src/` directory, maintained by an *editor* (currently @garious). To -add a design proposal, you'll need to include it in the -[Accepted Design Proposals](https://docs.solana.com/proposals/accepted-design-proposals) -section of the Solana docs. Here's the full process: - -1. Propose a design by creating a PR that adds a markdown document to the - `docs/src/proposals` directory and references it from the [table of - contents](docs/src/SUMMARY.md). Add any relevant *maintainers* to the PR - review. -2. The PR being merged indicates your proposed change was accepted and that the - maintainers support your plan of attack. -3. Submit PRs that implement the proposal. When the implementation reveals the - need for tweaks to the proposal, be sure to update the proposal and have that - change reviewed by the same people as in step 1. -4. Once the implementation is complete, submit a PR that moves the link from - the Accepted Proposals to the Implemented Proposals section. +Solana's architecture is described by docs generated from markdown files in the `docs/src/` +directory and viewable on the official [Solana Documentation](https://docs.solana.com) website. + +Current design proposals may be viewed on the docs site: + +1. [Accepted Proposals](https://docs.solana.com/proposals/accepted-design-proposals.md). +2. [Implemented Proposals](https://docs.solana.com/implemented-proposals/implemented-proposals.md) + +New design proposals should follow this guide on [how to submit a design proposal](./docs/src/proposals.md#submit-a-design-proposal). 
\ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index c33336481d..1ae7d349f4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -254,9 +254,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.60" +version = "1.0.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c794e162a5eff65c72ef524dfe393eb923c354e350bb78b9c7383df13f3bc142" +checksum = "1485d4d2cc45e7b201ee3767015c96faa5904387c9d87c6efdd0fb511f12d305" [[package]] name = "arc-swap" @@ -531,9 +531,9 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.59.2" +version = "0.60.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" +checksum = "062dddbc1ba4aca46de6338e2bf87771414c335f7b2f2036e8f3e9befebf88e6" dependencies = [ "bitflags", "cexpr", @@ -709,9 +709,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.10.0" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ccbd214614c6783386c1af30caf03192f17891059cecc394b4fb119e363de3" +checksum = "c1ad822118d20d2c234f427000d5acc36eabe1e29a348c89b63dd60b13f28e5d" [[package]] name = "bv" @@ -740,18 +740,18 @@ checksum = "2c676a478f63e9fa2dd5368a42f28bba0d6c560b775f38583c8bbaa7fcd67c9c" [[package]] name = "bytemuck" -version = "1.11.0" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5377c8865e74a160d21f29c2d40669f53286db6eab59b88540cbb12ffc8b835" +checksum = "2f5715e491b5a1598fc2bef5a606847b5dc1d48ea625bd3c02c00de8285591da" dependencies = [ "bytemuck_derive", ] [[package]] name = "bytemuck_derive" -version = "1.1.1" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfd2f4180c5721da6335cc9e9061cce522b87a35e51cc57636d28d22a9863c80" +checksum = "1b9e1f5fa78f69496407a27ae9ed989e3c3b072310286f5ef385525e4cbc24a9" dependencies = [ "proc-macro2 1.0.43", "quote 1.0.21", @@ -799,9 +799,9 @@ dependencies = [ [[package]] name = "camino" -version = "1.0.9" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "869119e97797867fd90f5e22af7d0bd274bd4635ebb9eb68c04f3f513ae6c412" +checksum = "88ad0e1e3e88dd237a156ab9f571021b8a158caa0ae44b1968a241efb5144c1e" dependencies = [ "serde", ] @@ -879,9 +879,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.21" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f725f340c3854e3cb3ab736dc21f0cca183303acea3b3ffec30f141503ac8eb" +checksum = "bfd4d1b31faaa3a89d7934dbded3111da0d2ef28e3ebccdb4f0179f5929d1ef1" dependencies = [ "iana-time-zone", "js-sys", @@ -895,9 +895,9 @@ dependencies = [ [[package]] name = "chrono-humanize" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eddc119501d583fd930cb92144e605f44e0252c38dd89d9247fffa1993375cb" +checksum = "32dce1ea1988dbdf9f9815ff11425828523bd2a134ec0805d2ac8af26ee6096e" dependencies = [ "chrono", ] @@ -949,9 +949,9 @@ dependencies = [ [[package]] name = "clap" -version = "3.2.16" +version = "3.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3dbbb6653e7c55cc8595ad3e1f7be8f32aba4eb7ff7f0fd1163d4f3d137c0a9" +checksum = "29e724a68d9319343bb3328c9cc2dfde263f4b3142ee1059a9980580171c954b" dependencies = [ "atty", "bitflags", @@ -966,9 +966,9 @@ dependencies = [ 
[[package]] name = "clap_derive" -version = "3.2.15" +version = "3.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ba52acd3b0a5c33aeada5cdaa3267cdc7c594a98731d4268cdc1532f4264cb4" +checksum = "13547f7012c01ab4a0e8f8967730ada8f9fdf419e8b6c792788f39cf4e46eefa" dependencies = [ "heck 0.4.0", "proc-macro-error", @@ -1101,9 +1101,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.2" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59a6001667ab124aebae2a495118e11d30984c3a653e99d86d58971708cf5e4b" +checksum = "dc948ebb96241bb40ab73effeb80d9f93afaad49359d159a5e61be51619fe813" dependencies = [ "libc", ] @@ -1232,9 +1232,9 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.2.2" +version = "3.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b37feaa84e6861e00a1f5e5aa8da3ee56d605c9992d33e082786754828e20865" +checksum = "1d91974fbbe88ec1df0c24a4f00f99583667a7e2e6272b2b92d294d81e462173" dependencies = [ "nix", "winapi 0.3.9", @@ -1265,6 +1265,18 @@ dependencies = [ "rayon", ] +[[package]] +name = "dashmap" +version = "5.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3495912c9c1ccf2e18976439f4443f3fee0fd61f424ff99fde6a66b15ecb448f" +dependencies = [ + "cfg-if 1.0.0", + "hashbrown 0.12.3", + "lock_api", + "parking_lot_core 0.9.3", +] + [[package]] name = "data-encoding" version = "2.3.2" @@ -1475,9 +1487,9 @@ dependencies = [ [[package]] name = "either" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f107b87b6afc2a64fd13cac55fe06d6c8859f12d4b14cbcdd2c67d0976781be" +checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" [[package]] name = "encode_unicode" @@ -1675,6 +1687,15 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "float-cmp" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98de4bbd547a563b716d8dfa9aad1cb19bfab00f4fa09a6a4ed21dbcf44ce9c4" +dependencies = [ + "num-traits", +] + [[package]] name = "fnv" version = "1.0.7" @@ -1726,9 +1747,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" +checksum = "ab30e97ab6aacfe635fad58f22c2bb06c8b685f7421eb1e064a729e2a5f481fa" dependencies = [ "futures-channel", "futures-core", @@ -1741,9 +1762,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" +checksum = "2bfc52cbddcfd745bf1740338492bb0bd83d76c67b445f91c5fb29fae29ecaa1" dependencies = [ "futures-core", "futures-sink", @@ -1751,15 +1772,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" +checksum = "d2acedae88d38235936c3922476b10fced7b2b68136f5e3c03c2d5be348a1115" [[package]] name = "futures-executor" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6" +checksum = "1d11aa21b5b587a64682c0094c2bdd4df0076c5324961a40cc3abd7f37930528" dependencies = [ "futures-core", "futures-task", @@ -1769,15 +1790,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" +checksum = "93a66fc6d035a26a3ae255a6d2bca35eda63ae4c5512bef54449113f7a1228e5" [[package]] name = "futures-macro" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" +checksum = "0db9cce532b0eae2ccf2766ab246f114b56b9cf6d445e00c2549fbc100ca045d" dependencies = [ "proc-macro2 1.0.43", "quote 1.0.21", @@ -1786,21 +1807,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" +checksum = "ca0bae1fe9752cf7fd9b0064c674ae63f97b37bc714d745cbde0afb7ec4e6765" [[package]] name = "futures-task" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" +checksum = "842fc63b931f4056a24d59de13fb1272134ce261816e063e634ad0c15cdc5306" [[package]] name = "futures-util" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" +checksum = "f0828a5471e340229c11c77ca80017937ce3c58cb788a17e5f1c2d5c485a9577" dependencies = [ "futures 0.1.31", "futures-channel", @@ -1922,7 +1943,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8af59a261bcf42f45d1b261232847b9b850ba0a1419d6100698246fb66e9240" dependencies = [ "arc-swap", - "futures 0.3.21", + "futures 0.3.23", "log", "reqwest", "serde", @@ -1936,9 +1957,9 @@ dependencies = [ [[package]] name = "goblin" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91766b1121940d622933a13e20665857648681816089c9bc2075c4b75a6e4f6b" +checksum = "a7666983ed0dd8d21a6f6576ee00053ca0926fb281a5522577a4dbd0f1b54143" dependencies = [ "log", "plain", @@ -1947,9 +1968,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57" +checksum = "5ca32592cf21ac7ccab1825cd87f6c9b3d9022c44d086172ed0966bec8af30be" dependencies = [ "bytes", "fnv", @@ -2170,7 +2191,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca815a891b24fdfb243fa3239c86154392b0953ee584aa1a2a1f66d20cbe75cc" dependencies = [ "bytes", - "futures 0.3.21", + "futures 0.3.23", "headers", "http", "hyper", @@ -2221,12 +2242,12 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.41" +version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1779539f58004e5dba1c1f093d44325ebeb244bfc04b791acdc0aaeca9c04570" +checksum = "ad2bfd338099682614d3ee3fe0cd72e0b6a41ca6a87f6a74a3bd593c91650501" dependencies = [ "android_system_properties", - "core-foundation", + "core-foundation-sys", "js-sys", 
"wasm-bindgen", "winapi 0.3.9", @@ -2324,9 +2345,9 @@ dependencies = [ [[package]] name = "io-lifetimes" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24c3f4eff5495aee4c0399d7b6a0dc2b6e81be84242ffbfcf253ebacccc1d0cb" +checksum = "1ea37f355c05dde75b84bba2d767906ad522e97cd9e2eef2be7a4ab7fb442c06" [[package]] name = "ipnet" @@ -2402,7 +2423,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2b99d4207e2a04fb4581746903c2bb7eb376f88de9c699d0f3e10feeac0cd3a" dependencies = [ "derive_more", - "futures 0.3.21", + "futures 0.3.23", "jsonrpc-core", "jsonrpc-pubsub", "jsonrpc-server-utils", @@ -2420,7 +2441,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "futures-executor", "futures-util", "log", @@ -2435,7 +2456,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b51da17abecbdab3e3d4f26b01c5ec075e88d3abe3ab3b05dc9aa69392764ec0" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "jsonrpc-client-transports", ] @@ -2457,7 +2478,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1dea6e07251d9ce6a552abfb5d7ad6bc290a4596c8dcc3d795fae2bbdc1f3ff" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "hyper", "jsonrpc-core", "jsonrpc-server-utils", @@ -2473,7 +2494,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "382bb0206323ca7cda3dcd7e245cea86d37d02457a02a975e3378fb149a48845" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "jsonrpc-core", "jsonrpc-server-utils", "log", @@ -2488,7 +2509,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240f87695e6c6f62fb37f05c02c04953cf68d6408b8c1c89de85c7a0125b1011" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "jsonrpc-core", "lazy_static", "log", @@ -2504,7 +2525,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa4fdea130485b572c39a460d50888beb00afb3e35de23ccd7fad8ff19f0e0d4" dependencies = [ "bytes", - "futures 0.3.21", + "futures 0.3.23", "globset", "jsonrpc-core", "lazy_static", @@ -2545,9 +2566,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.127" +version = "0.2.132" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "505e71a4706fa491e9b1b55f51b95d4037d0821ee40131190475f692b35b009b" +checksum = "8371e4e5341c3a96db127eb2465ac681ced4c433e01dd0e938adbef26ba93ba5" [[package]] name = "libloading" @@ -2561,15 +2582,15 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.3" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da83a57f3f5ba3680950aa3cbc806fc297bc0b289d42e8942ed528ace71b8145" +checksum = "292a948cd991e376cf75541fe5b97a1081d713c618b4f1b9500f8844e49eb565" [[package]] name = "librocksdb-sys" -version = "0.6.1+6.28.2" +version = "0.8.0+7.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bc587013734dadb7cf23468e531aa120788b87243648be42e2d3a072186291" +checksum = "611804e4666a25136fcc5f8cf425ab4d26c7f74ea245ffe92ea23b85b6420b5d" dependencies = [ "bindgen", "bzip2-sys", @@ -2718,9 +2739,9 @@ checksum = 
"2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memmap2" -version = "0.5.5" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a79b39c93a7a5a27eeaf9a23b5ff43f1b9e0ad6b1cdd441140ae53c35613fc7" +checksum = "95af15f345b17af2efc8ead6080fb8bc376f8cec1b35277b935637595fe77498" dependencies = [ "libc", ] @@ -2853,14 +2874,16 @@ dependencies = [ [[package]] name = "nix" -version = "0.24.2" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "195cdbc1741b8134346d515b3a56a1c94b0912758009cfd53f99ea0f57b065fc" +checksum = "e322c04a9e3440c327fca7b6c8a63e6890a32fa2ad689db972425f07e0d22abb" dependencies = [ + "autocfg", "bitflags", "cfg-if 1.0.0", "libc", "memoffset", + "pin-utils", ] [[package]] @@ -2873,6 +2896,12 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "normalize-line-endings" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" + [[package]] name = "ntapi" version = "0.3.7" @@ -3038,9 +3067,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.13.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18a6dbe30758c9f83eb00cbea4ac95966305f5a7772f3f42ebfc7fc7eddbd8e1" +checksum = "074864da206b4973b84eb91683020dbefd6a8c3f0f38e054d93954e891935e4e" [[package]] name = "opaque-debug" @@ -3124,15 +3153,15 @@ dependencies = [ [[package]] name = "os_str_bytes" -version = "6.2.0" +version = "6.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "648001efe5d5c0102d8cea768e348da85d90af8ba91f0bea908f157951493cd4" +checksum = "9ff7415e9ae3fff1225851df9e0d9e4e5479f947619774677a63572e55e80eff" [[package]] name = "ouroboros" -version = "0.15.2" +version = "0.15.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7425ea87a1e31df63a27b6d31e21a35a9003268032a876465e8d43c2364b0de2" +checksum = "55190d158a4c09a30bdb5e3b2c50a37f299b8dd9f59d0e1510782732e8bf8877" dependencies = [ "aliasable", "ouroboros_macro", @@ -3140,9 +3169,9 @@ dependencies = [ [[package]] name = "ouroboros_macro" -version = "0.15.2" +version = "0.15.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "734aa7a4a6390b162112523cac2923a18e4f23b917880a68c826bf6e8bf48f06" +checksum = "816c4556bb87c05aad7710d02e88ed50a93f837d73dfe417ec5e890a9e1bbec7" dependencies = [ "Inflector", "proc-macro-error", @@ -3157,7 +3186,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9981e32fb75e004cc148f5fb70342f393830e0a4aa62e3cc93b50976218d42b6" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "libc", "log", "rand 0.7.3", @@ -3269,9 +3298,9 @@ dependencies = [ [[package]] name = "pest" -version = "2.2.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69486e2b8c2d2aeb9762db7b4e00b0331156393555cff467f4163ff06821eef8" +checksum = "4b0560d531d1febc25a3c9398a62a71256c0178f2e3443baedd9ad4bb8c9deb4" dependencies = [ "thiserror", "ucd-trie", @@ -3279,9 +3308,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.2.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b13570633aff33c6d22ce47dd566b10a3b9122c2fe9d8e7501895905be532b91" +checksum = "905708f7f674518498c1f8d644481440f476d39ca6ecae83319bba7c6c12da91" 
dependencies = [ "pest", "pest_generator", @@ -3289,9 +3318,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.2.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3c567e5702efdc79fb18859ea74c3eb36e14c43da7b8c1f098a4ed6514ec7a0" +checksum = "5803d8284a629cc999094ecd630f55e91b561a1d1ba75e233b00ae13b91a69ad" dependencies = [ "pest", "pest_meta", @@ -3302,9 +3331,9 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.2.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eb32be5ee3bbdafa8c7a18b0a8a8d962b66cfa2ceee4037f49267a50ee821fe" +checksum = "1538eb784f07615c6d9a8ab061089c6c54a344c5b4301db51990ca1c241e8c04" dependencies = [ "once_cell", "pest", @@ -3343,18 +3372,18 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78203e83c48cffbe01e4a2d35d566ca4de445d79a85372fc64e378bfc812a260" +checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "710faf75e1b33345361201d36d04e98ac1ed8909151a017ed384700836104c74" +checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2 1.0.43", "quote 1.0.21", @@ -3421,8 +3450,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a5aab5be6e4732b473071984b3164dbbfb7a3674d30ea5ff44410b6bcd960c3c" dependencies = [ "difflib", + "float-cmp", "itertools", + "normalize-line-endings", "predicates-core", + "regex", ] [[package]] @@ -3741,9 +3773,9 @@ checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" [[package]] name = "quinn" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7542006acd6e057ff632307d219954c44048f818898da03113d6c0086bfddd9" +checksum = "5b435e71d9bfa0d8889927231970c51fb89c58fa63bffcab117c9c7a41e5ef8f" dependencies = [ "bytes", "futures-channel", @@ -3760,9 +3792,9 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a13a5c0a674c1ce7150c9df7bc4a1e46c2fbbe7c710f56c0dc78b1a810e779e" +checksum = "3fce546b9688f767a57530652488420d419a8b1f44a478b451c3d1ab6d992a55" dependencies = [ "bytes", "fxhash", @@ -3961,7 +3993,7 @@ dependencies = [ name = "rbpf-cli" version = "1.12.0" dependencies = [ - "clap 3.2.16", + "clap 3.2.17", "serde", "serde_json", "solana-bpf-loader-program", @@ -4119,9 +4151,9 @@ dependencies = [ [[package]] name = "rocksdb" -version = "0.18.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "620f4129485ff1a7128d184bc687470c21c7951b64779ebc9cfdad3dcd920290" +checksum = "7e9562ea1d70c0cc63a34a22d977753b50cca91cc6b6527750463bd5dd8697bc" dependencies = [ "libc", "librocksdb-sys", @@ -4171,9 +4203,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.35.7" +version = "0.35.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d51cc38aa10f6bbb377ed28197aa052aa4e2b762c22be9d3153d01822587e787" +checksum = "72c825b8aa8010eb9ee99b75f05e10180b9278d161583034d7574c9d617aeada" dependencies = [ "bitflags", "errno", @@ -4329,9 
+4361,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.6.1" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dc14f172faf8a0194a3aded622712b0de276821addc574fa54fc0a1167e10dc" +checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c" dependencies = [ "bitflags", "core-foundation", @@ -4361,9 +4393,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.143" +version = "1.0.144" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53e8e5d5b70924f74ff5c6d64d9a5acd91422117c60f48c4e07855238a254553" +checksum = "0f747710de3dcd43b88c9168773254e809d8ddbdf9653b84e2554ab219f17860" dependencies = [ "serde_derive", ] @@ -4379,9 +4411,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.143" +version = "1.0.144" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3d8e8de557aee63c26b85b947f5e59b690d0454c753f3adeb5cd7835ab88391" +checksum = "94ed3a816fb1d101812f83e789f888322c34e291f894f19590dc310963e87a00" dependencies = [ "proc-macro2 1.0.43", "quote 1.0.21", @@ -4390,9 +4422,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.83" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38dd04e3c8279e75b31ef29dbdceebfe5ad89f4d0937213c53f7d49d01b3d5a7" +checksum = "e55a28e3aaef9d5ce0506d0a14dbba8054ddc7e499ef522dd8b26859ec9d4a44" dependencies = [ "itoa 1.0.3", "ryu", @@ -4425,11 +4457,12 @@ dependencies = [ [[package]] name = "serial_test" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eec42e7232e5ca56aa59d63af3c7f991fe71ee6a3ddd2d3480834cf3902b007" +checksum = "92761393ee4dc3ff8f4af487bd58f4307c9329bbedea02cac0089ad9c411e153" dependencies = [ - "futures 0.3.21", + "dashmap 5.3.4", + "futures 0.3.23", "lazy_static", "log", "parking_lot 0.12.1", @@ -4438,14 +4471,13 @@ dependencies = [ [[package]] name = "serial_test_derive" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1b95bb2f4f624565e8fe8140c789af7e2082c0e0561b5a82a1b678baa9703dc" +checksum = "4b6f5d1c3087fb119617cff2966fe3808a80e5eb59a8c1601d5994d66f4346a5" dependencies = [ "proc-macro-error", "proc-macro2 1.0.43", "quote 1.0.21", - "rustversion", "syn 1.0.99", ] @@ -4555,9 +4587,9 @@ dependencies = [ [[package]] name = "signature" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f054c6c1a6e95179d6f23ed974060dcefb2d9388bb7256900badad682c499de4" +checksum = "f0ea32af43239f0d353a7dd75a22d94c329c8cdaafdcb4c1c1335aa10c298a4a" [[package]] name = "simpl" @@ -4624,7 +4656,7 @@ checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ "base64 0.13.0", "bytes", - "futures 0.3.21", + "futures 0.3.23", "httparse", "log", "rand 0.8.5", @@ -4729,7 +4761,7 @@ dependencies = [ name = "solana-banking-bench" version = "1.12.0" dependencies = [ - "clap 3.2.16", + "clap 3.2.17", "crossbeam-channel", "log", "rand 0.7.3", @@ -4753,7 +4785,7 @@ name = "solana-banks-client" version = "1.12.0" dependencies = [ "borsh", - "futures 0.3.21", + "futures 0.3.23", "solana-banks-interface", "solana-banks-server", "solana-program 1.12.0", @@ -4780,7 +4812,7 @@ version = "1.12.0" dependencies = [ "bincode", "crossbeam-channel", - "futures 0.3.21", + "futures 0.3.23", "solana-banks-interface", "solana-client", 
"solana-gossip", @@ -4798,7 +4830,7 @@ dependencies = [ name = "solana-bench-batch-simulate-bundle" version = "1.12.0" dependencies = [ - "clap 3.2.16", + "clap 3.2.17", "env_logger", "log", "num-traits", @@ -4813,7 +4845,7 @@ dependencies = [ name = "solana-bench-streamer" version = "1.12.0" dependencies = [ - "clap 3.2.16", + "clap 3.2.17", "crossbeam-channel", "solana-net-utils", "solana-streamer", @@ -4827,6 +4859,7 @@ dependencies = [ "clap 2.34.0", "crossbeam-channel", "log", + "rand 0.7.3", "rayon", "serde_json", "serde_yaml", @@ -4920,7 +4953,7 @@ name = "solana-cargo-build-bpf" version = "1.12.0" dependencies = [ "cargo_metadata", - "clap 3.2.16", + "clap 3.2.17", "solana-sdk 1.12.0", ] @@ -4931,8 +4964,9 @@ dependencies = [ "assert_cmd", "bzip2", "cargo_metadata", - "clap 3.2.16", + "clap 3.2.17", "log", + "predicates", "regex", "serial_test", "solana-download-utils", @@ -4946,7 +4980,7 @@ name = "solana-cargo-test-bpf" version = "1.12.0" dependencies = [ "cargo_metadata", - "clap 3.2.16", + "clap 3.2.17", ] [[package]] @@ -4954,7 +4988,7 @@ name = "solana-cargo-test-sbf" version = "1.12.0" dependencies = [ "cargo_metadata", - "clap 3.2.16", + "clap 3.2.17", ] [[package]] @@ -4979,7 +5013,7 @@ name = "solana-clap-v3-utils" version = "1.12.0" dependencies = [ "chrono", - "clap 3.2.16", + "clap 3.2.17", "rpassword", "solana-perf", "solana-remote-wallet", @@ -5013,6 +5047,7 @@ dependencies = [ "serde_derive", "serde_json", "solana-account-decoder", + "solana-address-lookup-table-program", "solana-bpf-loader-program", "solana-clap-utils", "solana-cli-config", @@ -5092,7 +5127,7 @@ dependencies = [ "clap 2.34.0", "crossbeam-channel", "enum_dispatch", - "futures 0.3.21", + "futures 0.3.23", "futures-util", "indexmap", "indicatif", @@ -5103,6 +5138,7 @@ dependencies = [ "log", "quinn", "quinn-proto", + "quinn-udp", "rand 0.7.3", "rand_chacha 0.2.2", "rayon", @@ -5192,13 +5228,13 @@ dependencies = [ "bs58 0.4.0", "bytes", "chrono", - "clap 3.2.16", + "clap 3.2.17", "crossbeam-channel", - "dashmap", + "dashmap 4.0.2", "eager", "etcd-client", "fs_extra", - "futures 0.3.21", + "futures 0.3.23", "futures-util", "histogram", "indexmap", @@ -5268,7 +5304,7 @@ name = "solana-dos" version = "1.12.0" dependencies = [ "bincode", - "clap 3.2.16", + "clap 3.2.17", "crossbeam-channel", "itertools", "log", @@ -5361,9 +5397,9 @@ dependencies = [ [[package]] name = "solana-frozen-abi" -version = "1.11.5" +version = "1.11.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28e4e35bc58c465f161bde764ebce41fdfcb503583cf3a77e0211274cc12b22d" +checksum = "853bb08e658cfef8a2cab32459539b238fbaac9d5f34a916867f51467092e12e" dependencies = [ "ahash", "blake3", @@ -5388,7 +5424,7 @@ dependencies = [ "serde_derive", "serde_json", "sha2 0.10.2", - "solana-frozen-abi-macro 1.11.5", + "solana-frozen-abi-macro 1.11.7", "subtle", "thiserror", ] @@ -5428,9 +5464,9 @@ dependencies = [ [[package]] name = "solana-frozen-abi-macro" -version = "1.11.5" +version = "1.11.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "708f837d748e574b1e53b250ab1f4a69ba330bbc10d041d02381165f0f36291a" +checksum = "bbdc3ca4b60d9b9ec0474f745a8a0e8317440f148dc4910c8b045d86ad83f0ed" dependencies = [ "proc-macro2 1.0.43", "quote 1.0.21", @@ -5593,7 +5629,7 @@ name = "solana-keygen" version = "1.12.0" dependencies = [ "bs58 0.4.0", - "clap 3.2.16", + "clap 3.2.17", "dirs-next", "num_cpus", "solana-clap-v3-utils", @@ -5616,9 +5652,9 @@ dependencies = [ "chrono", "chrono-humanize", 
"crossbeam-channel", - "dashmap", + "dashmap 4.0.2", "fs_extra", - "futures 0.3.21", + "futures 0.3.23", "itertools", "lazy_static", "libc", @@ -5659,6 +5695,7 @@ dependencies = [ "spl-token-2022", "static_assertions", "tempfile", + "test-case", "thiserror", "tokio", "tokio-stream", @@ -5676,7 +5713,7 @@ dependencies = [ "clap 2.34.0", "crossbeam-channel", "csv", - "dashmap", + "dashmap 4.0.2", "histogram", "itertools", "log", @@ -5737,7 +5774,7 @@ name = "solana-log-analyzer" version = "1.12.0" dependencies = [ "byte-unit", - "clap 3.2.16", + "clap 3.2.17", "serde", "serde_json", "solana-logger 1.12.0", @@ -5746,9 +5783,9 @@ dependencies = [ [[package]] name = "solana-logger" -version = "1.11.5" +version = "1.11.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ea6fc68d63d33d862d919d4c8ad7f613ec243ccf6762d595c660020b289b57" +checksum = "4edef304d4b316e45690db8251f55683a6bca3d13b1ba19a73e9c15e4fdc2d9b" dependencies = [ "env_logger", "lazy_static", @@ -5814,7 +5851,7 @@ dependencies = [ name = "solana-net-shaper" version = "1.12.0" dependencies = [ - "clap 3.2.16", + "clap 3.2.17", "rand 0.7.3", "serde", "serde_json", @@ -5826,7 +5863,7 @@ name = "solana-net-utils" version = "1.12.0" dependencies = [ "bincode", - "clap 3.2.16", + "clap 3.2.17", "crossbeam-channel", "log", "nix", @@ -5903,7 +5940,7 @@ dependencies = [ name = "solana-poh-bench" version = "1.12.0" dependencies = [ - "clap 3.2.16", + "clap 3.2.17", "log", "rand 0.7.3", "rayon", @@ -5917,9 +5954,9 @@ dependencies = [ [[package]] name = "solana-program" -version = "1.11.5" +version = "1.11.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdd314d85b171bb20ccdcaf07346a9d52a012b10d84f4706f0628813d002fef8" +checksum = "cbc3839dd967928f16f138d42f718212e99d15fc15b26ebef104277d9809775e" dependencies = [ "base64 0.13.0", "bincode", @@ -5955,9 +5992,9 @@ dependencies = [ "serde_json", "sha2 0.10.2", "sha3 0.10.2", - "solana-frozen-abi 1.11.5", - "solana-frozen-abi-macro 1.11.5", - "solana-sdk-macro 1.11.5", + "solana-frozen-abi 1.11.7", + "solana-frozen-abi-macro 1.11.7", + "solana-sdk-macro 1.11.7", "thiserror", "tiny-bip39", "wasm-bindgen", @@ -6097,7 +6134,7 @@ dependencies = [ "bincode", "bs58 0.4.0", "crossbeam-channel", - "dashmap", + "dashmap 4.0.2", "itertools", "jsonrpc-core", "jsonrpc-core-client", @@ -6180,7 +6217,7 @@ dependencies = [ "byteorder", "bzip2", "crossbeam-channel", - "dashmap", + "dashmap 4.0.2", "dir-diff", "ed25519-dalek", "flate2", @@ -6232,9 +6269,9 @@ dependencies = [ [[package]] name = "solana-sdk" -version = "1.11.5" +version = "1.11.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad7d954df63b267857e26670e3aacfd8e2943ca703653b0418e5afc85046c2f3" +checksum = "d9017cf106b3a645f098ace248c849002f594773a4a9a0e4570ed4c9e063a140" dependencies = [ "assert_matches", "base64 0.13.0", @@ -6271,11 +6308,11 @@ dependencies = [ "serde_json", "sha2 0.10.2", "sha3 0.10.2", - "solana-frozen-abi 1.11.5", - "solana-frozen-abi-macro 1.11.5", - "solana-logger 1.11.5", - "solana-program 1.11.5", - "solana-sdk-macro 1.11.5", + "solana-frozen-abi 1.11.7", + "solana-frozen-abi-macro 1.11.7", + "solana-logger 1.11.7", + "solana-program 1.11.7", + "solana-sdk-macro 1.11.7", "thiserror", "uriparse", "wasm-bindgen", @@ -6339,9 +6376,9 @@ dependencies = [ [[package]] name = "solana-sdk-macro" -version = "1.11.5" +version = "1.11.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d0d9e81bc46edcc517b2df504856d57a5101c7586ec63f3143ae11fbe2eba613" +checksum = "a39cae167a571797569bbc9c679412591f7a0a091a1ac1b636b5f16a4b3d1ad0" dependencies = [ "bs58 0.4.0", "proc-macro2 1.0.43", @@ -6426,7 +6463,7 @@ dependencies = [ "bzip2", "enum-iterator", "flate2", - "futures 0.3.21", + "futures 0.3.23", "goauth", "http", "hyper", @@ -6492,6 +6529,8 @@ dependencies = [ "percentage", "pkcs8", "quinn", + "quinn-proto", + "quinn-udp", "rand 0.7.3", "rcgen", "rustls 0.20.6", @@ -6549,9 +6588,9 @@ version = "1.12.0" dependencies = [ "anchor-lang", "bigdecimal", - "clap 3.2.16", + "clap 3.2.17", "env_logger", - "futures 0.3.21", + "futures 0.3.23", "im", "itertools", "log", @@ -6682,8 +6721,10 @@ dependencies = [ "log", "num_cpus", "rand 0.7.3", + "rayon", "serde", "serde_json", + "serde_yaml", "signal-hook", "solana-clap-utils", "solana-cli-config", @@ -6742,6 +6783,7 @@ dependencies = [ "solana-frozen-abi-macro 1.12.0", "solana-logger 1.12.0", "solana-metrics", + "solana-program 1.12.0", "solana-program-runtime", "solana-sdk 1.12.0", "thiserror", @@ -6780,9 +6822,9 @@ dependencies = [ [[package]] name = "solana-zk-token-sdk" -version = "1.11.5" +version = "1.11.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62415c05a9ebfffaf8befaa61b24492ebf88269cf84cbeba714bac4125ec4ea3" +checksum = "258fb750d39c9dee375e43559189124aec32b028b54b1f3494f9a8ff5ef82cee" dependencies = [ "aes-gcm-siv", "arrayref", @@ -6801,8 +6843,8 @@ dependencies = [ "serde", "serde_json", "sha3 0.9.1", - "solana-program 1.11.5", - "solana-sdk 1.11.5", + "solana-program 1.11.7", + "solana-sdk 1.11.7", "subtle", "thiserror", "zeroize", @@ -6838,9 +6880,9 @@ dependencies = [ [[package]] name = "solana_rbpf" -version = "0.2.31" +version = "0.2.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80a28c5dfe7e8af38daa39d6561c8e8b9ed7a2f900951ebe7362ad6348d36c73" +checksum = "fe055100805e9069715acf73529ec563ad987a4d042da9defe9b7554560f2df4" dependencies = [ "byteorder", "combine", @@ -6886,7 +6928,7 @@ dependencies = [ "borsh", "num-derive", "num-traits", - "solana-program 1.11.5", + "solana-program 1.11.7", "spl-token", "spl-token-2022", "thiserror", @@ -6898,7 +6940,7 @@ version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd0dc6f70db6bacea7ff25870b016a65ba1d1b6013536f08e4fd79a8f9005325" dependencies = [ - "solana-program 1.11.5", + "solana-program 1.11.7", ] [[package]] @@ -6912,23 +6954,23 @@ dependencies = [ "num-derive", "num-traits", "num_enum", - "solana-program 1.11.5", + "solana-program 1.11.7", "thiserror", ] [[package]] name = "spl-token-2022" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0a97cbf60b91b610c846ccf8eecca96d92a24a19ffbf9fe06cd0c84e76ec45e" +checksum = "e4c0ebca4740cc4c892aa31e07d0b4dc1a24cac4748376d4b34f8eb0fee9ff46" dependencies = [ "arrayref", "bytemuck", "num-derive", "num-traits", "num_enum", - "solana-program 1.11.5", - "solana-zk-token-sdk 1.11.5", + "solana-program 1.11.7", + "solana-zk-token-sdk 1.11.7", "spl-memo", "spl-token", "thiserror", @@ -7062,9 +7104,9 @@ dependencies = [ [[package]] name = "systemstat" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f5dc96f7634f46ac7e485b8c051f5b89ec8ee5cc023236dd12fe4ae2fb52f80" +checksum = "91a3cae256f8af5246c2daad51ff29c32de4b4b0b0222063920af445fa3e12ab" dependencies = [ "bytesize", "chrono", @@ -7093,7 
+7135,7 @@ checksum = "1c38a012bed6fb9681d3bf71ffaa4f88f3b4b9ed3198cda6e4c8462d24d4bb80" dependencies = [ "anyhow", "fnv", - "futures 0.3.21", + "futures 0.3.23", "humantime", "opentelemetry", "pin-project", diff --git a/account-decoder/Cargo.toml b/account-decoder/Cargo.toml index 4cc8005711..96b97a5652 100644 --- a/account-decoder/Cargo.toml +++ b/account-decoder/Cargo.toml @@ -16,15 +16,15 @@ bincode = "1.3.3" bs58 = "0.4.0" bv = "0.11.1" lazy_static = "1.4.0" -serde = "1.0.138" +serde = "1.0.143" serde_derive = "1.0.103" -serde_json = "1.0.81" +serde_json = "1.0.83" solana-address-lookup-table-program = { path = "../programs/address-lookup-table", version = "=1.12.0" } solana-config-program = { path = "../programs/config", version = "=1.12.0" } solana-sdk = { path = "../sdk", version = "=1.12.0" } solana-vote-program = { path = "../programs/vote", version = "=1.12.0" } spl-token = { version = "=3.5.0", features = ["no-entrypoint"] } -spl-token-2022 = { version = "=0.4.2", features = ["no-entrypoint"] } +spl-token-2022 = { version = "=0.4.3", features = ["no-entrypoint"] } thiserror = "1.0" zstd = "0.11.2" diff --git a/account-decoder/src/parse_address_lookup_table.rs b/account-decoder/src/parse_address_lookup_table.rs index 26955d74a7..ca461f2636 100644 --- a/account-decoder/src/parse_address_lookup_table.rs +++ b/account-decoder/src/parse_address_lookup_table.rs @@ -19,7 +19,7 @@ pub fn parse_address_lookup_table( }) } -#[derive(Debug, Serialize, Deserialize, PartialEq)] +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] #[serde(rename_all = "camelCase", tag = "type", content = "info")] pub enum LookupTableAccountType { Uninitialized, diff --git a/accounts-bench/src/main.rs b/accounts-bench/src/main.rs index 987915d8c9..3d1c18633f 100644 --- a/accounts-bench/src/main.rs +++ b/accounts-bench/src/main.rs @@ -110,7 +110,7 @@ fn main() { for x in 0..iterations { if clean { let mut time = Measure::start("clean"); - accounts.accounts_db.clean_accounts(None, false, None); + accounts.accounts_db.clean_accounts_for_tests(); time.stop(); println!("{}", time); for slot in 0..num_slots { diff --git a/banks-interface/Cargo.toml b/banks-interface/Cargo.toml index 0a25eb4d39..644b873c18 100644 --- a/banks-interface/Cargo.toml +++ b/banks-interface/Cargo.toml @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-banks-interface" edition = "2021" [dependencies] -serde = { version = "1.0.138", features = ["derive"] } +serde = { version = "1.0.143", features = ["derive"] } solana-sdk = { path = "../sdk", version = "=1.12.0" } tarpc = { version = "0.29.0", features = ["full"] } diff --git a/banks-server/src/banks_server.rs b/banks-server/src/banks_server.rs index 75f9c2d8b4..56fd889086 100644 --- a/banks-server/src/banks_server.rs +++ b/banks-server/src/banks_server.rs @@ -106,7 +106,7 @@ impl BanksServer { } let server_bank_forks = bank_forks.clone(); Builder::new() - .name("solana-bank-forks-client".to_string()) + .name("solBankForksCli".to_string()) .spawn(move || Self::run(server_bank_forks, transaction_receiver)) .unwrap(); Self::new( @@ -154,13 +154,9 @@ fn verify_transaction( transaction: &Transaction, feature_set: &Arc, ) -> transaction::Result<()> { - if let Err(err) = transaction.verify() { - Err(err) - } else if let Err(err) = transaction.verify_precompiles(feature_set) { - Err(err) - } else { - Ok(()) - } + transaction.verify()?; + transaction.verify_precompiles(feature_set)?; + Ok(()) } fn simulate_transaction( diff --git a/banks-server/src/rpc_banks_service.rs 
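An aside on the verify_transaction rewrite above: the if-let-else chain becomes two `?` propagations, which return the first error unchanged. A minimal standalone sketch of the same shape (the error type and checks here are invented stand-ins for Transaction::verify() and Transaction::verify_precompiles(), not the patch's code):

    #[derive(Debug)]
    struct VerifyError(&'static str);

    fn check_signatures(ok: bool) -> Result<(), VerifyError> {
        ok.then_some(()).ok_or(VerifyError("bad signature"))
    }

    fn check_precompiles(ok: bool) -> Result<(), VerifyError> {
        ok.then_some(()).ok_or(VerifyError("bad precompile"))
    }

    fn verify(sigs_ok: bool, precompiles_ok: bool) -> Result<(), VerifyError> {
        // Each `?` returns early with the first error, exactly like the
        // rewritten verify_transaction in banks_server.rs.
        check_signatures(sigs_ok)?;
        check_precompiles(precompiles_ok)?;
        Ok(())
    }

    fn main() {
        assert!(verify(true, true).is_ok());
        assert!(verify(true, false).is_err());
    }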
b/banks-server/src/rpc_banks_service.rs index 8e0bfbeaaf..dbefcfc323 100644 --- a/banks-server/src/rpc_banks_service.rs +++ b/banks-server/src/rpc_banks_service.rs @@ -89,7 +89,7 @@ impl RpcBanksService { let connection_cache = connection_cache.clone(); let exit = exit.clone(); let thread_hdl = Builder::new() - .name("solana-rpc-banks".to_string()) + .name("solRpcBanksSvc".to_string()) .spawn(move || { Self::run( listen_addr, diff --git a/bench-batch-simulate-bundle/src/simulator.rs b/bench-batch-simulate-bundle/src/simulator.rs index 9805e2fe01..e8d844ba58 100644 --- a/bench-batch-simulate-bundle/src/simulator.rs +++ b/bench-batch-simulate-bundle/src/simulator.rs @@ -113,7 +113,7 @@ impl Simulator { }) .collect::>(); - return match rpc_client + match rpc_client .batch_simulate_bundle_with_config(bundles.into_iter().zip(configs).collect()) { Ok(response) => { @@ -144,6 +144,6 @@ impl Simulator { error!("error from rpc {}", e); None } - }; + } } } diff --git a/bench-get-confirmed-blocks-with-data/src/main.rs b/bench-get-confirmed-blocks-with-data/src/main.rs index 92b0bf1c4f..5361ab667c 100644 --- a/bench-get-confirmed-blocks-with-data/src/main.rs +++ b/bench-get-confirmed-blocks-with-data/src/main.rs @@ -110,10 +110,8 @@ fn main() { }) }) .collect(); - let mut results = Vec::new(); for t in tasks { - let r = t.await.expect("results fetched"); - results.push(r); + t.await.expect("results fetched"); } }); diff --git a/bench-tps/Cargo.toml b/bench-tps/Cargo.toml index bbbf4df729..f0c71fcb6e 100644 --- a/bench-tps/Cargo.toml +++ b/bench-tps/Cargo.toml @@ -12,8 +12,9 @@ publish = false clap = "2.33.1" crossbeam-channel = "0.5" log = "0.4.17" +rand = "0.7.0" rayon = "1.5.3" -serde_json = "1.0.81" +serde_json = "1.0.83" serde_yaml = "0.8.26" solana-clap-utils = { path = "../clap-utils", version = "=1.12.0" } solana-cli-config = { path = "../cli-config", version = "=1.12.0" } @@ -34,7 +35,7 @@ solana-version = { path = "../version", version = "=1.12.0" } thiserror = "1.0" [dev-dependencies] -serial_test = "0.8.0" +serial_test = "0.9.0" solana-local-cluster = { path = "../local-cluster", version = "=1.12.0" } solana-test-validator = { path = "../test-validator", version = "=1.12.0" } diff --git a/bench-tps/src/bench.rs b/bench-tps/src/bench.rs index 53a82c143d..33ff5bc51f 100644 --- a/bench-tps/src/bench.rs +++ b/bench-tps/src/bench.rs @@ -6,18 +6,20 @@ use { send_batch::*, }, log::*, + rand::distributions::{Distribution, Uniform}, rayon::prelude::*, solana_client::nonce_utils, solana_metrics::{self, datapoint_info}, solana_sdk::{ clock::{DEFAULT_MS_PER_SLOT, DEFAULT_S_PER_SLOT, MAX_PROCESSING_AGE}, + compute_budget::ComputeBudgetInstruction, hash::Hash, instruction::{AccountMeta, Instruction}, message::Message, native_token::Sol, pubkey::Pubkey, signature::{Keypair, Signer}, - system_transaction, + system_instruction, system_transaction, timing::{duration_as_ms, duration_as_s, duration_as_us, timestamp}, transaction::Transaction, }, @@ -36,6 +38,28 @@ use { // The point at which transactions become "too old", in seconds. const MAX_TX_QUEUE_AGE: u64 = (MAX_PROCESSING_AGE as f64 * DEFAULT_S_PER_SLOT) as u64; +// Add prioritization fee to transfer transactions, when `--use-randomized-compute-unit-price` +// is used, compute-unit-price is randomly generated in range of (0..MAX_COMPUTE_UNIT_PRICE). +// It also sets transaction's compute-unit to TRANSFER_TRANSACTION_COMPUTE_UNIT. 
Therefore the +// max additional cost is `TRANSFER_TRANSACTION_COMPUTE_UNIT * MAX_COMPUTE_UNIT_PRICE / 1_000_000` +const MAX_COMPUTE_UNIT_PRICE: u64 = 50; +const TRANSFER_TRANSACTION_COMPUTE_UNIT: u32 = 200; +/// Calculate the maximum possible prioritization fee; if `use-randomized-compute-unit-price` is +/// enabled, round up to the nearest lamport. +pub fn max_lamporots_for_prioritization(use_randomized_compute_unit_price: bool) -> u64 { + if use_randomized_compute_unit_price { + const MICRO_LAMPORTS_PER_LAMPORT: u64 = 1_000_000; + let micro_lamport_fee: u128 = (MAX_COMPUTE_UNIT_PRICE as u128) + .saturating_mul(TRANSFER_TRANSACTION_COMPUTE_UNIT as u128); + let fee = micro_lamport_fee + .saturating_add(MICRO_LAMPORTS_PER_LAMPORT.saturating_sub(1) as u128) + .saturating_div(MICRO_LAMPORTS_PER_LAMPORT as u128); + u64::try_from(fee).unwrap_or(u64::MAX) + } else { + 0u64 + } +} + pub type TimestampedTransaction = (Transaction, Option); pub type SharedTransactions = Arc>>>; @@ -68,6 +92,7 @@ struct TransactionChunkGenerator<'a, 'b, T: ?Sized> { nonce_chunks: Option>, chunk_index: usize, reclaim_lamports_back_to_source_account: bool, + use_randomized_compute_unit_price: bool, } impl<'a, 'b, T> TransactionChunkGenerator<'a, 'b, T> @@ -79,6 +104,7 @@ where gen_keypairs: &'a [Keypair], nonce_keypairs: Option<&'b Vec>, chunk_size: usize, + use_randomized_compute_unit_price: bool, ) -> Self { let account_chunks = KeypairChunks::new(gen_keypairs, chunk_size); let nonce_chunks = @@ -90,6 +116,7 @@ where nonce_chunks, chunk_index: 0, reclaim_lamports_back_to_source_account: false, + use_randomized_compute_unit_price, } } @@ -123,6 +150,7 @@ where dest_chunk, self.reclaim_lamports_back_to_source_account, blockhash.unwrap(), + self.use_randomized_compute_unit_price, ) }; @@ -217,13 +245,20 @@ fn generate_chunked_transfers(client: Arc, config: Config, gen_keypairs: Vec) -> u64 +pub fn do_bench_tps( + client: Arc, + config: Config, + gen_keypairs: Vec, + nonce_keypairs: Option>, +) -> u64 where T: 'static + BenchTpsClient + Send + Sync + ?Sized, { @@ -302,6 +342,8 @@ where tx_count, sustained, target_slots_per_epoch, + use_randomized_compute_unit_price, + use_durable_nonce, .. 
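To make the rounding in max_lamporots_for_prioritization above concrete: the worst case is 50 micro-lamports per compute unit times 200 compute units, i.e. 10,000 micro-lamports, and the add-divisor-minus-one trick rounds that up to 1 whole lamport. A self-contained restatement of the arithmetic (constants copied from the patch):

    const MAX_COMPUTE_UNIT_PRICE: u64 = 50; // micro-lamports per compute unit
    const TRANSFER_TRANSACTION_COMPUTE_UNIT: u32 = 200;
    const MICRO_LAMPORTS_PER_LAMPORT: u64 = 1_000_000;

    fn main() {
        let micro_lamports =
            MAX_COMPUTE_UNIT_PRICE as u128 * TRANSFER_TRANSACTION_COMPUTE_UNIT as u128;
        assert_eq!(micro_lamports, 10_000);
        // Ceiling division: add (divisor - 1) before dividing, as the patch does.
        let lamports = (micro_lamports + (MICRO_LAMPORTS_PER_LAMPORT - 1) as u128)
            / MICRO_LAMPORTS_PER_LAMPORT as u128;
        assert_eq!(lamports, 1); // 0.01 lamports rounds up to a single lamport
    }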
} = config; @@ -309,8 +351,9 @@ where let chunk_generator = TransactionChunkGenerator::new( client.clone(), &gen_keypairs, - None, // TODO(klykov): to be added in the follow up PR + nonce_keypairs.as_ref(), tx_count, + use_randomized_compute_unit_price, ); let first_tx_count = loop { @@ -338,17 +381,22 @@ where let shared_tx_active_thread_count = Arc::new(AtomicIsize::new(0)); let total_tx_sent_count = Arc::new(AtomicUsize::new(0)); - let blockhash_thread = { + // if we use durable nonce, we don't need blockhash thread + let blockhash_thread = if !use_durable_nonce { let exit_signal = exit_signal.clone(); let blockhash = blockhash.clone(); let client = client.clone(); let id = id.pubkey(); - Builder::new() - .name("solana-blockhash-poller".to_string()) - .spawn(move || { - poll_blockhash(&exit_signal, &blockhash, &client, &id); - }) - .unwrap() + Some( + Builder::new() + .name("solana-blockhash-poller".to_string()) + .spawn(move || { + poll_blockhash(&exit_signal, &blockhash, &client, &id); + }) + .unwrap(), + ) + } else { + None }; let s_threads = create_sender_threads( @@ -373,6 +421,7 @@ where threads, duration, sustained, + use_durable_nonce, ); // Stop the sampling threads so it will collect the stats @@ -391,9 +440,15 @@ where } } - info!("Waiting for blockhash thread..."); - if let Err(err) = blockhash_thread.join() { - info!(" join() failed with: {:?}", err); + if let Some(blockhash_thread) = blockhash_thread { + info!("Waiting for blockhash thread..."); + if let Err(err) = blockhash_thread.join() { + info!(" join() failed with: {:?}", err); + } + } + + if let Some(nonce_keypairs) = nonce_keypairs { + withdraw_durable_nonce_accounts(client.clone(), &gen_keypairs, &nonce_keypairs); } let balance = client.get_balance(&id.pubkey()).unwrap_or(0); @@ -423,6 +478,7 @@ fn generate_system_txs( dest: &VecDeque<&Keypair>, reclaim: bool, blockhash: &Hash, + use_randomized_compute_unit_price: bool, ) -> Vec { let pairs: Vec<_> = if !reclaim { source.iter().zip(dest.iter()).collect() @@ -430,15 +486,58 @@ fn generate_system_txs( dest.iter().zip(source.iter()).collect() }; - pairs - .par_iter() - .map(|(from, to)| { - ( - system_transaction::transfer(from, &to.pubkey(), 1, *blockhash), - Some(timestamp()), - ) - }) - .collect() + if use_randomized_compute_unit_price { + let mut rng = rand::thread_rng(); + let range = Uniform::from(0..MAX_COMPUTE_UNIT_PRICE); + let compute_unit_prices: Vec<_> = (0..pairs.len()) + .map(|_| range.sample(&mut rng) as u64) + .collect(); + let pairs_with_compute_unit_prices: Vec<_> = + pairs.iter().zip(compute_unit_prices.iter()).collect(); + + pairs_with_compute_unit_prices + .par_iter() + .map(|((from, to), compute_unit_price)| { + ( + transfer_with_compute_unit_price( + from, + &to.pubkey(), + 1, + *blockhash, + **compute_unit_price, + ), + Some(timestamp()), + ) + }) + .collect() + } else { + pairs + .par_iter() + .map(|(from, to)| { + ( + system_transaction::transfer(from, &to.pubkey(), 1, *blockhash), + Some(timestamp()), + ) + }) + .collect() + } +} + +fn transfer_with_compute_unit_price( + from_keypair: &Keypair, + to: &Pubkey, + lamports: u64, + recent_blockhash: Hash, + compute_unit_price: u64, +) -> Transaction { + let from_pubkey = from_keypair.pubkey(); + let instructions = vec![ + system_instruction::transfer(&from_pubkey, to, lamports), + ComputeBudgetInstruction::set_compute_unit_limit(TRANSFER_TRANSACTION_COMPUTE_UNIT), + ComputeBudgetInstruction::set_compute_unit_price(compute_unit_price), + ]; + let message = Message::new(&instructions, 
Some(&from_pubkey)); + Transaction::new(&[from_keypair], message, recent_blockhash) } fn get_nonce_blockhash( @@ -500,10 +599,14 @@ fn generate_txs( blockhash: &Arc>, chunk_generator: &mut TransactionChunkGenerator<'_, '_, T>, threads: usize, + use_durable_nonce: bool, ) { - let blockhash = blockhash.read().map(|x| *x).ok(); - - let transactions = chunk_generator.generate(blockhash.as_ref()); + let transactions = if use_durable_nonce { + chunk_generator.generate(None) + } else { + let blockhash = blockhash.read().map(|x| *x).ok(); + chunk_generator.generate(blockhash.as_ref()) + }; let sz = transactions.len() / threads; let chunks: Vec<_> = transactions.chunks(sz).collect(); @@ -859,7 +962,7 @@ mod tests { let keypairs = generate_and_fund_keypairs(client.clone(), &config.id, keypair_count, 20).unwrap(); - do_bench_tps(client, config, keypairs); + do_bench_tps(client, config, keypairs, None); } #[test] diff --git a/bench-tps/src/bench_tps_client.rs b/bench-tps/src/bench_tps_client.rs index 3d34a3a041..0ecca308ef 100644 --- a/bench-tps/src/bench_tps_client.rs +++ b/bench-tps/src/bench_tps_client.rs @@ -83,6 +83,13 @@ pub trait BenchTpsClient { /// Returns all information associated with the account of the provided pubkey fn get_account(&self, pubkey: &Pubkey) -> Result; + + /// Returns all information associated with the account of the provided pubkey, using explicit commitment + fn get_account_with_commitment( + &self, + pubkey: &Pubkey, + commitment_config: CommitmentConfig, + ) -> Result; } mod bank_client; diff --git a/bench-tps/src/bench_tps_client/bank_client.rs b/bench-tps/src/bench_tps_client/bank_client.rs index 9fae1f7a93..20323656a3 100644 --- a/bench-tps/src/bench_tps_client/bank_client.rs +++ b/bench-tps/src/bench_tps_client/bank_client.rs @@ -93,4 +93,18 @@ impl BenchTpsClient for BankClient { }) }) } + + fn get_account_with_commitment( + &self, + pubkey: &Pubkey, + commitment_config: CommitmentConfig, + ) -> Result { + SyncClient::get_account_with_commitment(self, pubkey, commitment_config) + .map_err(|err| err.into()) + .and_then(|account| { + account.ok_or_else(|| { + BenchTpsError::Custom(format!("AccountNotFound: pubkey={}", pubkey)) + }) + }) + } } diff --git a/bench-tps/src/bench_tps_client/rpc_client.rs b/bench-tps/src/bench_tps_client/rpc_client.rs index dd34a11f58..158fddd0a4 100644 --- a/bench-tps/src/bench_tps_client/rpc_client.rs +++ b/bench-tps/src/bench_tps_client/rpc_client.rs @@ -1,5 +1,5 @@ use { - crate::bench_tps_client::{BenchTpsClient, Result}, + crate::bench_tps_client::{BenchTpsClient, BenchTpsError, Result}, solana_client::rpc_client::RpcClient, solana_sdk::{ account::Account, commitment_config::CommitmentConfig, epoch_info::EpochInfo, hash::Hash, @@ -84,4 +84,19 @@ impl BenchTpsClient for RpcClient { fn get_account(&self, pubkey: &Pubkey) -> Result { RpcClient::get_account(self, pubkey).map_err(|err| err.into()) } + + fn get_account_with_commitment( + &self, + pubkey: &Pubkey, + commitment_config: CommitmentConfig, + ) -> Result { + RpcClient::get_account_with_commitment(self, pubkey, commitment_config) + .map(|res| res.value) + .map_err(|err| err.into()) + .and_then(|account| { + account.ok_or_else(|| { + BenchTpsError::Custom(format!("AccountNotFound: pubkey={}", pubkey)) + }) + }) + } } diff --git a/bench-tps/src/bench_tps_client/thin_client.rs b/bench-tps/src/bench_tps_client/thin_client.rs index 13d7707845..16686b8186 100644 --- a/bench-tps/src/bench_tps_client/thin_client.rs +++ b/bench-tps/src/bench_tps_client/thin_client.rs @@ -1,5 +1,5 @@ use 
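Each get_account_with_commitment impl added to this trait follows the same Option-to-Result shape: an RPC call that succeeds but finds no account is converted into BenchTpsError::Custom via ok_or_else. A simplified standalone sketch of that conversion (Account and the lookup are stand-ins for the solana-sdk and bench-tps types):

    #[derive(Debug)]
    struct Account; // stand-in for solana_sdk::account::Account

    #[derive(Debug)]
    enum BenchTpsError {
        Custom(String),
    }

    fn lookup(found: bool) -> Option<Account> {
        found.then_some(Account)
    }

    fn get_account_checked(pubkey: &str, found: bool) -> Result<Account, BenchTpsError> {
        // A missing account is an error for bench-tps, not a valid None.
        lookup(found)
            .ok_or_else(|| BenchTpsError::Custom(format!("AccountNotFound: pubkey={}", pubkey)))
    }

    fn main() {
        assert!(get_account_checked("So11111111111111111111111111111111111111112", true).is_ok());
        assert!(get_account_checked("So11111111111111111111111111111111111111112", false).is_err());
    }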
{ - crate::bench_tps_client::{BenchTpsClient, Result}, + crate::bench_tps_client::{BenchTpsClient, BenchTpsError, Result}, solana_client::thin_client::ThinClient, solana_sdk::{ account::Account, @@ -90,4 +90,18 @@ impl BenchTpsClient for ThinClient { .get_account(pubkey) .map_err(|err| err.into()) } + + fn get_account_with_commitment( + &self, + pubkey: &Pubkey, + commitment_config: CommitmentConfig, + ) -> Result { + SyncClient::get_account_with_commitment(self, pubkey, commitment_config) + .map_err(|err| err.into()) + .and_then(|account| { + account.ok_or_else(|| { + BenchTpsError::Custom(format!("AccountNotFound: pubkey={}", pubkey)) + }) + }) + } } diff --git a/bench-tps/src/bench_tps_client/tpu_client.rs b/bench-tps/src/bench_tps_client/tpu_client.rs index 53b0102a00..aa86e793a2 100644 --- a/bench-tps/src/bench_tps_client/tpu_client.rs +++ b/bench-tps/src/bench_tps_client/tpu_client.rs @@ -1,5 +1,5 @@ use { - crate::bench_tps_client::{BenchTpsClient, Result}, + crate::bench_tps_client::{BenchTpsClient, BenchTpsError, Result}, solana_client::tpu_client::TpuClient, solana_sdk::{ account::Account, commitment_config::CommitmentConfig, epoch_info::EpochInfo, hash::Hash, @@ -102,4 +102,20 @@ impl BenchTpsClient for TpuClient { .get_account(pubkey) .map_err(|err| err.into()) } + + fn get_account_with_commitment( + &self, + pubkey: &Pubkey, + commitment_config: CommitmentConfig, + ) -> Result { + self.rpc_client() + .get_account_with_commitment(pubkey, commitment_config) + .map(|res| res.value) + .map_err(|err| err.into()) + .and_then(|account| { + account.ok_or_else(|| { + BenchTpsError::Custom(format!("AccountNotFound: pubkey={}", pubkey)) + }) + }) + } } diff --git a/bench-tps/src/cli.rs b/bench-tps/src/cli.rs index 8c5c22ac09..9c583642d7 100644 --- a/bench-tps/src/cli.rs +++ b/bench-tps/src/cli.rs @@ -54,6 +54,8 @@ pub struct Config { pub external_client_type: ExternalClientType, pub use_quic: bool, pub tpu_connection_pool_size: usize, + pub use_randomized_compute_unit_price: bool, + pub use_durable_nonce: bool, } impl Default for Config { @@ -81,6 +83,8 @@ impl Default for Config { external_client_type: ExternalClientType::default(), use_quic: DEFAULT_TPU_USE_QUIC, tpu_connection_pool_size: DEFAULT_TPU_CONNECTION_POOL_SIZE, + use_randomized_compute_unit_price: false, + use_durable_nonce: false, } } } @@ -303,6 +307,12 @@ pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> { .help("Controls the connection pool size per remote address; only affects ThinClient (default) \ or TpuClient sends"), ) + .arg( + Arg::with_name("use_randomized_compute_unit_price") + .long("use-randomized-compute-unit-price") + .takes_value(false) + .help("Sets a random compute-unit-price in range [0..50) on transfer transactions"), + ) } /// Parses a clap `ArgMatches` structure into a `Config` @@ -433,5 +443,9 @@ pub fn extract_args(matches: &ArgMatches) -> Config { .expect("can't parse target slots per epoch"); } + if matches.is_present("use_randomized_compute_unit_price") { + args.use_randomized_compute_unit_price = true; + } + args } diff --git a/bench-tps/src/main.rs b/bench-tps/src/main.rs index 8d8fde7fda..ab0fc099f3 100644 --- a/bench-tps/src/main.rs +++ b/bench-tps/src/main.rs @@ -4,11 +4,11 @@ use { clap::value_t, log::*, solana_bench_tps::{ - bench::do_bench_tps, + bench::{do_bench_tps, max_lamporots_for_prioritization}, bench_tps_client::BenchTpsClient, cli::{self, ExternalClientType}, keypairs::get_keypairs, - send_batch::generate_keypairs, + send_batch::{generate_durable_nonce_accounts, 
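Stepping back to do_bench_tps: the blockhash poller is now spawned only when durable nonces are off, and joined only if it was spawned. The Option<JoinHandle> pattern, as a runnable sketch with a dummy worker in place of poll_blockhash:

    use std::thread::{Builder, JoinHandle};

    fn spawn_poller(use_durable_nonce: bool) -> Option<JoinHandle<()>> {
        if !use_durable_nonce {
            Some(
                Builder::new()
                    .name("solana-blockhash-poller".to_string())
                    .spawn(|| {
                        // The real thread polls the recent blockhash in a loop.
                    })
                    .unwrap(),
            )
        } else {
            // Durable-nonce transactions carry their own nonce-based blockhash,
            // so no poller is needed.
            None
        }
    }

    fn main() {
        if let Some(handle) = spawn_poller(false) {
            handle.join().unwrap();
        }
    }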
generate_keypairs}, }, solana_client::{ connection_cache::ConnectionCache, @@ -153,6 +153,8 @@ fn main() { external_client_type, use_quic, tpu_connection_pool_size, + use_randomized_compute_unit_price, + use_durable_nonce, .. } = &cli_config; @@ -161,8 +163,11 @@ fn main() { info!("Generating {} keypairs", keypair_count); let (keypairs, _) = generate_keypairs(id, keypair_count as u64); let num_accounts = keypairs.len() as u64; - let max_fee = - FeeRateGovernor::new(*target_lamports_per_signature, 0).max_lamports_per_signature; + let max_fee = FeeRateGovernor::new(*target_lamports_per_signature, 0) + .max_lamports_per_signature + .saturating_add(max_lamporots_for_prioritization( + *use_randomized_compute_unit_price, + )); let num_lamports_per_account = (num_accounts - 1 + NUM_SIGNATURES_FOR_TXS * max_fee) / num_accounts + num_lamports_per_account; @@ -226,5 +231,11 @@ fn main() { client_ids_and_stake_file, *read_from_client_file, ); - do_bench_tps(client, cli_config, keypairs); + + let nonce_keypairs = if *use_durable_nonce { + Some(generate_durable_nonce_accounts(client.clone(), &keypairs)) + } else { + None + }; + do_bench_tps(client, cli_config, keypairs, nonce_keypairs); } diff --git a/bench-tps/tests/bench_tps.rs b/bench-tps/tests/bench_tps.rs index 6d1c32b47a..ec12c8b7aa 100644 --- a/bench-tps/tests/bench_tps.rs +++ b/bench-tps/tests/bench_tps.rs @@ -1,9 +1,11 @@ #![allow(clippy::integer_arithmetic)] + use { serial_test::serial, solana_bench_tps::{ bench::{do_bench_tps, generate_and_fund_keypairs}, cli::Config, + send_batch::generate_durable_nonce_accounts, }, solana_client::{ connection_cache::ConnectionCache, @@ -76,7 +78,7 @@ fn test_bench_tps_local_cluster(config: Config) { ) .unwrap(); - let _total = do_bench_tps(client, config, keypairs); + let _total = do_bench_tps(client, config, keypairs, None); #[cfg(not(debug_assertions))] assert!(_total > 100); @@ -110,7 +112,7 @@ fn test_bench_tps_test_validator(config: Config) { .unwrap(), ); - let lamports_per_account = 100; + let lamports_per_account = 1000; let keypair_count = config.tx_count * config.keypair_multiplier; let keypairs = generate_and_fund_keypairs( @@ -120,8 +122,13 @@ fn test_bench_tps_test_validator(config: Config) { lamports_per_account, ) .unwrap(); + let nonce_keypairs = if config.use_durable_nonce { + Some(generate_durable_nonce_accounts(client.clone(), &keypairs)) + } else { + None + }; - let _total = do_bench_tps(client, config, keypairs); + let _total = do_bench_tps(client, config, keypairs, nonce_keypairs); #[cfg(not(debug_assertions))] assert!(_total > 100); @@ -129,6 +136,7 @@ fn test_bench_tps_test_validator(config: Config) { #[test] #[serial] +#[ignore] fn test_bench_tps_local_cluster_solana() { test_bench_tps_local_cluster(Config { tx_count: 100, @@ -146,3 +154,14 @@ fn test_bench_tps_tpu_client() { ..Config::default() }); } + +#[test] +#[serial] +fn test_bench_tps_tpu_client_nonce() { + test_bench_tps_test_validator(Config { + tx_count: 100, + duration: Duration::from_secs(10), + use_durable_nonce: true, + ..Config::default() + }); +} diff --git a/bloom/Cargo.toml b/bloom/Cargo.toml index b098e789b3..20496c5c36 100644 --- a/bloom/Cargo.toml +++ b/bloom/Cargo.toml @@ -15,7 +15,7 @@ fnv = "1.0.7" log = "0.4.17" rand = "0.7.0" rayon = "1.5.3" -serde = { version = "1.0.138", features = ["rc"] } +serde = { version = "1.0.143", features = ["rc"] } serde_derive = "1.0.103" solana-frozen-abi = { path = "../frozen-abi", version = "=1.12.0" } solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = 
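The max_fee adjustment in main.rs above simply raises the per-signature fee cap by the worst-case prioritization fee before funding requirements are computed, so pre-funded accounts stay solvent. Sketch of the same saturating arithmetic (the base fee value here is invented for illustration):

    fn max_prioritization_fee(enabled: bool) -> u64 {
        if enabled { 1 } else { 0 } // 1 lamport worst case, per the earlier worked example
    }

    fn main() {
        let base_max_fee: u64 = 5_000; // hypothetical max_lamports_per_signature
        let max_fee = base_max_fee.saturating_add(max_prioritization_fee(true));
        assert_eq!(max_fee, 5_001);
    }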
"=1.12.0" } diff --git a/ci/docker-run.sh b/ci/docker-run.sh index e154de2eef..a7b94f902e 100755 --- a/ci/docker-run.sh +++ b/ci/docker-run.sh @@ -45,14 +45,16 @@ if [[ -n $CI ]]; then # Share the real ~/.cargo between docker containers in CI for speed ARGS+=(--volume "$HOME:/home") - # sccache - ARGS+=( - --env "RUSTC_WRAPPER=/home/.cargo/bin/sccache" - --env AWS_ACCESS_KEY_ID - --env AWS_SECRET_ACCESS_KEY - --env SCCACHE_BUCKET - --env SCCACHE_REGION - ) + if [[ -n $BUILDKITE ]]; then + # sccache + ARGS+=( + --env "RUSTC_WRAPPER=/home/.cargo/bin/sccache" + --env AWS_ACCESS_KEY_ID + --env AWS_SECRET_ACCESS_KEY + --env SCCACHE_BUCKET + --env SCCACHE_REGION + ) + fi else # Avoid sharing ~/.cargo when building locally to avoid a mixed macOS/Linux # ~/.cargo diff --git a/ci/docker-rust-nightly/Dockerfile b/ci/docker-rust-nightly/Dockerfile index fff0f366d3..12aeff7e5e 100644 --- a/ci/docker-rust-nightly/Dockerfile +++ b/ci/docker-rust-nightly/Dockerfile @@ -1,4 +1,4 @@ -FROM solanalabs/rust:1.60.0 +FROM solanalabs/rust:1.63.0 ARG date RUN set -x \ diff --git a/ci/docker-rust/Dockerfile b/ci/docker-rust/Dockerfile index cd638e5c28..cae9384053 100644 --- a/ci/docker-rust/Dockerfile +++ b/ci/docker-rust/Dockerfile @@ -1,6 +1,6 @@ # Note: when the rust version is changed also modify # ci/rust-version.sh to pick up the new image tag -FROM rust:1.60.0 +FROM rust:1.63.0 # Add Google Protocol Buffers for Libra's metrics library. ENV PROTOC_VERSION 3.8.0 diff --git a/ci/publish-tarball.sh b/ci/publish-tarball.sh index ef078f6636..5703b37166 100755 --- a/ci/publish-tarball.sh +++ b/ci/publish-tarball.sh @@ -58,6 +58,8 @@ windows) git config core.symlinks true find . -type l -delete git reset --hard + # patched crossbeam doesn't build on windows + sed -i 's/^crossbeam-epoch/#crossbeam-epoch/' Cargo.toml ) ;; *) diff --git a/ci/rust-version.sh b/ci/rust-version.sh index dc3570fa93..792863c328 100644 --- a/ci/rust-version.sh +++ b/ci/rust-version.sh @@ -18,13 +18,13 @@ if [[ -n $RUST_STABLE_VERSION ]]; then stable_version="$RUST_STABLE_VERSION" else - stable_version=1.60.0 + stable_version=1.63.0 fi if [[ -n $RUST_NIGHTLY_VERSION ]]; then nightly_version="$RUST_NIGHTLY_VERSION" else - nightly_version=2022-04-01 + nightly_version=2022-08-12 fi diff --git a/ci/test-checks.sh b/ci/test-checks.sh index 72c174395b..65e5e6271a 100755 --- a/ci/test-checks.sh +++ b/ci/test-checks.sh @@ -65,11 +65,25 @@ fi _ ci/order-crates-for-publishing.py +nightly_clippy_allows=( + # This lint occurs all over the code base + "--allow=clippy::significant_drop_in_scrutinee" + + # The prost crate, used by solana-storage-proto, generates Rust source that + # triggers this lint. Need to resolve upstream in prost + "--allow=clippy::derive_partial_eq_without_eq" + + # This link seems to incorrectly trigger in + # `programs/bpf_loader/src/syscalls/{lib,cpi}.rs` + "--allow=clippy::explicit_auto_deref" +) + # -Z... 
is needed because of clippy bug: https://github.com/rust-lang/rust-clippy/issues/4612 # run nightly clippy for `sdk/` as there's a moderate amount of nightly-only code there _ scripts/cargo-for-all-lock-files.sh -- nightly clippy -Zunstable-options --all-targets -- \ --deny=warnings \ --deny=clippy::integer_arithmetic \ + "${nightly_clippy_allows[@]}" _ scripts/cargo-for-all-lock-files.sh -- nightly sort --workspace --check _ scripts/cargo-for-all-lock-files.sh -- nightly fmt --all -- --check diff --git a/ci/test-stable.sh b/ci/test-stable.sh index 2c5f136265..5ba5eb9d0a 100755 --- a/ci/test-stable.sh +++ b/ci/test-stable.sh @@ -12,6 +12,12 @@ annotate() { } } +exit_if_error() { + if [[ "$1" -ne 0 ]]; then + exit "$1" + fi +} + # Run the appropriate test based on entrypoint testName=$(basename "$0" .sh) @@ -35,7 +41,8 @@ JOBS=$((JOBS>NPROC ? NPROC : JOBS)) echo "Executing $testName" case $testName in test-stable) - _ "$cargo" stable test --jobs "$JOBS" --all --tests --exclude solana-local-cluster ${V:+--verbose} -- --nocapture + _ "$cargo" stable test --jobs "$JOBS" --all --tests --exclude solana-local-cluster ${V:+--verbose} -- -Z unstable-options --format json --report-time | tee results.json + exit_if_error "${PIPESTATUS[0]}" ;; test-stable-bpf) # Clear the C dependency files, if dependency moves these files are not regenerated @@ -59,7 +66,8 @@ test-stable-bpf) _ make -C programs/bpf/c tests _ "$cargo" stable test \ --manifest-path programs/bpf/Cargo.toml \ - --no-default-features --features=bpf_c,bpf_rust -- --nocapture + --no-default-features --features=bpf_c,bpf_rust -- -Z unstable-options --format json --report-time | tee results.json + exit_if_error "${PIPESTATUS[0]}" # BPF Rust program unit tests for bpf_test in programs/bpf/rust/*; do @@ -81,7 +89,7 @@ test-stable-bpf) # latest mainbeta release version. solana_program_count=$(grep -c 'solana-program v' cargo.log) rm -f cargo.log - if ((solana_program_count > 10)); then + if ((solana_program_count > 20)); then echo "Regression of build redundancy ${solana_program_count}." echo "Review dependency features that trigger redundant rebuilds of solana-program." 
exit 1 @@ -95,7 +103,8 @@ test-stable-bpf) _ "$cargo" stable test \ --manifest-path programs/bpf/Cargo.toml \ --no-default-features --features=bpf_c,bpf_rust assert_instruction_count \ - -- --nocapture &> "${bpf_target_path}"/deploy/instuction_counts.txt + -- -Z unstable-options --format json --report-time |& tee results.json + awk '!/{ "type": .* }/' results.json > "${bpf_target_path}"/deploy/instuction_counts.txt bpf_dump_archive="bpf-dumps.tar.bz2" rm -f "$bpf_dump_archive" @@ -120,27 +129,32 @@ test-stable-perf) fi _ "$cargo" stable build --bins ${V:+--verbose} - _ "$cargo" stable test --package solana-perf --package solana-ledger --package solana-core --lib ${V:+--verbose} -- --nocapture + _ "$cargo" stable test --package solana-perf --package solana-ledger --package solana-core --lib ${V:+--verbose} -- -Z unstable-options --format json --report-time | tee results.json + exit_if_error "${PIPESTATUS[0]}" _ "$cargo" stable run --manifest-path poh-bench/Cargo.toml ${V:+--verbose} -- --hashes-per-tick 10 ;; test-local-cluster) _ "$cargo" stable build --release --bins ${V:+--verbose} - _ "$cargo" stable test --release --package solana-local-cluster --test local_cluster ${V:+--verbose} -- --nocapture --test-threads=1 + _ "$cargo" stable test --release --package solana-local-cluster --test local_cluster ${V:+--verbose} -- --test-threads=1 -Z unstable-options --format json --report-time | tee results.json + exit_if_error "${PIPESTATUS[0]}" exit 0 ;; test-local-cluster-flakey) _ "$cargo" stable build --release --bins ${V:+--verbose} - _ "$cargo" stable test --release --package solana-local-cluster --test local_cluster_flakey ${V:+--verbose} -- --nocapture --test-threads=1 + _ "$cargo" stable test --release --package solana-local-cluster --test local_cluster_flakey ${V:+--verbose} -- --test-threads=1 -Z unstable-options --format json --report-time | tee results.json + exit_if_error "${PIPESTATUS[0]}" exit 0 ;; test-local-cluster-slow-1) _ "$cargo" stable build --release --bins ${V:+--verbose} - _ "$cargo" stable test --release --package solana-local-cluster --test local_cluster_slow_1 ${V:+--verbose} -- --nocapture --test-threads=1 + _ "$cargo" stable test --release --package solana-local-cluster --test local_cluster_slow_1 ${V:+--verbose} -- --test-threads=1 -Z unstable-options --format json --report-time | tee results.json + exit_if_error "${PIPESTATUS[0]}" exit 0 ;; test-local-cluster-slow-2) _ "$cargo" stable build --release --bins ${V:+--verbose} - _ "$cargo" stable test --release --package solana-local-cluster --test local_cluster_slow_2 ${V:+--verbose} -- --nocapture --test-threads=1 + _ "$cargo" stable test --release --package solana-local-cluster --test local_cluster_slow_2 ${V:+--verbose} -- --test-threads=1 -Z unstable-options --format json --report-time | tee results.json + exit_if_error "${PIPESTATUS[0]}" exit 0 ;; test-wasm) @@ -157,12 +171,8 @@ test-wasm) exit 0 ;; test-docs) - echo "getting protoc" - echo "PROTOC: $PROTOC" - echo "PROTOC_INCLUDE: $PROTOC_INCLUDE" - command -v protoc - protoc --version - _ "$cargo" stable test --jobs "$JOBS" --all --doc --exclude solana-local-cluster ${V:+--verbose} -- --nocapture + _ "$cargo" stable test --jobs "$JOBS" --all --doc --exclude solana-local-cluster ${V:+--verbose} -- -Z unstable-options --format json --report-time | tee results.json + exit_if_error "${PIPESTATUS[0]}" exit 0 ;; *) diff --git a/cli-config/Cargo.toml b/cli-config/Cargo.toml index a58bdb6562..586176f66d 100644 --- a/cli-config/Cargo.toml +++ b/cli-config/Cargo.toml @@ 
-12,7 +12,7 @@ documentation = "https://docs.rs/solana-cli-config" [dependencies] dirs-next = "2.0.0" lazy_static = "1.4.0" -serde = "1.0.138" +serde = "1.0.143" serde_derive = "1.0.103" serde_yaml = "0.8.26" solana-clap-utils = { path = "../clap-utils", version = "=1.12.0" } diff --git a/cli-output/Cargo.toml b/cli-output/Cargo.toml index 550c01f125..3a492dc447 100644 --- a/cli-output/Cargo.toml +++ b/cli-output/Cargo.toml @@ -12,15 +12,15 @@ documentation = "https://docs.rs/solana-cli-output" [dependencies] Inflector = "0.11.4" base64 = "0.13.0" -chrono = { version = "0.4.11", features = ["serde"] } +chrono = { version = "0.4.21", features = ["serde"] } clap = "2.33.0" console = "0.15.0" humantime = "2.0.1" indicatif = "0.17.0" pretty-hex = "0.3.0" -semver = "1.0.10" -serde = "1.0.138" -serde_json = "1.0.81" +semver = "1.0.13" +serde = "1.0.143" +serde_json = "1.0.83" solana-account-decoder = { path = "../account-decoder", version = "=1.12.0" } solana-clap-utils = { path = "../clap-utils", version = "=1.12.0" } solana-cli-config = { path = "../cli-config", version = "=1.12.0" } diff --git a/cli-output/src/cli_output.rs b/cli-output/src/cli_output.rs index f45c5713e4..645b7b66fb 100644 --- a/cli-output/src/cli_output.rs +++ b/cli-output/src/cli_output.rs @@ -2111,6 +2111,75 @@ impl fmt::Display for CliUpgradeableBuffers { } } +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +#[serde(rename_all = "camelCase")] +pub struct CliAddressLookupTable { + pub lookup_table_address: String, + pub authority: Option, + pub deactivation_slot: u64, + pub last_extended_slot: u64, + pub addresses: Vec, +} +impl QuietDisplay for CliAddressLookupTable {} +impl VerboseDisplay for CliAddressLookupTable {} +impl fmt::Display for CliAddressLookupTable { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + writeln!(f)?; + writeln_name_value(f, "Lookup Table Address:", &self.lookup_table_address)?; + if let Some(authority) = &self.authority { + writeln_name_value(f, "Authority:", authority)?; + } else { + writeln_name_value(f, "Authority:", "None (frozen)")?; + } + if self.deactivation_slot == u64::MAX { + writeln_name_value(f, "Deactivation Slot:", "None (still active)")?; + } else { + writeln_name_value(f, "Deactivation Slot:", &self.deactivation_slot.to_string())?; + } + if self.last_extended_slot == 0 { + writeln_name_value(f, "Last Extended Slot:", "None (empty)")?; + } else { + writeln_name_value( + f, + "Last Extended Slot:", + &self.last_extended_slot.to_string(), + )?; + } + if self.addresses.is_empty() { + writeln_name_value(f, "Address Table Entries:", "None (empty)")?; + } else { + writeln!(f, "{}", style("Address Table Entries:".to_string()).bold())?; + writeln!(f)?; + writeln!( + f, + "{}", + style(format!(" {:<5} {}", "Index", "Address")).bold() + )?; + for (index, address) in self.addresses.iter().enumerate() { + writeln!(f, " {:<5} {}", index, address)?; + } + } + Ok(()) + } +} + +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CliAddressLookupTableCreated { + pub lookup_table_address: String, + pub signature: String, +} +impl QuietDisplay for CliAddressLookupTableCreated {} +impl VerboseDisplay for CliAddressLookupTableCreated {} +impl fmt::Display for CliAddressLookupTableCreated { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + writeln!(f)?; + writeln_name_value(f, "Signature:", &self.signature)?; + writeln_name_value(f, "Lookup Table Address:", &self.lookup_table_address)?; + Ok(()) + } +} + #[derive(Debug, Default)] pub struct 
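The Display impl above renders sentinel values (a u64::MAX deactivation slot, a zero last-extended slot, an empty address list) as human-readable "None (...)" strings. A hypothetical usage of the new output type, with invented field values, assuming solana-cli-output exports it exactly as the cli crate's import further below does:

    use solana_cli_output::CliAddressLookupTable;

    fn main() {
        let table = CliAddressLookupTable {
            lookup_table_address: "invented111111111111111111111111111111111111".to_string(),
            authority: Some("invented222222222222222222222222222222222222".to_string()),
            deactivation_slot: u64::MAX, // rendered as "None (still active)"
            last_extended_slot: 0,       // rendered as "None (empty)"
            addresses: vec![],           // rendered as "None (empty)"
        };
        println!("{}", table); // Display comes from the impl added in this patch
    }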
ReturnSignersConfig { pub dump_transaction_message: bool, diff --git a/cli/Cargo.toml b/cli/Cargo.toml index ad15d4d60e..6fdfa258c4 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -23,11 +23,12 @@ log = "0.4.17" num-traits = "0.2" pretty-hex = "0.3.0" reqwest = { version = "0.11.11", default-features = false, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] } -semver = "1.0.10" -serde = "1.0.138" +semver = "1.0.13" +serde = "1.0.143" serde_derive = "1.0.103" -serde_json = "1.0.81" +serde_json = "1.0.83" solana-account-decoder = { path = "../account-decoder", version = "=1.12.0" } +solana-address-lookup-table-program = { path = "../programs/address-lookup-table", version = "=1.12.0" } solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.12.0" } solana-clap-utils = { path = "../clap-utils", version = "=1.12.0" } solana-cli-config = { path = "../cli-config", version = "=1.12.0" } @@ -42,7 +43,7 @@ solana-sdk = { path = "../sdk", version = "=1.12.0" } solana-transaction-status = { path = "../transaction-status", version = "=1.12.0" } solana-version = { path = "../version", version = "=1.12.0" } solana-vote-program = { path = "../programs/vote", version = "=1.12.0" } -solana_rbpf = "=0.2.31" +solana_rbpf = "=0.2.32" spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] } thiserror = "1.0.31" tiny-bip39 = "0.8.2" diff --git a/cli/src/address_lookup_table.rs b/cli/src/address_lookup_table.rs new file mode 100644 index 0000000000..9096fc1d49 --- /dev/null +++ b/cli/src/address_lookup_table.rs @@ -0,0 +1,854 @@ +use { + crate::cli::{CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult}, + clap::{App, AppSettings, Arg, ArgMatches, SubCommand}, + solana_address_lookup_table_program::{ + instruction::{ + close_lookup_table, create_lookup_table, create_lookup_table_signed, + deactivate_lookup_table, extend_lookup_table, freeze_lookup_table, + }, + state::AddressLookupTable, + }, + solana_clap_utils::{self, input_parsers::*, input_validators::*, keypair::*}, + solana_cli_output::{CliAddressLookupTable, CliAddressLookupTableCreated, CliSignature}, + solana_client::{rpc_client::RpcClient, rpc_config::RpcSendTransactionConfig}, + solana_remote_wallet::remote_wallet::RemoteWalletManager, + solana_sdk::{ + account::from_account, clock::Clock, commitment_config::CommitmentConfig, message::Message, + pubkey::Pubkey, signer::Signer, sysvar, transaction::Transaction, + }, + std::sync::Arc, +}; + +#[derive(Debug, PartialEq, Eq)] +pub enum AddressLookupTableCliCommand { + CreateLookupTable { + authority_pubkey: Pubkey, + authority_signer_index: Option, + payer_signer_index: SignerIndex, + }, + FreezeLookupTable { + lookup_table_pubkey: Pubkey, + authority_signer_index: SignerIndex, + bypass_warning: bool, + }, + ExtendLookupTable { + lookup_table_pubkey: Pubkey, + authority_signer_index: SignerIndex, + payer_signer_index: SignerIndex, + new_addresses: Vec, + }, + DeactivateLookupTable { + lookup_table_pubkey: Pubkey, + authority_signer_index: SignerIndex, + bypass_warning: bool, + }, + CloseLookupTable { + lookup_table_pubkey: Pubkey, + authority_signer_index: SignerIndex, + recipient_pubkey: Pubkey, + }, + ShowLookupTable { + lookup_table_pubkey: Pubkey, + }, +} + +pub trait AddressLookupTableSubCommands { + fn address_lookup_table_subcommands(self) -> Self; +} + +impl AddressLookupTableSubCommands for App<'_, '_> { + fn address_lookup_table_subcommands(self) -> Self { + self.subcommand( + SubCommand::with_name("address-lookup-table") + 
.about("Address lookup table management") + .setting(AppSettings::SubcommandRequiredElseHelp) + .subcommand( + SubCommand::with_name("create") + .about("Create a lookup table") + .arg( + Arg::with_name("authority") + .long("authority") + .value_name("AUTHORITY_PUBKEY") + .takes_value(true) + .validator(is_pubkey) + .help( + "Lookup table authority address [default: the default configured keypair]. \ + WARNING: Cannot be used for creating a lookup table for a cluster running v1.11 + or earlier which requires the authority to sign for lookup table creation.", + ) + ) + .arg( + Arg::with_name("authority_signer") + .long("authority-signer") + .value_name("AUTHORITY_SIGNER") + .takes_value(true) + .conflicts_with("authority") + .validator(is_valid_signer) + .help("Lookup table authority keypair [default: the default configured keypair].") + ) + .arg( + Arg::with_name("payer") + .long("payer") + .value_name("PAYER_SIGNER") + .takes_value(true) + .validator(is_valid_signer) + .help("Account that will pay rent fees for the created lookup table [default: the default configured keypair]") + ) + ) + .subcommand( + SubCommand::with_name("freeze") + .about("Permanently freezes a lookup table") + .arg( + Arg::with_name("lookup_table_address") + .index(1) + .value_name("LOOKUP_TABLE_ADDRESS") + .takes_value(true) + .required(true) + .validator(is_pubkey) + .help("Address of the lookup table") + ) + .arg( + Arg::with_name("authority") + .long("authority") + .value_name("AUTHORITY_SIGNER") + .takes_value(true) + .validator(is_valid_signer) + .help("Lookup table authority [default: the default configured keypair]") + ) + .arg( + Arg::with_name("bypass_warning") + .long("bypass-warning") + .takes_value(false) + .help("Bypass the permanent lookup table freeze warning"), + ), + ) + .subcommand( + SubCommand::with_name("extend") + .about("Append more addresses to a lookup table") + .arg( + Arg::with_name("lookup_table_address") + .index(1) + .value_name("LOOKUP_TABLE_ADDRESS") + .takes_value(true) + .required(true) + .validator(is_pubkey) + .help("Address of the lookup table") + ) + .arg( + Arg::with_name("authority") + .long("authority") + .value_name("AUTHORITY_SIGNER") + .takes_value(true) + .validator(is_valid_signer) + .help("Lookup table authority [default: the default configured keypair]") + ) + .arg( + Arg::with_name("payer") + .long("payer") + .value_name("PAYER_SIGNER") + .takes_value(true) + .validator(is_valid_signer) + .help("Account that will pay rent fees for the extended lookup table [default: the default configured keypair]") + ) + .arg( + Arg::with_name("addresses") + .long("addresses") + .value_name("ADDRESS_1,ADDRESS_2") + .takes_value(true) + .use_delimiter(true) + .required(true) + .validator(is_pubkey) + .help("Comma separated list of addresses to append") + ) + ) + .subcommand( + SubCommand::with_name("deactivate") + .about("Permanently deactivates a lookup table") + .arg( + Arg::with_name("lookup_table_address") + .index(1) + .value_name("LOOKUP_TABLE_ADDRESS") + .takes_value(true) + .required(true) + .help("Address of the lookup table") + ) + .arg( + Arg::with_name("authority") + .long("authority") + .value_name("AUTHORITY_SIGNER") + .takes_value(true) + .validator(is_valid_signer) + .help("Lookup table authority [default: the default configured keypair]") + ) + .arg( + Arg::with_name("bypass_warning") + .long("bypass-warning") + .takes_value(false) + .help("Bypass the permanent lookup table deactivation warning"), + ), + ) + .subcommand( + SubCommand::with_name("close") + 
.about("Permanently closes a lookup table") + .arg( + Arg::with_name("lookup_table_address") + .index(1) + .value_name("LOOKUP_TABLE_ADDRESS") + .takes_value(true) + .required(true) + .help("Address of the lookup table") + ) + .arg( + Arg::with_name("recipient") + .long("recipient") + .value_name("RECIPIENT_ADDRESS") + .takes_value(true) + .validator(is_pubkey) + .help("Address of the recipient account to deposit the closed account's lamports [default: the default configured keypair]") + ) + .arg( + Arg::with_name("authority") + .long("authority") + .value_name("AUTHORITY_SIGNER") + .takes_value(true) + .validator(is_valid_signer) + .help("Lookup table authority [default: the default configured keypair]") + ) + ) + .subcommand( + SubCommand::with_name("get") + .about("Display information about a lookup table") + .arg( + Arg::with_name("lookup_table_address") + .index(1) + .value_name("LOOKUP_TABLE_ADDRESS") + .takes_value(true) + .help("Address of the lookup table to show") + ) + ) + ) + } +} + +pub fn parse_address_lookup_table_subcommand( + matches: &ArgMatches<'_>, + default_signer: &DefaultSigner, + wallet_manager: &mut Option>, +) -> Result { + let (subcommand, sub_matches) = matches.subcommand(); + + let response = match (subcommand, sub_matches) { + ("create", Some(matches)) => { + let mut bulk_signers = vec![Some( + default_signer.signer_from_path(matches, wallet_manager)?, + )]; + + let authority_pubkey = if let Ok((authority_signer, Some(authority_pubkey))) = + signer_of(matches, "authority_signer", wallet_manager) + { + bulk_signers.push(authority_signer); + authority_pubkey + } else if let Some(authority_pubkey) = pubkey_of(matches, "authority") { + authority_pubkey + } else { + default_signer + .signer_from_path(matches, wallet_manager)? + .pubkey() + }; + + let payer_pubkey = if let Ok((payer_signer, Some(payer_pubkey))) = + signer_of(matches, "payer", wallet_manager) + { + bulk_signers.push(payer_signer); + Some(payer_pubkey) + } else { + Some( + default_signer + .signer_from_path(matches, wallet_manager)? + .pubkey(), + ) + }; + + let signer_info = + default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; + + CliCommandInfo { + command: CliCommand::AddressLookupTable( + AddressLookupTableCliCommand::CreateLookupTable { + authority_pubkey, + authority_signer_index: signer_info.index_of(Some(authority_pubkey)), + payer_signer_index: signer_info.index_of(payer_pubkey).unwrap(), + }, + ), + signers: signer_info.signers, + } + } + ("freeze", Some(matches)) => { + let lookup_table_pubkey = pubkey_of(matches, "lookup_table_address").unwrap(); + + let mut bulk_signers = vec![Some( + default_signer.signer_from_path(matches, wallet_manager)?, + )]; + + let authority_pubkey = if let Ok((authority_signer, Some(authority_pubkey))) = + signer_of(matches, "authority", wallet_manager) + { + bulk_signers.push(authority_signer); + Some(authority_pubkey) + } else { + Some( + default_signer + .signer_from_path(matches, wallet_manager)? 
+ .pubkey(), + ) + }; + + let signer_info = + default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; + + CliCommandInfo { + command: CliCommand::AddressLookupTable( + AddressLookupTableCliCommand::FreezeLookupTable { + lookup_table_pubkey, + authority_signer_index: signer_info.index_of(authority_pubkey).unwrap(), + bypass_warning: matches.is_present("bypass_warning"), + }, + ), + signers: signer_info.signers, + } + } + ("extend", Some(matches)) => { + let lookup_table_pubkey = pubkey_of(matches, "lookup_table_address").unwrap(); + + let mut bulk_signers = vec![Some( + default_signer.signer_from_path(matches, wallet_manager)?, + )]; + + let authority_pubkey = if let Ok((authority_signer, Some(authority_pubkey))) = + signer_of(matches, "authority", wallet_manager) + { + bulk_signers.push(authority_signer); + Some(authority_pubkey) + } else { + Some( + default_signer + .signer_from_path(matches, wallet_manager)? + .pubkey(), + ) + }; + + let payer_pubkey = if let Ok((payer_signer, Some(payer_pubkey))) = + signer_of(matches, "payer", wallet_manager) + { + bulk_signers.push(payer_signer); + Some(payer_pubkey) + } else { + Some( + default_signer + .signer_from_path(matches, wallet_manager)? + .pubkey(), + ) + }; + + let new_addresses: Vec = values_of(matches, "addresses").unwrap(); + + let signer_info = + default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; + + CliCommandInfo { + command: CliCommand::AddressLookupTable( + AddressLookupTableCliCommand::ExtendLookupTable { + lookup_table_pubkey, + authority_signer_index: signer_info.index_of(authority_pubkey).unwrap(), + payer_signer_index: signer_info.index_of(payer_pubkey).unwrap(), + new_addresses, + }, + ), + signers: signer_info.signers, + } + } + ("deactivate", Some(matches)) => { + let lookup_table_pubkey = pubkey_of(matches, "lookup_table_address").unwrap(); + + let mut bulk_signers = vec![Some( + default_signer.signer_from_path(matches, wallet_manager)?, + )]; + + let authority_pubkey = if let Ok((authority_signer, Some(authority_pubkey))) = + signer_of(matches, "authority", wallet_manager) + { + bulk_signers.push(authority_signer); + Some(authority_pubkey) + } else { + Some( + default_signer + .signer_from_path(matches, wallet_manager)? + .pubkey(), + ) + }; + + let signer_info = + default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; + + CliCommandInfo { + command: CliCommand::AddressLookupTable( + AddressLookupTableCliCommand::DeactivateLookupTable { + lookup_table_pubkey, + authority_signer_index: signer_info.index_of(authority_pubkey).unwrap(), + bypass_warning: matches.is_present("bypass_warning"), + }, + ), + signers: signer_info.signers, + } + } + ("close", Some(matches)) => { + let lookup_table_pubkey = pubkey_of(matches, "lookup_table_address").unwrap(); + + let mut bulk_signers = vec![Some( + default_signer.signer_from_path(matches, wallet_manager)?, + )]; + + let authority_pubkey = if let Ok((authority_signer, Some(authority_pubkey))) = + signer_of(matches, "authority", wallet_manager) + { + bulk_signers.push(authority_signer); + Some(authority_pubkey) + } else { + Some( + default_signer + .signer_from_path(matches, wallet_manager)? + .pubkey(), + ) + }; + + let recipient_pubkey = if let Some(recipient_pubkey) = pubkey_of(matches, "recipient") { + recipient_pubkey + } else { + default_signer + .signer_from_path(matches, wallet_manager)? 
+ .pubkey() + }; + + let signer_info = + default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; + + CliCommandInfo { + command: CliCommand::AddressLookupTable( + AddressLookupTableCliCommand::CloseLookupTable { + lookup_table_pubkey, + authority_signer_index: signer_info.index_of(authority_pubkey).unwrap(), + recipient_pubkey, + }, + ), + signers: signer_info.signers, + } + } + ("get", Some(matches)) => { + let lookup_table_pubkey = pubkey_of(matches, "lookup_table_address").unwrap(); + + CliCommandInfo { + command: CliCommand::AddressLookupTable( + AddressLookupTableCliCommand::ShowLookupTable { + lookup_table_pubkey, + }, + ), + signers: vec![], + } + } + _ => unreachable!(), + }; + Ok(response) +} + +pub fn process_address_lookup_table_subcommand( + rpc_client: Arc, + config: &CliConfig, + subcommand: &AddressLookupTableCliCommand, +) -> ProcessResult { + match subcommand { + AddressLookupTableCliCommand::CreateLookupTable { + authority_pubkey, + authority_signer_index, + payer_signer_index, + } => process_create_lookup_table( + &rpc_client, + config, + *authority_pubkey, + *authority_signer_index, + *payer_signer_index, + ), + AddressLookupTableCliCommand::FreezeLookupTable { + lookup_table_pubkey, + authority_signer_index, + bypass_warning, + } => process_freeze_lookup_table( + &rpc_client, + config, + *lookup_table_pubkey, + *authority_signer_index, + *bypass_warning, + ), + AddressLookupTableCliCommand::ExtendLookupTable { + lookup_table_pubkey, + authority_signer_index, + payer_signer_index, + new_addresses, + } => process_extend_lookup_table( + &rpc_client, + config, + *lookup_table_pubkey, + *authority_signer_index, + *payer_signer_index, + new_addresses.to_vec(), + ), + AddressLookupTableCliCommand::DeactivateLookupTable { + lookup_table_pubkey, + authority_signer_index, + bypass_warning, + } => process_deactivate_lookup_table( + &rpc_client, + config, + *lookup_table_pubkey, + *authority_signer_index, + *bypass_warning, + ), + AddressLookupTableCliCommand::CloseLookupTable { + lookup_table_pubkey, + authority_signer_index, + recipient_pubkey, + } => process_close_lookup_table( + &rpc_client, + config, + *lookup_table_pubkey, + *authority_signer_index, + *recipient_pubkey, + ), + AddressLookupTableCliCommand::ShowLookupTable { + lookup_table_pubkey, + } => process_show_lookup_table(&rpc_client, config, *lookup_table_pubkey), + } +} + +fn process_create_lookup_table( + rpc_client: &RpcClient, + config: &CliConfig, + authority_address: Pubkey, + authority_signer_index: Option, + payer_signer_index: usize, +) -> ProcessResult { + let authority_signer = authority_signer_index.map(|index| config.signers[index]); + let payer_signer = config.signers[payer_signer_index]; + + let get_clock_result = rpc_client + .get_account_with_commitment(&sysvar::clock::id(), CommitmentConfig::finalized())?; + let clock_account = get_clock_result.value.expect("Clock account doesn't exist"); + let clock: Clock = from_account(&clock_account).ok_or_else(|| { + CliError::RpcRequestError("Failed to deserialize clock sysvar".to_string()) + })?; + + let payer_address = payer_signer.pubkey(); + let (create_lookup_table_ix, lookup_table_address) = if authority_signer.is_some() { + create_lookup_table_signed(authority_address, payer_address, clock.slot) + } else { + create_lookup_table(authority_address, payer_address, clock.slot) + }; + + let blockhash = rpc_client.get_latest_blockhash()?; + let mut tx = Transaction::new_unsigned(Message::new( + &[create_lookup_table_ix], + 
Some(&config.signers[0].pubkey()), + )); + + let mut keypairs: Vec<&dyn Signer> = vec![config.signers[0], payer_signer]; + if let Some(authority_signer) = authority_signer { + keypairs.push(authority_signer); + } + + tx.try_sign(&keypairs, blockhash)?; + let result = rpc_client.send_and_confirm_transaction_with_spinner_and_config( + &tx, + config.commitment, + RpcSendTransactionConfig { + skip_preflight: false, + preflight_commitment: Some(config.commitment.commitment), + ..RpcSendTransactionConfig::default() + }, + ); + match result { + Err(err) => Err(format!("Create failed: {}", err).into()), + Ok(signature) => Ok(config + .output_format + .formatted_string(&CliAddressLookupTableCreated { + lookup_table_address: lookup_table_address.to_string(), + signature: signature.to_string(), + })), + } +} + +pub const FREEZE_LOOKUP_TABLE_WARNING: &str = "WARNING! \ +Once a lookup table is frozen, it can never be modified or unfrozen again. \ +To proceed with freezing, rerun the `freeze` command with the `--bypass-warning` flag"; + +fn process_freeze_lookup_table( + rpc_client: &RpcClient, + config: &CliConfig, + lookup_table_pubkey: Pubkey, + authority_signer_index: usize, + bypass_warning: bool, +) -> ProcessResult { + let authority_signer = config.signers[authority_signer_index]; + + let get_lookup_table_result = + rpc_client.get_account_with_commitment(&lookup_table_pubkey, config.commitment)?; + let lookup_table_account = get_lookup_table_result.value.ok_or_else(|| { + format!("Lookup table account {lookup_table_pubkey} not found, was it already closed?") + })?; + if !solana_address_lookup_table_program::check_id(&lookup_table_account.owner) { + return Err(format!( + "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", + ) + .into()); + } + + if !bypass_warning { + return Err(String::from(FREEZE_LOOKUP_TABLE_WARNING).into()); + } + + let authority_address = authority_signer.pubkey(); + let freeze_lookup_table_ix = freeze_lookup_table(lookup_table_pubkey, authority_address); + + let blockhash = rpc_client.get_latest_blockhash()?; + let mut tx = Transaction::new_unsigned(Message::new( + &[freeze_lookup_table_ix], + Some(&config.signers[0].pubkey()), + )); + + tx.try_sign(&[config.signers[0], authority_signer], blockhash)?; + let result = rpc_client.send_and_confirm_transaction_with_spinner_and_config( + &tx, + config.commitment, + RpcSendTransactionConfig { + skip_preflight: false, + preflight_commitment: Some(config.commitment.commitment), + ..RpcSendTransactionConfig::default() + }, + ); + match result { + Err(err) => Err(format!("Freeze failed: {}", err).into()), + Ok(signature) => Ok(config.output_format.formatted_string(&CliSignature { + signature: signature.to_string(), + })), + } +} + +fn process_extend_lookup_table( + rpc_client: &RpcClient, + config: &CliConfig, + lookup_table_pubkey: Pubkey, + authority_signer_index: usize, + payer_signer_index: usize, + new_addresses: Vec, +) -> ProcessResult { + let authority_signer = config.signers[authority_signer_index]; + let payer_signer = config.signers[payer_signer_index]; + + if new_addresses.is_empty() { + return Err("Lookup tables must be extended by at least one address".into()); + } + + let get_lookup_table_result = + rpc_client.get_account_with_commitment(&lookup_table_pubkey, config.commitment)?; + let lookup_table_account = get_lookup_table_result.value.ok_or_else(|| { + format!("Lookup table account {lookup_table_pubkey} not found, was it already closed?") + })?; + if 
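// Annotation (not part of the patch): on chain, a lookup table holds at most
// 256 addresses, and a single extend instruction is further bounded by the
// transaction size limit, so large address sets require repeated `extend`
// invocations. A hedged client-side pre-check sketch; the constant mirrors
// the on-chain limit and is redefined locally for illustration:
const LOOKUP_TABLE_MAX_ADDRESSES: usize = 256;

fn check_extend_capacity(current_len: usize, added_len: usize) -> Result<(), String> {
    if current_len.saturating_add(added_len) > LOOKUP_TABLE_MAX_ADDRESSES {
        return Err("extend would exceed the 256-address table capacity".to_string());
    }
    Ok(())
}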
!solana_address_lookup_table_program::check_id(&lookup_table_account.owner) { + return Err(format!( + "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", + ) + .into()); + } + + let authority_address = authority_signer.pubkey(); + let payer_address = payer_signer.pubkey(); + let extend_lookup_table_ix = extend_lookup_table( + lookup_table_pubkey, + authority_address, + Some(payer_address), + new_addresses, + ); + + let blockhash = rpc_client.get_latest_blockhash()?; + let mut tx = Transaction::new_unsigned(Message::new( + &[extend_lookup_table_ix], + Some(&config.signers[0].pubkey()), + )); + + tx.try_sign(&[config.signers[0], authority_signer], blockhash)?; + let result = rpc_client.send_and_confirm_transaction_with_spinner_and_config( + &tx, + config.commitment, + RpcSendTransactionConfig { + skip_preflight: false, + preflight_commitment: Some(config.commitment.commitment), + ..RpcSendTransactionConfig::default() + }, + ); + match result { + Err(err) => Err(format!("Extend failed: {}", err).into()), + Ok(signature) => Ok(config.output_format.formatted_string(&CliSignature { + signature: signature.to_string(), + })), + } +} + +pub const DEACTIVATE_LOOKUP_TABLE_WARNING: &str = "WARNING! \ +Once a lookup table is deactivated, it is no longer usable by transactions. +Deactivated lookup tables may only be closed and cannot be recreated at the same address. \ +To proceed with deactivation, rerun the `deactivate` command with the `--bypass-warning` flag"; + +fn process_deactivate_lookup_table( + rpc_client: &RpcClient, + config: &CliConfig, + lookup_table_pubkey: Pubkey, + authority_signer_index: usize, + bypass_warning: bool, +) -> ProcessResult { + let authority_signer = config.signers[authority_signer_index]; + + let get_lookup_table_result = + rpc_client.get_account_with_commitment(&lookup_table_pubkey, config.commitment)?; + let lookup_table_account = get_lookup_table_result.value.ok_or_else(|| { + format!("Lookup table account {lookup_table_pubkey} not found, was it already closed?") + })?; + if !solana_address_lookup_table_program::check_id(&lookup_table_account.owner) { + return Err(format!( + "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", + ) + .into()); + } + + if !bypass_warning { + return Err(String::from(DEACTIVATE_LOOKUP_TABLE_WARNING).into()); + } + + let authority_address = authority_signer.pubkey(); + let deactivate_lookup_table_ix = + deactivate_lookup_table(lookup_table_pubkey, authority_address); + + let blockhash = rpc_client.get_latest_blockhash()?; + let mut tx = Transaction::new_unsigned(Message::new( + &[deactivate_lookup_table_ix], + Some(&config.signers[0].pubkey()), + )); + + tx.try_sign(&[config.signers[0], authority_signer], blockhash)?; + let result = rpc_client.send_and_confirm_transaction_with_spinner_and_config( + &tx, + config.commitment, + RpcSendTransactionConfig { + skip_preflight: false, + preflight_commitment: Some(config.commitment.commitment), + ..RpcSendTransactionConfig::default() + }, + ); + match result { + Err(err) => Err(format!("Deactivate failed: {}", err).into()), + Ok(signature) => Ok(config.output_format.formatted_string(&CliSignature { + signature: signature.to_string(), + })), + } +} + +fn process_close_lookup_table( + rpc_client: &RpcClient, + config: &CliConfig, + lookup_table_pubkey: Pubkey, + authority_signer_index: usize, + recipient_pubkey: Pubkey, +) -> ProcessResult { + let authority_signer = config.signers[authority_signer_index]; + + let 
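// Annotation (not part of the patch): the client-side check below only
// verifies that the table was deactivated (deactivation_slot != u64::MAX).
// The on-chain program additionally enforces a cool-down before closing:
// the deactivation slot must first age out of the SlotHashes sysvar, so a
// `close` sent immediately after `deactivate` is expected to fail on chain
// even though this check passes. Minimal form of the check mirrored from the
// code below:
fn assert_deactivated(deactivation_slot: u64) -> Result<(), String> {
    if deactivation_slot == u64::MAX {
        return Err("lookup table is still active; deactivate it before closing".to_string());
    }
    Ok(())
}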
get_lookup_table_result = + rpc_client.get_account_with_commitment(&lookup_table_pubkey, config.commitment)?; + let lookup_table_account = get_lookup_table_result.value.ok_or_else(|| { + format!("Lookup table account {lookup_table_pubkey} not found, was it already closed?") + })?; + if !solana_address_lookup_table_program::check_id(&lookup_table_account.owner) { + return Err(format!( + "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", + ) + .into()); + } + + let lookup_table_account = AddressLookupTable::deserialize(&lookup_table_account.data)?; + if lookup_table_account.meta.deactivation_slot == u64::MAX { + return Err(format!( + "Lookup table account {lookup_table_pubkey} is not deactivated. Only deactivated lookup tables may be closed", + ) + .into()); + } + + let authority_address = authority_signer.pubkey(); + let close_lookup_table_ix = + close_lookup_table(lookup_table_pubkey, authority_address, recipient_pubkey); + + let blockhash = rpc_client.get_latest_blockhash()?; + let mut tx = Transaction::new_unsigned(Message::new( + &[close_lookup_table_ix], + Some(&config.signers[0].pubkey()), + )); + + tx.try_sign(&[config.signers[0], authority_signer], blockhash)?; + let result = rpc_client.send_and_confirm_transaction_with_spinner_and_config( + &tx, + config.commitment, + RpcSendTransactionConfig { + skip_preflight: false, + preflight_commitment: Some(config.commitment.commitment), + ..RpcSendTransactionConfig::default() + }, + ); + match result { + Err(err) => Err(format!("Close failed: {}", err).into()), + Ok(signature) => Ok(config.output_format.formatted_string(&CliSignature { + signature: signature.to_string(), + })), + } +} + +fn process_show_lookup_table( + rpc_client: &RpcClient, + config: &CliConfig, + lookup_table_pubkey: Pubkey, +) -> ProcessResult { + let get_lookup_table_result = + rpc_client.get_account_with_commitment(&lookup_table_pubkey, config.commitment)?; + let lookup_table_account = get_lookup_table_result.value.ok_or_else(|| { + format!("Lookup table account {lookup_table_pubkey} not found, was it already closed?") + })?; + if !solana_address_lookup_table_program::check_id(&lookup_table_account.owner) { + return Err(format!( + "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", + ) + .into()); + } + + let lookup_table_account = AddressLookupTable::deserialize(&lookup_table_account.data)?; + Ok(config + .output_format + .formatted_string(&CliAddressLookupTable { + lookup_table_address: lookup_table_pubkey.to_string(), + authority: lookup_table_account + .meta + .authority + .as_ref() + .map(ToString::to_string), + deactivation_slot: lookup_table_account.meta.deactivation_slot, + last_extended_slot: lookup_table_account.meta.last_extended_slot, + addresses: lookup_table_account + .addresses + .iter() + .map(ToString::to_string) + .collect(), + })) +} diff --git a/cli/src/clap_app.rs b/cli/src/clap_app.rs index 3d48ed3716..1760b51617 100644 --- a/cli/src/clap_app.rs +++ b/cli/src/clap_app.rs @@ -1,7 +1,7 @@ use { crate::{ - cli::*, cluster_query::*, feature::*, inflation::*, nonce::*, program::*, stake::*, - validator_info::*, vote::*, wallet::*, + address_lookup_table::AddressLookupTableSubCommands, cli::*, cluster_query::*, feature::*, + inflation::*, nonce::*, program::*, stake::*, validator_info::*, vote::*, wallet::*, }, clap::{App, AppSettings, Arg, ArgGroup, SubCommand}, solana_clap_utils::{self, input_validators::*, keypair::*}, @@ -130,6 +130,7 @@ pub fn get_clap_app<'ab, 
'v>(name: &str, about: &'ab str, version: &'v str) -> A .inflation_subcommands() .nonce_subcommands() .program_subcommands() + .address_lookup_table_subcommands() .stake_subcommands() .validator_info_subcommands() .vote_subcommands() diff --git a/cli/src/cli.rs b/cli/src/cli.rs index 2a2397efd3..d202a2a69f 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -1,7 +1,7 @@ use { crate::{ - clap_app::*, cluster_query::*, feature::*, inflation::*, nonce::*, program::*, - spend_utils::*, stake::*, validator_info::*, vote::*, wallet::*, + address_lookup_table::*, clap_app::*, cluster_query::*, feature::*, inflation::*, nonce::*, + program::*, spend_utils::*, stake::*, validator_info::*, vote::*, wallet::*, }, clap::{crate_description, crate_name, value_t_or_exit, ArgMatches, Shell}, log::*, @@ -440,6 +440,8 @@ pub enum CliCommand { StakeMinimumDelegation { use_lamports_unit: bool, }, + // Address lookup table commands + AddressLookupTable(AddressLookupTableCliCommand), } #[derive(Debug, PartialEq)] @@ -687,6 +689,9 @@ pub fn parse_command( ("program", Some(matches)) => { parse_program_subcommand(matches, default_signer, wallet_manager) } + ("address-lookup-table", Some(matches)) => { + parse_address_lookup_table_subcommand(matches, default_signer, wallet_manager) + } ("wait-for-max-stake", Some(matches)) => { let max_stake_percent = value_t_or_exit!(matches, "max_percent", f32); Ok(CliCommandInfo { @@ -1627,6 +1632,11 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { derived_address_program_id.as_ref(), compute_unit_price.as_ref(), ), + + // Address Lookup Table Commands + CliCommand::AddressLookupTable(subcommand) => { + process_address_lookup_table_subcommand(rpc_client, config, subcommand) + } } } diff --git a/cli/src/lib.rs b/cli/src/lib.rs index 85d90869ff..c271990b58 100644 --- a/cli/src/lib.rs +++ b/cli/src/lib.rs @@ -23,6 +23,7 @@ extern crate const_format; extern crate serde_derive; +pub mod address_lookup_table; pub mod checks; pub mod clap_app; pub mod cli; diff --git a/cli/src/program.rs b/cli/src/program.rs index 1b3431968c..c90eeb9c27 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -60,6 +60,11 @@ use { }, }; +pub const CLOSE_PROGRAM_WARNING: &str = "WARNING! \ +Closed programs cannot be recreated at the same program id. \ +Once a program is closed, it can never be invoked again. \ +To proceed with closing, rerun the `close` command with the `--bypass-warning` flag"; + #[derive(Debug, PartialEq, Eq)] pub enum ProgramCliCommand { Deploy { @@ -109,6 +114,7 @@ pub enum ProgramCliCommand { recipient_pubkey: Pubkey, authority_index: SignerIndex, use_lamports_unit: bool, + bypass_warning: bool, }, } @@ -131,7 +137,7 @@ impl ProgramSubCommands for App<'_, '_> { ) .subcommand( SubCommand::with_name("deploy") - .about("Deploy a program") + .about("Deploy an upgradeable program") .arg( Arg::with_name("program_location") .index(1) @@ -386,12 +392,18 @@ impl ProgramSubCommands for App<'_, '_> { .long("lamports") .takes_value(false) .help("Display balance in lamports instead of SOL"), + ) + .arg( + Arg::with_name("bypass_warning") + .long("bypass-warning") + .takes_value(false) + .help("Bypass the permanent program closure warning"), ), ) ) .subcommand( SubCommand::with_name("deploy") - .about("Deploy a program") + .about("Deploy a non-upgradeable program. 
Use `solana program deploy` instead to deploy upgradeable programs") .setting(AppSettings::Hidden) .arg( Arg::with_name("program_location") @@ -674,6 +686,7 @@ pub fn parse_program_subcommand( recipient_pubkey, authority_index: signer_info.index_of(authority_pubkey).unwrap(), use_lamports_unit: matches.is_present("lamports"), + bypass_warning: matches.is_present("bypass_warning"), }), signers: signer_info.signers, } @@ -781,6 +794,7 @@ pub fn process_program_subcommand( recipient_pubkey, authority_index, use_lamports_unit, + bypass_warning, } => process_close( &rpc_client, config, @@ -788,6 +802,7 @@ pub fn process_program_subcommand( *recipient_pubkey, *authority_index, *use_lamports_unit, + *bypass_warning, ), } } @@ -1553,6 +1568,7 @@ fn process_close( recipient_pubkey: Pubkey, authority_index: SignerIndex, use_lamports_unit: bool, + bypass_warning: bool, ) -> ProcessResult { let authority_signer = config.signers[authority_index]; @@ -1615,6 +1631,9 @@ fn process_close( ) .into()) } else { + if !bypass_warning { + return Err(String::from(CLOSE_PROGRAM_WARNING).into()); + } close( rpc_client, config, @@ -3010,6 +3029,30 @@ mod tests { recipient_pubkey: default_keypair.pubkey(), authority_index: 0, use_lamports_unit: false, + bypass_warning: false, + }), + signers: vec![read_keypair_file(&keypair_file).unwrap().into()], + } + ); + + // with bypass-warning + write_keypair_file(&authority_keypair, &authority_keypair_file).unwrap(); + let test_command = test_commands.clone().get_matches_from(vec![ + "test", + "program", + "close", + &buffer_pubkey.to_string(), + "--bypass-warning", + ]); + assert_eq!( + parse_command(&test_command, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::Program(ProgramCliCommand::Close { + account_pubkey: Some(buffer_pubkey), + recipient_pubkey: default_keypair.pubkey(), + authority_index: 0, + use_lamports_unit: false, + bypass_warning: true, }), signers: vec![read_keypair_file(&keypair_file).unwrap().into()], } @@ -3033,6 +3076,7 @@ mod tests { recipient_pubkey: default_keypair.pubkey(), authority_index: 1, use_lamports_unit: false, + bypass_warning: false, }), signers: vec![ read_keypair_file(&keypair_file).unwrap().into(), @@ -3058,6 +3102,7 @@ mod tests { recipient_pubkey, authority_index: 0, use_lamports_unit: false, + bypass_warning: false, }), signers: vec![read_keypair_file(&keypair_file).unwrap().into(),], } @@ -3079,6 +3124,7 @@ mod tests { recipient_pubkey: default_keypair.pubkey(), authority_index: 0, use_lamports_unit: true, + bypass_warning: false, }), signers: vec![read_keypair_file(&keypair_file).unwrap().into(),], } diff --git a/cli/tests/address_lookup_table.rs b/cli/tests/address_lookup_table.rs new file mode 100644 index 0000000000..9bf7db320c --- /dev/null +++ b/cli/tests/address_lookup_table.rs @@ -0,0 +1,218 @@ +use { + solana_cli::{ + address_lookup_table::{ + AddressLookupTableCliCommand, DEACTIVATE_LOOKUP_TABLE_WARNING, + FREEZE_LOOKUP_TABLE_WARNING, + }, + cli::{process_command, CliCommand, CliConfig}, + }, + solana_cli_output::{CliAddressLookupTable, CliAddressLookupTableCreated, OutputFormat}, + solana_faucet::faucet::run_local_faucet, + solana_sdk::{ + native_token::LAMPORTS_PER_SOL, + pubkey::Pubkey, + signature::{Keypair, Signer}, + }, + solana_streamer::socket::SocketAddrSpace, + solana_test_validator::TestValidator, + std::str::FromStr, +}; + +#[test] +fn test_cli_create_extend_and_freeze_address_lookup_table() { + let mint_keypair = Keypair::new(); + let mint_pubkey = mint_keypair.pubkey(); + let 
faucet_addr = run_local_faucet(mint_keypair, None);
+    let test_validator =
+        TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified);
+
+    let mut config = CliConfig::recent_for_tests();
+    let keypair = Keypair::new();
+    config.json_rpc_url = test_validator.rpc_url();
+    config.signers = vec![&keypair];
+    config.output_format = OutputFormat::JsonCompact;
+
+    // Airdrop SOL for transaction fees
+    config.command = CliCommand::Airdrop {
+        pubkey: None,
+        lamports: 10 * LAMPORTS_PER_SOL,
+    };
+    process_command(&config).unwrap();
+
+    // Create lookup table
+    config.command =
+        CliCommand::AddressLookupTable(AddressLookupTableCliCommand::CreateLookupTable {
+            authority_pubkey: keypair.pubkey(),
+            authority_signer_index: None,
+            payer_signer_index: 0,
+        });
+    let response: CliAddressLookupTableCreated =
+        serde_json::from_str(&process_command(&config).unwrap()).unwrap();
+    let lookup_table_pubkey = Pubkey::from_str(&response.lookup_table_address).unwrap();
+
+    // Validate created lookup table
+    {
+        config.command =
+            CliCommand::AddressLookupTable(AddressLookupTableCliCommand::ShowLookupTable {
+                lookup_table_pubkey,
+            });
+        let response: CliAddressLookupTable =
+            serde_json::from_str(&process_command(&config).unwrap()).unwrap();
+        assert_eq!(
+            response,
+            CliAddressLookupTable {
+                lookup_table_address: lookup_table_pubkey.to_string(),
+                authority: Some(keypair.pubkey().to_string()),
+                deactivation_slot: u64::MAX,
+                last_extended_slot: 0,
+                addresses: vec![],
+            }
+        );
+    }
+
+    // Extend lookup table
+    let new_addresses: Vec<Pubkey> = (0..5).map(|_| Pubkey::new_unique()).collect();
+    config.command =
+        CliCommand::AddressLookupTable(AddressLookupTableCliCommand::ExtendLookupTable {
+            lookup_table_pubkey,
+            authority_signer_index: 0,
+            payer_signer_index: 0,
+            new_addresses: new_addresses.clone(),
+        });
+    process_command(&config).unwrap();
+
+    // Validate extended lookup table
+    {
+        config.command =
+            CliCommand::AddressLookupTable(AddressLookupTableCliCommand::ShowLookupTable {
+                lookup_table_pubkey,
+            });
+        let CliAddressLookupTable {
+            addresses,
+            last_extended_slot,
+            ..
+        } = serde_json::from_str(&process_command(&config).unwrap()).unwrap();
+        assert_eq!(
+            addresses
+                .into_iter()
+                .map(|address| Pubkey::from_str(&address).unwrap())
+                .collect::<Vec<Pubkey>>(),
+            new_addresses
+        );
+        assert!(last_extended_slot > 0);
+    }
+
+    // Freeze lookup table w/o bypass
+    config.command =
+        CliCommand::AddressLookupTable(AddressLookupTableCliCommand::FreezeLookupTable {
+            lookup_table_pubkey,
+            authority_signer_index: 0,
+            bypass_warning: false,
+        });
+    let process_err = process_command(&config).unwrap_err();
+    assert_eq!(process_err.to_string(), FREEZE_LOOKUP_TABLE_WARNING);
+
+    // Freeze lookup table w/ bypass
+    config.command =
+        CliCommand::AddressLookupTable(AddressLookupTableCliCommand::FreezeLookupTable {
+            lookup_table_pubkey,
+            authority_signer_index: 0,
+            bypass_warning: true,
+        });
+    process_command(&config).unwrap();
+
+    // Validate frozen lookup table
+    {
+        config.command =
+            CliCommand::AddressLookupTable(AddressLookupTableCliCommand::ShowLookupTable {
+                lookup_table_pubkey,
+            });
+        let CliAddressLookupTable { authority, ..
} = + serde_json::from_str(&process_command(&config).unwrap()).unwrap(); + assert!(authority.is_none()); + } +} + +#[test] +fn test_cli_create_and_deactivate_address_lookup_table() { + let mint_keypair = Keypair::new(); + let mint_pubkey = mint_keypair.pubkey(); + let faucet_addr = run_local_faucet(mint_keypair, None); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); + + let mut config = CliConfig::recent_for_tests(); + let keypair = Keypair::new(); + config.json_rpc_url = test_validator.rpc_url(); + config.signers = vec![&keypair]; + config.output_format = OutputFormat::JsonCompact; + + // Airdrop SOL for transaction fees + config.command = CliCommand::Airdrop { + pubkey: None, + lamports: 10 * LAMPORTS_PER_SOL, + }; + process_command(&config).unwrap(); + + // Create lookup table + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::CreateLookupTable { + authority_pubkey: keypair.pubkey(), + authority_signer_index: Some(0), + payer_signer_index: 0, + }); + let response: CliAddressLookupTableCreated = + serde_json::from_str(&process_command(&config).unwrap()).unwrap(); + let lookup_table_pubkey = Pubkey::from_str(&response.lookup_table_address).unwrap(); + + // Validate created lookup table + { + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::ShowLookupTable { + lookup_table_pubkey, + }); + let response: CliAddressLookupTable = + serde_json::from_str(&process_command(&config).unwrap()).unwrap(); + assert_eq!( + response, + CliAddressLookupTable { + lookup_table_address: lookup_table_pubkey.to_string(), + authority: Some(keypair.pubkey().to_string()), + deactivation_slot: u64::MAX, + last_extended_slot: 0, + addresses: vec![], + } + ); + } + + // Deactivate lookup table w/o bypass + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::DeactivateLookupTable { + lookup_table_pubkey, + authority_signer_index: 0, + bypass_warning: false, + }); + let process_err = process_command(&config).unwrap_err(); + assert_eq!(process_err.to_string(), DEACTIVATE_LOOKUP_TABLE_WARNING); + + // Deactivate lookup table w/ bypass + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::DeactivateLookupTable { + lookup_table_pubkey, + authority_signer_index: 0, + bypass_warning: true, + }); + process_command(&config).unwrap(); + + // Validate deactivated lookup table + { + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::ShowLookupTable { + lookup_table_pubkey, + }); + let CliAddressLookupTable { + deactivation_slot, .. 
+ } = serde_json::from_str(&process_command(&config).unwrap()).unwrap(); + assert_ne!(deactivation_slot, u64::MAX); + } +} diff --git a/cli/tests/program.rs b/cli/tests/program.rs index 6dbae5cefc..315e9213f0 100644 --- a/cli/tests/program.rs +++ b/cli/tests/program.rs @@ -1,9 +1,10 @@ #![allow(clippy::integer_arithmetic)] + use { serde_json::Value, solana_cli::{ cli::{process_command, CliCommand, CliConfig}, - program::ProgramCliCommand, + program::{ProgramCliCommand, CLOSE_PROGRAM_WARNING}, }, solana_cli_output::OutputFormat, solana_client::rpc_client::RpcClient, @@ -638,13 +639,30 @@ fn test_cli_program_close_program() { let programdata_lamports = close_account.lamports; let recipient_pubkey = Pubkey::new_unique(); config.signers = vec![&keypair, &upgrade_authority]; + + // Close without --bypass-warning flag + config.command = CliCommand::Program(ProgramCliCommand::Close { + account_pubkey: Some(program_keypair.pubkey()), + recipient_pubkey, + authority_index: 1, + use_lamports_unit: false, + bypass_warning: false, + }); + assert_eq!( + process_command(&config).unwrap_err().to_string(), + CLOSE_PROGRAM_WARNING.to_string() + ); + + // Close with --bypass-warning flag config.command = CliCommand::Program(ProgramCliCommand::Close { account_pubkey: Some(program_keypair.pubkey()), recipient_pubkey, authority_index: 1, use_lamports_unit: false, + bypass_warning: true, }); process_command(&config).unwrap(); + rpc_client.get_account(&programdata_pubkey).unwrap_err(); let recipient_account = rpc_client.get_account(&recipient_pubkey).unwrap(); assert_eq!(programdata_lamports, recipient_account.lamports); @@ -902,6 +920,7 @@ fn test_cli_program_write_buffer() { recipient_pubkey, authority_index: 1, use_lamports_unit: false, + bypass_warning: false, }); process_command(&config).unwrap(); rpc_client.get_account(&buffer_pubkey).unwrap_err(); @@ -938,6 +957,7 @@ fn test_cli_program_write_buffer() { recipient_pubkey: keypair.pubkey(), authority_index: 0, use_lamports_unit: false, + bypass_warning: false, }); process_command(&config).unwrap(); rpc_client.get_account(&new_buffer_pubkey).unwrap_err(); diff --git a/client-test/Cargo.toml b/client-test/Cargo.toml index 6d1a0283db..e7727cefcd 100644 --- a/client-test/Cargo.toml +++ b/client-test/Cargo.toml @@ -12,8 +12,8 @@ publish = false [dependencies] futures-util = "0.3.21" -serde_json = "1.0.81" -serial_test = "0.8.0" +serde_json = "1.0.83" +serial_test = "0.9.0" solana-client = { path = "../client", version = "=1.12.0" } solana-ledger = { path = "../ledger", version = "=1.12.0" } solana-measure = { path = "../measure", version = "=1.12.0" } diff --git a/client/Cargo.toml b/client/Cargo.toml index 448d69a71c..97e5aba275 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -15,7 +15,7 @@ async-trait = "0.1.57" base64 = "0.13.0" bincode = "1.3.3" bs58 = "0.4.0" -bytes = "1.1.0" +bytes = "1.2.1" clap = "2.33.0" crossbeam-channel = "0.5" enum_dispatch = "0.3.8" @@ -27,17 +27,18 @@ itertools = "0.10.2" jsonrpc-core = "18.0.0" lazy_static = "1.4.0" log = "0.4.17" -quinn = "0.8.3" -quinn-proto = "0.8.3" +quinn = "0.8.4" +quinn-proto = "0.8.4" +quinn-udp = "0.1.3" rand = "0.7.0" rand_chacha = "0.2.2" rayon = "1.5.3" reqwest = { version = "0.11.11", default-features = false, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] } rustls = { version = "0.20.6", features = ["dangerous_configuration"] } -semver = "1.0.10" -serde = "1.0.138" +semver = "1.0.13" +serde = "1.0.143" serde_derive = "1.0.103" -serde_json = "1.0.81" +serde_json = 
"1.0.83" solana-account-decoder = { path = "../account-decoder", version = "=1.12.0" } solana-clap-utils = { path = "../clap-utils", version = "=1.12.0" } solana-faucet = { path = "../faucet", version = "=1.12.0" } @@ -49,11 +50,11 @@ solana-streamer = { path = "../streamer", version = "=1.12.0" } solana-transaction-status = { path = "../transaction-status", version = "=1.12.0" } solana-version = { path = "../version", version = "=1.12.0" } solana-vote-program = { path = "../programs/vote", version = "=1.12.0" } -spl-token-2022 = { version = "=0.4.2", features = ["no-entrypoint"] } +spl-token-2022 = { version = "=0.4.3", features = ["no-entrypoint"] } thiserror = "1.0" tokio = { version = "1", features = ["full"] } tokio-stream = "0.1.9" -tokio-tungstenite = { version = "0.17.1", features = ["rustls-tls-webpki-roots"] } +tokio-tungstenite = { version = "0.17.2", features = ["rustls-tls-webpki-roots"] } tungstenite = { version = "0.17.2", features = ["rustls-tls-webpki-roots"] } url = "2.2.2" diff --git a/client/src/nonblocking/blockhash_query.rs b/client/src/nonblocking/blockhash_query.rs new file mode 100644 index 0000000000..28943ad6a8 --- /dev/null +++ b/client/src/nonblocking/blockhash_query.rs @@ -0,0 +1,433 @@ +use { + crate::nonblocking::{nonce_utils, rpc_client::RpcClient}, + clap::ArgMatches, + solana_clap_utils::{ + input_parsers::{pubkey_of, value_of}, + nonce::*, + offline::*, + }, + solana_sdk::{commitment_config::CommitmentConfig, hash::Hash, pubkey::Pubkey}, +}; + +#[derive(Debug, PartialEq, Eq)] +pub enum Source { + Cluster, + NonceAccount(Pubkey), +} + +impl Source { + pub async fn get_blockhash( + &self, + rpc_client: &RpcClient, + commitment: CommitmentConfig, + ) -> Result> { + match self { + Self::Cluster => { + let (blockhash, _) = rpc_client + .get_latest_blockhash_with_commitment(commitment) + .await?; + Ok(blockhash) + } + Self::NonceAccount(ref pubkey) => { + #[allow(clippy::redundant_closure)] + let data = nonce_utils::get_account_with_commitment(rpc_client, pubkey, commitment) + .await + .and_then(|ref a| nonce_utils::data_from_account(a))?; + Ok(data.blockhash()) + } + } + } + + pub async fn is_blockhash_valid( + &self, + rpc_client: &RpcClient, + blockhash: &Hash, + commitment: CommitmentConfig, + ) -> Result> { + Ok(match self { + Self::Cluster => rpc_client.is_blockhash_valid(blockhash, commitment).await?, + Self::NonceAccount(ref pubkey) => { + #[allow(clippy::redundant_closure)] + let _ = nonce_utils::get_account_with_commitment(rpc_client, pubkey, commitment) + .await + .and_then(|ref a| nonce_utils::data_from_account(a))?; + true + } + }) + } +} + +#[derive(Debug, PartialEq, Eq)] +pub enum BlockhashQuery { + Static(Hash), + Validated(Source, Hash), + Rpc(Source), +} + +impl BlockhashQuery { + pub fn new(blockhash: Option, sign_only: bool, nonce_account: Option) -> Self { + let source = nonce_account + .map(Source::NonceAccount) + .unwrap_or(Source::Cluster); + match blockhash { + Some(hash) if sign_only => Self::Static(hash), + Some(hash) if !sign_only => Self::Validated(source, hash), + None if !sign_only => Self::Rpc(source), + _ => panic!("Cannot resolve blockhash"), + } + } + + pub fn new_from_matches(matches: &ArgMatches<'_>) -> Self { + let blockhash = value_of(matches, BLOCKHASH_ARG.name); + let sign_only = matches.is_present(SIGN_ONLY_ARG.name); + let nonce_account = pubkey_of(matches, NONCE_ARG.name); + BlockhashQuery::new(blockhash, sign_only, nonce_account) + } + + pub async fn get_blockhash( + &self, + rpc_client: &RpcClient, + commitment: 
CommitmentConfig, + ) -> Result> { + match self { + BlockhashQuery::Static(hash) => Ok(*hash), + BlockhashQuery::Validated(source, hash) => { + if !source + .is_blockhash_valid(rpc_client, hash, commitment) + .await? + { + return Err(format!("Hash has expired {:?}", hash).into()); + } + Ok(*hash) + } + BlockhashQuery::Rpc(source) => source.get_blockhash(rpc_client, commitment).await, + } + } +} + +impl Default for BlockhashQuery { + fn default() -> Self { + BlockhashQuery::Rpc(Source::Cluster) + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + crate::{ + nonblocking::blockhash_query, + rpc_request::RpcRequest, + rpc_response::{Response, RpcBlockhash, RpcResponseContext}, + }, + clap::App, + serde_json::{self, json}, + solana_account_decoder::{UiAccount, UiAccountEncoding}, + solana_sdk::{ + account::Account, + fee_calculator::FeeCalculator, + hash::hash, + nonce::{self, state::DurableNonce}, + system_program, + }, + std::collections::HashMap, + }; + + #[test] + fn test_blockhash_query_new_ok() { + let blockhash = hash(&[1u8]); + let nonce_pubkey = Pubkey::new(&[1u8; 32]); + + assert_eq!( + BlockhashQuery::new(Some(blockhash), true, None), + BlockhashQuery::Static(blockhash), + ); + assert_eq!( + BlockhashQuery::new(Some(blockhash), false, None), + BlockhashQuery::Validated(blockhash_query::Source::Cluster, blockhash), + ); + assert_eq!( + BlockhashQuery::new(None, false, None), + BlockhashQuery::Rpc(blockhash_query::Source::Cluster) + ); + + assert_eq!( + BlockhashQuery::new(Some(blockhash), true, Some(nonce_pubkey)), + BlockhashQuery::Static(blockhash), + ); + assert_eq!( + BlockhashQuery::new(Some(blockhash), false, Some(nonce_pubkey)), + BlockhashQuery::Validated( + blockhash_query::Source::NonceAccount(nonce_pubkey), + blockhash + ), + ); + assert_eq!( + BlockhashQuery::new(None, false, Some(nonce_pubkey)), + BlockhashQuery::Rpc(blockhash_query::Source::NonceAccount(nonce_pubkey)), + ); + } + + #[test] + #[should_panic] + fn test_blockhash_query_new_no_nonce_fail() { + BlockhashQuery::new(None, true, None); + } + + #[test] + #[should_panic] + fn test_blockhash_query_new_nonce_fail() { + let nonce_pubkey = Pubkey::new(&[1u8; 32]); + BlockhashQuery::new(None, true, Some(nonce_pubkey)); + } + + #[test] + fn test_blockhash_query_new_from_matches_ok() { + let test_commands = App::new("blockhash_query_test") + .nonce_args(false) + .offline_args(); + let blockhash = hash(&[1u8]); + let blockhash_string = blockhash.to_string(); + + let matches = test_commands.clone().get_matches_from(vec![ + "blockhash_query_test", + "--blockhash", + &blockhash_string, + "--sign-only", + ]); + assert_eq!( + BlockhashQuery::new_from_matches(&matches), + BlockhashQuery::Static(blockhash), + ); + + let matches = test_commands.clone().get_matches_from(vec![ + "blockhash_query_test", + "--blockhash", + &blockhash_string, + ]); + assert_eq!( + BlockhashQuery::new_from_matches(&matches), + BlockhashQuery::Validated(blockhash_query::Source::Cluster, blockhash), + ); + + let matches = test_commands + .clone() + .get_matches_from(vec!["blockhash_query_test"]); + assert_eq!( + BlockhashQuery::new_from_matches(&matches), + BlockhashQuery::Rpc(blockhash_query::Source::Cluster), + ); + + let nonce_pubkey = Pubkey::new(&[1u8; 32]); + let nonce_string = nonce_pubkey.to_string(); + let matches = test_commands.clone().get_matches_from(vec![ + "blockhash_query_test", + "--blockhash", + &blockhash_string, + "--sign-only", + "--nonce", + &nonce_string, + ]); + assert_eq!( + BlockhashQuery::new_from_matches(&matches), + 
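// Annotation (not part of the patch): the three variants resolve differently.
// Static returns the hash unchecked (offline signing), Validated confirms the
// hash is still usable against the cluster or a nonce account before
// returning it, and Rpc always fetches a fresh value. A usage sketch under
// those semantics, using this module's own types:
async fn fresh_blockhash(rpc_client: &RpcClient) -> Result<Hash, Box<dyn std::error::Error>> {
    // BlockhashQuery::default() is Rpc(Source::Cluster): always ask the cluster.
    BlockhashQuery::default()
        .get_blockhash(rpc_client, CommitmentConfig::default())
        .await
}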
BlockhashQuery::Static(blockhash), + ); + + let matches = test_commands.clone().get_matches_from(vec![ + "blockhash_query_test", + "--blockhash", + &blockhash_string, + "--nonce", + &nonce_string, + ]); + assert_eq!( + BlockhashQuery::new_from_matches(&matches), + BlockhashQuery::Validated( + blockhash_query::Source::NonceAccount(nonce_pubkey), + blockhash + ), + ); + } + + #[test] + #[should_panic] + fn test_blockhash_query_new_from_matches_without_nonce_fail() { + let test_commands = App::new("blockhash_query_test") + .arg(blockhash_arg()) + // We can really only hit this case if the arg requirements + // are broken, so unset the requires() to recreate that condition + .arg(sign_only_arg().requires("")); + + let matches = test_commands + .clone() + .get_matches_from(vec!["blockhash_query_test", "--sign-only"]); + BlockhashQuery::new_from_matches(&matches); + } + + #[test] + #[should_panic] + fn test_blockhash_query_new_from_matches_with_nonce_fail() { + let test_commands = App::new("blockhash_query_test") + .arg(blockhash_arg()) + // We can really only hit this case if the arg requirements + // are broken, so unset the requires() to recreate that condition + .arg(sign_only_arg().requires("")); + let nonce_pubkey = Pubkey::new(&[1u8; 32]); + let nonce_string = nonce_pubkey.to_string(); + + let matches = test_commands.clone().get_matches_from(vec![ + "blockhash_query_test", + "--sign-only", + "--nonce", + &nonce_string, + ]); + BlockhashQuery::new_from_matches(&matches); + } + + #[tokio::test] + async fn test_blockhash_query_get_blockhash() { + let test_blockhash = hash(&[0u8]); + let rpc_blockhash = hash(&[1u8]); + + let get_latest_blockhash_response = json!(Response { + context: RpcResponseContext { + slot: 1, + api_version: None + }, + value: json!(RpcBlockhash { + blockhash: rpc_blockhash.to_string(), + last_valid_block_height: 42, + }), + }); + + let is_blockhash_valid_response = json!(Response { + context: RpcResponseContext { + slot: 1, + api_version: None + }, + value: true + }); + + let mut mocks = HashMap::new(); + mocks.insert( + RpcRequest::GetLatestBlockhash, + get_latest_blockhash_response.clone(), + ); + let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks); + assert_eq!( + BlockhashQuery::default() + .get_blockhash(&rpc_client, CommitmentConfig::default()) + .await + .unwrap(), + rpc_blockhash, + ); + + let mut mocks = HashMap::new(); + mocks.insert( + RpcRequest::GetLatestBlockhash, + get_latest_blockhash_response.clone(), + ); + mocks.insert( + RpcRequest::IsBlockhashValid, + is_blockhash_valid_response.clone(), + ); + let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks); + assert_eq!( + BlockhashQuery::Validated(Source::Cluster, test_blockhash) + .get_blockhash(&rpc_client, CommitmentConfig::default()) + .await + .unwrap(), + test_blockhash, + ); + + let mut mocks = HashMap::new(); + mocks.insert( + RpcRequest::GetLatestBlockhash, + get_latest_blockhash_response.clone(), + ); + let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks); + assert_eq!( + BlockhashQuery::Static(test_blockhash) + .get_blockhash(&rpc_client, CommitmentConfig::default()) + .await + .unwrap(), + test_blockhash, + ); + + let rpc_client = RpcClient::new_mock("fails".to_string()); + assert!(BlockhashQuery::default() + .get_blockhash(&rpc_client, CommitmentConfig::default()) + .await + .is_err()); + + let durable_nonce = DurableNonce::from_blockhash(&Hash::new(&[2u8; 32])); + let nonce_blockhash = *durable_nonce.as_hash(); + let nonce_fee_calc = 
FeeCalculator::new(4242); + let data = nonce::state::Data { + authority: Pubkey::new(&[3u8; 32]), + durable_nonce, + fee_calculator: nonce_fee_calc, + }; + let nonce_account = Account::new_data_with_space( + 42, + &nonce::state::Versions::new(nonce::State::Initialized(data)), + nonce::State::size(), + &system_program::id(), + ) + .unwrap(); + let nonce_pubkey = Pubkey::new(&[4u8; 32]); + let rpc_nonce_account = UiAccount::encode( + &nonce_pubkey, + &nonce_account, + UiAccountEncoding::Base64, + None, + None, + ); + let get_account_response = json!(Response { + context: RpcResponseContext { + slot: 1, + api_version: None + }, + value: json!(Some(rpc_nonce_account)), + }); + + let mut mocks = HashMap::new(); + mocks.insert(RpcRequest::GetAccountInfo, get_account_response.clone()); + let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks); + assert_eq!( + BlockhashQuery::Rpc(Source::NonceAccount(nonce_pubkey)) + .get_blockhash(&rpc_client, CommitmentConfig::default()) + .await + .unwrap(), + nonce_blockhash, + ); + + let mut mocks = HashMap::new(); + mocks.insert(RpcRequest::GetAccountInfo, get_account_response.clone()); + let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks); + assert_eq!( + BlockhashQuery::Validated(Source::NonceAccount(nonce_pubkey), nonce_blockhash) + .get_blockhash(&rpc_client, CommitmentConfig::default()) + .await + .unwrap(), + nonce_blockhash, + ); + + let mut mocks = HashMap::new(); + mocks.insert(RpcRequest::GetAccountInfo, get_account_response); + let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks); + assert_eq!( + BlockhashQuery::Static(nonce_blockhash) + .get_blockhash(&rpc_client, CommitmentConfig::default()) + .await + .unwrap(), + nonce_blockhash, + ); + + let rpc_client = RpcClient::new_mock("fails".to_string()); + assert!(BlockhashQuery::Rpc(Source::NonceAccount(nonce_pubkey)) + .get_blockhash(&rpc_client, CommitmentConfig::default()) + .await + .is_err()); + } +} diff --git a/client/src/nonblocking/mod.rs b/client/src/nonblocking/mod.rs index 844811c356..7ba96d3994 100644 --- a/client/src/nonblocking/mod.rs +++ b/client/src/nonblocking/mod.rs @@ -1,3 +1,5 @@ +pub mod blockhash_query; +pub mod nonce_utils; pub mod pubsub_client; pub mod quic_client; pub mod rpc_client; diff --git a/client/src/nonblocking/nonce_utils.rs b/client/src/nonblocking/nonce_utils.rs new file mode 100644 index 0000000000..fe0d2216d5 --- /dev/null +++ b/client/src/nonblocking/nonce_utils.rs @@ -0,0 +1,247 @@ +//! Durable transaction nonce helpers. + +use { + crate::nonblocking::rpc_client::RpcClient, + solana_sdk::{ + account::{Account, ReadableAccount}, + account_utils::StateMut, + commitment_config::CommitmentConfig, + hash::Hash, + nonce::{ + state::{Data, Versions}, + State, + }, + pubkey::Pubkey, + system_program, + }, +}; + +#[derive(Debug, thiserror::Error, PartialEq, Eq)] +pub enum Error { + #[error("invalid account owner")] + InvalidAccountOwner, + #[error("invalid account data")] + InvalidAccountData, + #[error("unexpected account data size")] + UnexpectedDataSize, + #[error("provided hash ({provided}) does not match nonce hash ({expected})")] + InvalidHash { provided: Hash, expected: Hash }, + #[error("provided authority ({provided}) does not match nonce authority ({expected})")] + InvalidAuthority { provided: Pubkey, expected: Pubkey }, + #[error("invalid state for requested operation")] + InvalidStateForOperation, + #[error("client error: {0}")] + Client(String), +} + +/// Get a nonce account from the network. 
+///
+/// This is like [`RpcClient::get_account`] except:
+///
+/// - it returns this module's [`Error`] type,
+/// - it returns an error if any of the checks from [`account_identity_ok`] fail.
+pub async fn get_account(rpc_client: &RpcClient, nonce_pubkey: &Pubkey) -> Result<Account, Error> {
+    get_account_with_commitment(rpc_client, nonce_pubkey, CommitmentConfig::default()).await
+}
+
+/// Get a nonce account from the network.
+///
+/// This is like [`RpcClient::get_account_with_commitment`] except:
+///
+/// - it returns this module's [`Error`] type,
+/// - it returns an error if the account does not exist,
+/// - it returns an error if any of the checks from [`account_identity_ok`] fail.
+pub async fn get_account_with_commitment(
+    rpc_client: &RpcClient,
+    nonce_pubkey: &Pubkey,
+    commitment: CommitmentConfig,
+) -> Result<Account, Error> {
+    rpc_client
+        .get_account_with_commitment(nonce_pubkey, commitment)
+        .await
+        .map_err(|e| Error::Client(format!("{}", e)))
+        .and_then(|result| {
+            result
+                .value
+                .ok_or_else(|| Error::Client(format!("AccountNotFound: pubkey={}", nonce_pubkey)))
+        })
+        .and_then(|a| account_identity_ok(&a).map(|()| a))
+}
+
+/// Perform basic checks that an account has nonce-like properties.
+///
+/// # Errors
+///
+/// Returns [`Error::InvalidAccountOwner`] if the account is not owned by the
+/// system program. Returns [`Error::UnexpectedDataSize`] if the account
+/// contains no data.
+pub fn account_identity_ok<T: ReadableAccount>(account: &T) -> Result<(), Error> {
+    if account.owner() != &system_program::id() {
+        Err(Error::InvalidAccountOwner)
+    } else if account.data().is_empty() {
+        Err(Error::UnexpectedDataSize)
+    } else {
+        Ok(())
+    }
+}
+
+/// Deserialize the state of a durable transaction nonce account.
+///
+/// # Errors
+///
+/// Returns an error if the account is not owned by the system program or
+/// contains no data.
+///
+/// # Examples
+///
+/// Determine if a nonce account is initialized:
+///
+/// ```no_run
+/// use solana_client::nonblocking::{
+///     rpc_client::RpcClient,
+///     nonce_utils,
+/// };
+/// use solana_sdk::{
+///     nonce::State,
+///     pubkey::Pubkey,
+/// };
+/// use anyhow::Result;
+///
+/// futures::executor::block_on(async {
+///     async fn is_nonce_initialized(
+///         client: &RpcClient,
+///         nonce_account_pubkey: &Pubkey,
+///     ) -> Result<bool> {
+///
+///         // Sign the tx with nonce_account's `blockhash` instead of the
+///         // network's latest blockhash.
+///         let nonce_account = client.get_account(nonce_account_pubkey).await?;
+///         let nonce_state = nonce_utils::state_from_account(&nonce_account)?;
+///
+///         Ok(!matches!(nonce_state, State::Uninitialized))
+///     }
+///     #
+///     # let client = RpcClient::new(String::new());
+///     # let nonce_account_pubkey = Pubkey::new_unique();
+///     # is_nonce_initialized(&client, &nonce_account_pubkey).await?;
+///     # Ok::<(), anyhow::Error>(())
+/// # })?;
+/// # Ok::<(), anyhow::Error>(())
+/// ```
+pub fn state_from_account<T: ReadableAccount + StateMut<Versions>>(
+    account: &T,
+) -> Result<State, Error> {
+    account_identity_ok(account)?;
+    let versions = StateMut::<Versions>::state(account).map_err(|_| Error::InvalidAccountData)?;
+    Ok(State::from(versions))
+}
+
+/// Deserialize the state data of a durable transaction nonce account.
+///
+/// # Errors
+///
+/// Returns an error if the account is not owned by the system program or
+/// contains no data. Returns an error if the account state is uninitialized or
+/// fails to deserialize.
+///
+/// # Examples
+///
+/// Create and sign a transaction with a durable nonce:
+///
+/// ```no_run
+/// use solana_client::nonblocking::{
+///     rpc_client::RpcClient,
+///     nonce_utils,
+/// };
+/// use solana_sdk::{
+///     message::Message,
+///     pubkey::Pubkey,
+///     signature::{Keypair, Signer},
+///     system_instruction,
+///     transaction::Transaction,
+/// };
+/// use std::path::Path;
+/// use anyhow::Result;
+/// # use anyhow::anyhow;
+///
+/// futures::executor::block_on(async {
+///     async fn create_transfer_tx_with_nonce(
+///         client: &RpcClient,
+///         nonce_account_pubkey: &Pubkey,
+///         payer: &Keypair,
+///         receiver: &Pubkey,
+///         amount: u64,
+///         tx_path: &Path,
+///     ) -> Result<()> {
+///
+///         let instr_transfer = system_instruction::transfer(
+///             &payer.pubkey(),
+///             receiver,
+///             amount,
+///         );
+///
+///         // In this example, `payer` is `nonce_account_pubkey`'s authority
+///         let instr_advance_nonce_account = system_instruction::advance_nonce_account(
+///             nonce_account_pubkey,
+///             &payer.pubkey(),
+///         );
+///
+///         // The `advance_nonce_account` instruction must be the first issued in
+///         // the transaction.
+///         let message = Message::new(
+///             &[
+///                 instr_advance_nonce_account,
+///                 instr_transfer
+///             ],
+///             Some(&payer.pubkey()),
+///         );
+///
+///         let mut tx = Transaction::new_unsigned(message);
+///
+///         // Sign the tx with nonce_account's `blockhash` instead of the
+///         // network's latest blockhash.
+///         let nonce_account = client.get_account(nonce_account_pubkey).await?;
+///         let nonce_data = nonce_utils::data_from_account(&nonce_account)?;
+///         let blockhash = nonce_data.blockhash();
+///
+///         tx.try_sign(&[payer], blockhash)?;
+///
+///         // Save the signed transaction locally for later submission.
+///         save_tx_to_file(&tx_path, &tx)?;
+///
+///         Ok(())
+///     }
+///     #
+///     # fn save_tx_to_file(path: &Path, tx: &Transaction) -> Result<()> {
+///     #     Ok(())
+///     # }
+///     #
+///     # let client = RpcClient::new(String::new());
+///     # let nonce_account_pubkey = Pubkey::new_unique();
+///     # let payer = Keypair::new();
+///     # let receiver = Pubkey::new_unique();
+///     # create_transfer_tx_with_nonce(&client, &nonce_account_pubkey, &payer, &receiver, 1024, Path::new("new_tx")).await?;
+///     #
+///     # Ok::<(), anyhow::Error>(())
+/// # })?;
+/// # Ok::<(), anyhow::Error>(())
+/// ```
+pub fn data_from_account<T: ReadableAccount + StateMut<Versions>>(
+    account: &T,
+) -> Result<Data, Error> {
+    account_identity_ok(account)?;
+    state_from_account(account).and_then(|ref s| data_from_state(s).map(|d| d.clone()))
+}
+
+/// Get the nonce data from its [`State`] value.
+///
+/// # Errors
+///
+/// Returns [`Error::InvalidStateForOperation`] if `state` is
+/// [`State::Uninitialized`].
+pub fn data_from_state(state: &State) -> Result<&Data, Error> {
+    match state {
+        State::Uninitialized => Err(Error::InvalidStateForOperation),
+        State::Initialized(data) => Ok(data),
+    }
+}
diff --git a/client/src/nonblocking/rpc_client.rs b/client/src/nonblocking/rpc_client.rs
index b4d79cace9..4aa3dd2383 100644
--- a/client/src/nonblocking/rpc_client.rs
+++ b/client/src/nonblocking/rpc_client.rs
@@ -5123,6 +5123,31 @@ impl RpcClient {
             .await
     }
 
+    pub async fn get_token_largest_accounts(
+        &self,
+        mint: &Pubkey,
+    ) -> ClientResult<Vec<RpcTokenAccountBalance>> {
+        Ok(self
+            .get_token_largest_accounts_with_commitment(mint, self.commitment())
+            .await?
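// Annotation (not part of the patch): minimal use of the new helper; the mint
// pubkey is a placeholder supplied by the caller. RpcTokenAccountBalance
// pairs each token account address with a UiTokenAmount:
async fn print_largest_holders(
    rpc_client: &RpcClient,
    mint: &Pubkey,
) -> Result<(), Box<dyn std::error::Error>> {
    for balance in rpc_client.get_token_largest_accounts(mint).await? {
        println!("{}: {}", balance.address, balance.amount.ui_amount_string);
    }
    Ok(())
}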
+ .value) + } + + pub async fn get_token_largest_accounts_with_commitment( + &self, + mint: &Pubkey, + commitment_config: CommitmentConfig, + ) -> RpcResult> { + self.send( + RpcRequest::GetTokenLargestAccounts, + json!([ + mint.to_string(), + self.maybe_map_commitment(commitment_config).await? + ]), + ) + .await + } + pub async fn get_token_supply(&self, mint: &Pubkey) -> ClientResult { Ok(self .get_token_supply_with_commitment(mint, self.commitment()) diff --git a/client/src/nonce_utils.rs b/client/src/nonce_utils.rs index 9d79777155..b00ef416c5 100644 --- a/client/src/nonce_utils.rs +++ b/client/src/nonce_utils.rs @@ -1,39 +1,13 @@ //! Durable transaction nonce helpers. +pub use crate::nonblocking::nonce_utils::{ + account_identity_ok, data_from_account, data_from_state, state_from_account, Error, +}; use { crate::rpc_client::RpcClient, - solana_sdk::{ - account::{Account, ReadableAccount}, - account_utils::StateMut, - commitment_config::CommitmentConfig, - hash::Hash, - nonce::{ - state::{Data, Versions}, - State, - }, - pubkey::Pubkey, - system_program, - }, + solana_sdk::{account::Account, commitment_config::CommitmentConfig, pubkey::Pubkey}, }; -#[derive(Debug, thiserror::Error, PartialEq, Eq)] -pub enum Error { - #[error("invalid account owner")] - InvalidAccountOwner, - #[error("invalid account data")] - InvalidAccountData, - #[error("unexpected account data size")] - UnexpectedDataSize, - #[error("provided hash ({provided}) does not match nonce hash ({expected})")] - InvalidHash { provided: Hash, expected: Hash }, - #[error("provided authority ({provided}) does not match nonce authority ({expected})")] - InvalidAuthority { provided: Pubkey, expected: Pubkey }, - #[error("invalid state for requested operation")] - InvalidStateForOperation, - #[error("client error: {0}")] - Client(String), -} - /// Get a nonce account from the network. /// /// This is like [`RpcClient::get_account`] except: @@ -66,176 +40,3 @@ pub fn get_account_with_commitment( }) .and_then(|a| account_identity_ok(&a).map(|()| a)) } - -/// Perform basic checks that an account has nonce-like properties. -/// -/// # Errors -/// -/// Returns [`Error::InvalidAccountOwner`] if the account is not owned by the -/// system program. Returns [`Error::UnexpectedDataSize`] if the account -/// contains no data. -pub fn account_identity_ok(account: &T) -> Result<(), Error> { - if account.owner() != &system_program::id() { - Err(Error::InvalidAccountOwner) - } else if account.data().is_empty() { - Err(Error::UnexpectedDataSize) - } else { - Ok(()) - } -} - -/// Deserialize the state of a durable transaction nonce account. -/// -/// # Errors -/// -/// Returns an error if the account is not owned by the system program or -/// contains no data. -/// -/// # Examples -/// -/// Determine if a nonce account is initialized: -/// -/// ```no_run -/// use solana_client::{ -/// rpc_client::RpcClient, -/// nonce_utils, -/// }; -/// use solana_sdk::{ -/// nonce::State, -/// pubkey::Pubkey, -/// }; -/// use anyhow::Result; -/// -/// fn is_nonce_initialized( -/// client: &RpcClient, -/// nonce_account_pubkey: &Pubkey, -/// ) -> Result { -/// -/// // Sign the tx with nonce_account's `blockhash` instead of the -/// // network's latest blockhash. 
-/// let nonce_account = client.get_account(nonce_account_pubkey)?; -/// let nonce_state = nonce_utils::state_from_account(&nonce_account)?; -/// -/// Ok(!matches!(nonce_state, State::Uninitialized)) -/// } -/// # -/// # let client = RpcClient::new(String::new()); -/// # let nonce_account_pubkey = Pubkey::new_unique(); -/// # is_nonce_initialized(&client, &nonce_account_pubkey)?; -/// # -/// # Ok::<(), anyhow::Error>(()) -/// ``` -pub fn state_from_account>( - account: &T, -) -> Result { - account_identity_ok(account)?; - let versions = StateMut::::state(account).map_err(|_| Error::InvalidAccountData)?; - Ok(State::from(versions)) -} - -/// Deserialize the state data of a durable transaction nonce account. -/// -/// # Errors -/// -/// Returns an error if the account is not owned by the system program or -/// contains no data. Returns an error if the account state is uninitialized or -/// fails to deserialize. -/// -/// # Examples -/// -/// Create and sign a transaction with a durable nonce: -/// -/// ```no_run -/// use solana_client::{ -/// rpc_client::RpcClient, -/// nonce_utils, -/// }; -/// use solana_sdk::{ -/// message::Message, -/// pubkey::Pubkey, -/// signature::{Keypair, Signer}, -/// system_instruction, -/// transaction::Transaction, -/// }; -/// use std::path::Path; -/// use anyhow::Result; -/// # use anyhow::anyhow; -/// -/// fn create_transfer_tx_with_nonce( -/// client: &RpcClient, -/// nonce_account_pubkey: &Pubkey, -/// payer: &Keypair, -/// receiver: &Pubkey, -/// amount: u64, -/// tx_path: &Path, -/// ) -> Result<()> { -/// -/// let instr_transfer = system_instruction::transfer( -/// &payer.pubkey(), -/// receiver, -/// amount, -/// ); -/// -/// // In this example, `payer` is `nonce_account_pubkey`'s authority -/// let instr_advance_nonce_account = system_instruction::advance_nonce_account( -/// nonce_account_pubkey, -/// &payer.pubkey(), -/// ); -/// -/// // The `advance_nonce_account` instruction must be the first issued in -/// // the transaction. -/// let message = Message::new( -/// &[ -/// instr_advance_nonce_account, -/// instr_transfer -/// ], -/// Some(&payer.pubkey()), -/// ); -/// -/// let mut tx = Transaction::new_unsigned(message); -/// -/// // Sign the tx with nonce_account's `blockhash` instead of the -/// // network's latest blockhash. -/// let nonce_account = client.get_account(nonce_account_pubkey)?; -/// let nonce_data = nonce_utils::data_from_account(&nonce_account)?; -/// let blockhash = nonce_data.blockhash(); -/// -/// tx.try_sign(&[payer], blockhash)?; -/// -/// // Save the signed transaction locally for later submission. -/// save_tx_to_file(&tx_path, &tx)?; -/// -/// Ok(()) -/// } -/// # -/// # fn save_tx_to_file(path: &Path, tx: &Transaction) -> Result<()> { -/// # Ok(()) -/// # } -/// # -/// # let client = RpcClient::new(String::new()); -/// # let nonce_account_pubkey = Pubkey::new_unique(); -/// # let payer = Keypair::new(); -/// # let receiver = Pubkey::new_unique(); -/// # create_transfer_tx_with_nonce(&client, &nonce_account_pubkey, &payer, &receiver, 1024, Path::new("new_tx"))?; -/// # -/// # Ok::<(), anyhow::Error>(()) -/// ``` -pub fn data_from_account>( - account: &T, -) -> Result { - account_identity_ok(account)?; - state_from_account(account).and_then(|ref s| data_from_state(s).map(|d| d.clone())) -} - -/// Get the nonce data from its [`State`] value. -/// -/// # Errors -/// -/// Returns [`Error::InvalidStateForOperation`] if `state` is -/// [`State::Uninitialized`]. 
-pub fn data_from_state(state: &State) -> Result<&Data, Error> { - match state { - State::Uninitialized => Err(Error::InvalidStateForOperation), - State::Initialized(data) => Ok(data), - } -} diff --git a/client/src/rpc_client.rs b/client/src/rpc_client.rs index f0e15b8e75..151749d1f1 100644 --- a/client/src/rpc_client.rs +++ b/client/src/rpc_client.rs @@ -179,7 +179,7 @@ impl RpcClient { )), runtime: Some( tokio::runtime::Builder::new_current_thread() - .thread_name("rpc-client") + .thread_name("solRpcClient") .enable_io() .enable_time() .build() @@ -1120,13 +1120,12 @@ impl RpcClient { bundles_and_configs: Vec<(VersionedBundle, RpcSimulateBundleConfig)>, ) -> BatchRpcResult { self.invoke( - self.rpc_client - .batch_simulate_bundle_with_config(bundles_and_configs), + (self.rpc_client.as_ref()).batch_simulate_bundle_with_config(bundles_and_configs), ) } pub fn simulate_bundle(&self, bundle: &VersionedBundle) -> RpcResult { - self.invoke(self.rpc_client.simulate_bundle(bundle)) + self.invoke((self.rpc_client.as_ref()).simulate_bundle(bundle)) } pub fn simulate_bundle_with_config( @@ -1134,7 +1133,7 @@ impl RpcClient { bundle: &VersionedBundle, config: RpcSimulateBundleConfig, ) -> RpcResult { - self.invoke(self.rpc_client.simulate_bundle_with_config(bundle, config)) + self.invoke((self.rpc_client.as_ref()).simulate_bundle_with_config(bundle, config)) } /// Returns the highest slot information that the node has snapshots for. @@ -3931,6 +3930,24 @@ impl RpcClient { ) } + pub fn get_token_largest_accounts( + &self, + mint: &Pubkey, + ) -> ClientResult> { + self.invoke((self.rpc_client.as_ref()).get_token_largest_accounts(mint)) + } + + pub fn get_token_largest_accounts_with_commitment( + &self, + mint: &Pubkey, + commitment_config: CommitmentConfig, + ) -> RpcResult> { + self.invoke( + (self.rpc_client.as_ref()) + .get_token_largest_accounts_with_commitment(mint, commitment_config), + ) + } + pub fn get_token_supply(&self, mint: &Pubkey) -> ClientResult { self.invoke((self.rpc_client.as_ref()).get_token_supply(mint)) } diff --git a/client/src/rpc_request.rs b/client/src/rpc_request.rs index 81aa4b7cbc..c28d59dedb 100644 --- a/client/src/rpc_request.rs +++ b/client/src/rpc_request.rs @@ -100,6 +100,7 @@ pub enum RpcRequest { GetTokenAccountBalance, GetTokenAccountsByDelegate, GetTokenAccountsByOwner, + GetTokenLargestAccounts, GetTokenSupply, GetTransaction, GetTransactionCount, @@ -176,6 +177,7 @@ impl fmt::Display for RpcRequest { RpcRequest::GetTokenAccountsByDelegate => "getTokenAccountsByDelegate", RpcRequest::GetTokenAccountsByOwner => "getTokenAccountsByOwner", RpcRequest::GetTokenSupply => "getTokenSupply", + RpcRequest::GetTokenLargestAccounts => "getTokenLargestAccounts", RpcRequest::GetTransaction => "getTransaction", RpcRequest::GetTransactionCount => "getTransactionCount", RpcRequest::GetVersion => "getVersion", @@ -325,6 +327,10 @@ mod tests { let test_request = RpcRequest::SendTransaction; let request = test_request.build_request_json(1, Value::Null); assert_eq!(request["method"], "sendTransaction"); + + let test_request = RpcRequest::GetTokenLargestAccounts; + let request = test_request.build_request_json(1, Value::Null); + assert_eq!(request["method"], "getTokenLargestAccounts"); } #[test] diff --git a/client/src/rpc_response.rs b/client/src/rpc_response.rs index a7eb7aab5d..742dde0d8f 100644 --- a/client/src/rpc_response.rs +++ b/client/src/rpc_response.rs @@ -96,28 +96,28 @@ pub struct Response { pub value: T, } -#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] 
+#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcBlockCommitment<T> {
     pub commitment: Option<T>,
     pub total_stake: u64,
 }

-#[derive(Serialize, Deserialize, Clone, Debug)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcBlockhashFeeCalculator {
     pub blockhash: String,
     pub fee_calculator: FeeCalculator,
 }

-#[derive(Serialize, Deserialize, Clone, Debug)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcBlockhash {
     pub blockhash: String,
     pub last_valid_block_height: u64,
 }

-#[derive(Serialize, Deserialize, Clone, Debug)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcFees {
     pub blockhash: String,
@@ -126,7 +126,7 @@ pub struct RpcFees {
     pub last_valid_block_height: u64,
 }

-#[derive(Serialize, Deserialize, Clone, Debug)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
 #[serde(rename_all = "camelCase")]
 pub struct DeprecatedRpcFees {
     pub blockhash: String,
@@ -134,7 +134,7 @@ pub struct DeprecatedRpcFees {
     pub last_valid_slot: Slot,
 }

-#[derive(Serialize, Deserialize, Clone, Debug)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
 #[serde(rename_all = "camelCase")]
 pub struct Fees {
     pub blockhash: Hash,
@@ -142,13 +142,13 @@ pub struct Fees {
     pub last_valid_block_height: u64,
 }

-#[derive(Serialize, Deserialize, Clone, Debug)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcFeeCalculator {
     pub fee_calculator: FeeCalculator,
 }

-#[derive(Serialize, Deserialize, Clone, Debug)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcFeeRateGovernor {
     pub fee_rate_governor: FeeRateGovernor,
@@ -208,7 +208,7 @@ pub struct SlotTransactionStats {
     pub max_transactions_per_entry: u64,
 }

-#[derive(Serialize, Deserialize, Debug)]
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
 #[serde(rename_all = "camelCase", tag = "type")]
 pub enum SlotUpdate {
     FirstShredReceived {
@@ -258,14 +258,14 @@ impl SlotUpdate {
     }
 }

-#[derive(Serialize, Deserialize, Clone, Debug)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
 #[serde(rename_all = "camelCase", untagged)]
 pub enum RpcSignatureResult {
     ProcessedSignature(ProcessedSignatureResult),
     ReceivedSignature(ReceivedSignatureResult),
 }

-#[derive(Serialize, Deserialize, Clone, Debug)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcLogsResponse {
     pub signature: String, // Signature as base58 string
@@ -273,19 +273,19 @@ pub struct RpcLogsResponse {
     pub logs: Vec<String>,
 }

-#[derive(Serialize, Deserialize, Clone, Debug)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
 #[serde(rename_all = "camelCase")]
 pub struct ProcessedSignatureResult {
     pub err: Option<TransactionError>,
 }

-#[derive(Serialize, Deserialize, Clone, Debug)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
 #[serde(rename_all = "camelCase")]
 pub enum ReceivedSignatureResult {
     ReceivedSignature,
 }

-#[derive(Serialize, Deserialize, Clone, Debug)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcContactInfo {
     /// Pubkey of the node as a base-58 string
@@ -322,7 +322,7 @@ pub struct RpcBlockProduction {
     pub range: RpcBlockProductionRange,
 }

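A note on this run of rpc_response.rs hunks: each one only adds `PartialEq` (plus `Eq` and `Clone` where the field types allow it) to a response type, so callers and tests can copy and compare whole responses instead of checking field by field. A minimal sketch of the effect, using a hypothetical `ExampleRpcResponse` stand-in rather than a type from this patch, and assuming serde with its `derive` feature enabled:

use serde::{Deserialize, Serialize};

// Hypothetical stand-in for one of the RPC response types above.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
struct ExampleRpcResponse {
    blockhash: String,
    last_valid_block_height: u64,
}

fn main() {
    let expected = ExampleRpcResponse {
        blockhash: "11111111111111111111111111111111".to_string(),
        last_valid_block_height: 42,
    };
    // `Clone` lets a caller keep a copy of the response; `PartialEq`/`Eq`
    // let a test assert on the whole value at once.
    let actual = expected.clone();
    assert_eq!(actual, expected);
}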
-#[derive(Serialize, Deserialize, Clone)]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Eq)]
 #[serde(rename_all = "kebab-case")]
 pub struct RpcVersionInfo {
     /// The current version of solana-core
@@ -348,14 +348,14 @@ impl fmt::Display for RpcVersionInfo {
     }
 }

-#[derive(Serialize, Deserialize, Clone, Debug)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
 #[serde(rename_all = "kebab-case")]
 pub struct RpcIdentity {
     /// The current node identity pubkey
     pub identity: String,
 }

-#[derive(Serialize, Deserialize, Clone, Debug)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
 #[serde(rename_all = "camelCase")]
 pub enum RpcBundleSimulationSummary {
     /// error and offending transaction signature
@@ -384,14 +384,14 @@ pub struct RpcVote {
     pub signature: String,
 }

-#[derive(Serialize, Deserialize, Clone, Debug)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcVoteAccountStatus {
     pub current: Vec<RpcVoteAccountInfo>,
     pub delinquent: Vec<RpcVoteAccountInfo>,
 }

-#[derive(Serialize, Deserialize, Clone, Debug)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcVoteAccountInfo {
     /// Vote account address, as base-58 encoded string
@@ -420,7 +420,7 @@ pub struct RpcVoteAccountInfo {
     pub root_slot: Slot,
 }

-#[derive(Serialize, Deserialize, Clone, Debug)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcSignatureConfirmation {
     pub confirmations: usize,
@@ -439,7 +439,7 @@ pub struct RpcSimulateBundleTransactionResult {
     pub return_data: Option<RpcTransactionReturnData>,
 }

-#[derive(Serialize, Deserialize, Clone, Debug)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcSimulateTransactionResult {
     pub err: Option<TransactionError>,
@@ -449,7 +449,7 @@ pub struct RpcSimulateTransactionResult {
     pub return_data: Option<RpcTransactionReturnData>,
 }

-#[derive(Serialize, Deserialize, Clone, Debug)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcTransactionReturnData {
     pub program_id: String,
@@ -471,7 +471,7 @@ pub enum ReturnDataEncoding {
     Base64,
 }

-#[derive(Serialize, Deserialize, Clone, Debug)]
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcStorageTurn {
     pub blockhash: String,
@@ -558,7 +558,7 @@ pub enum RpcBlockUpdateError {
     UnsupportedTransactionVersion(u8),
 }

-#[derive(Serialize, Deserialize, Debug)]
+#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcBlockUpdate {
     pub slot: Slot,
diff --git a/client/src/transaction_executor.rs b/client/src/transaction_executor.rs
index 56f7a80022..89a70d7ee5 100644
--- a/client/src/transaction_executor.rs
+++ b/client/src/transaction_executor.rs
@@ -91,7 +91,7 @@ impl TransactionExecutor {
                 let exit = exit.clone();
                 let cleared = cleared.clone();
                 Builder::new()
-                    .name("sig_clear".to_string())
+                    .name("solSigClear".to_string())
                    .spawn(move || {
                        let client = RpcClient::new_socket_with_commitment(
                            entrypoint_addr,
diff --git a/client/tests/quic_client.rs b/client/tests/quic_client.rs
index 2ccce36060..1c5348177d 100644
--- a/client/tests/quic_client.rs
+++ b/client/tests/quic_client.rs
@@ -27,7 +27,7 @@ mod tests {
         let mut all_packets = vec![];
         let now = Instant::now();
         let mut total_packets: usize = 0;
-        while now.elapsed().as_secs() < 5 {
+        while now.elapsed().as_secs() < 10 {
             if let Ok(packets) =
receiver.recv_timeout(Duration::from_secs(1)) { total_packets = total_packets.saturating_add(packets.len()); all_packets.push(packets) @@ -93,7 +93,7 @@ mod tests { // Send a full size packet with single byte writes. let num_bytes = PACKET_DATA_SIZE; - let num_expected_packets: usize = 4000; + let num_expected_packets: usize = 3000; let packets = vec![vec![0u8; PACKET_DATA_SIZE]; num_expected_packets]; assert!(client.send_wire_transaction_batch_async(packets).is_ok()); @@ -138,7 +138,7 @@ mod tests { // Send a full size packet with single byte writes. let num_bytes = PACKET_DATA_SIZE; - let num_expected_packets: usize = 4000; + let num_expected_packets: usize = 3000; let packets = vec![vec![0u8; PACKET_DATA_SIZE]; num_expected_packets]; assert!(client.send_wire_transaction_batch(&packets).await.is_ok()); diff --git a/core/Cargo.toml b/core/Cargo.toml index 3746fc5963..307ab9254c 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -20,7 +20,7 @@ base64 = "0.13.0" bincode = "1.3.3" bs58 = "0.4.0" bytes = "1.1.0" -chrono = { version = "0.4.11", features = ["serde"] } +chrono = { version = "0.4.21", features = ["serde"] } clap = { version = "3.1.15", features = ["derive"] } crossbeam-channel = "0.5" dashmap = { version = "4.0.2", features = ["rayon", "raw-api"] } @@ -42,7 +42,7 @@ prost-types = "0.8.0" rand = "0.7.0" rand_chacha = "0.2.2" rayon = "1.5.3" -serde = "1.0.138" +serde = "1.0.143" serde_derive = "1.0.103" solana-address-lookup-table-program = { path = "../programs/address-lookup-table", version = "=1.12.0" } solana-bloom = { path = "../bloom", version = "=1.12.0" } @@ -82,8 +82,8 @@ uuid = { version = "1.0.0", features = ["v4", "fast-rng"] } [dev-dependencies] matches = "0.1.9" raptorq = "1.7.0" -serde_json = "1.0.81" -serial_test = "0.8.0" +serde_json = "1.0.83" +serial_test = "0.9.0" solana-logger = { path = "../logger", version = "=1.12.0" } solana-program-runtime = { path = "../program-runtime", version = "=1.12.0" } solana-stake-program = { path = "../programs/stake", version = "=1.12.0" } diff --git a/core/benches/shredder.rs b/core/benches/shredder.rs index 68c3243efd..8df66e9b47 100644 --- a/core/benches/shredder.rs +++ b/core/benches/shredder.rs @@ -9,7 +9,7 @@ use { solana_entry::entry::{create_ticks, Entry}, solana_ledger::shred::{ max_entries_per_n_shred, max_ticks_per_n_shreds, ProcessShredsStats, Shred, ShredFlags, - Shredder, LEGACY_SHRED_DATA_CAPACITY, MAX_DATA_SHREDS_PER_FEC_BLOCK, + Shredder, DATA_SHREDS_PER_FEC_BLOCK, LEGACY_SHRED_DATA_CAPACITY, }, solana_perf::test_tx, solana_sdk::{hash::Hash, packet::PACKET_DATA_SIZE, signature::Keypair}, @@ -153,13 +153,12 @@ fn bench_deserialize_hdr(bencher: &mut Bencher) { #[bench] fn bench_shredder_coding(bencher: &mut Bencher) { - let symbol_count = MAX_DATA_SHREDS_PER_FEC_BLOCK as usize; + let symbol_count = DATA_SHREDS_PER_FEC_BLOCK; let data_shreds = make_shreds(symbol_count); bencher.iter(|| { Shredder::generate_coding_shreds( &data_shreds[..symbol_count], - true, // is_last_in_slot - 0, // next_code_index + 0, // next_code_index ) .len(); }) @@ -167,12 +166,11 @@ fn bench_shredder_coding(bencher: &mut Bencher) { #[bench] fn bench_shredder_decoding(bencher: &mut Bencher) { - let symbol_count = MAX_DATA_SHREDS_PER_FEC_BLOCK as usize; + let symbol_count = DATA_SHREDS_PER_FEC_BLOCK; let data_shreds = make_shreds(symbol_count); let coding_shreds = Shredder::generate_coding_shreds( &data_shreds[..symbol_count], - true, // is_last_in_slot - 0, // next_code_index + 0, // next_code_index ); bencher.iter(|| { 
Shredder::try_recovery(coding_shreds[..].to_vec()).unwrap(); @@ -181,18 +179,18 @@ fn bench_shredder_decoding(bencher: &mut Bencher) { #[bench] fn bench_shredder_coding_raptorq(bencher: &mut Bencher) { - let symbol_count = MAX_DATA_SHREDS_PER_FEC_BLOCK; - let data = make_concatenated_shreds(symbol_count as usize); + let symbol_count = DATA_SHREDS_PER_FEC_BLOCK; + let data = make_concatenated_shreds(symbol_count); bencher.iter(|| { let encoder = Encoder::with_defaults(&data, VALID_SHRED_DATA_LEN as u16); - encoder.get_encoded_packets(symbol_count); + encoder.get_encoded_packets(symbol_count as u32); }) } #[bench] fn bench_shredder_decoding_raptorq(bencher: &mut Bencher) { - let symbol_count = MAX_DATA_SHREDS_PER_FEC_BLOCK; - let data = make_concatenated_shreds(symbol_count as usize); + let symbol_count = DATA_SHREDS_PER_FEC_BLOCK; + let data = make_concatenated_shreds(symbol_count); let encoder = Encoder::with_defaults(&data, VALID_SHRED_DATA_LEN as u16); let mut packets = encoder.get_encoded_packets(symbol_count as u32); packets.shuffle(&mut rand::thread_rng()); diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs index ae8f0dbe78..9c476cbfe5 100644 --- a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -50,7 +50,7 @@ impl AccountsHashVerifier { let exit = exit.clone(); let cluster_info = cluster_info.clone(); let t_accounts_hash_verifier = Builder::new() - .name("solana-hash-accounts".to_string()) + .name("solAcctHashVer".to_string()) .spawn(move || { let mut hashes = vec![]; loop { @@ -152,6 +152,26 @@ impl AccountsHashVerifier { if accounts_package.expected_capitalization != lamports { // before we assert, run the hash calc again. This helps track down whether it could have been a failure in a race condition possibly with shrink. // We could add diagnostics to the hash calc here to produce a per bin cap or something to help narrow down how many pubkeys are different. 
+ let result_with_index = accounts_package + .accounts + .accounts_db + .calculate_accounts_hash( + accounts_package.slot, + &CalcAccountsHashConfig { + use_bg_thread_pool: false, + check_hash: false, + ancestors: None, + use_write_cache: false, + epoch_schedule: &accounts_package.epoch_schedule, + rent_collector: &accounts_package.rent_collector, + store_detailed_debug_info_on_failure: false, + full_snapshot: None, + }, + ); + info!( + "hash calc with index: {}, {:?}", + accounts_package.slot, result_with_index + ); let _ = accounts_package .accounts .accounts_db @@ -190,6 +210,7 @@ impl AccountsHashVerifier { accounts_package.snapshot_links.path(), accounts_package.slot, &accounts_hash, + None, ); datapoint_info!( "accounts_hash_verifier", diff --git a/core/src/ancestor_hashes_service.rs b/core/src/ancestor_hashes_service.rs index cc142168cb..3f81d38e2a 100644 --- a/core/src/ancestor_hashes_service.rs +++ b/core/src/ancestor_hashes_service.rs @@ -4,29 +4,34 @@ use { duplicate_repair_status::{DeadSlotAncestorRequestStatus, DuplicateAncestorDecision}, outstanding_requests::OutstandingRequests, packet_threshold::DynamicPacketToProcessThreshold, - repair_response::{self}, repair_service::{DuplicateSlotsResetSender, RepairInfo, RepairStatsGroup}, replay_stage::DUPLICATE_THRESHOLD, result::{Error, Result}, - serve_repair::{AncestorHashesRepairType, ServeRepair}, + serve_repair::{ + AncestorHashesRepairType, AncestorHashesResponse, RepairProtocol, ServeRepair, + }, }, + bincode::serialize, crossbeam_channel::{unbounded, Receiver, Sender}, dashmap::{mapref::entry::Entry::Occupied, DashMap}, - solana_ledger::{blockstore::Blockstore, shred::SIZE_OF_NONCE}, + solana_gossip::{cluster_info::ClusterInfo, ping_pong::Pong}, + solana_ledger::blockstore::Blockstore, solana_perf::{ - packet::{Packet, PacketBatch}, + packet::{deserialize_from_with_limit, Packet, PacketBatch}, recycler::Recycler, }, solana_runtime::bank::Bank, solana_sdk::{ clock::{Slot, SLOT_MS}, pubkey::Pubkey, + signature::Signable, signer::keypair::Keypair, timing::timestamp, }, solana_streamer::streamer::{self, PacketBatchReceiver, StreamerReceiveStats}, std::{ collections::HashSet, + io::{Cursor, Read}, net::UdpSocket, sync::{ atomic::{AtomicBool, Ordering}, @@ -62,27 +67,25 @@ type RetryableSlotsReceiver = Receiver; type OutstandingAncestorHashesRepairs = OutstandingRequests; #[derive(Default)] -pub struct AncestorHashesResponsesStats { - pub total_packets: usize, - pub dropped_packets: usize, - pub invalid_packets: usize, - pub processed: usize, +struct AncestorHashesResponsesStats { + total_packets: usize, + processed: usize, + dropped_packets: usize, + invalid_packets: usize, + ping_count: usize, + ping_err_verify_count: usize, } impl AncestorHashesResponsesStats { fn report(&mut self) { - inc_new_counter_info!( - "ancestor_hashes_responses-total_packets", - self.total_packets - ); - inc_new_counter_info!("ancestor_hashes_responses-processed", self.processed); - inc_new_counter_info!( - "ancestor_hashes_responses-dropped_packets", - self.dropped_packets - ); - inc_new_counter_info!( - "ancestor_hashes_responses-invalid_packets", - self.invalid_packets + datapoint_info!( + "ancestor_hashes_responses", + ("total_packets", self.total_packets, i64), + ("processed", self.processed, i64), + ("dropped_packets", self.dropped_packets, i64), + ("invalid_packets", self.invalid_packets, i64), + ("ping_count", self.ping_count, i64), + ("ping_err_verify_count", self.ping_err_verify_count, i64), ); *self = AncestorHashesResponsesStats::default(); 
    }
@@ -174,6 +177,8 @@ impl AncestorHashesService {
             exit.clone(),
             repair_info.duplicate_slots_reset_sender.clone(),
             retryable_slots_sender,
+            repair_info.cluster_info.clone(),
+            ancestor_hashes_request_socket.clone(),
         );

         // Generate ancestor requests for dead slots that are repairable
@@ -206,14 +211,17 @@ impl AncestorHashesService {
         exit: Arc<AtomicBool>,
         duplicate_slots_reset_sender: DuplicateSlotsResetSender,
         retryable_slots_sender: RetryableSlotsSender,
+        cluster_info: Arc<ClusterInfo>,
+        ancestor_socket: Arc<UdpSocket>,
     ) -> JoinHandle<()> {
         Builder::new()
-            .name("solana-ancestor-hashes-responses-service".to_string())
+            .name("solAncHashesSvc".to_string())
            .spawn(move || {
                let mut last_stats_report = Instant::now();
                let mut stats = AncestorHashesResponsesStats::default();
                let mut packet_threshold = DynamicPacketToProcessThreshold::default();
                loop {
+                    let keypair = cluster_info.keypair().clone();
                    let result = Self::process_new_packets_from_channel(
                        &ancestor_hashes_request_statuses,
                        &response_receiver,
@@ -223,6 +231,8 @@
                         &mut packet_threshold,
                         &duplicate_slots_reset_sender,
                         &retryable_slots_sender,
+                        &keypair,
+                        &ancestor_socket,
                     );
                     match result {
                         Err(Error::RecvTimeout(_)) | Ok(_) => {}
@@ -241,6 +251,7 @@
     }

     /// Process messages from the network
+    #[allow(clippy::too_many_arguments)]
     fn process_new_packets_from_channel(
         ancestor_hashes_request_statuses: &DashMap<Slot, DeadSlotAncestorRequestStatus>,
         response_receiver: &PacketBatchReceiver,
@@ -250,6 +261,8 @@
         packet_threshold: &mut DynamicPacketToProcessThreshold,
         duplicate_slots_reset_sender: &DuplicateSlotsResetSender,
         retryable_slots_sender: &RetryableSlotsSender,
+        keypair: &Keypair,
+        ancestor_socket: &UdpSocket,
     ) -> Result<()> {
         let timeout = Duration::new(1, 0);
         let mut packet_batches = vec![response_receiver.recv_timeout(timeout)?];
@@ -278,6 +291,8 @@
                 blockstore,
                 duplicate_slots_reset_sender,
                 retryable_slots_sender,
+                keypair,
+                ancestor_socket,
             );
         }
         packet_threshold.update(total_packets, timer.elapsed());
@@ -292,6 +307,8 @@
         blockstore: &Blockstore,
         duplicate_slots_reset_sender: &DuplicateSlotsResetSender,
         retryable_slots_sender: &RetryableSlotsSender,
+        keypair: &Keypair,
+        ancestor_socket: &UdpSocket,
     ) {
         packet_batch.iter().for_each(|packet| {
             let decision = Self::verify_and_process_ancestor_response(
@@ -300,6 +317,8 @@
                 stats,
                 outstanding_requests,
                 blockstore,
+                keypair,
+                ancestor_socket,
             );
             if let Some((slot, decision)) = decision {
                 Self::handle_ancestor_request_decision(
@@ -321,55 +340,109 @@ impl AncestorHashesService {
         stats: &mut AncestorHashesResponsesStats,
         outstanding_requests: &RwLock<OutstandingAncestorHashesRepairs>,
         blockstore: &Blockstore,
+        keypair: &Keypair,
+        ancestor_socket: &UdpSocket,
     ) -> Option<(Slot, DuplicateAncestorDecision)> {
         let from_addr = packet.meta.socket_addr();
-        let ancestor_hashes_response = packet
-            .deserialize_slice(..packet.meta.size.saturating_sub(SIZE_OF_NONCE))
-            .ok()?;
-
-        // Verify the response
-        let request_slot = repair_response::nonce(packet).and_then(|nonce| {
-            outstanding_requests.write().unwrap().register_response(
-                nonce,
-                &ancestor_hashes_response,
-                timestamp(),
-                // If the response is valid, return the slot the request
-                // was for
-                |ancestor_hashes_request| ancestor_hashes_request.0,
-            )
-        });
+        let packet_data = match packet.data(..)
{ + Some(data) => data, + None => { + stats.invalid_packets += 1; + return None; + } + }; + let mut cursor = Cursor::new(packet_data); + let response = match deserialize_from_with_limit(&mut cursor) { + Ok(response) => response, + Err(_) => { + stats.invalid_packets += 1; + return None; + } + }; - if request_slot.is_none() { - stats.invalid_packets += 1; - return None; - } + match response { + AncestorHashesResponse::Hashes(ref hashes) => { + // deserialize trailing nonce + let nonce = match deserialize_from_with_limit(&mut cursor) { + Ok(nonce) => nonce, + Err(_) => { + stats.invalid_packets += 1; + return None; + } + }; - // If was a valid response, there must be a valid `request_slot` - let request_slot = request_slot.unwrap(); - stats.processed += 1; + // verify that packet does not contain extraneous data + if cursor.bytes().next().is_some() { + stats.invalid_packets += 1; + return None; + } - if let Occupied(mut ancestor_hashes_status_ref) = - ancestor_hashes_request_statuses.entry(request_slot) - { - let decision = ancestor_hashes_status_ref.get_mut().add_response( - &from_addr, - ancestor_hashes_response.into_slot_hashes(), - blockstore, - ); - if decision.is_some() { - // Once a request is completed, remove it from the map so that new - // requests for the same slot can be made again if necessary. It's - // important to hold the `write` lock here via - // `ancestor_hashes_status_ref` so that we don't race with deletion + - // insertion from the `t_ancestor_requests` thread, which may - // 1) Remove expired statuses from `ancestor_hashes_request_statuses` - // 2) Insert another new one via `manage_ancestor_requests()`. - // In which case we wouldn't want to delete the newly inserted entry here. - ancestor_hashes_status_ref.remove(); + let request_slot = outstanding_requests.write().unwrap().register_response( + nonce, + &response, + timestamp(), + // If the response is valid, return the slot the request + // was for + |ancestor_hashes_request| ancestor_hashes_request.0, + ); + + if request_slot.is_none() { + stats.invalid_packets += 1; + return None; + } + + // If was a valid response, there must be a valid `request_slot` + let request_slot = request_slot.unwrap(); + stats.processed += 1; + + if let Occupied(mut ancestor_hashes_status_ref) = + ancestor_hashes_request_statuses.entry(request_slot) + { + let decision = ancestor_hashes_status_ref.get_mut().add_response( + &from_addr, + hashes.clone(), + blockstore, + ); + if decision.is_some() { + // Once a request is completed, remove it from the map so that new + // requests for the same slot can be made again if necessary. It's + // important to hold the `write` lock here via + // `ancestor_hashes_status_ref` so that we don't race with deletion + + // insertion from the `t_ancestor_requests` thread, which may + // 1) Remove expired statuses from `ancestor_hashes_request_statuses` + // 2) Insert another new one via `manage_ancestor_requests()`. + // In which case we wouldn't want to delete the newly inserted entry here. 
+ ancestor_hashes_status_ref.remove(); + } + decision.map(|decision| (request_slot, decision)) + } else { + None + } + } + AncestorHashesResponse::Ping(ping) => { + // verify that packet does not contain extraneous data + if cursor.bytes().next().is_some() { + stats.invalid_packets += 1; + return None; + } + if !ping.verify() { + stats.ping_err_verify_count += 1; + return None; + } + stats.ping_count += 1; + // Respond both with and without domain so that the other node + // will accept the response regardless of its upgrade status. + // TODO: remove domain = false once cluster is upgraded. + for domain in [false, true] { + if let Ok(pong) = Pong::new(domain, &ping, keypair) { + let pong = RepairProtocol::Pong(pong); + if let Ok(pong_bytes) = serialize(&pong) { + let _ignore = ancestor_socket.send_to(&pong_bytes[..], from_addr); + } + } + } + None } - decision.map(|decision| (request_slot, decision)) - } else { - None } } @@ -465,7 +538,7 @@ impl AncestorHashesService { // to MAX_ANCESTOR_HASHES_SLOT_REQUESTS_PER_SECOND/second let mut request_throttle = vec![]; Builder::new() - .name("solana-manage-ancestor-requests".to_string()) + .name("solManAncReqs".to_string()) .spawn(move || loop { if exit.load(Ordering::Relaxed) { return; @@ -1145,6 +1218,8 @@ mod test { &mut AncestorHashesResponsesStats::default(), &outstanding_requests, &requester_blockstore, + &requester_cluster_info.keypair(), + &ancestor_hashes_request_socket, ) .unwrap(); @@ -1385,7 +1460,9 @@ mod test { let ManageAncestorHashesState { ancestor_hashes_request_statuses, + ancestor_hashes_request_socket, outstanding_requests, + repair_info, .. } = ManageAncestorHashesState::new(bank_forks); @@ -1402,6 +1479,8 @@ mod test { &mut AncestorHashesResponsesStats::default(), &outstanding_requests, &blockstore, + &repair_info.cluster_info.keypair(), + &ancestor_hashes_request_socket, ) .is_none()); } @@ -1506,6 +1585,8 @@ mod test { &mut AncestorHashesResponsesStats::default(), &outstanding_requests, &requester_blockstore, + &requester_cluster_info.keypair(), + &ancestor_hashes_request_socket, ) .unwrap(); diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index ce09e780a4..e257efa698 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -1,10 +1,12 @@ //! The `banking_stage` processes Transaction messages. It is intended to be used //! to construct a software pipeline. The stage uses all available CPU cores and //! can do its processing in parallel with signature verification on the GPU. 
+ use { crate::{ bundle_account_locker::BundleAccountLocker, forward_packet_batches_by_accounts::ForwardPacketBatchesByAccounts, + immutable_deserialized_packet::ImmutableDeserializedPacket, leader_slot_banking_stage_metrics::{LeaderSlotMetricsTracker, ProcessTransactionsSummary}, leader_slot_banking_stage_timing_metrics::{ LeaderExecuteAndCommitTimings, RecordTransactionsTimings, @@ -475,7 +477,7 @@ impl BankingStage { let bank_forks = bank_forks.clone(); Builder::new() - .name(format!("solana-banking-stage-tx-{}", i)) + .name(format!("solBanknStgTx{:02}", i)) .spawn(move || { Self::process_loop( &verified_receiver, @@ -1361,7 +1363,7 @@ impl BankingStage { ); retryable_transaction_indexes.extend(execution_results.iter().enumerate().filter_map( - |(index, execution_result)| execution_result.was_executed().then(|| index), + |(index, execution_result)| execution_result.was_executed().then_some(index), )); return ExecuteAndCommitTransactionsOutput { @@ -2051,26 +2053,26 @@ impl BankingStage { packet_count_upperbound: usize, ) -> Result<(Vec, Option), RecvTimeoutError> { let start = Instant::now(); - let mut aggregated_tracer_packet_stats_option: Option = None; - let (mut packet_batches, new_tracer_packet_stats_option) = + let (mut packet_batches, mut aggregated_tracer_packet_stats_option) = verified_receiver.recv_timeout(recv_timeout)?; - if let Some(new_tracer_packet_stats) = &new_tracer_packet_stats_option { - if let Some(aggregated_tracer_packet_stats) = &mut aggregated_tracer_packet_stats_option - { - aggregated_tracer_packet_stats.aggregate(new_tracer_packet_stats); - } else { - aggregated_tracer_packet_stats_option = new_tracer_packet_stats_option; - } - } - let mut num_packets_received: usize = packet_batches.iter().map(|batch| batch.len()).sum(); - while let Ok((packet_batch, _tracer_packet_stats_option)) = verified_receiver.try_recv() { + while let Ok((packet_batch, tracer_packet_stats_option)) = verified_receiver.try_recv() { trace!("got more packet batches in banking stage"); let (packets_received, packet_count_overflowed) = num_packets_received .overflowing_add(packet_batch.iter().map(|batch| batch.len()).sum()); packet_batches.extend(packet_batch); + if let Some(tracer_packet_stats) = &tracer_packet_stats_option { + if let Some(aggregated_tracer_packet_stats) = + &mut aggregated_tracer_packet_stats_option + { + aggregated_tracer_packet_stats.aggregate(tracer_packet_stats); + } else { + aggregated_tracer_packet_stats_option = tracer_packet_stats_option; + } + } + // Spend any leftover receive time budget to greedily receive more packet batches, // until the upperbound of the packet count is reached. 
if start.elapsed() >= recv_timeout @@ -4264,6 +4266,7 @@ mod tests { } #[test] + #[ignore] fn test_forwarder_budget() { solana_logger::setup(); // Create `PacketBatch` with 1 unprocessed packet @@ -4351,6 +4354,7 @@ mod tests { } #[test] + #[ignore] fn test_handle_forwarding() { solana_logger::setup(); // packets are deserialized upon receiving, failed packets will not be diff --git a/core/src/broadcast_stage.rs b/core/src/broadcast_stage.rs index b70edf8ea4..4971132a5e 100644 --- a/core/src/broadcast_stage.rs +++ b/core/src/broadcast_stage.rs @@ -15,7 +15,10 @@ use { }, crossbeam_channel::{unbounded, Receiver, RecvError, RecvTimeoutError, Sender}, itertools::Itertools, - solana_gossip::cluster_info::{ClusterInfo, ClusterInfoError, DATA_PLANE_FANOUT}, + solana_gossip::{ + cluster_info::{ClusterInfo, ClusterInfoError}, + contact_info::ContactInfo, + }, solana_ledger::{blockstore::Blockstore, shred::Shred}, solana_measure::measure::Measure, solana_metrics::{inc_new_counter_error, inc_new_counter_info}, @@ -33,7 +36,6 @@ use { }, std::{ collections::{HashMap, HashSet}, - iter::repeat, net::{SocketAddr, UdpSocket}, sync::{ atomic::{AtomicBool, Ordering}, @@ -261,7 +263,7 @@ impl BroadcastStage { let blockstore = blockstore.clone(); let cluster_info = cluster_info.clone(); Builder::new() - .name("solana-broadcaster".to_string()) + .name("solBroadcast".to_string()) .spawn(move || { let _finalizer = Finalizer::new(exit); Self::run( @@ -283,7 +285,7 @@ impl BroadcastStage { let cluster_info = cluster_info.clone(); let bank_forks = bank_forks.clone(); let t = Builder::new() - .name("solana-broadcaster-transmit".to_string()) + .name("solBroadcastTx".to_string()) .spawn(move || loop { let res = bs_transmit.transmit( &socket_receiver, @@ -306,7 +308,7 @@ impl BroadcastStage { let mut bs_record = broadcast_stage_run.clone(); let btree = blockstore.clone(); let t = Builder::new() - .name("solana-broadcaster-record".to_string()) + .name("solBroadcastRec".to_string()) .spawn(move || loop { let res = bs_record.record(&blockstore_receiver, &btree); let res = Self::handle_error(res, "solana-broadcaster-record"); @@ -319,7 +321,7 @@ impl BroadcastStage { } let retransmit_thread = Builder::new() - .name("solana-broadcaster-retransmit".to_string()) + .name("solBroadcastRtx".to_string()) .spawn(move || loop { if let Some(res) = Self::handle_error( Self::check_retransmit_signals( @@ -403,8 +405,8 @@ fn update_peer_stats( } } -/// broadcast messages from the leader to layer 1 nodes -/// # Remarks +/// Broadcasts shreds from the leader (i.e. this node) to the root of the +/// turbine retransmit tree for each shred. 
pub fn broadcast_shreds( s: &UdpSocket, shreds: &[Shred], @@ -430,17 +432,18 @@ pub fn broadcast_shreds( let cluster_nodes = cluster_nodes_cache.get(slot, &root_bank, &working_bank, cluster_info); update_peer_stats(&cluster_nodes, last_datapoint_submit); - let root_bank = root_bank.clone(); shreds.flat_map(move |shred| { - repeat(shred.payload()).zip(cluster_nodes.maybe_extend_broadcast_addrs( - &shred.id(), - &root_bank, - DATA_PLANE_FANOUT, - socket_addr_space, - shred_receiver_addr, - )) + let node = cluster_nodes.get_broadcast_peer(&shred.id())?; + // TODO (LB): stick in shred_receiver_addr here + ContactInfo::is_valid_address(&node.tvu, socket_addr_space) + .then(|| (shred.payload(), node.tvu)) }) }) + .chain( + shreds + .iter() + .filter_map(|s| Some((s.payload(), shred_receiver_addr?))), + ) .collect(); shred_select.stop(); transmit_stats.shred_select += shred_select.as_us(); diff --git a/core/src/broadcast_stage/broadcast_duplicates_run.rs b/core/src/broadcast_stage/broadcast_duplicates_run.rs index bd784f29a1..62a649cb9b 100644 --- a/core/src/broadcast_stage/broadcast_duplicates_run.rs +++ b/core/src/broadcast_stage/broadcast_duplicates_run.rs @@ -3,14 +3,14 @@ use { crate::cluster_nodes::ClusterNodesCache, itertools::Itertools, solana_entry::entry::Entry, - solana_gossip::cluster_info::DATA_PLANE_FANOUT, + solana_gossip::contact_info::ContactInfo, solana_ledger::shred::{ProcessShredsStats, Shredder}, solana_sdk::{ hash::Hash, signature::{Keypair, Signature, Signer}, system_transaction, }, - std::collections::HashSet, + std::{collections::HashSet, net::SocketAddr}, }; pub const MINIMUM_DUPLICATE_SLOT: Slot = 20; @@ -271,12 +271,6 @@ impl BroadcastRun for BroadcastDuplicatesRun { (bank_forks.root_bank(), bank_forks.working_bank()) }; let self_pubkey = cluster_info.id(); - let nodes: Vec<_> = cluster_info - .all_peers() - .into_iter() - .map(|(node, _)| node) - .collect(); - // Create cluster partition. 
        let cluster_partition: HashSet<Pubkey> = {
            let mut cumilative_stake = 0;
@@ -303,17 +297,8 @@
        let packets: Vec<_> = shreds
            .iter()
            .filter_map(|shred| {
-                let addr = cluster_nodes
-                    .get_broadcast_addrs(
-                        &shred.id(),
-                        &root_bank,
-                        DATA_PLANE_FANOUT,
-                        socket_addr_space,
-                    )
-                    .first()
-                    .copied()?;
-                let node = nodes.iter().find(|node| node.tvu == addr)?;
-                if !socket_addr_space.check(&node.tvu) {
+                let node = cluster_nodes.get_broadcast_peer(&shred.id())?;
+                if !ContactInfo::is_valid_address(&node.tvu, socket_addr_space) {
                     return None;
                 }
                 if self
diff --git a/core/src/broadcast_stage/broadcast_fake_shreds_run.rs b/core/src/broadcast_stage/broadcast_fake_shreds_run.rs
index 852299cc1e..196f7307e2 100644
--- a/core/src/broadcast_stage/broadcast_fake_shreds_run.rs
+++ b/core/src/broadcast_stage/broadcast_fake_shreds_run.rs
@@ -3,6 +3,7 @@ use {
     solana_entry::entry::Entry,
     solana_ledger::shred::{ProcessShredsStats, Shredder},
     solana_sdk::{hash::Hash, signature::Keypair},
+    std::net::SocketAddr,
 };

 #[derive(Clone)]
diff --git a/core/src/broadcast_stage/broadcast_utils.rs b/core/src/broadcast_stage/broadcast_utils.rs
index 2a913f0115..391979db03 100644
--- a/core/src/broadcast_stage/broadcast_utils.rs
+++ b/core/src/broadcast_stage/broadcast_utils.rs
@@ -2,7 +2,6 @@ use {
     crate::result::Result,
     crossbeam_channel::Receiver,
     solana_entry::entry::Entry,
-    solana_ledger::shred::Shred,
     solana_poh::poh_recorder::WorkingBankEntry,
     solana_runtime::bank::Bank,
     solana_sdk::clock::Slot,
@@ -25,9 +24,6 @@ pub struct UnfinishedSlotInfo {
     pub(crate) next_code_index: u32,
     pub slot: Slot,
     pub parent: Slot,
-    // Data shreds buffered to make a batch of size
-    // MAX_DATA_SHREDS_PER_FEC_BLOCK.
-    pub(crate) data_shreds_buffer: Vec<Shred>,
 }

 /// This parameter tunes how many entries are received in one iteration of recv loop
diff --git a/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs b/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs
index cdc86918e3..2997c62982 100644
--- a/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs
+++ b/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs
@@ -3,7 +3,7 @@ use {
     crate::cluster_nodes::ClusterNodesCache,
     solana_ledger::shred::{ProcessShredsStats, Shredder},
     solana_sdk::{hash::Hash, signature::Keypair},
-    std::{thread::sleep, time::Duration},
+    std::{net::SocketAddr, thread::sleep, time::Duration},
 };

 pub const NUM_BAD_SLOTS: u64 = 10;
diff --git a/core/src/broadcast_stage/standard_broadcast_run.rs b/core/src/broadcast_stage/standard_broadcast_run.rs
index 73f4102944..680cf7dd79 100644
--- a/core/src/broadcast_stage/standard_broadcast_run.rs
+++ b/core/src/broadcast_stage/standard_broadcast_run.rs
@@ -9,14 +9,12 @@ use {
         broadcast_stage::broadcast_utils::UnfinishedSlotInfo, cluster_nodes::ClusterNodesCache,
     },
     solana_entry::entry::Entry,
-    solana_ledger::shred::{
-        ProcessShredsStats, Shred, ShredFlags, Shredder, MAX_DATA_SHREDS_PER_FEC_BLOCK,
-    },
+    solana_ledger::shred::{ProcessShredsStats, Shred, ShredFlags, Shredder},
     solana_sdk::{
         signature::Keypair,
         timing::{duration_as_us, AtomicInterval},
     },
-    std::{sync::RwLock, time::Duration},
+    std::{net::SocketAddr, sync::RwLock, time::Duration},
 };

 #[derive(Clone)]
@@ -68,41 +66,27 @@ impl StandardBroadcastRun {
             None => Vec::default(),
             Some(ref state) if state.slot == current_slot => Vec::default(),
             Some(ref mut state) => {
-                let parent_offset = state.slot - state.parent;
                 let reference_tick = max_ticks_in_slot &
SHRED_TICK_REFERENCE_MASK;
-                let fec_set_offset = state
-                    .data_shreds_buffer
-                    .first()
-                    .map(Shred::index)
-                    .unwrap_or(state.next_shred_index);
-                let fec_set_index = Shredder::fec_set_index(state.next_shred_index, fec_set_offset);
-                let mut shred = Shred::new_from_data(
-                    state.slot,
-                    state.next_shred_index,
-                    parent_offset as u16,
-                    &[],  // data
-                    ShredFlags::LAST_SHRED_IN_SLOT,
-                    reference_tick,
-                    self.shred_version,
-                    fec_set_index.unwrap(),
-                );
-                shred.sign(keypair);
-                state.data_shreds_buffer.push(shred.clone());
-                let mut shreds = make_coding_shreds(
+                let shredder =
+                    Shredder::new(state.slot, state.parent, reference_tick, self.shred_version)
+                        .unwrap();
+                let (mut shreds, coding_shreds) = shredder.entries_to_shreds(
                     keypair,
-                    &mut self.unfinished_slot,
-                    true, // is_last_in_slot
+                    &[],  // entries
+                    true, // is_last_in_slot,
+                    state.next_shred_index,
+                    state.next_code_index,
                     stats,
                 );
-                shreds.insert(0, shred);
                 self.report_and_reset_stats(true);
                 self.unfinished_slot = None;
+                shreds.extend(coding_shreds);
                 shreds
             }
         }
     }

-    fn entries_to_data_shreds(
+    fn entries_to_shreds(
         &mut self,
         keypair: &Keypair,
         entries: &[Entry],
@@ -110,10 +94,13 @@
         reference_tick: u8,
         is_slot_end: bool,
         process_stats: &mut ProcessShredsStats,
-    ) -> Vec<Shred> {
+    ) -> (
+        Vec<Shred>, // data shreds
+        Vec<Shred>, // coding shreds
+    ) {
         let (slot, parent_slot) = self.current_slot_and_parent.unwrap();
-        let next_shred_index = match &self.unfinished_slot {
-            Some(state) => state.next_shred_index,
+        let (next_shred_index, next_code_index) = match &self.unfinished_slot {
+            Some(state) => (state.next_shred_index, state.next_code_index),
             None => {
                 // If the blockstore has shreds for the slot, it should not
                 // recreate the slot:
@@ -123,46 +110,37 @@
                     process_stats.num_extant_slots += 1;
                     // This is a faulty situation that should not happen.
                     // Refrain from generating shreds for the slot.
- return Vec::default(); + return (Vec::default(), Vec::default()); } } - 0u32 - } - }; - let data_shreds = Shredder::new(slot, parent_slot, reference_tick, self.shred_version) - .unwrap() - .entries_to_data_shreds( - keypair, - entries, - is_slot_end, - next_shred_index, - 0, // fec_set_offset - process_stats, - ); - let mut data_shreds_buffer = match &mut self.unfinished_slot { - Some(state) => { - assert_eq!(state.slot, slot); - std::mem::take(&mut state.data_shreds_buffer) + (0u32, 0u32) } - None => Vec::default(), }; - data_shreds_buffer.extend(data_shreds.clone()); + let shredder = + Shredder::new(slot, parent_slot, reference_tick, self.shred_version).unwrap(); + let (data_shreds, coding_shreds) = shredder.entries_to_shreds( + keypair, + entries, + is_slot_end, + next_shred_index, + next_code_index, + process_stats, + ); let next_shred_index = match data_shreds.iter().map(Shred::index).max() { Some(index) => index + 1, None => next_shred_index, }; - let next_code_index = match &self.unfinished_slot { - Some(state) => state.next_code_index, - None => 0, + let next_code_index = match coding_shreds.iter().map(Shred::index).max() { + Some(index) => index + 1, + None => next_code_index, }; self.unfinished_slot = Some(UnfinishedSlotInfo { next_shred_index, next_code_index, slot, parent: parent_slot, - data_shreds_buffer, }); - data_shreds + (data_shreds, coding_shreds) } #[cfg(test)] @@ -228,7 +206,7 @@ impl StandardBroadcastRun { // 2) Convert entries to shreds and coding shreds let is_last_in_slot = last_tick_height == bank.max_tick_height(); let reference_tick = bank.tick_height() % bank.ticks_per_slot(); - let data_shreds = self.entries_to_data_shreds( + let (data_shreds, coding_shreds) = self.entries_to_shreds( keypair, &receive_results.entries, blockstore, @@ -300,13 +278,7 @@ impl StandardBroadcastRun { socket_sender.send((data_shreds.clone(), batch_info.clone()))?; blockstore_sender.send((data_shreds, batch_info.clone()))?; - // Create and send coding shreds - let coding_shreds = make_coding_shreds( - keypair, - &mut self.unfinished_slot, - is_last_in_slot, - &mut process_stats, - ); + // Send coding shreds let coding_shreds = Arc::new(coding_shreds); debug_assert!(coding_shreds .iter() @@ -437,49 +409,6 @@ impl StandardBroadcastRun { } } -// Consumes data_shreds_buffer returning corresponding coding shreds. -fn make_coding_shreds( - keypair: &Keypair, - unfinished_slot: &mut Option, - is_slot_end: bool, - stats: &mut ProcessShredsStats, -) -> Vec { - let unfinished_slot = match unfinished_slot { - None => return Vec::default(), - Some(state) => state, - }; - let data_shreds: Vec<_> = { - let size = unfinished_slot.data_shreds_buffer.len(); - // Consume a multiple of 32, unless this is the slot end. 
- let offset = if is_slot_end { - 0 - } else { - size % MAX_DATA_SHREDS_PER_FEC_BLOCK as usize - }; - unfinished_slot - .data_shreds_buffer - .drain(0..size - offset) - .collect() - }; - let shreds = Shredder::data_shreds_to_coding_shreds( - keypair, - &data_shreds, - is_slot_end, - unfinished_slot.next_code_index, - stats, - ) - .unwrap(); - if let Some(index) = shreds - .iter() - .filter(|shred| shred.is_code()) - .map(Shred::index) - .max() - { - unfinished_slot.next_code_index = unfinished_slot.next_code_index.max(index + 1); - } - shreds -} - impl BroadcastRun for StandardBroadcastRun { fn run( &mut self, @@ -601,7 +530,6 @@ mod test { next_code_index: 17, slot, parent, - data_shreds_buffer: Vec::default(), }); run.slot_broadcast_start = Some(Instant::now()); @@ -786,19 +714,15 @@ mod test { while let Ok((recv_shreds, _)) = brecv.recv_timeout(Duration::from_secs(1)) { shreds.extend(recv_shreds.deref().clone()); } - assert!(shreds.len() < 32, "shreds.len(): {}", shreds.len()); - assert!(shreds.iter().all(|shred| shred.is_data())); + // At least as many coding shreds as data shreds. + assert!(shreds.len() >= 29 * 2); + assert_eq!(shreds.iter().filter(|shred| shred.is_data()).count(), 29); process_ticks(75); while let Ok((recv_shreds, _)) = brecv.recv_timeout(Duration::from_secs(1)) { shreds.extend(recv_shreds.deref().clone()); } - assert!(shreds.len() > 64, "shreds.len(): {}", shreds.len()); - let num_coding_shreds = shreds.iter().filter(|shred| shred.is_code()).count(); - assert_eq!( - num_coding_shreds, 32, - "num coding shreds: {}", - num_coding_shreds - ); + assert!(shreds.len() >= 33 * 2); + assert_eq!(shreds.iter().filter(|shred| shred.is_data()).count(), 33); } #[test] diff --git a/core/src/bundle_account_locker.rs b/core/src/bundle_account_locker.rs index abaa7dad11..f5ecdec47b 100644 --- a/core/src/bundle_account_locker.rs +++ b/core/src/bundle_account_locker.rs @@ -162,7 +162,7 @@ impl BundleAccountLocker { let transaction_locks: Vec = bundle .transactions .iter() - .filter_map(|tx| tx.get_account_locks().ok()) + .filter_map(|tx| tx.get_account_locks(64).ok()) // TODO (LB) .collect(); if transaction_locks.len() != bundle.transactions.len() { diff --git a/core/src/bundle_sanitizer.rs b/core/src/bundle_sanitizer.rs index 7bc6747c8e..ef5c11b226 100644 --- a/core/src/bundle_sanitizer.rs +++ b/core/src/bundle_sanitizer.rs @@ -1,8 +1,8 @@ ///! 
Turns packets into SanitizedTransactions and ensure they pass sanity checks use { crate::{ - packet_bundle::PacketBundle, - unprocessed_packet_batches::{deserialize_packets, ImmutableDeserializedPacket}, + immutable_deserialized_packet::ImmutableDeserializedPacket, packet_bundle::PacketBundle, + unprocessed_packet_batches::deserialize_packets, }, solana_perf::sigverify::verify_packet, solana_runtime::{bank::Bank, transaction_error_metrics::TransactionErrorMetrics}, diff --git a/core/src/bundle_stage.rs b/core/src/bundle_stage.rs index 9404b822a0..49f85d4e6e 100644 --- a/core/src/bundle_stage.rs +++ b/core/src/bundle_stage.rs @@ -15,8 +15,7 @@ use { solana_entry::entry::hash_transactions, solana_gossip::cluster_info::ClusterInfo, solana_ledger::{ - blockstore_processor::TransactionStatusSender, - token_balances::{collect_balances_with_cache, collect_token_balances}, + blockstore_processor::TransactionStatusSender, token_balances::collect_token_balances, }, solana_measure::measure, solana_poh::poh_recorder::{ @@ -633,7 +632,7 @@ impl BundleStage { mint_decimals: &mut HashMap, ) -> (TransactionBalances, TransactionTokenBalances) { if transaction_status_sender.is_some() { - let balances = collect_balances_with_cache(batch, bank, Some(cached_accounts)); + let balances = bank.collect_balances_with_cache(batch, Some(cached_accounts)); let token_balances = collect_token_balances(bank, batch, mint_decimals, Some(cached_accounts)); (balances, token_balances) @@ -762,7 +761,7 @@ impl BundleStage { let _lock = tip_manager.lock(); let tip_pdas = tip_manager.get_tip_accounts(); if Self::bundle_touches_tip_pdas(&sanitized_bundle.transactions, &tip_pdas) { - let _ = Self::maybe_initialize_and_change_tip_receiver( + Self::maybe_initialize_and_change_tip_receiver( bank_start, tip_manager, qos_service, @@ -983,7 +982,7 @@ impl BundleStage { } Err(RecvError) => { error!("shutting down bundle_stage"); - break; + return; } } } @@ -1050,11 +1049,11 @@ impl BundleStage { bank_slot: Slot, mixins_txs: Vec<(Hash, Vec)>, ) -> BundleExecutionResult> { - return match recorder.record(bank_slot, mixins_txs) { + match recorder.record(bank_slot, mixins_txs) { Ok(maybe_tx_index) => Ok(maybe_tx_index), Err(PohRecorderError::MaxHeightReached) => Err(BundleExecutionError::PohMaxHeightError), Err(e) => panic!("Poh recorder returned unexpected error: {:?}", e), - }; + } } } diff --git a/core/src/cache_block_meta_service.rs b/core/src/cache_block_meta_service.rs index 98069f253a..a8da9ac096 100644 --- a/core/src/cache_block_meta_service.rs +++ b/core/src/cache_block_meta_service.rs @@ -31,7 +31,7 @@ impl CacheBlockMetaService { ) -> Self { let exit = exit.clone(); let thread_hdl = Builder::new() - .name("solana-cache-block-time".to_string()) + .name("solCacheBlkTime".to_string()) .spawn(move || loop { if exit.load(Ordering::Relaxed) { break; diff --git a/core/src/cluster_info_vote_listener.rs b/core/src/cluster_info_vote_listener.rs index ed94724824..bbe7c2b4a6 100644 --- a/core/src/cluster_info_vote_listener.rs +++ b/core/src/cluster_info_vote_listener.rs @@ -252,7 +252,7 @@ impl ClusterInfoVoteListener { let exit = exit.clone(); let bank_forks = bank_forks.clone(); Builder::new() - .name("solana-cluster_info_vote_listener".to_string()) + .name("solCiVoteLstnr".to_string()) .spawn(move || { let _ = Self::recv_loop( exit, @@ -266,7 +266,7 @@ impl ClusterInfoVoteListener { }; let exit_ = exit.clone(); let bank_send_thread = Builder::new() - .name("solana-cluster_info_bank_send".to_string()) + .name("solCiBankSend".to_string()) 
.spawn(move || { let _ = Self::bank_send_loop( exit_, @@ -278,7 +278,7 @@ impl ClusterInfoVoteListener { .unwrap(); let send_thread = Builder::new() - .name("solana-cluster_info_process_votes".to_string()) + .name("solCiProcVotes".to_string()) .spawn(move || { let _ = Self::process_votes_loop( exit, diff --git a/core/src/cluster_nodes.rs b/core/src/cluster_nodes.rs index ee02fe3762..22fcc882c0 100644 --- a/core/src/cluster_nodes.rs +++ b/core/src/cluster_nodes.rs @@ -26,7 +26,7 @@ use { any::TypeId, cmp::Reverse, collections::HashMap, - iter::{once, repeat_with}, + iter::repeat_with, marker::PhantomData, net::SocketAddr, ops::Deref, @@ -114,98 +114,15 @@ impl ClusterNodes { new_cluster_nodes(cluster_info, stakes) } - pub fn maybe_extend_broadcast_addrs( - &self, - shred: &ShredId, - root_bank: &Bank, - fanout: usize, - socket_addr_space: &SocketAddrSpace, - shred_receiver_addr: Option, - ) -> Vec { - let mut broadcast_addrs = - self.get_broadcast_addrs(shred, root_bank, fanout, socket_addr_space); - if let Some(extended_addr) = shred_receiver_addr { - broadcast_addrs.extend(vec![extended_addr]); - } - broadcast_addrs - } - - pub(crate) fn get_broadcast_addrs( - &self, - shred: &ShredId, - root_bank: &Bank, - fanout: usize, - socket_addr_space: &SocketAddrSpace, - ) -> Vec { - const MAX_CONTACT_INFO_AGE: Duration = Duration::from_secs(2 * 60); + pub(crate) fn get_broadcast_peer(&self, shred: &ShredId) -> Option<&ContactInfo> { let shred_seed = shred.seed(&self.pubkey); let mut rng = ChaChaRng::from_seed(shred_seed); - let index = match self.weighted_shuffle.first(&mut rng) { - None => return Vec::default(), - Some(index) => index, - }; - if let Some(node) = self.nodes[index].contact_info() { - let now = timestamp(); - let age = Duration::from_millis(now.saturating_sub(node.wallclock)); - if age < MAX_CONTACT_INFO_AGE - && ContactInfo::is_valid_address(&node.tvu, socket_addr_space) - { - return vec![node.tvu]; - } - } - let mut rng = ChaChaRng::from_seed(shred_seed); - let nodes: Vec<&Node> = self - .weighted_shuffle - .clone() - .shuffle(&mut rng) - .map(|index| &self.nodes[index]) - .collect(); - if nodes.is_empty() { - return Vec::default(); - } - if drop_redundant_turbine_path(shred.slot(), root_bank) { - let peers = once(nodes[0]).chain(get_retransmit_peers(fanout, 0, &nodes)); - let addrs = peers.filter_map(Node::contact_info).map(|peer| peer.tvu); - return addrs - .filter(|addr| ContactInfo::is_valid_address(addr, socket_addr_space)) - .collect(); - } - let (neighbors, children) = compute_retransmit_peers(fanout, 0, &nodes); - neighbors[..1] - .iter() - .filter_map(|node| Some(node.contact_info()?.tvu)) - .chain( - neighbors[1..] 
- .iter() - .filter_map(|node| Some(node.contact_info()?.tvu_forwards)), - ) - .chain( - children - .iter() - .filter_map(|node| Some(node.contact_info()?.tvu)), - ) - .filter(|addr| ContactInfo::is_valid_address(addr, socket_addr_space)) - .collect() + let index = self.weighted_shuffle.first(&mut rng)?; + self.nodes[index].contact_info() } } impl ClusterNodes { - pub fn maybe_extend_retransmit_addrs( - &self, - slot_leader: &Pubkey, - shred: &ShredId, - root_bank: &Bank, - fanout: usize, - shred_receiver_addr: Option, - ) -> (/*root_distance:*/ usize, Vec) { - let (root_distance, mut existing_addrs) = - self.get_retransmit_addrs(slot_leader, shred, root_bank, fanout); - if let Some(address) = shred_receiver_addr { - existing_addrs.extend(vec![address]); - } - (root_distance, existing_addrs) - } - pub(crate) fn get_retransmit_addrs( &self, slot_leader: &Pubkey, diff --git a/core/src/cluster_slots_service.rs b/core/src/cluster_slots_service.rs index 119f6081cf..f867981f6a 100644 --- a/core/src/cluster_slots_service.rs +++ b/core/src/cluster_slots_service.rs @@ -48,7 +48,7 @@ impl ClusterSlotsService { Self::initialize_lowest_slot(&blockstore, &cluster_info); Self::initialize_epoch_slots(&bank_forks, &cluster_info); let t_cluster_slots_service = Builder::new() - .name("solana-cluster-slots-service".to_string()) + .name("solClusterSlots".to_string()) .spawn(move || { Self::run( blockstore, diff --git a/core/src/commitment_service.rs b/core/src/commitment_service.rs index 8a882e5056..92bab89107 100644 --- a/core/src/commitment_service.rs +++ b/core/src/commitment_service.rs @@ -69,7 +69,7 @@ impl AggregateCommitmentService { sender, Self { t_commitment: Builder::new() - .name("solana-aggregate-stake-lockouts".to_string()) + .name("solAggCommitSvc".to_string()) .spawn(move || loop { if exit_.load(Ordering::Relaxed) { break; @@ -259,7 +259,7 @@ mod tests { solana_sdk::{account::Account, pubkey::Pubkey, signature::Signer}, solana_stake_program::stake_state, solana_vote_program::{ - vote_state::{self, VoteStateVersions}, + vote_state::{self, process_slot_vote_unchecked, VoteStateVersions}, vote_transaction, }, }; @@ -309,7 +309,7 @@ mod tests { let root = ancestors[2]; vote_state.root_slot = Some(root); - vote_state.process_slot_vote_unchecked(*ancestors.last().unwrap()); + process_slot_vote_unchecked(&mut vote_state, *ancestors.last().unwrap()); AggregateCommitmentService::aggregate_commitment_for_vote_account( &mut commitment, &mut rooted_stake, @@ -341,8 +341,8 @@ mod tests { let root = ancestors[2]; vote_state.root_slot = Some(root); assert!(ancestors[4] + 2 >= ancestors[6]); - vote_state.process_slot_vote_unchecked(ancestors[4]); - vote_state.process_slot_vote_unchecked(ancestors[6]); + process_slot_vote_unchecked(&mut vote_state, ancestors[4]); + process_slot_vote_unchecked(&mut vote_state, ancestors[6]); AggregateCommitmentService::aggregate_commitment_for_vote_account( &mut commitment, &mut rooted_stake, @@ -431,30 +431,30 @@ mod tests { // Create bank let bank = Arc::new(Bank::new_for_tests(&genesis_config)); - let mut vote_state1 = VoteState::from(&vote_account1).unwrap(); - vote_state1.process_slot_vote_unchecked(3); - vote_state1.process_slot_vote_unchecked(5); + let mut vote_state1 = vote_state::from(&vote_account1).unwrap(); + process_slot_vote_unchecked(&mut vote_state1, 3); + process_slot_vote_unchecked(&mut vote_state1, 5); let versioned = VoteStateVersions::new_current(vote_state1); - VoteState::to(&versioned, &mut vote_account1).unwrap(); + vote_state::to(&versioned, &mut 
vote_account1).unwrap(); bank.store_account(&pk1, &vote_account1); - let mut vote_state2 = VoteState::from(&vote_account2).unwrap(); - vote_state2.process_slot_vote_unchecked(9); - vote_state2.process_slot_vote_unchecked(10); + let mut vote_state2 = vote_state::from(&vote_account2).unwrap(); + process_slot_vote_unchecked(&mut vote_state2, 9); + process_slot_vote_unchecked(&mut vote_state2, 10); let versioned = VoteStateVersions::new_current(vote_state2); - VoteState::to(&versioned, &mut vote_account2).unwrap(); + vote_state::to(&versioned, &mut vote_account2).unwrap(); bank.store_account(&pk2, &vote_account2); - let mut vote_state3 = VoteState::from(&vote_account3).unwrap(); + let mut vote_state3 = vote_state::from(&vote_account3).unwrap(); vote_state3.root_slot = Some(1); let versioned = VoteStateVersions::new_current(vote_state3); - VoteState::to(&versioned, &mut vote_account3).unwrap(); + vote_state::to(&versioned, &mut vote_account3).unwrap(); bank.store_account(&pk3, &vote_account3); - let mut vote_state4 = VoteState::from(&vote_account4).unwrap(); + let mut vote_state4 = vote_state::from(&vote_account4).unwrap(); vote_state4.root_slot = Some(2); let versioned = VoteStateVersions::new_current(vote_state4); - VoteState::to(&versioned, &mut vote_account4).unwrap(); + vote_state::to(&versioned, &mut vote_account4).unwrap(); bank.store_account(&pk4, &vote_account4); let (commitment, rooted_stake) = diff --git a/core/src/completed_data_sets_service.rs b/core/src/completed_data_sets_service.rs index 08b561b8ac..ff11dfa1fb 100644 --- a/core/src/completed_data_sets_service.rs +++ b/core/src/completed_data_sets_service.rs @@ -31,7 +31,7 @@ impl CompletedDataSetsService { ) -> Self { let exit = exit.clone(); let thread_hdl = Builder::new() - .name("completed-data-set-service".to_string()) + .name("solComplDataSet".to_string()) .spawn(move || loop { if exit.load(Ordering::Relaxed) { break; diff --git a/core/src/consensus.rs b/core/src/consensus.rs index 37c80014ae..842e989b5b 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -24,8 +24,8 @@ use { solana_vote_program::{ vote_instruction, vote_state::{ - BlockTimestamp, Lockout, Vote, VoteState, VoteStateUpdate, VoteTransaction, - MAX_LOCKOUT_HISTORY, + process_slot_vote_unchecked, process_vote_unchecked, BlockTimestamp, Lockout, Vote, + VoteState, VoteStateUpdate, VoteTransaction, MAX_LOCKOUT_HISTORY, }, }, std::{ @@ -169,7 +169,7 @@ impl TowerVersions { } } -#[frozen_abi(digest = "8Y9r3XAwXwmrVGMCyTuy4Kbdotnt1V6N8J6NEniBFD9x")] +#[frozen_abi(digest = "GrkFcKqGEkJNUYoK1M8rorehi2yyLF4N3Gsj6j8f47Jn")] #[derive(Clone, Serialize, Deserialize, Debug, PartialEq, AbiExample)] pub struct Tower { pub node_pubkey: Pubkey, @@ -337,7 +337,7 @@ impl Tower { ); } - vote_state.process_slot_vote_unchecked(bank_slot); + process_slot_vote_unchecked(&mut vote_state, bank_slot); for vote in &vote_state.votes { bank_weight += vote.lockout() as u128 * voted_stake as u128; @@ -438,7 +438,7 @@ impl Tower { last_voted_slot_in_bank: Option, ) -> VoteTransaction { let vote = Vote::new(vec![slot], hash); - local_vote_state.process_vote_unchecked(vote); + process_vote_unchecked(local_vote_state, vote); let slots = if let Some(last_voted_slot) = last_voted_slot_in_bank { local_vote_state .votes @@ -483,7 +483,7 @@ impl Tower { let mut new_vote = if is_direct_vote_state_update_enabled { let vote = Vote::new(vec![vote_slot], vote_hash); - self.vote_state.process_vote_unchecked(vote); + process_vote_unchecked(&mut self.vote_state, vote); 
VoteTransaction::from(VoteStateUpdate::new( self.vote_state.votes.clone(), self.vote_state.root_slot, @@ -608,7 +608,7 @@ impl Tower { // remaining voted slots are on a different fork from the checked slot, // it's still locked out. let mut vote_state = self.vote_state.clone(); - vote_state.process_slot_vote_unchecked(slot); + process_slot_vote_unchecked(&mut vote_state, slot); for vote in &vote_state.votes { if slot != vote.slot && !ancestors.contains(&vote.slot) { return true; @@ -980,7 +980,7 @@ impl Tower { total_stake: Stake, ) -> bool { let mut vote_state = self.vote_state.clone(); - vote_state.process_slot_vote_unchecked(slot); + process_slot_vote_unchecked(&mut vote_state, slot); let vote = vote_state.nth_recent_vote(self.threshold_depth); if let Some(vote) = vote { if let Some(fork_stake) = voted_stakes.get(&vote.slot) { @@ -1432,7 +1432,7 @@ pub mod test { signature::Signer, slot_history::SlotHistory, }, - solana_vote_program::vote_state::{Vote, VoteStateVersions, MAX_LOCKOUT_HISTORY}, + solana_vote_program::vote_state::{self, Vote, VoteStateVersions, MAX_LOCKOUT_HISTORY}, std::{ collections::{HashMap, VecDeque}, fs::{remove_file, OpenOptions}, @@ -1456,7 +1456,7 @@ pub mod test { }); let mut vote_state = VoteState::default(); for slot in *votes { - vote_state.process_slot_vote_unchecked(*slot); + process_slot_vote_unchecked(&mut vote_state, *slot); } VoteState::serialize( &VoteStateVersions::new_current(vote_state), @@ -2409,7 +2409,7 @@ pub mod test { hash: Hash::default(), timestamp: None, }; - local.process_vote_unchecked(vote); + vote_state::process_vote_unchecked(&mut local, vote); assert_eq!(local.votes.len(), 1); let vote = Tower::apply_vote_and_generate_vote_diff(&mut local, 1, Hash::default(), Some(0)); @@ -2425,7 +2425,7 @@ pub mod test { hash: Hash::default(), timestamp: None, }; - local.process_vote_unchecked(vote); + vote_state::process_vote_unchecked(&mut local, vote); assert_eq!(local.votes.len(), 1); // First vote expired, so should be evicted from tower. 
Thus even with
diff --git a/core/src/cost_update_service.rs b/core/src/cost_update_service.rs
index cf1a55365f..8f5038c0c9 100644
--- a/core/src/cost_update_service.rs
+++ b/core/src/cost_update_service.rs
@@ -75,7 +75,7 @@ impl CostUpdateService {
         cost_update_receiver: CostUpdateReceiver,
     ) -> Self {
         let thread_hdl = Builder::new()
-            .name("solana-cost-update-service".to_string())
+            .name("solCostUpdtSvc".to_string())
             .spawn(move || {
                 Self::service_loop(blockstore, cost_model, cost_update_receiver);
             })
diff --git a/core/src/drop_bank_service.rs b/core/src/drop_bank_service.rs
index aac1a02ee0..0321643d6a 100644
--- a/core/src/drop_bank_service.rs
+++ b/core/src/drop_bank_service.rs
@@ -15,7 +15,7 @@ pub struct DropBankService {
 impl DropBankService {
     pub fn new(bank_receiver: Receiver<Vec<Arc<Bank>>>) -> Self {
         let thread_hdl = Builder::new()
-            .name("sol-drop-b-service".to_string())
+            .name("solDropBankSrvc".to_string())
             .spawn(move || {
                 for banks in bank_receiver.iter() {
                     let len = banks.len();
diff --git a/core/src/fetch_stage.rs b/core/src/fetch_stage.rs
index c041739d7c..93d8ed28ac 100644
--- a/core/src/fetch_stage.rs
+++ b/core/src/fetch_stage.rs
@@ -208,7 +208,7 @@ impl FetchStage {
         let poh_recorder = poh_recorder.clone();
         let fwd_thread_hdl = Builder::new()
-            .name("solana-fetch-stage-fwd-rcvr".to_string())
+            .name("solFetchStgFwRx".to_string())
             .spawn(move || loop {
                 if let Err(e) =
                     Self::handle_forwarded_packets(&forward_receiver, &sender, &poh_recorder)
@@ -226,7 +226,7 @@ impl FetchStage {
         let exit = exit.clone();
         let metrics_thread_hdl = Builder::new()
-            .name("solana-fetch-stage-metrics".to_string())
+            .name("solFetchStgMetr".to_string())
             .spawn(move || loop {
                 sleep(Duration::from_secs(1));
diff --git a/core/src/find_packet_sender_stake_stage.rs b/core/src/find_packet_sender_stake_stage.rs
index d62d6afe7c..53f1d03366 100644
--- a/core/src/find_packet_sender_stake_stage.rs
+++ b/core/src/find_packet_sender_stake_stage.rs
@@ -84,7 +84,7 @@ impl FindPacketSenderStakeStage {
     ) -> Self {
         let mut stats = FindPacketSenderStakeStats::default();
         let thread_hdl = Builder::new()
-            .name("find-packet-sender-stake".to_string())
+            .name("solPktStake".to_string())
             .spawn(move || loop {
                 match streamer::recv_packet_batches(&packet_receiver) {
                     Ok((mut batches, num_packets, recv_duration)) => {
diff --git a/core/src/forward_packet_batches_by_accounts.rs b/core/src/forward_packet_batches_by_accounts.rs
index 14fcfe486f..ccd367349a 100644
--- a/core/src/forward_packet_batches_by_accounts.rs
+++ b/core/src/forward_packet_batches_by_accounts.rs
@@ -1,5 +1,7 @@
 use {
-    crate::unprocessed_packet_batches::{self, ImmutableDeserializedPacket},
+    crate::{
+        immutable_deserialized_packet::ImmutableDeserializedPacket, unprocessed_packet_batches,
+    },
     solana_perf::packet::Packet,
     solana_runtime::{
         bank::Bank,
diff --git a/core/src/immutable_deserialized_packet.rs b/core/src/immutable_deserialized_packet.rs
new file mode 100644
index 0000000000..a54f64916c
--- /dev/null
+++ b/core/src/immutable_deserialized_packet.rs
@@ -0,0 +1,137 @@
+use {
+    crate::transaction_priority_details::{
+        GetTransactionPriorityDetails, TransactionPriorityDetails,
+    },
+    solana_perf::packet::Packet,
+    solana_sdk::{
+        hash::Hash,
+        message::Message,
+        sanitize::SanitizeError,
+        short_vec::decode_shortu16_len,
+        signature::Signature,
+        transaction::{SanitizedVersionedTransaction, VersionedTransaction},
+    },
+    std::{cmp::Ordering, mem::size_of},
+    thiserror::Error,
+};
+
+#[derive(Debug, Error)]
+pub enum DeserializedPacketError {
+    #[error("ShortVec Failed to Deserialize")]
+    // short_vec::decode_shortu16_len() currently returns () on error
+    ShortVecError(()),
+    #[error("Deserialization Error: {0}")]
+    DeserializationError(#[from] bincode::Error),
+    #[error("overflowed on signature size {0}")]
+    SignatureOverflowed(usize),
+    #[error("packet failed sanitization {0}")]
+    SanitizeError(#[from] SanitizeError),
+    #[error("transaction failed prioritization")]
+    PrioritizationFailure,
+}
+
+#[derive(Debug, PartialEq, Eq)]
+pub struct ImmutableDeserializedPacket {
+    original_packet: Packet,
+    transaction: SanitizedVersionedTransaction,
+    message_hash: Hash,
+    is_simple_vote: bool,
+    priority_details: TransactionPriorityDetails,
+}
+
+impl ImmutableDeserializedPacket {
+    pub fn new(
+        packet: Packet,
+        priority_details: Option<TransactionPriorityDetails>,
+    ) -> Result<Self, DeserializedPacketError> {
+        let versioned_transaction: VersionedTransaction = packet.deserialize_slice(..)?;
+        let sanitized_transaction = SanitizedVersionedTransaction::try_from(versioned_transaction)?;
+        let message_bytes = packet_message(&packet)?;
+        let message_hash = Message::hash_raw_message(message_bytes);
+        let is_simple_vote = packet.meta.is_simple_vote_tx();
+
+        // drop transaction if prioritization fails.
+        let priority_details = priority_details
+            .or_else(|| sanitized_transaction.get_transaction_priority_details())
+            .ok_or(DeserializedPacketError::PrioritizationFailure)?;
+
+        Ok(Self {
+            original_packet: packet,
+            transaction: sanitized_transaction,
+            message_hash,
+            is_simple_vote,
+            priority_details,
+        })
+    }
+
+    pub fn original_packet(&self) -> &Packet {
+        &self.original_packet
+    }
+
+    pub fn transaction(&self) -> &SanitizedVersionedTransaction {
+        &self.transaction
+    }
+
+    pub fn message_hash(&self) -> &Hash {
+        &self.message_hash
+    }
+
+    pub fn is_simple_vote(&self) -> bool {
+        self.is_simple_vote
+    }
+
+    pub fn priority(&self) -> u64 {
+        self.priority_details.priority
+    }
+
+    pub fn compute_unit_limit(&self) -> u64 {
+        self.priority_details.compute_unit_limit
+    }
+}
+
+impl PartialOrd for ImmutableDeserializedPacket {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for ImmutableDeserializedPacket {
+    fn cmp(&self, other: &Self) -> Ordering {
+        self.priority().cmp(&other.priority())
+    }
+}
+
+/// Read the transaction message from packet data
+fn packet_message(packet: &Packet) -> Result<&[u8], DeserializedPacketError> {
+    let (sig_len, sig_size) = packet
+        .data(..)
+        .and_then(|bytes| decode_shortu16_len(bytes).ok())
+        .ok_or(DeserializedPacketError::ShortVecError(()))?;
+    sig_len
+        .checked_mul(size_of::<Signature>())
+        .and_then(|v| v.checked_add(sig_size))
+        .and_then(|msg_start| packet.data(msg_start..))
+        .ok_or(DeserializedPacketError::SignatureOverflowed(sig_size))
+}
+
+#[cfg(test)]
+mod tests {
+    use {
+        super::*,
+        solana_sdk::{signature::Keypair, system_transaction},
+    };
+
+    #[test]
+    fn simple_deserialized_packet() {
+        let tx = system_transaction::transfer(
+            &Keypair::new(),
+            &solana_sdk::pubkey::new_rand(),
+            1,
+            Hash::new_unique(),
+        );
+        let packet = Packet::from_data(None, &tx).unwrap();
+        let deserialized_packet = ImmutableDeserializedPacket::new(packet, None);
+
+        assert!(matches!(deserialized_packet, Ok(_)));
+    }
+}
diff --git a/core/src/ledger_cleanup_service.rs b/core/src/ledger_cleanup_service.rs
index 5c006c8261..160b8721f4 100644
--- a/core/src/ledger_cleanup_service.rs
+++ b/core/src/ledger_cleanup_service.rs
@@ -76,7 +76,7 @@ impl LedgerCleanupService {
         let blockstore_compact = blockstore.clone();
         let t_cleanup = Builder::new()
-            .name("sol-led-cleanup".to_string())
+            .name("solLedgerClean".to_string())
             .spawn(move || loop {
                 if exit.load(Ordering::Relaxed) {
                     break;
@@ -98,7 +98,7 @@ impl LedgerCleanupService {
             .unwrap();
         let t_compact = Builder::new()
-            .name("sol-led-compact".to_string())
+            .name("solLedgerComp".to_string())
             .spawn(move || loop {
                 if exit_compact.load(Ordering::Relaxed) {
                     break;
@@ -238,7 +238,7 @@ impl LedgerCleanupService {
         let purge_complete1 = purge_complete.clone();
         let last_compact_slot1 = last_compact_slot.clone();
         let _t_purge = Builder::new()
-            .name("solana-ledger-purge".to_string())
+            .name("solLedgerPurge".to_string())
             .spawn(move || {
                 let mut slot_update_time = Measure::start("slot_update");
                 *blockstore.lowest_cleanup_slot.write().unwrap() = lowest_cleanup_slot;
diff --git a/core/src/ledger_metric_report_service.rs b/core/src/ledger_metric_report_service.rs
index 8d0b96d28c..1f8636bff6 100644
--- a/core/src/ledger_metric_report_service.rs
+++ b/core/src/ledger_metric_report_service.rs
@@ -26,7 +26,7 @@ impl LedgerMetricReportService {
     pub fn new(blockstore: Arc<Blockstore>, exit: &Arc<AtomicBool>) -> Self {
         let exit_signal = exit.clone();
         let t_cf_metric = Builder::new()
-            .name("metric_report_rocksdb_cf_metrics".to_string())
+            .name("solRocksCfMtrcs".to_string())
             .spawn(move || loop {
                 if exit_signal.load(Ordering::Relaxed) {
                     break;
diff --git a/core/src/lib.rs b/core/src/lib.rs
index 869459db97..24cfe0ebaf 100644
--- a/core/src/lib.rs
+++ b/core/src/lib.rs
@@ -34,6 +34,7 @@ pub mod fork_choice;
 pub mod forward_packet_batches_by_accounts;
 pub mod gen_keys;
 pub mod heaviest_subtree_fork_choice;
+pub mod immutable_deserialized_packet;
 pub mod latest_validator_votes_for_frozen_banks;
 pub mod leader_slot_banking_stage_metrics;
 pub mod leader_slot_banking_stage_timing_metrics;
diff --git a/core/src/poh_timing_report_service.rs b/core/src/poh_timing_report_service.rs
index 175b3cdc83..bc84176525 100644
--- a/core/src/poh_timing_report_service.rs
+++ b/core/src/poh_timing_report_service.rs
@@ -28,7 +28,7 @@ impl PohTimingReportService {
         let exit_signal = exit.clone();
         let mut poh_timing_reporter = PohTimingReporter::default();
         let t_poh_timing = Builder::new()
-            .name("poh_timing_report".to_string())
+            .name("solPohTimingRpt".to_string())
             .spawn(move || loop {
                 if exit_signal.load(Ordering::Relaxed) {
                     break;
diff --git a/core/src/qos_service.rs b/core/src/qos_service.rs
index e9b67591ed..7affbc271c 100644
---
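The thread renames scattered through these hunks (`solana-cost-update-service` → `solCostUpdtSvc`, `sol-led-cleanup` → `solLedgerClean`, and so on) all shrink names to at most 15 characters. The diff does not state the motivation, but the likely reason is that Linux's `pthread_setname_np` keeps only 15 bytes of a thread name (16 including the trailing NUL), so the long `solana-*` names truncated to ambiguous prefixes in `top -H`, `perf`, and `gdb`. An illustrative sketch only:

```rust
use std::thread::Builder;

fn main() {
    // "completed-data-set-service" (26 chars) would be truncated by the
    // kernel to its first 15 bytes; "solComplDataSet" fits exactly.
    let name = "solComplDataSet".to_string();
    assert!(name.len() <= 15);

    Builder::new()
        .name(name)
        .spawn(|| { /* service loop would run here */ })
        .unwrap()
        .join()
        .unwrap();
}
```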
a/core/src/qos_service.rs
+++ b/core/src/qos_service.rs
@@ -72,7 +72,7 @@ impl QosService {
             let metrics_clone = Arc::clone(&metrics);
             let reporting_thread = Some(
                 Builder::new()
-                    .name("solana-qos-service-metrics-repoting".to_string())
+                    .name("solQosSvcMetr".to_string())
                     .spawn(move || {
                         Self::reporting_loop(running_flag_clone, metrics_clone, report_receiver);
                     })
diff --git a/core/src/repair_service.rs b/core/src/repair_service.rs
index 018824c793..2b8bda8705 100644
--- a/core/src/repair_service.rs
+++ b/core/src/repair_service.rs
@@ -212,7 +212,7 @@ impl RepairService {
             let exit = exit.clone();
             let repair_info = repair_info.clone();
             Builder::new()
-                .name("solana-repair-service".to_string())
+                .name("solRepairSvc".to_string())
                 .spawn(move || {
                     Self::run(
                         &blockstore,
diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs
index 85fe23137c..0c94120c86 100644
--- a/core/src/replay_stage.rs
+++ b/core/src/replay_stage.rs
@@ -97,7 +97,7 @@ const MAX_CONCURRENT_FORKS_TO_REPLAY: usize = 4;
 lazy_static! {
     static ref PAR_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new()
         .num_threads(MAX_CONCURRENT_FORKS_TO_REPLAY)
-        .thread_name(|ix| format!("replay_{}", ix))
+        .thread_name(|ix| format!("solReplay{:02}", ix))
         .build()
         .unwrap();
 }
@@ -397,9 +397,9 @@ impl ReplayStage {
         drop_bank_sender: Sender<Vec<Arc<Bank>>>,
         block_metadata_notifier: Option<BlockMetadataNotifierLock>,
         log_messages_bytes_limit: Option<usize>,
-    ) -> Self {
+    ) -> Result<Self, String> {
         let mut tower = if let Some(process_blockstore) = maybe_process_blockstore {
-            let tower = process_blockstore.process_to_create_tower();
+            let tower = process_blockstore.process_to_create_tower()?;
             info!("Tower state: {:?}", tower);
             tower
         } else {
@@ -436,7 +436,7 @@ impl ReplayStage {
         #[allow(clippy::cognitive_complexity)]
         let t_replay = Builder::new()
-            .name("solana-replay-stage".to_string())
+            .name("solReplayStage".to_string())
             .spawn(move || {
                 let verify_recyclers = VerifyRecyclers::default();
                 let _exit = Finalizer::new(exit.clone());
@@ -940,10 +940,10 @@ impl ReplayStage {
             })
             .unwrap();
-        Self {
+        Ok(Self {
             t_replay,
             commitment_service,
-        }
+        })
     }

     fn check_for_vote_only_mode(
@@ -3529,7 +3529,7 @@ pub(crate) mod tests {
         solana_streamer::socket::SocketAddrSpace,
         solana_transaction_status::VersionedTransactionWithStatusMeta,
         solana_vote_program::{
-            vote_state::{VoteState, VoteStateVersions},
+            vote_state::{self, VoteStateVersions},
             vote_transaction,
         },
         std::{
@@ -4220,10 +4220,10 @@ pub(crate) mod tests {
     fn test_replay_commitment_cache() {
         fn leader_vote(vote_slot: Slot, bank: &Arc<Bank>, pubkey: &Pubkey) {
             let mut leader_vote_account = bank.get_account(pubkey).unwrap();
-            let mut vote_state = VoteState::from(&leader_vote_account).unwrap();
-            vote_state.process_slot_vote_unchecked(vote_slot);
+            let mut vote_state = vote_state::from(&leader_vote_account).unwrap();
+            vote_state::process_slot_vote_unchecked(&mut vote_state, vote_slot);
             let versioned = VoteStateVersions::new_current(vote_state);
-            VoteState::to(&versioned, &mut leader_vote_account).unwrap();
+            vote_state::to(&versioned, &mut leader_vote_account).unwrap();
             bank.store_account(pubkey, &leader_vote_account);
         }
diff --git a/core/src/result.rs b/core/src/result.rs
index 6c9b66b6d4..2aa8f8718f 100644
--- a/core/src/result.rs
+++ b/core/src/result.rs
@@ -3,53 +3,42 @@
 use {
     solana_gossip::{cluster_info, gossip_error::GossipError},
     solana_ledger::blockstore,
+    thiserror::Error,
 };

-#[derive(Debug)]
+#[derive(Debug, Error)]
 pub enum Error {
-    Io(std::io::Error),
-    Recv(crossbeam_channel::RecvError),
+    #[error(transparent)]
+    Blockstore(#[from] blockstore::BlockstoreError),
+    #[error(transparent)]
+    ClusterInfo(#[from] cluster_info::ClusterInfoError),
+    #[error(transparent)]
+    Gossip(#[from] GossipError),
+    #[error(transparent)]
+    Io(#[from] std::io::Error),
+    #[error("ReadyTimeout")]
     ReadyTimeout,
-    RecvTimeout(crossbeam_channel::RecvTimeoutError),
-    TrySend,
-    Serialize(std::boxed::Box<bincode::ErrorKind>),
-    ClusterInfo(cluster_info::ClusterInfoError),
+    #[error(transparent)]
+    Recv(#[from] crossbeam_channel::RecvError),
+    #[error(transparent)]
+    RecvTimeout(#[from] crossbeam_channel::RecvTimeoutError),
+    #[error("Send")]
     Send,
-    Blockstore(blockstore::BlockstoreError),
-    WeightedIndex(rand::distributions::weighted::WeightedError),
-    Gossip(GossipError),
+    #[error("TrySend")]
+    TrySend,
+    #[error(transparent)]
+    Serialize(#[from] std::boxed::Box<bincode::ErrorKind>),
+    #[error(transparent)]
+    WeightedIndex(#[from] rand::distributions::weighted::WeightedError),
 }

 pub type Result<T> = std::result::Result<T, Error>;

-impl std::fmt::Display for Error {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "solana error")
-    }
-}
-
-impl std::error::Error for Error {}
-
-impl std::convert::From<crossbeam_channel::RecvError> for Error {
-    fn from(e: crossbeam_channel::RecvError) -> Error {
-        Error::Recv(e)
-    }
-}
 impl std::convert::From<crossbeam_channel::ReadyTimeoutError> for Error {
     fn from(_e: crossbeam_channel::ReadyTimeoutError) -> Error {
         Error::ReadyTimeout
     }
 }
-impl std::convert::From<crossbeam_channel::RecvTimeoutError> for Error {
-    fn from(e: crossbeam_channel::RecvTimeoutError) -> Error {
-        Error::RecvTimeout(e)
-    }
-}
-impl std::convert::From<cluster_info::ClusterInfoError> for Error {
-    fn from(e: cluster_info::ClusterInfoError) -> Error {
-        Error::ClusterInfo(e)
-    }
-}
 impl<T> std::convert::From<crossbeam_channel::TrySendError<T>> for Error {
     fn from(_e: crossbeam_channel::TrySendError<T>) -> Error {
         Error::TrySend
@@ -60,31 +49,6 @@ impl<T> std::convert::From<crossbeam_channel::SendError<T>> for Error {
         Error::Send
     }
 }
-impl std::convert::From<std::io::Error> for Error {
-    fn from(e: std::io::Error) -> Error {
-        Error::Io(e)
-    }
-}
-impl std::convert::From<std::boxed::Box<bincode::ErrorKind>> for Error {
-    fn from(e: std::boxed::Box<bincode::ErrorKind>) -> Error {
-        Error::Serialize(e)
-    }
-}
-impl std::convert::From<blockstore::BlockstoreError> for Error {
-    fn from(e: blockstore::BlockstoreError) -> Error {
-        Error::Blockstore(e)
-    }
-}
-impl std::convert::From<rand::distributions::weighted::WeightedError> for Error {
-    fn from(e: rand::distributions::weighted::WeightedError) -> Error {
-        Error::WeightedIndex(e)
-    }
-}
-impl std::convert::From<GossipError> for Error {
-    fn from(e: GossipError) -> Error {
-        Error::Gossip(e)
-    }
-}

 #[cfg(test)]
 mod tests {
diff --git a/core/src/retransmit_stage.rs b/core/src/retransmit_stage.rs
index 87d98129da..329eca86e2 100644
--- a/core/src/retransmit_stage.rs
+++ b/core/src/retransmit_stage.rs
@@ -305,13 +305,13 @@ fn retransmit_shred(
     shred_receiver_addr: Option<SocketAddr>,
 ) -> (/*root_distance:*/ usize, /*num_nodes:*/ usize) {
     let mut compute_turbine_peers = Measure::start("turbine_start");
-    let (root_distance, addrs) = cluster_nodes.maybe_extend_retransmit_addrs(
-        slot_leader,
-        key,
-        root_bank,
-        DATA_PLANE_FANOUT,
-        shred_receiver_addr,
-    );
+    let (root_distance, mut addrs) =
+        cluster_nodes.get_retransmit_addrs(slot_leader, key, root_bank, DATA_PLANE_FANOUT);
+
+    if let Some(addr) = shred_receiver_addr {
+        addrs.push(addr);
+    }
+
     let addrs: Vec<_> = addrs
         .into_iter()
         .filter(|addr| ContactInfo::is_valid_address(addr, socket_addr_space))
@@ -374,11 +374,11 @@ pub fn retransmitter(
     let num_threads = get_thread_count().min(8).max(sockets.len());
     let thread_pool = ThreadPoolBuilder::new()
         .num_threads(num_threads)
-        .thread_name(|i| format!("retransmit-{}", i))
+        .thread_name(|i| format!("solRetransmit{:02}", i))
         .build()
         .unwrap();
     Builder::new()
-
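The `core/src/result.rs` rewrite above trades a page of hand-written `From` impls and a catch-all `Display` ("solana error") for `thiserror` derives: `#[from]` generates each `From` conversion, and `#[error(transparent)]` forwards `Display` and `source()` to the wrapped error. Only payload-dropping variants (`ReadyTimeout`, `Send`, `TrySend`) keep manual impls, since `#[from]` requires the variant to store its source. The same pattern in miniature (stand-alone sketch, not patch code):

```rust
use thiserror::Error;

#[derive(Debug, Error)]
pub enum Error {
    // `#[from]` derives `impl From<std::io::Error> for Error`;
    // `transparent` delegates Display and source() to the inner error.
    #[error(transparent)]
    Io(#[from] std::io::Error),
    // Payload-less variant: there is no source to wrap, so the conversion
    // that produces it (from a generic SendError<T>) stays hand-written.
    #[error("Send")]
    Send,
}

impl<T> From<std::sync::mpsc::SendError<T>> for Error {
    fn from(_e: std::sync::mpsc::SendError<T>) -> Self {
        Error::Send
    }
}
```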
.name("solana-retransmitter".to_string()) + .name("solRetransmittr".to_string()) .spawn(move || loop { match retransmit( &thread_pool, diff --git a/core/src/rewards_recorder_service.rs b/core/src/rewards_recorder_service.rs index 8988441d22..10dd8ea9cd 100644 --- a/core/src/rewards_recorder_service.rs +++ b/core/src/rewards_recorder_service.rs @@ -30,7 +30,7 @@ impl RewardsRecorderService { ) -> Self { let exit = exit.clone(); let thread_hdl = Builder::new() - .name("solana-rewards-writer".to_string()) + .name("solRewardsWritr".to_string()) .spawn(move || loop { if exit.load(Ordering::Relaxed) { break; diff --git a/core/src/serve_repair.rs b/core/src/serve_repair.rs index a9cc6cd0cb..0b7d63a7dd 100644 --- a/core/src/serve_repair.rs +++ b/core/src/serve_repair.rs @@ -32,7 +32,7 @@ use { solana_runtime::{bank::Bank, bank_forks::BankForks}, solana_sdk::{ clock::Slot, - feature_set::sign_repair_requests, + feature_set::{check_ping_ancestor_requests, sign_repair_requests}, hash::{Hash, HASH_BYTES}, packet::PACKET_DATA_SIZE, pubkey::{Pubkey, PUBKEY_BYTES}, @@ -43,7 +43,6 @@ use { }, solana_streamer::{ sendmmsg::{batch_send, SendPktsError}, - socket::SocketAddrSpace, streamer::{PacketBatchReceiver, PacketBatchSender}, }, std::{ @@ -131,36 +130,21 @@ impl AncestorHashesRepairType { } #[derive(Debug, Serialize, Deserialize)] -pub enum AncestorHashesResponseVersion { - Current(Vec), -} -impl AncestorHashesResponseVersion { - pub fn into_slot_hashes(self) -> Vec { - match self { - AncestorHashesResponseVersion::Current(slot_hashes) => slot_hashes, - } - } - - pub fn slot_hashes(&self) -> &[SlotHash] { - match self { - AncestorHashesResponseVersion::Current(slot_hashes) => slot_hashes, - } - } - - fn max_ancestors_in_response(&self) -> usize { - match self { - AncestorHashesResponseVersion::Current(_) => MAX_ANCESTOR_RESPONSES, - } - } +pub enum AncestorHashesResponse { + Hashes(Vec), + Ping(Ping), } impl RequestResponse for AncestorHashesRepairType { - type Response = AncestorHashesResponseVersion; + type Response = AncestorHashesResponse; fn num_expected_responses(&self) -> u32 { 1 } - fn verify_response(&self, response: &AncestorHashesResponseVersion) -> bool { - response.slot_hashes().len() <= response.max_ancestors_in_response() + fn verify_response(&self, response: &AncestorHashesResponse) -> bool { + match response { + AncestorHashesResponse::Hashes(hashes) => hashes.len() <= MAX_ANCESTOR_RESPONSES, + AncestorHashesResponse::Ping(ping) => ping.verify(), + } } } @@ -241,7 +225,7 @@ pub enum RepairProtocol { } #[derive(Serialize, Deserialize, Debug)] -enum RepairResponse { +pub(crate) enum RepairResponse { Ping(Ping), } @@ -279,23 +263,6 @@ impl RepairProtocol { | Self::AncestorHashes { .. } => true, } } - - fn requires_ping_check(&self) -> bool { - match self { - Self::LegacyWindowIndex(_, _, _) - | Self::LegacyHighestWindowIndex(_, _, _) - | Self::LegacyOrphan(_, _) - | Self::LegacyWindowIndexWithNonce(_, _, _, _) - | Self::LegacyHighestWindowIndexWithNonce(_, _, _, _) - | Self::LegacyOrphanWithNonce(_, _, _) - | Self::LegacyAncestorHashes(_, _, _) - | Self::Pong(_) - | Self::AncestorHashes { .. } => false, - Self::WindowIndex { .. } | Self::HighestWindowIndex { .. } | Self::Orphan { .. 
} => { - true - } - } - } } #[derive(Clone)] @@ -475,6 +442,24 @@ impl ServeRepair { } } + fn check_ping_ancestor_requests_activated_epoch(root_bank: &Bank) -> Option { + root_bank + .feature_set + .activated_slot(&check_ping_ancestor_requests::id()) + .map(|slot| root_bank.epoch_schedule().get_epoch(slot)) + } + + fn should_check_ping_ancestor_request( + slot: Slot, + root_bank: &Bank, + check_ping_ancestor_request_epoch: Option, + ) -> bool { + match check_ping_ancestor_request_epoch { + None => false, + Some(feature_epoch) => feature_epoch < root_bank.epoch_schedule().get_epoch(slot), + } + } + /// Process messages from the network fn run_listen( &self, @@ -582,7 +567,7 @@ impl ServeRepair { let recycler = PacketBatchRecycler::default(); Builder::new() - .name("solana-repair-listen".to_string()) + .name("solRepairListen".to_string()) .spawn(move || { let mut last_print = Instant::now(); let mut stats = ServeRepairStats::default(); @@ -682,26 +667,11 @@ impl ServeRepair { request: &RepairProtocol, from_addr: &SocketAddr, identity_keypair: &Keypair, - socket_addr_space: &SocketAddrSpace, ping_cache: &mut PingCache, - pending_pings: &mut Vec<(SocketAddr, Ping)>, - stats: &mut ServeRepairStats, - ) -> bool { - if !ContactInfo::is_valid_address(from_addr, socket_addr_space) { - stats.err_malformed += 1; - return false; - } + ) -> (bool, Option) { let mut rng = rand::thread_rng(); let mut pingf = move || Ping::new_rand(&mut rng, identity_keypair).ok(); - let (check, ping) = - ping_cache.check(Instant::now(), (*request.sender(), *from_addr), &mut pingf); - if let Some(ping) = ping { - pending_pings.push((*from_addr, ping)); - } - if !check { - stats.pings_required += 1; - } - check + ping_cache.check(Instant::now(), (*request.sender(), *from_addr), &mut pingf) } fn requires_signature_check( @@ -727,6 +697,44 @@ impl ServeRepair { } } + fn ping_to_packet_mapper_by_request_variant( + request: &RepairProtocol, + dest_addr: SocketAddr, + root_bank: &Bank, + check_ping_ancestor_request_epoch: Option, + ) -> Option Option>> { + match request { + RepairProtocol::LegacyWindowIndex(_, _, _) + | RepairProtocol::LegacyHighestWindowIndex(_, _, _) + | RepairProtocol::LegacyOrphan(_, _) + | RepairProtocol::LegacyWindowIndexWithNonce(_, _, _, _) + | RepairProtocol::LegacyHighestWindowIndexWithNonce(_, _, _, _) + | RepairProtocol::LegacyOrphanWithNonce(_, _, _) + | RepairProtocol::LegacyAncestorHashes(_, _, _) + | RepairProtocol::Pong(_) => None, + RepairProtocol::WindowIndex { .. } + | RepairProtocol::HighestWindowIndex { .. } + | RepairProtocol::Orphan { .. } => Some(Box::new(move |ping| { + let ping = RepairResponse::Ping(ping); + Packet::from_data(Some(&dest_addr), ping).ok() + })), + RepairProtocol::AncestorHashes { slot, .. 
} => { + if Self::should_check_ping_ancestor_request( + *slot, + root_bank, + check_ping_ancestor_request_epoch, + ) { + Some(Box::new(move |ping| { + let ping = AncestorHashesResponse::Ping(ping); + Packet::from_data(Some(&dest_addr), ping).ok() + })) + } else { + None + } + } + } + } + fn handle_packets( &self, ping_cache: &mut PingCache, @@ -739,6 +747,8 @@ impl ServeRepair { data_budget: &DataBudget, ) { let sign_repairs_epoch = Self::sign_repair_requests_activated_epoch(root_bank); + let check_ping_ancestor_request_epoch = + Self::check_ping_ancestor_requests_activated_epoch(root_bank); let identity_keypair = self.cluster_info.keypair().clone(); let socket_addr_space = *self.cluster_info.socket_addr_space(); let my_id = identity_keypair.pubkey(); @@ -772,18 +782,27 @@ impl ServeRepair { } let from_addr = packet.meta.socket_addr(); - if request.requires_ping_check() - && !Self::check_ping_cache( - &request, - &from_addr, - &identity_keypair, - &socket_addr_space, - ping_cache, - &mut pending_pings, - stats, - ) - { - continue; + if let Some(ping_to_pkt) = Self::ping_to_packet_mapper_by_request_variant( + &request, + from_addr, + root_bank, + check_ping_ancestor_request_epoch, + ) { + if !ContactInfo::is_valid_address(&from_addr, &socket_addr_space) { + stats.err_malformed += 1; + continue; + } + let (check, ping) = + Self::check_ping_cache(&request, &from_addr, &identity_keypair, ping_cache); + if let Some(ping) = ping { + if let Some(pkt) = ping_to_pkt(ping) { + pending_pings.push(pkt); + } + } + if !check { + stats.pings_required += 1; + continue; + } } stats.processed += 1; @@ -806,15 +825,8 @@ impl ServeRepair { } if !pending_pings.is_empty() { - let packets: Vec<_> = pending_pings - .into_iter() - .filter_map(|(sockaddr, ping)| { - let ping = RepairResponse::Ping(ping); - Packet::from_data(Some(&sockaddr), ping).ok() - }) - .collect(); - let batch = PacketBatch::new(packets); - let _ = response_sender.send(batch); + let batch = PacketBatch::new(pending_pings); + let _ignore = response_sender.send(batch); } } @@ -1032,11 +1044,16 @@ impl ServeRepair { } packet.meta.set_discard(true); stats.ping_count += 1; - if let Ok(pong) = Pong::new(&ping, keypair) { - let pong = RepairProtocol::Pong(pong); - if let Ok(pong_bytes) = serialize(&pong) { - let from_addr = packet.meta.socket_addr(); - pending_pongs.push((pong_bytes, from_addr)); + // Respond both with and without domain so that the other node + // will accept the response regardless of its upgrade status. + // TODO: remove domain = false once cluster is upgraded. 
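Per the TODO above, the loop that follows answers each ping twice, once per `domain` flag, so a peer accepts the pong whether or not it already verifies the new domain-separated format. The rollout pattern, reduced to a self-contained sketch (`make_reply` stands in for `Pong::new(domain, &ping, keypair)`; these names are illustrative, not patch code):

```rust
/// Produce a reply in both the old (domain = false) and new (domain = true)
/// wire formats and queue whichever ones serialize, so peers on either side
/// of the upgrade accept at least one of them.
fn reply_both_formats<R>(
    mut make_reply: impl FnMut(bool) -> Option<R>,
    mut queue: impl FnMut(R),
) {
    for domain in [false, true] {
        if let Some(reply) = make_reply(domain) {
            queue(reply);
        }
    }
}

fn main() {
    let mut sent = Vec::new();
    reply_both_formats(
        |domain| Some(format!("pong(domain={domain})")), // stand-in for Pong::new
        |reply| sent.push(reply),
    );
    assert_eq!(sent.len(), 2);
}
```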
+ for domain in [false, true] { + if let Ok(pong) = Pong::new(domain, &ping, keypair) { + let pong = RepairProtocol::Pong(pong); + if let Ok(pong_bytes) = serialize(&pong) { + let from_addr = packet.meta.socket_addr(); + pending_pongs.push((pong_bytes, from_addr)); + } } } } @@ -1198,7 +1215,7 @@ impl ServeRepair { // If this slot is not duplicate confirmed, return nothing vec![] }; - let response = AncestorHashesResponseVersion::Current(ancestor_slot_hashes); + let response = AncestorHashesResponse::Hashes(ancestor_slot_hashes); let serialized_response = serialize(&response).ok()?; // Could probably directly write response into packet via `serialize_into()` @@ -1961,7 +1978,7 @@ mod tests { #[test] fn test_run_ancestor_hashes() { - fn deserialize_ancestor_hashes_response(packet: &Packet) -> AncestorHashesResponseVersion { + fn deserialize_ancestor_hashes_response(packet: &Packet) -> AncestorHashesResponse { packet .deserialize_slice(..packet.meta.size - SIZE_OF_NONCE) .unwrap() @@ -1996,7 +2013,14 @@ mod tests { assert_eq!(rv.len(), 1); let packet = &rv[0]; let ancestor_hashes_response = deserialize_ancestor_hashes_response(packet); - assert!(ancestor_hashes_response.into_slot_hashes().is_empty()); + match ancestor_hashes_response { + AncestorHashesResponse::Hashes(hashes) => { + assert!(hashes.is_empty()); + } + _ => { + panic!("unexpected response: {:?}", &ancestor_hashes_response); + } + } // `slot + num_slots - 1` is not marked duplicate confirmed so nothing should return // empty @@ -2011,7 +2035,14 @@ mod tests { assert_eq!(rv.len(), 1); let packet = &rv[0]; let ancestor_hashes_response = deserialize_ancestor_hashes_response(packet); - assert!(ancestor_hashes_response.into_slot_hashes().is_empty()); + match ancestor_hashes_response { + AncestorHashesResponse::Hashes(hashes) => { + assert!(hashes.is_empty()); + } + _ => { + panic!("unexpected response: {:?}", &ancestor_hashes_response); + } + } // Set duplicate confirmed let mut expected_ancestors = Vec::with_capacity(num_slots as usize); @@ -2033,10 +2064,14 @@ mod tests { assert_eq!(rv.len(), 1); let packet = &rv[0]; let ancestor_hashes_response = deserialize_ancestor_hashes_response(packet); - assert_eq!( - ancestor_hashes_response.into_slot_hashes(), - expected_ancestors - ); + match ancestor_hashes_response { + AncestorHashesResponse::Hashes(hashes) => { + assert_eq!(hashes, expected_ancestors); + } + _ => { + panic!("unexpected response: {:?}", &ancestor_hashes_response); + } + } } Blockstore::destroy(&ledger_path).expect("Expected successful database destruction"); @@ -2184,10 +2219,10 @@ mod tests { .into_iter() .map(|slot| (slot, Hash::new_unique())) .collect(); - assert!(repair.verify_response(&AncestorHashesResponseVersion::Current(response.clone()))); + assert!(repair.verify_response(&AncestorHashesResponse::Hashes(response.clone()))); // over the allowed limit, should fail response.push((request_slot, Hash::new_unique())); - assert!(!repair.verify_response(&AncestorHashesResponseVersion::Current(response))); + assert!(!repair.verify_response(&AncestorHashesResponse::Hashes(response))); } } diff --git a/core/src/serve_repair_service.rs b/core/src/serve_repair_service.rs index 72dc7a49e6..144de5c2a9 100644 --- a/core/src/serve_repair_service.rs +++ b/core/src/serve_repair_service.rs @@ -46,7 +46,7 @@ impl ServeRepairService { ); let (response_sender, response_receiver) = unbounded(); let t_responder = streamer::responder( - "serve-repairs", + "Repair", serve_repair_socket, response_receiver, socket_addr_space, diff --git 
a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs index ae604e766f..25c9b00cdf 100644 --- a/core/src/shred_fetch_stage.rs +++ b/core/src/shred_fetch_stage.rs @@ -135,7 +135,7 @@ impl ShredFetchStage { }) .collect(); let modifier_hdl = Builder::new() - .name("solana-tvu-fetch-stage-packet-modifier".to_string()) + .name("solTvuFetchPMod".to_string()) .spawn(move || { let repair_context = repair_context .as_ref() @@ -293,8 +293,7 @@ mod tests { )); let coding = solana_ledger::shred::Shredder::generate_coding_shreds( &[shred], - false, // is_last_in_slot - 3, // next_code_index + 3, // next_code_index ); coding[0].copy_to_packet(&mut packet); assert!(!should_discard_packet( diff --git a/core/src/sigverify_shreds.rs b/core/src/sigverify_shreds.rs index f9a50ab8b2..0e5fa278cd 100644 --- a/core/src/sigverify_shreds.rs +++ b/core/src/sigverify_shreds.rs @@ -37,7 +37,7 @@ pub(crate) fn spawn_shred_sigverify( let recycler_cache = RecyclerCache::warmed(); let mut stats = ShredSigVerifyStats::new(Instant::now()); Builder::new() - .name("shred-verifier".to_string()) + .name("solShredVerifr".to_string()) .spawn(move || loop { match run_shred_sigverify( &self_pubkey, @@ -151,7 +151,7 @@ fn get_slot_leaders( let leader = leaders.entry(slot).or_insert_with(|| { let leader = leader_schedule_cache.slot_leader_at(slot, Some(bank))?; // Discard the shred if the slot leader is the node itself. - (&leader != self_pubkey).then(|| leader) + (&leader != self_pubkey).then_some(leader) }); if leader.is_none() { packet.meta.set_discard(true); diff --git a/core/src/sigverify_stage.rs b/core/src/sigverify_stage.rs index 75c863e9f1..30174dc986 100644 --- a/core/src/sigverify_stage.rs +++ b/core/src/sigverify_stage.rs @@ -412,7 +412,7 @@ impl SigVerifyStage { const MAX_DEDUPER_AGE: Duration = Duration::from_secs(2); const MAX_DEDUPER_ITEMS: u32 = 1_000_000; Builder::new() - .name("solana-verifier".to_string()) + .name("solSigVerifier".to_string()) .spawn(move || { let mut deduper = Deduper::new(MAX_DEDUPER_ITEMS, MAX_DEDUPER_AGE); loop { diff --git a/core/src/snapshot_packager_service.rs b/core/src/snapshot_packager_service.rs index 7077362e4b..3234dcbeea 100644 --- a/core/src/snapshot_packager_service.rs +++ b/core/src/snapshot_packager_service.rs @@ -49,7 +49,7 @@ impl SnapshotPackagerService { ); let t_snapshot_packager = Builder::new() - .name("snapshot-packager".to_string()) + .name("solSnapshotPkgr".to_string()) .spawn(move || { renice_this_thread(snapshot_config.packager_thread_niceness_adj).unwrap(); let mut snapshot_gossip_manager = if enable_gossip_push { diff --git a/core/src/staked_nodes_updater_service.rs b/core/src/staked_nodes_updater_service.rs index a5bd351859..73b3c6ec8c 100644 --- a/core/src/staked_nodes_updater_service.rs +++ b/core/src/staked_nodes_updater_service.rs @@ -8,7 +8,7 @@ use { net::IpAddr, sync::{ atomic::{AtomicBool, Ordering}, - Arc, RwLock, + Arc, RwLock, RwLockReadGuard, }, thread::{self, sleep, Builder, JoinHandle}, time::{Duration, Instant}, @@ -27,22 +27,29 @@ impl StakedNodesUpdaterService { cluster_info: Arc, bank_forks: Arc>, shared_staked_nodes: Arc>, + shared_staked_nodes_overrides: Arc>>, ) -> Self { let thread_hdl = Builder::new() - .name("sol-sn-updater".to_string()) + .name("solStakedNodeUd".to_string()) .spawn(move || { let mut last_stakes = Instant::now(); while !exit.load(Ordering::Relaxed) { + let overrides = shared_staked_nodes_overrides.read().unwrap(); let mut new_ip_to_stake = HashMap::new(); let mut new_id_to_stake = HashMap::new(); let mut 
total_stake = 0;
+                    let mut max_stake: u64 = 0;
+                    let mut min_stake: u64 = u64::MAX;
                     if Self::try_refresh_stake_maps(
                         &mut last_stakes,
                         &mut new_ip_to_stake,
                         &mut new_id_to_stake,
                         &mut total_stake,
+                        &mut max_stake,
+                        &mut min_stake,
                         &bank_forks,
                         &cluster_info,
+                        &overrides,
                     ) {
                         let mut shared = shared_staked_nodes.write().unwrap();
                         shared.total_stake = total_stake;
@@ -61,16 +68,22 @@ impl StakedNodesUpdaterService {
         ip_to_stake: &mut HashMap<IpAddr, u64>,
         id_to_stake: &mut HashMap<Pubkey, u64>,
         total_stake: &mut u64,
+        max_stake: &mut u64,
+        min_stake: &mut u64,
         bank_forks: &RwLock<BankForks>,
         cluster_info: &ClusterInfo,
+        overrides: &RwLockReadGuard<HashMap<Pubkey, u64>>,
     ) -> bool {
         if last_stakes.elapsed() > IP_TO_STAKE_REFRESH_DURATION {
             let root_bank = bank_forks.read().unwrap().root_bank();
             let staked_nodes = root_bank.staked_nodes();
-            *total_stake = staked_nodes
-                .iter()
-                .map(|(_pubkey, stake)| stake)
-                .sum::<u64>();
+
+            for stake in staked_nodes.values() {
+                *total_stake += stake;
+                *max_stake = *stake.max(max_stake);
+                *min_stake = *stake.min(min_stake);
+            }
+
             *id_to_stake = cluster_info
                 .tvu_peers()
                 .into_iter()
@@ -87,6 +100,14 @@ impl StakedNodesUpdaterService {
                     Some((node.tvu.ip(), *stake))
                 })
                 .collect();
+            Self::override_stake(
+                cluster_info,
+                total_stake,
+                id_to_stake,
+                ip_to_stake,
+                overrides,
+            );
+
             *last_stakes = Instant::now();
             true
         } else {
@@ -95,6 +116,40 @@
         }
     }

+    fn override_stake(
+        cluster_info: &ClusterInfo,
+        total_stake: &mut u64,
+        id_to_stake_map: &mut HashMap<Pubkey, u64>,
+        ip_to_stake_map: &mut HashMap<IpAddr, u64>,
+        staked_map_overrides: &HashMap<Pubkey, u64>,
+    ) {
+        for (id_override, stake_override) in staked_map_overrides.iter() {
+            if let Some(ip_override) =
+                cluster_info
+                    .all_peers()
+                    .into_iter()
+                    .find_map(|(node, _seen_time)| {
+                        if node.id == *id_override {
+                            return Some(node.tvu.ip());
+                        }
+                        None
+                    })
+            {
+                if let Some(previous_stake) = id_to_stake_map.get(id_override) {
+                    *total_stake -= previous_stake;
+                }
+                *total_stake += stake_override;
+                id_to_stake_map.insert(*id_override, *stake_override);
+                ip_to_stake_map.insert(ip_override, *stake_override);
+            } else {
+                error!(
+                    "staked nodes overrides configuration for id {} with stake {} does not match existing IP.
Skipping", + id_override, stake_override + ); + } + } + } + pub fn join(self) -> thread::Result<()> { self.thread_hdl.join() } diff --git a/core/src/stats_reporter_service.rs b/core/src/stats_reporter_service.rs index b6f23e4162..90e72aaadb 100644 --- a/core/src/stats_reporter_service.rs +++ b/core/src/stats_reporter_service.rs @@ -22,7 +22,7 @@ impl StatsReporterService { ) -> Self { let exit = exit.clone(); let thread_hdl = Builder::new() - .name("solana-stats-reporter".to_owned()) + .name("solStatsReport".to_owned()) .spawn(move || loop { if exit.load(Ordering::Relaxed) { return; diff --git a/core/src/system_monitor_service.rs b/core/src/system_monitor_service.rs index 3b11d4b6a1..dc6146ed1a 100644 --- a/core/src/system_monitor_service.rs +++ b/core/src/system_monitor_service.rs @@ -363,7 +363,7 @@ impl SystemMonitorService { ) -> Self { info!("Starting SystemMonitorService"); let thread_hdl = Builder::new() - .name("system-monitor".to_string()) + .name("solSystemMonitr".to_string()) .spawn(move || { Self::run( exit, diff --git a/core/src/tower1_7_14.rs b/core/src/tower1_7_14.rs index 63b70cdf80..7fe7881e01 100644 --- a/core/src/tower1_7_14.rs +++ b/core/src/tower1_7_14.rs @@ -9,7 +9,7 @@ use { solana_vote_program::vote_state::{BlockTimestamp, Vote, VoteState}, }; -#[frozen_abi(digest = "7phMrqmBo2D3rXPdhBj8CpjRvvmx9qgpcU4cDGkL3W9q")] +#[frozen_abi(digest = "8EBpwHf9gys2irNgyRCEe6A5KSh4RK875Fa46yA2NSoN")] #[derive(Clone, Serialize, Deserialize, Debug, PartialEq, AbiExample)] pub struct Tower1_7_14 { pub(crate) node_pubkey: Pubkey, diff --git a/core/src/tpu.rs b/core/src/tpu.rs index 33a90f4d90..760ce225b1 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -33,13 +33,13 @@ use { cost_model::CostModel, vote_sender_types::{ReplayVoteReceiver, ReplayVoteSender}, }, - solana_sdk::signature::Keypair, + solana_sdk::{pubkey::Pubkey, signature::Keypair}, solana_streamer::{ quic::{spawn_server, StreamStats, MAX_STAKED_CONNECTIONS, MAX_UNSTAKED_CONNECTIONS}, streamer::StakedNodes, }, std::{ - collections::HashSet, + collections::{HashMap, HashSet}, net::{SocketAddr, UdpSocket}, sync::{atomic::AtomicBool, Arc, RwLock}, thread, @@ -68,8 +68,8 @@ pub struct Tpu { banking_stage: BankingStage, cluster_info_vote_listener: ClusterInfoVoteListener, broadcast_stage: BroadcastStage, - tpu_quic_t: Option>, - tpu_forwards_quic_t: Option>, + tpu_quic_t: thread::JoinHandle<()>, + tpu_forwards_quic_t: thread::JoinHandle<()>, find_packet_sender_stake_stage: FindPacketSenderStakeStage, vote_find_packet_sender_stake_stage: FindPacketSenderStakeStage, staked_nodes_updater_service: StakedNodesUpdaterService, @@ -103,11 +103,11 @@ impl Tpu { connection_cache: &Arc, keypair: &Keypair, log_messages_bytes_limit: Option, - enable_quic_servers: bool, staked_nodes: &Arc>, maybe_relayer_config: Option, tip_manager_config: TipManagerConfig, shred_receiver_address: Option, + shared_staked_nodes_overrides: Arc>>, ) -> Self { let TpuSockets { transactions: transactions_sockets, @@ -141,6 +141,7 @@ impl Tpu { cluster_info.clone(), bank_forks.clone(), staked_nodes.clone(), + shared_staked_nodes_overrides, ); let (find_packet_sender_stake_sender, find_packet_sender_stake_receiver) = unbounded(); @@ -149,7 +150,7 @@ impl Tpu { packet_receiver, find_packet_sender_stake_sender, staked_nodes.clone(), - "tpu-find-packet-sender-stake", + "Tpu", ); let (vote_find_packet_sender_stake_sender, vote_find_packet_sender_stake_receiver) = @@ -159,43 +160,39 @@ impl Tpu { vote_packet_receiver, vote_find_packet_sender_stake_sender, 
staked_nodes.clone(), - "tpu-vote-find-packet-sender-stake", + "Vote", ); let (verified_sender, verified_receiver) = unbounded(); let stats = Arc::new(StreamStats::default()); - let tpu_quic_t = enable_quic_servers.then(|| { - spawn_server( - transactions_quic_sockets, - keypair, - cluster_info.my_contact_info().tpu.ip(), - packet_sender.clone(), - exit.clone(), - MAX_QUIC_CONNECTIONS_PER_PEER, - staked_nodes.clone(), - MAX_STAKED_CONNECTIONS, - MAX_UNSTAKED_CONNECTIONS, - stats.clone(), - ) - .unwrap() - }); + let tpu_quic_t = spawn_server( + transactions_quic_sockets, + keypair, + cluster_info.my_contact_info().tpu.ip(), + packet_sender.clone(), + exit.clone(), + MAX_QUIC_CONNECTIONS_PER_PEER, + staked_nodes.clone(), + MAX_STAKED_CONNECTIONS, + MAX_UNSTAKED_CONNECTIONS, + stats.clone(), + ) + .unwrap(); - let tpu_forwards_quic_t = enable_quic_servers.then(|| { - spawn_server( - transactions_forwards_quic_sockets, - keypair, - cluster_info.my_contact_info().tpu_forwards.ip(), - forwarded_packet_sender, - exit.clone(), - MAX_QUIC_CONNECTIONS_PER_PEER, - staked_nodes.clone(), - MAX_STAKED_CONNECTIONS.saturating_add(MAX_UNSTAKED_CONNECTIONS), - 0, // Prevent unstaked nodes from forwarding transactions - stats, - ) - .unwrap() - }); + let tpu_forwards_quic_t = spawn_server( + transactions_forwards_quic_sockets, + keypair, + cluster_info.my_contact_info().tpu_forwards.ip(), + forwarded_packet_sender, + exit.clone(), + MAX_QUIC_CONNECTIONS_PER_PEER, + staked_nodes.clone(), + MAX_STAKED_CONNECTIONS.saturating_add(MAX_UNSTAKED_CONNECTIONS), + 0, // Prevent unstaked nodes from forwarding transactions + stats, + ) + .unwrap(); let sigverify_stage = { let verifier = TransactionSigVerifier::new(verified_sender.clone()); @@ -323,15 +320,11 @@ impl Tpu { self.find_packet_sender_stake_stage.join(), self.vote_find_packet_sender_stake_stage.join(), self.staked_nodes_updater_service.join(), + self.tpu_quic_t.join(), + self.tpu_forwards_quic_t.join(), self.bundle_stage.join(), ]; - if let Some(tpu_quic_t) = self.tpu_quic_t { - tpu_quic_t.join()?; - } - if let Some(tpu_forwards_quic_t) = self.tpu_forwards_quic_t { - tpu_forwards_quic_t.join()?; - } if let Some(relayer_stage) = self.maybe_relayer_stage { relayer_stage.join()?; } diff --git a/core/src/transaction_priority_details.rs b/core/src/transaction_priority_details.rs index 7f816cad2a..6f406d0edf 100644 --- a/core/src/transaction_priority_details.rs +++ b/core/src/transaction_priority_details.rs @@ -24,7 +24,6 @@ pub trait GetTransactionPriorityDetails { .process_instructions( instructions, true, // use default units per instruction - true, // don't reject txs that use set compute unit price ix ) .ok()?; Some(TransactionPriorityDetails { diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 6ebcb33208..6095447448 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -130,7 +130,7 @@ impl Tvu { log_messages_bytes_limit: Option, connection_cache: &Arc, shred_receiver_addr: Option, - ) -> Self { + ) -> Result { let TvuSockets { repair: repair_socket, fetch: fetch_sockets, @@ -290,7 +290,7 @@ impl Tvu { drop_bank_sender, block_metadata_notifier, log_messages_bytes_limit, - ); + )?; let ledger_cleanup_service = tvu_config.max_ledger_shreds.map(|max_ledger_shreds| { LedgerCleanupService::new( @@ -303,7 +303,7 @@ impl Tvu { ) }); - Tvu { + Ok(Tvu { fetch_stage, shred_sigverify, retransmit_stage, @@ -315,7 +315,7 @@ impl Tvu { voting_service, warm_quic_cache_service, drop_bank_service, - } + }) } pub fn join(self) -> thread::Result<()> { @@ -453,7 +453,8 @@ pub 
mod tests { None, &Arc::new(ConnectionCache::default()), None, - ); + ) + .expect("assume success"); exit.store(true, Ordering::Relaxed); tvu.join().unwrap(); poh_service.join().unwrap(); diff --git a/core/src/unprocessed_packet_batches.rs b/core/src/unprocessed_packet_batches.rs index 3305e264b7..82ff8f092a 100644 --- a/core/src/unprocessed_packet_batches.rs +++ b/core/src/unprocessed_packet_batches.rs @@ -1,81 +1,23 @@ use { - crate::transaction_priority_details::{ - GetTransactionPriorityDetails, TransactionPriorityDetails, + crate::{ + immutable_deserialized_packet::{DeserializedPacketError, ImmutableDeserializedPacket}, + transaction_priority_details::TransactionPriorityDetails, }, min_max_heap::MinMaxHeap, solana_perf::packet::{Packet, PacketBatch}, solana_sdk::{ feature_set, hash::Hash, - message::Message, - sanitize::SanitizeError, - short_vec::decode_shortu16_len, - signature::Signature, - transaction::{ - AddressLoader, SanitizedTransaction, SanitizedVersionedTransaction, Transaction, - VersionedTransaction, - }, + transaction::{AddressLoader, SanitizedTransaction, Transaction}, }, std::{ cmp::Ordering, collections::{hash_map::Entry, HashMap}, - mem::size_of, rc::Rc, sync::Arc, }, - thiserror::Error, }; -#[derive(Debug, Error)] -pub enum DeserializedPacketError { - #[error("ShortVec Failed to Deserialize")] - // short_vec::decode_shortu16_len() currently returns () on error - ShortVecError(()), - #[error("Deserialization Error: {0}")] - DeserializationError(#[from] bincode::Error), - #[error("overflowed on signature size {0}")] - SignatureOverflowed(usize), - #[error("packet failed sanitization {0}")] - SanitizeError(#[from] SanitizeError), - #[error("transaction failed prioritization")] - PrioritizationFailure, -} - -#[derive(Debug, PartialEq, Eq)] -pub struct ImmutableDeserializedPacket { - original_packet: Packet, - transaction: SanitizedVersionedTransaction, - message_hash: Hash, - is_simple_vote: bool, - priority_details: TransactionPriorityDetails, -} - -impl ImmutableDeserializedPacket { - pub fn original_packet(&self) -> &Packet { - &self.original_packet - } - - pub fn transaction(&self) -> &SanitizedVersionedTransaction { - &self.transaction - } - - pub fn message_hash(&self) -> &Hash { - &self.message_hash - } - - pub fn is_simple_vote(&self) -> bool { - self.is_simple_vote - } - - pub fn priority(&self) -> u64 { - self.priority_details.priority - } - - pub fn compute_unit_limit(&self) -> u64 { - self.priority_details.compute_unit_limit - } -} - /// Holds deserialized messages, as well as computed message_hash and other things needed to create /// SanitizedTransaction #[derive(Debug, Clone, PartialEq, Eq)] @@ -101,25 +43,10 @@ impl DeserializedPacket { packet: Packet, priority_details: Option, ) -> Result { - let versioned_transaction: VersionedTransaction = packet.deserialize_slice(..)?; - let sanitized_transaction = SanitizedVersionedTransaction::try_from(versioned_transaction)?; - let message_bytes = packet_message(&packet)?; - let message_hash = Message::hash_raw_message(message_bytes); - let is_simple_vote = packet.meta.is_simple_vote_tx(); - - // drop transaction if prioritization fails. 
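With parsing moved into `ImmutableDeserializedPacket::new` (the new module earlier in this patch), the hunk below reduces `DeserializedPacket::new` to a thin wrapper: parse once, then share the immutable result behind an `Rc` while the mutable `forwarded` flag stays outside it. The ownership split, sketched stand-alone (`Parsed` stands in for `ImmutableDeserializedPacket`):

```rust
use std::rc::Rc;

struct Parsed; // stand-in for ImmutableDeserializedPacket

struct DeserializedPacket {
    immutable_section: Rc<Parsed>, // parse-once data, shared cheaply
    forwarded: bool,               // mutable bookkeeping lives outside the Rc
}

impl DeserializedPacket {
    fn new(parsed: Parsed) -> Self {
        Self {
            immutable_section: Rc::new(parsed),
            forwarded: false,
        }
    }
}

fn main() {
    let p = DeserializedPacket::new(Parsed);
    let alias = p.immutable_section.clone(); // e.g. held by the priority queue
    assert_eq!(Rc::strong_count(&alias), 2);
}
```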
-        let priority_details = priority_details
-            .or_else(|| sanitized_transaction.get_transaction_priority_details())
-            .ok_or(DeserializedPacketError::PrioritizationFailure)?;
+        let immutable_section = ImmutableDeserializedPacket::new(packet, priority_details)?;

         Ok(Self {
-            immutable_section: Rc::new(ImmutableDeserializedPacket {
-                original_packet: packet,
-                transaction: sanitized_transaction,
-                message_hash,
-                is_simple_vote,
-                priority_details,
-            }),
+            immutable_section: Rc::new(immutable_section),
             forwarded: false,
         })
     }
@@ -143,18 +70,6 @@ impl Ord for DeserializedPacket {
     }
 }

-impl PartialOrd for ImmutableDeserializedPacket {
-    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-        Some(self.cmp(other))
-    }
-}
-
-impl Ord for ImmutableDeserializedPacket {
-    fn cmp(&self, other: &Self) -> Ordering {
-        self.priority().cmp(&other.priority())
-    }
-}
-
 /// Currently each banking_stage thread has a `UnprocessedPacketBatches` buffer to store
 /// PacketBatch's received from sigverify. Banking thread continuously scans the buffer
 /// to pick proper packets to add to the block.
@@ -389,19 +304,6 @@ pub fn deserialize_packets<'a>(
     })
 }

-/// Read the transaction message from packet data
-pub fn packet_message(packet: &Packet) -> Result<&[u8], DeserializedPacketError> {
-    let (sig_len, sig_size) = packet
-        .data(..)
-        .and_then(|bytes| decode_shortu16_len(bytes).ok())
-        .ok_or(DeserializedPacketError::ShortVecError(()))?;
-    sig_len
-        .checked_mul(size_of::<Signature>())
-        .and_then(|v| v.checked_add(sig_size))
-        .and_then(|msg_start| packet.data(msg_start..))
-        .ok_or(DeserializedPacketError::SignatureOverflowed(sig_size))
-}
-
 pub fn transactions_to_deserialized_packets(
     transactions: &[Transaction],
 ) -> Result<Vec<DeserializedPacket>, DeserializedPacketError> {
@@ -452,7 +354,7 @@ mod tests {
         solana_vote_program::vote_transaction,
     };

-    fn simmple_deserialized_packet() -> DeserializedPacket {
+    fn simple_deserialized_packet() -> DeserializedPacket {
         let tx = system_transaction::transfer(
             &Keypair::new(),
             &solana_sdk::pubkey::new_rand(),
@@ -483,7 +385,7 @@ mod tests {
     #[test]
     fn test_unprocessed_packet_batches_insert_pop_same_packet() {
-        let packet = simmple_deserialized_packet();
+        let packet = simple_deserialized_packet();
         let mut unprocessed_packet_batches = UnprocessedPacketBatches::with_capacity(2);
         unprocessed_packet_batches.push(packet.clone());
         unprocessed_packet_batches.push(packet.clone());
@@ -529,7 +431,7 @@ mod tests {
     #[test]
     fn test_unprocessed_packet_batches_pop_max_n() {
         let num_packets = 10;
-        let packets_iter = std::iter::repeat_with(simmple_deserialized_packet).take(num_packets);
+        let packets_iter = std::iter::repeat_with(simple_deserialized_packet).take(num_packets);
         let mut unprocessed_packet_batches =
             UnprocessedPacketBatches::from_iter(packets_iter.clone(), num_packets);
diff --git a/core/src/validator.rs b/core/src/validator.rs
index e073b5f5c8..95133f2b2c 100644
--- a/core/src/validator.rs
+++ b/core/src/validator.rs
@@ -101,7 +101,7 @@ use {
     },
     solana_send_transaction_service::send_transaction_service,
     solana_streamer::{socket::SocketAddrSpace, streamer::StakedNodes},
-    solana_vote_program::vote_state::VoteState,
+    solana_vote_program::vote_state,
     std::{
         collections::{HashMap, HashSet},
         net::SocketAddr,
@@ -170,13 +170,13 @@ pub struct ValidatorConfig {
     pub accounts_db_test_hash_calculation: bool,
     pub accounts_db_skip_shrink: bool,
     pub tpu_coalesce_ms: u64,
+    pub staked_nodes_overrides: Arc<RwLock<HashMap<Pubkey, u64>>>,
     pub validator_exit: Arc<RwLock<Exit>>,
     pub no_wait_for_vote_to_start_leader: bool,
     pub accounts_shrink_ratio:
AccountShrinkThreshold, pub wait_to_vote_slot: Option, pub ledger_column_options: LedgerColumnOptions, pub runtime_config: RuntimeConfig, - pub enable_quic_servers: bool, pub maybe_relayer_config: Option, pub shred_receiver_address: Option, pub tip_manager_config: TipManagerConfig, @@ -235,6 +235,7 @@ impl Default for ValidatorConfig { accounts_db_test_hash_calculation: false, accounts_db_skip_shrink: false, tpu_coalesce_ms: DEFAULT_TPU_COALESCE_MS, + staked_nodes_overrides: Arc::new(RwLock::new(HashMap::new())), validator_exit: Arc::new(RwLock::new(Exit::default())), no_wait_for_vote_to_start_leader: true, accounts_shrink_ratio: AccountShrinkThreshold::default(), @@ -242,7 +243,6 @@ impl Default for ValidatorConfig { wait_to_vote_slot: None, ledger_column_options: LedgerColumnOptions::default(), runtime_config: RuntimeConfig::default(), - enable_quic_servers: false, maybe_relayer_config: None, shred_receiver_address: None, tip_manager_config: TipManagerConfig::default(), @@ -267,14 +267,23 @@ impl ValidatorConfig { pub enum ValidatorStartProgress { Initializing, // Catch all, default state SearchingForRpcService, - DownloadingSnapshot { slot: Slot, rpc_addr: SocketAddr }, + DownloadingSnapshot { + slot: Slot, + rpc_addr: SocketAddr, + }, CleaningBlockStore, CleaningAccounts, LoadingLedger, - ProcessingLedger { slot: Slot, max_slot: Slot }, + ProcessingLedger { + slot: Slot, + max_slot: Slot, + }, StartingServices, Halted, // Validator halted due to `--dev-halt-at-slot` argument - WaitingForSupermajority, + WaitingForSupermajority { + slot: Slot, + gossip_stake_percent: u64, + }, // `Running` is the terminal state once the validator fully starts and all services are // operational @@ -301,7 +310,7 @@ impl BlockstoreRootScan { let exit = exit.clone(); Some( Builder::new() - .name("blockstore-root-scan".to_string()) + .name("solBStoreRtScan".to_string()) .spawn(move || blockstore.scan_and_fix_roots(&exit)) .unwrap(), ) @@ -362,20 +371,6 @@ pub struct Validator { accounts_hash_verifier: AccountsHashVerifier, } -// in the distant future, get rid of ::new()/exit() and use Result properly... -pub fn abort() -> ! { - #[cfg(not(test))] - { - // standard error is usually redirected to a log file, cry for help on standard output as - // well - println!("Validator process aborted. The validator log may contain further details"); - std::process::exit(1); - } - - #[cfg(test)] - panic!("process::exit(1) is intercepted for friendly test failure..."); -} - impl Validator { #[allow(clippy::too_many_arguments)] pub fn new( @@ -391,7 +386,7 @@ impl Validator { socket_addr_space: SocketAddrSpace, use_quic: bool, tpu_connection_pool_size: usize, - ) -> Self { + ) -> Result { let id = identity_keypair.pubkey(); assert_eq!(id, node.info.id); @@ -399,10 +394,12 @@ impl Validator { warn!("vote account: {}", vote_account); if !config.no_os_network_stats_reporting { - verify_net_stats_access().unwrap_or_else(|err| { - error!("Failed to access Network stats: {}. Bypass check with --no-os-network-stats-reporting.", err); - abort(); - }); + if let Err(e) = verify_net_stats_access() { + return Err(format!( + "Failed to access Network stats: {}. 
Bypass check with --no-os-network-stats-reporting.", + e, + )); + } } let mut bank_notification_senders = Vec::new(); @@ -416,8 +413,7 @@ impl Validator { match result { Ok(geyser_plugin_service) => Some(geyser_plugin_service), Err(err) => { - error!("Failed to load the Geyser plugin: {:?}", err); - abort(); + return Err(format!("Failed to load the Geyser plugin: {:?}", err)); } } } else { @@ -437,6 +433,14 @@ impl Validator { info!("entrypoint: {:?}", cluster_entrypoint); } + if rayon::ThreadPoolBuilder::new() + .thread_name(|ix| format!("solRayonGlob{:02}", ix)) + .build_global() + .is_err() + { + warn!("Rayon global thread pool already initialized"); + } + if solana_perf::perf_libs::api().is_some() { info!("Initializing sigverify, this could take a while..."); } else { @@ -446,11 +450,10 @@ impl Validator { info!("Done."); if !ledger_path.is_dir() { - error!( + return Err(format!( "ledger directory does not exist or is not accessible: {:?}", ledger_path - ); - abort(); + )); } if let Some(shred_version) = config.expected_shred_version { @@ -467,14 +470,7 @@ impl Validator { info!("Cleaning accounts paths.."); *start_progress.write().unwrap() = ValidatorStartProgress::CleaningAccounts; let mut start = Measure::start("clean_accounts_paths"); - for accounts_path in &config.account_paths { - cleanup_accounts_path(accounts_path); - } - if let Some(ref shrink_paths) = config.account_shrink_paths { - for accounts_path in shrink_paths { - cleanup_accounts_path(accounts_path); - } - } + cleanup_accounts_paths(config); start.stop(); info!("done. {}", start); @@ -547,7 +543,7 @@ impl Validator { accounts_update_notifier, transaction_notifier, Some(poh_timing_point_sender.clone()), - ); + )?; node.info.wallclock = timestamp(); node.info.shred_version = compute_shred_version( @@ -567,11 +563,10 @@ impl Validator { if let Some(expected_shred_version) = config.expected_shred_version { if expected_shred_version != node.info.shred_version { - error!( + return Err(format!( "shred version mismatch: expected {} found: {}", expected_shred_version, node.info.shred_version, - ); - abort(); + )); } } @@ -695,7 +690,7 @@ impl Validator { ledger_path, &bank_forks, &leader_schedule_cache, - ); + )?; *start_progress.write().unwrap() = ValidatorStartProgress::StartingServices; @@ -804,29 +799,32 @@ impl Validator { } else { None }; + + let json_rpc_service = JsonRpcService::new( + rpc_addr, + config.rpc_config.clone(), + config.snapshot_config.clone(), + bank_forks.clone(), + block_commitment_cache.clone(), + blockstore.clone(), + cluster_info.clone(), + Some(poh_recorder.clone()), + genesis_config.hash(), + ledger_path, + config.validator_exit.clone(), + config.known_validators.clone(), + rpc_override_health_check.clone(), + startup_verification_complete, + optimistically_confirmed_bank.clone(), + config.send_transaction_service_config.clone(), + max_slots.clone(), + leader_schedule_cache.clone(), + connection_cache.clone(), + max_complete_transaction_status_slot, + )?; + ( - Some(JsonRpcService::new( - rpc_addr, - config.rpc_config.clone(), - config.snapshot_config.clone(), - bank_forks.clone(), - block_commitment_cache.clone(), - blockstore.clone(), - cluster_info.clone(), - Some(poh_recorder.clone()), - genesis_config.hash(), - ledger_path, - config.validator_exit.clone(), - config.known_validators.clone(), - rpc_override_health_check.clone(), - startup_verification_complete, - optimistically_confirmed_bank.clone(), - config.send_transaction_service_config.clone(), - max_slots.clone(), - 
leader_schedule_cache.clone(), - connection_cache.clone(), - max_complete_transaction_status_slot, - )), + Some(json_rpc_service), if !config.rpc_config.full_api { None } else { @@ -901,7 +899,7 @@ impl Validator { exit.clone(), ); - let waited_for_supermajority = if let Ok(waited) = wait_for_supermajority( + let waited_for_supermajority = match wait_for_supermajority( config, Some(&mut process_blockstore), &bank_forks, @@ -909,9 +907,8 @@ impl Validator { rpc_override_health_check, &start_progress, ) { - waited - } else { - abort(); + Ok(waited) => waited, + Err(e) => return Err(format!("wait_for_supermajority failed: {:?}", e)), }; let ledger_metric_report_service = @@ -1002,7 +999,7 @@ impl Validator { config.runtime_config.log_messages_bytes_limit, &connection_cache, config.shred_receiver_address, - ); + )?; let tpu = Tpu::new( &cluster_info, @@ -1036,11 +1033,11 @@ impl Validator { &connection_cache, &identity_keypair, config.runtime_config.log_messages_bytes_limit, - config.enable_quic_servers, &staked_nodes, config.maybe_relayer_config.clone(), config.tip_manager_config.clone(), config.shred_receiver_address, + config.staked_nodes_overrides.clone(), ); datapoint_info!( @@ -1054,7 +1051,7 @@ impl Validator { ); *start_progress.write().unwrap() = ValidatorStartProgress::Running; - Self { + Ok(Self { stats_reporter_service, gossip_service, serve_repair_service, @@ -1083,7 +1080,7 @@ impl Validator { ledger_metric_report_service, accounts_background_service, accounts_hash_verifier, - } + }) } // Used for notifying many nodes in parallel to exit @@ -1222,14 +1219,17 @@ impl Validator { fn active_vote_account_exists_in_bank(bank: &Arc, vote_account: &Pubkey) -> bool { if let Some(account) = &bank.get_account(vote_account) { - if let Some(vote_state) = VoteState::from(account) { + if let Some(vote_state) = vote_state::from(account) { return !vote_state.votes.is_empty(); } } false } -fn check_poh_speed(genesis_config: &GenesisConfig, maybe_hash_samples: Option) { +fn check_poh_speed( + genesis_config: &GenesisConfig, + maybe_hash_samples: Option, +) -> Result<(), String> { if let Some(hashes_per_tick) = genesis_config.hashes_per_tick() { let ticks_per_slot = genesis_config.ticks_per_slot(); let hashes_per_slot = hashes_per_tick * ticks_per_slot; @@ -1249,13 +1249,14 @@ fn check_poh_speed(genesis_config: &GenesisConfig, maybe_hash_samples: Option Option { @@ -1275,66 +1276,64 @@ fn post_process_restored_tower( vote_account: &Pubkey, config: &ValidatorConfig, bank_forks: &BankForks, -) -> Tower { +) -> Result { let mut should_require_tower = config.require_tower; - restored_tower - .and_then(|tower| { - let root_bank = bank_forks.root_bank(); - let slot_history = root_bank.get_slot_history(); - // make sure tower isn't corrupted first before the following hard fork check - let tower = tower.adjust_lockouts_after_replay(root_bank.slot(), &slot_history); - - if let Some(hard_fork_restart_slot) = maybe_cluster_restart_with_hard_fork(config, root_bank.slot()) { - // intentionally fail to restore tower; we're supposedly in a new hard fork; past - // out-of-chain vote state doesn't make sense at all - // what if --wait-for-supermajority again if the validator restarted? 
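The `match`/`return Err(format!(...))` shape here recurs across `validator.rs`: the old `abort()` helper, which killed the process from deep inside `Validator::new`, is gone, and fallible startup steps now return `Result<_, String>` so the caller decides whether to exit, log, or assert in tests. The conversion pattern in miniature (hypothetical `wait_step`/`start` names, not patch code):

```rust
// Before: any failure tore the process down from inside the constructor:
//   Err(e) => abort(),
// After: errors are stringified and bubbled up with `?`.
fn wait_step(ok: bool) -> Result<bool, String> {
    if ok {
        Ok(true)
    } else {
        Err("wait_for_supermajority failed: ...".to_string())
    }
}

fn start() -> Result<(), String> {
    // `?` replaces the old abort(): the caller of start() now chooses
    // whether to exit, retry, or surface the message.
    let _waited = wait_step(true)?;
    Ok(())
}
```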
- let message = format!("Hard fork is detected; discarding tower restoration result: {:?}", tower); - datapoint_error!( - "tower_error", - ( - "error", - message, - String - ), - ); - error!("{}", message); + let restored_tower = restored_tower.and_then(|tower| { + let root_bank = bank_forks.root_bank(); + let slot_history = root_bank.get_slot_history(); + // make sure tower isn't corrupted first before the following hard fork check + let tower = tower.adjust_lockouts_after_replay(root_bank.slot(), &slot_history); - // unconditionally relax tower requirement so that we can always restore tower - // from root bank. - should_require_tower = false; - return Err(crate::consensus::TowerError::HardFork(hard_fork_restart_slot)); - } + if let Some(hard_fork_restart_slot) = + maybe_cluster_restart_with_hard_fork(config, root_bank.slot()) + { + // intentionally fail to restore tower; we're supposedly in a new hard fork; past + // out-of-chain vote state doesn't make sense at all + // what if --wait-for-supermajority again if the validator restarted? + let message = format!( + "Hard fork is detected; discarding tower restoration result: {:?}", + tower + ); + datapoint_error!("tower_error", ("error", message, String),); + error!("{}", message); + + // unconditionally relax tower requirement so that we can always restore tower + // from root bank. + should_require_tower = false; + return Err(crate::consensus::TowerError::HardFork( + hard_fork_restart_slot, + )); + } - if let Some(warp_slot) = config.warp_slot { - // unconditionally relax tower requirement so that we can always restore tower - // from root bank after the warp - should_require_tower = false; - return Err(crate::consensus::TowerError::HardFork(warp_slot)); - } + if let Some(warp_slot) = config.warp_slot { + // unconditionally relax tower requirement so that we can always restore tower + // from root bank after the warp + should_require_tower = false; + return Err(crate::consensus::TowerError::HardFork(warp_slot)); + } - tower - }) - .unwrap_or_else(|err| { + tower + }); + + let restored_tower = match restored_tower { + Ok(tower) => tower, + Err(err) => { let voting_has_been_active = active_vote_account_exists_in_bank(&bank_forks.working_bank(), vote_account); if !err.is_file_missing() { datapoint_error!( "tower_error", - ( - "error", - format!("Unable to restore tower: {}", err), - String - ), + ("error", format!("Unable to restore tower: {}", err), String), ); } if should_require_tower && voting_has_been_active { - error!("Requested mandatory tower restore failed: {}", err); - error!( - "And there is an existing vote_account containing actual votes. \ + return Err(format!( + "Requested mandatory tower restore failed: {}. \ + And there is an existing vote_account containing actual votes. 
\ Aborting due to possible conflicting duplicate votes", - ); - abort(); + err + )); } if err.is_file_missing() && !voting_has_been_active { // Currently, don't protect against spoofed snapshots with no tower at all @@ -1349,12 +1348,11 @@ fn post_process_restored_tower( ); } - Tower::new_from_bankforks( - bank_forks, - validator_identity, - vote_account, - ) - }) + Tower::new_from_bankforks(bank_forks, validator_identity, vote_account) + } + }; + + Ok(restored_tower) } #[allow(clippy::type_complexity)] @@ -1366,20 +1364,23 @@ fn load_blockstore( accounts_update_notifier: Option, transaction_notifier: Option, poh_timing_point_sender: Option, -) -> ( - GenesisConfig, - Arc>, - Arc, - Slot, - Receiver, - CompletedSlotsReceiver, - LeaderScheduleCache, - Option, - TransactionHistoryServices, - blockstore_processor::ProcessOptions, - BlockstoreRootScan, - DroppedSlotsReceiver, -) { +) -> Result< + ( + GenesisConfig, + Arc>, + Arc, + Slot, + Receiver, + CompletedSlotsReceiver, + LeaderScheduleCache, + Option, + TransactionHistoryServices, + blockstore_processor::ProcessOptions, + BlockstoreRootScan, + DroppedSlotsReceiver, + ), + String, +> { info!("loading ledger from {:?}...", ledger_path); *start_progress.write().unwrap() = ValidatorStartProgress::LoadingLedger; let genesis_config = open_genesis_config(ledger_path, config.max_genesis_archive_unpacked_size); @@ -1396,14 +1397,17 @@ fn load_blockstore( if let Some(expected_genesis_hash) = config.expected_genesis_hash { if genesis_hash != expected_genesis_hash { - error!("genesis hash mismatch: expected {}", expected_genesis_hash); - error!("Delete the ledger directory to continue: {:?}", ledger_path); - abort(); + return Err(format!( + "genesis hash mismatch: hash={} expected={}. Delete the ledger directory to continue: {:?}", + genesis_hash, + expected_genesis_hash, + ledger_path, + )); } } if !config.no_poh_speed_test { - check_poh_speed(&genesis_config, None); + check_poh_speed(&genesis_config, None)?; } let BlockstoreSignals { @@ -1512,7 +1516,7 @@ fn load_blockstore( } } - ( + Ok(( genesis_config, bank_forks, blockstore, @@ -1525,7 +1529,7 @@ fn load_blockstore( process_options, blockstore_root_scan, pruned_banks_receiver, - ) + )) } fn highest_slot(blockstore: &Blockstore) -> Option { @@ -1603,41 +1607,30 @@ impl<'a> ProcessBlockStore<'a> { } } - pub(crate) fn process(&mut self) { + pub(crate) fn process(&mut self) -> Result<(), String> { if self.tower.is_none() { let previous_start_process = *self.start_progress.read().unwrap(); *self.start_progress.write().unwrap() = ValidatorStartProgress::LoadingLedger; - /* - #[allow(clippy::too_many_arguments)] - fn process_blockstore( - blockstore: &Blockstore, - bank_forks: &Arc>, - leader_schedule_cache: &LeaderScheduleCache, - process_options: &blockstore_processor::ProcessOptions, - transaction_status_sender: Option<&TransactionStatusSender>, - cache_block_meta_sender: Option<&CacheBlockMetaSender>, - blockstore_root_scan: BlockstoreRootScan, - accounts_background_request_sender: &AbsRequestSender, - start_progress: &Arc>, - ) { - */ let exit = Arc::new(AtomicBool::new(false)); if let Some(max_slot) = highest_slot(self.blockstore) { let bank_forks = self.bank_forks.clone(); let exit = exit.clone(); let start_progress = self.start_progress.clone(); - let _ = std::thread::spawn(move || { - while !exit.load(Ordering::Relaxed) { - let slot = bank_forks.read().unwrap().working_bank().slot(); - *start_progress.write().unwrap() = - ValidatorStartProgress::ProcessingLedger { slot, max_slot }; - 
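As a standalone illustration of the reporting thread spawned above (a named thread that polls shared state every two seconds until an exit flag flips), here is a self-contained sketch; the thread name matches the hunk, while the polled value is a simplified stand-in:

```rust
use std::{
    sync::{
        atomic::{AtomicBool, Ordering},
        Arc, RwLock,
    },
    thread::{sleep, Builder},
    time::Duration,
};

fn spawn_ledger_progress_reporter(slot: Arc<RwLock<u64>>, exit: Arc<AtomicBool>) {
    Builder::new()
        .name("solRptLdgrStat".to_string())
        .spawn(move || {
            while !exit.load(Ordering::Relaxed) {
                // Report how far ledger replay has progressed.
                let current = *slot.read().unwrap();
                println!("processing ledger, slot: {current}");
                sleep(Duration::from_secs(2));
            }
        })
        .unwrap();
}
```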
sleep(Duration::from_secs(2)); - } - }); + let _ = Builder::new() + .name("solRptLdgrStat".to_string()) + .spawn(move || { + while !exit.load(Ordering::Relaxed) { + let slot = bank_forks.read().unwrap().working_bank().slot(); + *start_progress.write().unwrap() = + ValidatorStartProgress::ProcessingLedger { slot, max_slot }; + sleep(Duration::from_secs(2)); + } + }) + .unwrap(); } - blockstore_processor::process_blockstore_from_root( + if let Err(e) = blockstore_processor::process_blockstore_from_root( self.blockstore, self.bank_forks, self.leader_schedule_cache, @@ -1645,11 +1638,9 @@ impl<'a> ProcessBlockStore<'a> { self.transaction_status_sender, self.cache_block_meta_sender.as_ref(), &self.accounts_background_request_sender, - ) - .unwrap_or_else(|err| { - error!("Failed to load ledger: {:?}", err); - abort() - }); + ) { + return Err(format!("Failed to load ledger: {:?}", e)); + } exit.store(true, Ordering::Relaxed); @@ -1661,15 +1652,16 @@ impl<'a> ProcessBlockStore<'a> { let restored_tower = Tower::restore(self.config.tower_storage.as_ref(), self.id); if let Ok(tower) = &restored_tower { // reconciliation attempt 1 of 2 with tower - reconcile_blockstore_roots_with_external_source( + if let Err(e) = reconcile_blockstore_roots_with_external_source( ExternalRootSource::Tower(tower.root()), self.blockstore, &mut self.original_blockstore_root, - ) - .unwrap_or_else(|err| { - error!("Failed to reconcile blockstore with tower: {:?}", err); - abort() - }); + ) { + return Err(format!( + "Failed to reconcile blockstore with tower: {:?}", + e + )); + } } post_process_restored_tower( @@ -1678,7 +1670,7 @@ impl<'a> ProcessBlockStore<'a> { self.vote_account, self.config, &self.bank_forks.read().unwrap(), - ) + )? }); if let Some(hard_fork_restart_slot) = maybe_cluster_restart_with_hard_fork( @@ -1687,24 +1679,26 @@ impl<'a> ProcessBlockStore<'a> { ) { // reconciliation attempt 2 of 2 with hard fork // this should be #2 because hard fork root > tower root in almost all cases - reconcile_blockstore_roots_with_external_source( + if let Err(e) = reconcile_blockstore_roots_with_external_source( ExternalRootSource::HardFork(hard_fork_restart_slot), self.blockstore, &mut self.original_blockstore_root, - ) - .unwrap_or_else(|err| { - error!("Failed to reconcile blockstore with hard fork: {:?}", err); - abort() - }); + ) { + return Err(format!( + "Failed to reconcile blockstore with hard fork: {:?}", + e + )); + } } *self.start_progress.write().unwrap() = previous_start_process; } + Ok(()) } - pub(crate) fn process_to_create_tower(mut self) -> Tower { - self.process(); - self.tower.unwrap() + pub(crate) fn process_to_create_tower(mut self) -> Result { + self.process()?; + Ok(self.tower.unwrap()) } } @@ -1714,26 +1708,25 @@ fn maybe_warp_slot( ledger_path: &Path, bank_forks: &RwLock, leader_schedule_cache: &LeaderScheduleCache, -) { +) -> Result<(), String> { if let Some(warp_slot) = config.warp_slot { - let snapshot_config = config.snapshot_config.as_ref().unwrap_or_else(|| { - error!("warp slot requires a snapshot config"); - abort(); - }); + let snapshot_config = match config.snapshot_config.as_ref() { + Some(config) => config, + None => return Err("warp slot requires a snapshot config".to_owned()), + }; - process_blockstore.process(); + process_blockstore.process()?; let mut bank_forks = bank_forks.write().unwrap(); let working_bank = bank_forks.working_bank(); if warp_slot <= working_bank.slot() { - error!( + return Err(format!( "warp slot ({}) cannot be less than the working bank slot ({})", warp_slot, 
                working_bank.slot()
-            );
-            abort();
+            ));
         }

         info!("warping to slot {}", warp_slot);
@@ -1750,7 +1743,7 @@ fn maybe_warp_slot(
         );
         leader_schedule_cache.set_root(&bank_forks.root_bank());

-        let full_snapshot_archive_info = snapshot_utils::bank_to_full_snapshot_archive(
+        let full_snapshot_archive_info = match snapshot_utils::bank_to_full_snapshot_archive(
             ledger_path,
             &bank_forks.root_bank(),
             None,
@@ -1759,16 +1752,16 @@ fn maybe_warp_slot(
             snapshot_config.archive_format,
             snapshot_config.maximum_full_snapshot_archives_to_retain,
             snapshot_config.maximum_incremental_snapshot_archives_to_retain,
-        )
-        .unwrap_or_else(|err| {
-            error!("Unable to create snapshot: {}", err);
-            abort();
-        });
+        ) {
+            Ok(archive_info) => archive_info,
+            Err(e) => return Err(format!("Unable to create snapshot: {}", e)),
+        };
         info!(
             "created snapshot: {}",
             full_snapshot_archive_info.path().display()
         );
     }
+    Ok(())
 }

 fn blockstore_contains_bad_shred_version(
@@ -1888,6 +1881,7 @@ fn initialize_rpc_transaction_history_services(
 enum ValidatorError {
     BadExpectedBankHash,
     NotEnoughLedgerData,
+    Error(String),
 }

 // Return if the validator waited on other nodes to start. In this case
@@ -1906,20 +1900,22 @@ fn wait_for_supermajority(
 ) -> Result<bool, ValidatorError> {
     match config.wait_for_supermajority {
         None => Ok(false),
-        Some(wait_for_supermajority) => {
+        Some(wait_for_supermajority_slot) => {
             if let Some(process_blockstore) = process_blockstore {
-                process_blockstore.process();
+                process_blockstore
+                    .process()
+                    .map_err(ValidatorError::Error)?;
             }

             let bank = bank_forks.read().unwrap().working_bank();
-            match wait_for_supermajority.cmp(&bank.slot()) {
+            match wait_for_supermajority_slot.cmp(&bank.slot()) {
                 std::cmp::Ordering::Less => return Ok(false),
                 std::cmp::Ordering::Greater => {
                     error!(
                         "Ledger does not have enough data to wait for supermajority, \
                          please enable snapshot fetch. Has {} needs {}",
                         bank.slot(),
-                        wait_for_supermajority
+                        wait_for_supermajority_slot
                     );
                     return Err(ValidatorError::NotEnoughLedgerData);
                 }
@@ -1937,7 +1933,6 @@ fn wait_for_supermajority(
             }
         }

-        *start_progress.write().unwrap() = ValidatorStartProgress::WaitingForSupermajority;
         for i in 1.. {
             if i % 10 == 1 {
                 info!(
@@ -1950,6 +1945,12 @@ fn wait_for_supermajority(
             let gossip_stake_percent = get_stake_percent_in_gossip(&bank, cluster_info, i % 10 == 0);

+            *start_progress.write().unwrap() =
+                ValidatorStartProgress::WaitingForSupermajority {
+                    slot: wait_for_supermajority_slot,
+                    gossip_stake_percent,
+                };
+
             if gossip_stake_percent >= WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT {
                 info!(
                     "Supermajority reached, {}% active stake detected, starting up now.",
@@ -2063,13 +2064,54 @@ fn get_stake_percent_in_gossip(bank: &Bank, cluster_info: &ClusterInfo, log: boo
     online_stake_percentage as u64
 }

-// Cleanup anything that looks like an accounts append-vec
-fn cleanup_accounts_path(account_path: &std::path::Path) {
-    if let Err(e) = std::fs::remove_dir_all(account_path) {
+/// Delete directories/files asynchronously to avoid blocking on it.
+/// First, in a sync context, rename the original path to *_to_be_deleted,
+/// then spawn a thread to delete the renamed path.
+/// If the process is killed before the deletion is done,
+/// the leftover path will be deleted in the next process life, so
+/// no disk space is leaked.
+fn move_and_async_delete_path(path: impl AsRef + Copy) { + let mut path_delete = PathBuf::new(); + path_delete.push(path); + path_delete.set_file_name(format!( + "{}{}", + path_delete.file_name().unwrap().to_str().unwrap(), + "_to_be_deleted" + )); + + if path_delete.exists() { + std::fs::remove_dir_all(&path_delete).unwrap(); + } + + if !path.as_ref().exists() { + return; + } + + if let Err(err) = std::fs::rename(&path, &path_delete) { warn!( - "encountered error removing accounts path: {:?}: {}", - account_path, e + "Path renaming failed: {}. Falling back to rm_dir in sync mode", + err.to_string() ); + std::fs::remove_dir_all(&path).unwrap(); + return; + } + + Builder::new() + .name("solDeletePath".to_string()) + .spawn(move || { + std::fs::remove_dir_all(&path_delete).unwrap(); + }) + .unwrap(); +} + +fn cleanup_accounts_paths(config: &ValidatorConfig) { + for accounts_path in &config.account_paths { + move_and_async_delete_path(accounts_path); + } + if let Some(ref shrink_paths) = config.account_shrink_paths { + for accounts_path in shrink_paths { + move_and_async_delete_path(accounts_path); + } } } @@ -2139,7 +2181,8 @@ mod tests { SocketAddrSpace::Unspecified, DEFAULT_TPU_USE_QUIC, DEFAULT_TPU_CONNECTION_POOL_SIZE, - ); + ) + .expect("assume successful validator start"); assert_eq!( *start_progress.read().unwrap(), ValidatorStartProgress::Running @@ -2223,6 +2266,7 @@ mod tests { DEFAULT_TPU_USE_QUIC, DEFAULT_TPU_CONNECTION_POOL_SIZE, ) + .expect("assume successful validator start") }) .collect(); @@ -2362,7 +2406,6 @@ mod tests { } #[test] - #[should_panic] fn test_poh_speed() { solana_logger::setup(); let poh_config = PohConfig { @@ -2375,7 +2418,7 @@ mod tests { poh_config, ..GenesisConfig::default() }; - check_poh_speed(&genesis_config, Some(10_000)); + assert!(check_poh_speed(&genesis_config, Some(10_000)).is_err()); } #[test] @@ -2389,6 +2432,6 @@ mod tests { poh_config, ..GenesisConfig::default() }; - check_poh_speed(&genesis_config, Some(10_000)); + check_poh_speed(&genesis_config, Some(10_000)).unwrap(); } } diff --git a/core/src/voting_service.rs b/core/src/voting_service.rs index 29cf4699dd..cbd53a1c3b 100644 --- a/core/src/voting_service.rs +++ b/core/src/voting_service.rs @@ -46,7 +46,7 @@ impl VotingService { bank_forks: Arc>, ) -> Self { let thread_hdl = Builder::new() - .name("sol-vote-service".to_string()) + .name("solVoteService".to_string()) .spawn(move || { for vote_op in vote_receiver.iter() { let rooted_bank = bank_forks.read().unwrap().root_bank().clone(); diff --git a/core/src/warm_quic_cache_service.rs b/core/src/warm_quic_cache_service.rs index 2632d03101..08428d5898 100644 --- a/core/src/warm_quic_cache_service.rs +++ b/core/src/warm_quic_cache_service.rs @@ -32,7 +32,7 @@ impl WarmQuicCacheService { exit: Arc, ) -> Self { let thread_hdl = Builder::new() - .name("sol-warm-quic-service".to_string()) + .name("solWarmQuicSvc".to_string()) .spawn(move || { let slot_jitter = thread_rng().gen_range(-CACHE_JITTER_SLOT, CACHE_JITTER_SLOT); let mut maybe_last_leader = None; diff --git a/core/src/window_service.rs b/core/src/window_service.rs index da4cbcb445..2f7983bd86 100644 --- a/core/src/window_service.rs +++ b/core/src/window_service.rs @@ -375,7 +375,7 @@ impl WindowService { inc_new_counter_error!("solana-check-duplicate-error", 1, 1); }; Builder::new() - .name("solana-check-duplicate".to_string()) + .name("solWinCheckDup".to_string()) .spawn(move || { while !exit.load(Ordering::Relaxed) { if let Err(e) = run_check_duplicate( @@ -408,11 +408,11 @@ impl 
WindowService { }; let thread_pool = rayon::ThreadPoolBuilder::new() .num_threads(get_thread_count().min(8)) - .thread_name(|i| format!("window-insert-{}", i)) + .thread_name(|i| format!("solWinInsert{:02}", i)) .build() .unwrap(); Builder::new() - .name("solana-window-insert".to_string()) + .name("solWinInsert".to_string()) .spawn(move || { let handle_duplicate = |shred| { let _ = check_duplicate_sender.send(shred); diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index 3dcc004a53..6e3b7869fa 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -256,6 +256,7 @@ fn run_bank_forks_snapshot_n( accounts_package.snapshot_links.path(), accounts_package.slot, &last_bank.get_accounts_hash(), + None, ); let snapshot_package = SnapshotPackage::new(accounts_package, last_bank.get_accounts_hash()); snapshot_utils::archive_snapshot_package( @@ -491,6 +492,7 @@ fn test_concurrent_snapshot_packaging( accounts_package.snapshot_links.path(), accounts_package.slot, &Hash::default(), + None, ); let snapshot_package = SnapshotPackage::new(accounts_package, Hash::default()); pending_snapshot_package @@ -534,6 +536,7 @@ fn test_concurrent_snapshot_packaging( saved_snapshots_dir.path(), saved_slot, &Hash::default(), + None, ); snapshot_utils::verify_snapshot_archive( diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js index 0735f62839..fd7bdac977 100644 --- a/docs/docusaurus.config.js +++ b/docs/docusaurus.config.js @@ -53,42 +53,62 @@ module.exports = { }, items: [ { - href: "https://spl.solana.com", - label: "Program Library »", + to: "introduction", + label: "Learn", position: "left", }, { - to: "developing/programming-model/overview", - label: "Develop", + to: "cluster/overview", + label: "Architecture", position: "left", }, { - to: "running-validator", - label: "Validate", + to: "cli", + label: "CLI", position: "left", }, { - to: "integrations/exchange", - label: "Integrate", + to: "developing/programming-model/overview", + label: "Developers", position: "left", }, { - to: "cluster/overview", - label: "Learn", + to: "running-validator", + label: "Validators", position: "left", }, + { + label: "More", + position: "left", + items: [ + { label: "Terminology", to: "terminology" }, + { label: "Staking", to: "staking" }, + { label: "Integrations", to: "integrations/exchange" }, + { label: "Economics", to: "economics_overview" }, + { label: "Proposals", to: "proposals" }, + { + href: "https://spl.solana.com", + label: "Solana Program Library »", + position: "left", + }, + ], + }, { type: "localeDropdown", position: "right", }, { href: "https://discordapp.com/invite/pquxPsq", - label: "Chat", + // label: "Discord", + className: "header-link-icon header-discord-link", + "aria-label": "Solana Discord", position: "right", }, { href: "https://github.com/solana-labs/solana", - label: "GitHub", + // label: "GitHub", + className: "header-link-icon header-github-link", + "aria-label": "GitHub repository", position: "right", }, ], @@ -103,37 +123,69 @@ module.exports = { style: "dark", links: [ { - title: "Docs", + title: "Documentation", items: [ { - label: "Introduction", + label: "Learn", to: "introduction", }, + { + label: "Developers", + to: "developing/programming-model/overview", + }, + { + label: "Validators", + to: "running-validator", + }, + { + label: "Command Line", + to: "cli", + }, + { + label: "Architecture", + to: "cluster/overview", + }, ], }, { title: "Community", items: [ { - label: "Discord", + label: "Stack Exchange »", + href: 
"https://solana.stackexchange.com/", + }, + { + label: "GitHub »", + href: "https://github.com/solana-labs/solana", + }, + { + label: "Discord »", href: "https://discordapp.com/invite/pquxPsq", }, { - label: "Twitter", + label: "Twitter »", href: "https://twitter.com/solana", }, { - label: "Forums", + label: "Forums »", href: "https://forums.solana.com", }, ], }, { - title: "More", + title: "Resources", items: [ { - label: "GitHub", - href: "https://github.com/solana-labs/solana", + label: "Proposals", + to: "proposals", + }, + { + label: "Integrations", + to: "integrations/exchange", + }, + { + href: "https://spl.solana.com", + label: "Solana Program Library »", }, ], }, diff --git a/docs/sidebars.js b/docs/sidebars.js index 904bcb8f15..190db55d9c 100644 --- a/docs/sidebars.js +++ b/docs/sidebars.js @@ -1,203 +1,307 @@ module.exports = { - docs: { - About: ["introduction", "terminology", "history"], - Wallets: [ - "wallet-guide", - { - type: "category", - label: "Command-line Wallets", - items: [ - "wallet-guide/cli", - "wallet-guide/paper-wallet", - { - type: "category", - label: "Hardware Wallets", - items: [ - "wallet-guide/hardware-wallets", - "wallet-guide/hardware-wallets/ledger", - ], - }, - "wallet-guide/file-system-wallet", - ], - }, - "wallet-guide/support", - ], - Staking: ["staking", "staking/stake-accounts"], - "Command Line": [ - "cli", - "cli/install-solana-cli-tools", - "cli/conventions", - "cli/choose-a-cluster", - "cli/transfer-tokens", - "cli/delegate-stake", - "cli/deploy-a-program", - "offline-signing", - "offline-signing/durable-nonce", - "cli/usage", - ], - Developing: [ - { - type: "category", - label: "Programming Model", - items: [ - "developing/programming-model/overview", - "developing/programming-model/transactions", - "developing/programming-model/accounts", - "developing/programming-model/runtime", - "developing/programming-model/calling-between-programs", - ], - }, - { - type: "category", - label: "Clients", - items: [ - "developing/clients/jsonrpc-api", - "developing/clients/javascript-api", - "developing/clients/javascript-reference", - "developing/clients/rust-api", - ], - }, - { - type: "category", - label: "Runtime Facilities", - items: [ - "developing/runtime-facilities/programs", - "developing/runtime-facilities/sysvars", - ], - }, - { - type: "category", - label: "On-chain Programs", - items: [ - "developing/on-chain-programs/overview", - "developing/on-chain-programs/developing-rust", - "developing/on-chain-programs/developing-c", - "developing/on-chain-programs/deploying", - "developing/on-chain-programs/debugging", - "developing/on-chain-programs/examples", - "developing/on-chain-programs/faq", - ], - }, - "developing/test-validator", - "developing/backwards-compatibility", - "developing/plugins/geyser-plugins", - ], - Integrating: [ - "integrations/exchange", - "integrations/retrying-transactions", - ], - Validating: [ - "running-validator", - "running-validator/validator-reqs", - "running-validator/validator-start", - "running-validator/vote-accounts", - "running-validator/validator-stake", - "running-validator/validator-monitor", - "running-validator/validator-info", - "running-validator/validator-failover", - "running-validator/validator-troubleshoot", - ], - Clusters: [ - "clusters", - "cluster/rpc-endpoints", - "cluster/bench-tps", - "cluster/performance-metrics", - ], - Architecture: [ - { - type: "category", - label: "Cluster", - items: [ - "cluster/overview", - "cluster/synchronization", - "cluster/leader-rotation", - 
"cluster/fork-generation", - "cluster/managing-forks", - "cluster/turbine-block-propagation", - "cluster/vote-signing", - "cluster/stake-delegation-and-rewards", - ], - }, - { - type: "category", - label: "Validator", - items: [ - "validator/anatomy", - "validator/tpu", - "validator/tvu", - "validator/blockstore", - "validator/gossip", - "validator/runtime", - ], - }, - ], - Economics: [ - "economics_overview", - { - type: "category", - label: "Inflation Design", - items: [ - "inflation/terminology", - "inflation/inflation_schedule", - "inflation/adjusted_staking_yield", - ], - }, - "transaction_fees", - "storage_rent_economics", - ], - "Design Proposals": [ - { - type: "category", - label: "Implemented", - items: [ - "implemented-proposals/implemented-proposals", - "implemented-proposals/abi-management", - "implemented-proposals/bank-timestamp-correction", - "implemented-proposals/commitment", - "implemented-proposals/durable-tx-nonces", - "implemented-proposals/installer", - "implemented-proposals/instruction_introspection", - "implemented-proposals/leader-leader-transition", - "implemented-proposals/leader-validator-transition", - "implemented-proposals/persistent-account-storage", - "implemented-proposals/readonly-accounts", - "implemented-proposals/reliable-vote-transmission", - "implemented-proposals/rent", - "implemented-proposals/repair-service", - "implemented-proposals/rpc-transaction-history", - "implemented-proposals/snapshot-verification", - "implemented-proposals/staking-rewards", - "implemented-proposals/testing-programs", - "implemented-proposals/tower-bft", - "implemented-proposals/transaction-fees", - "implemented-proposals/validator-timestamp-oracle", - ], - }, - { - type: "category", - label: "Accepted", - items: [ - "proposals/accepted-design-proposals", - "proposals/bankless-leader", - "proposals/block-confirmation", - "proposals/cluster-test-framework", - "proposals/embedding-move", - "proposals/handle-duplicate-block", - "proposals/interchain-transaction-verification", - "proposals/ledger-replication-to-implement", - "proposals/optimistic-confirmation-and-slashing", - "proposals/optimistic_confirmation", - "proposals/rip-curl", - "proposals/rust-clients", - "proposals/simple-payment-and-state-verification", - "proposals/slashing", - "proposals/snapshot-verification", - "proposals/tick-verification", - "proposals/transactions-v2", - "proposals/validator-proposal", - "proposals/vote-signing-to-implement", - ], - }, - ], - }, + introdutionSidebar: [ + { + type: "category", + collapsed: false, + label: "Introduction to Solana", + items: [ + { + type: "doc", + id: "introduction", + label: "What is Solana?", + }, + // This will be the future home for the economics overview page + // { + // type: "doc", + // id: "economics_overview", + // label: "How do the economics work?", + // }, + { + type: "doc", + id: "history", + label: "History of Solana", + }, + ], + }, + { + type: "category", + collapsed: false, + label: "Getting started with Solana", + items: [ + { + type: "doc", + id: "wallet-guide", + label: "Wallets", + }, + // This will be the future home of the `staking` page, with the introductory info on what staking on Solana looks like + // { + // type: "doc", + // id: "staking", + // label: "Staking", + // }, + ], + }, + { + type: "category", + collapsed: false, + label: "Dive into Solana", + items: [ + "terminology", + { + type: "ref", + label: "Developers", + id: "developing/programming-model/overview", + }, + { + type: "ref", + label: "Validators", + id: 
"running-validator", + }, + { + type: "ref", + label: "Command Line", + id: "cli", + }, + { + type: "ref", + label: "Economics", + id: "economics_overview", + }, + { + type: "ref", + label: "Proposals", + id: "proposals", + }, + ], + }, + ], + developingSidebar: [ + { + type: "doc", + id: "developing/programming-model/overview", + label: "Overview", + }, + { + type: "category", + label: "Core Concepts", + // collapsed: false, + items: [ + "developing/programming-model/transactions", + "developing/programming-model/accounts", + "developing/programming-model/calling-between-programs", + "developing/programming-model/runtime", + ], + }, + { + type: "category", + label: "Clients", + items: [ + "developing/clients/jsonrpc-api", + "developing/clients/javascript-api", + "developing/clients/javascript-reference", + "developing/clients/rust-api", + ], + }, + { + type: "category", + label: "Writing Programs", + items: [ + "developing/on-chain-programs/overview", + "developing/on-chain-programs/developing-rust", + "developing/on-chain-programs/developing-c", + { + type: "doc", + label: "Deploying", + id: "developing/on-chain-programs/deploying", + }, + { + type: "doc", + label: "Debugging", + id: "developing/on-chain-programs/debugging", + }, + "developing/on-chain-programs/examples", + "developing/on-chain-programs/faq", + ], + }, + { + type: "category", + label: "Native Programs", + items: [ + { + type: "doc", + label: "Overview", + id: "developing/runtime-facilities/programs", + }, + "developing/runtime-facilities/sysvars", + ], + }, + { + type: "category", + label: "Local Development", + collapsed: false, + items: ["developing/test-validator"], + }, + "developing/backwards-compatibility", + ], + validatorsSidebar: [ + "running-validator", + { + type: "category", + label: "Getting Started", + collapsed: false, + items: ["running-validator/validator-reqs"], + }, + { + type: "category", + label: "Voting Setup", + collapsed: false, + items: [ + "running-validator/validator-start", + "running-validator/vote-accounts", + "running-validator/validator-stake", + "running-validator/validator-monitor", + "running-validator/validator-info", + "running-validator/validator-failover", + "running-validator/validator-troubleshoot", + ], + }, + { + type: "category", + label: "Geyser", + collapsed: false, + items: ["developing/plugins/geyser-plugins"], + }, + ], + cliSidebar: [ + "cli", + "cli/install-solana-cli-tools", + "cli/install-solana-cli-tools", + { + type: "category", + label: "Command-line Wallets", + items: [ + "wallet-guide/cli", + "wallet-guide/paper-wallet", + { + type: "category", + label: "Hardware Wallets", + items: [ + "wallet-guide/hardware-wallets", + "wallet-guide/hardware-wallets/ledger", + ], + }, + "wallet-guide/file-system-wallet", + "wallet-guide/support", + ], + }, + "cli/conventions", + "cli/choose-a-cluster", + "cli/transfer-tokens", + "cli/delegate-stake", + "cli/deploy-a-program", + "offline-signing", + "offline-signing/durable-nonce", + "cli/usage", + ], + architectureSidebar: [ + { + type: "doc", + label: "What is a Solana Cluster?", + id: "cluster/overview", + }, + { + type: "category", + label: "Clusters", + collapsed: false, + items: [ + "clusters", + { + type: "doc", + label: "RPC Endpoints", + id: "cluster/rpc-endpoints", + }, + "cluster/bench-tps", + "cluster/performance-metrics", + ], + }, + { + type: "category", + label: "Consensus", + collapsed: false, + items: [ + "cluster/synchronization", + "cluster/leader-rotation", + "cluster/fork-generation", + 
"cluster/managing-forks", + "cluster/turbine-block-propagation", + "cluster/vote-signing", + "cluster/stake-delegation-and-rewards", + ], + }, + { + type: "category", + label: "Validators", + collapsed: false, + items: [ + { + type: "doc", + label: "Overview", + id: "validator/anatomy", + }, + "validator/tpu", + "validator/tvu", + "validator/blockstore", + "validator/gossip", + "validator/runtime", + ], + }, + ], + "Design Proposals": [ + "proposals", + { + type: "category", + label: "Accepted Proposals", + collapsed: true, + items: [ + { + type: "autogenerated", + dirName: "proposals", + }, + ], + }, + { + type: "category", + label: "Implemented Proposals", + collapsed: true, + items: [ + { + type: "autogenerated", + dirName: "implemented-proposals", + }, + ], + }, + ], + stakingSidebar: ["staking", "staking/stake-accounts"], + integratingSidebar: [ + "integrations/exchange", + "integrations/retrying-transactions", + ], + economicsSidebar: [ + { + type: "doc", + id: "economics_overview", + // label: "How do the economics work?", + }, + { + type: "category", + label: "Inflation Design", + items: [ + "inflation/terminology", + "inflation/inflation_schedule", + "inflation/adjusted_staking_yield", + ], + }, + "transaction_fees", + "storage_rent_economics", + ], }; diff --git a/docs/src/css/custom.css b/docs/src/css/custom.css index 39cf391344..4a3ff08fd4 100644 --- a/docs/src/css/custom.css +++ b/docs/src/css/custom.css @@ -7,7 +7,7 @@ /* You can override the default Infima variables here. */ -@import url('https://fonts.googleapis.com/css2?family=Roboto'); +@import url("https://fonts.googleapis.com/css2?family=Roboto"); :root { --ifm-color-primary: #25c2a0; @@ -19,15 +19,18 @@ --ifm-color-primary-lightest: #abd5c6; --ifm-code-font-size: 95%; --ifm-spacing-horizontal: 1em; - --ifm-font-family-base: "Roboto", system-ui, -apple-system, Segoe UI, Roboto, Ubuntu, Cantarell, Noto Sans, sans-serif, BlinkMacSystemFont, 'Segoe UI', Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol'; + --ifm-font-family-base: "Roboto", system-ui, -apple-system, Segoe UI, Roboto, + Ubuntu, Cantarell, Noto Sans, sans-serif, BlinkMacSystemFont, "Segoe UI", + Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", + "Segoe UI Symbol"; --ifm-footer-background-color: #232323; } - - - @keyframes fadeInUp { - 0% { opacity: 0; transform: translateY(1.5rem); } + 0% { + opacity: 0; + transform: translateY(1.5rem); + } } main { @@ -48,10 +51,9 @@ main { animation-delay: 150ms; transition-property: all; transition-duration: 200ms; - box-shadow: 0 8px 28px 4px rgba(86,91,115,0.15); + box-shadow: 0 8px 28px 4px rgba(86, 91, 115, 0.15); } - .card a { text-decoration: none; } @@ -69,5 +71,32 @@ footer .text--center { } .card__header h3 { - color: #1DD79B; -} \ No newline at end of file + color: #1dd79b; +} + +.header-link-icon:before { + content: ""; + display: flex; + height: 24px; + width: 24px; + background-position: center center; +} +.header-link-icon { + padding: 0.4em !important; +} +[data-theme="dark"] .header-github-link:before { + background: url("data:image/svg+xml;charset=utf-8,%3Csvg viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath fill='%23fff' d='M12 .297c-6.63 0-12 5.373-12 12 0 5.303 3.438 9.8 8.205 11.385.6.113.82-.258.82-.577 0-.285-.01-1.04-.015-2.04-3.338.724-4.042-1.61-4.042-1.61C4.422 18.07 3.633 17.7 3.633 17.7c-1.087-.744.084-.729.084-.729 1.205.084 1.838 1.236 1.838 1.236 1.07 1.835 2.809 1.305 
3.495.998.108-.776.417-1.305.76-1.605-2.665-.3-5.466-1.332-5.466-5.93 0-1.31.465-2.38 1.235-3.22-.135-.303-.54-1.523.105-3.176 0 0 1.005-.322 3.3 1.23.96-.267 1.98-.399 3-.405 1.02.006 2.04.138 3 .405 2.28-1.552 3.285-1.23 3.285-1.23.645 1.653.24 2.873.12 3.176.765.84 1.23 1.91 1.23 3.22 0 4.61-2.805 5.625-5.475 5.92.42.36.81 1.096.81 2.22 0 1.606-.015 2.896-.015 3.286 0 .315.21.69.825.57C20.565 22.092 24 17.592 24 12.297c0-6.627-5.373-12-12-12'/%3E%3C/svg%3E") + no-repeat; +} +.header-github-link:before { + background: url("data:image/svg+xml;charset=utf-8,%3Csvg viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M12 .297c-6.63 0-12 5.373-12 12 0 5.303 3.438 9.8 8.205 11.385.6.113.82-.258.82-.577 0-.285-.01-1.04-.015-2.04-3.338.724-4.042-1.61-4.042-1.61C4.422 18.07 3.633 17.7 3.633 17.7c-1.087-.744.084-.729.084-.729 1.205.084 1.838 1.236 1.838 1.236 1.07 1.835 2.809 1.305 3.495.998.108-.776.417-1.305.76-1.605-2.665-.3-5.466-1.332-5.466-5.93 0-1.31.465-2.38 1.235-3.22-.135-.303-.54-1.523.105-3.176 0 0 1.005-.322 3.3 1.23.96-.267 1.98-.399 3-.405 1.02.006 2.04.138 3 .405 2.28-1.552 3.285-1.23 3.285-1.23.645 1.653.24 2.873.12 3.176.765.84 1.23 1.91 1.23 3.22 0 4.61-2.805 5.625-5.475 5.92.42.36.81 1.096.81 2.22 0 1.606-.015 2.896-.015 3.286 0 .315.21.69.825.57C20.565 22.092 24 17.592 24 12.297c0-6.627-5.373-12-12-12'/%3E%3C/svg%3E") + no-repeat; +} +[data-theme="dark"] .header-discord-link:before { + background: url("data:image/svg+xml;charset=utf-8,%3Csvg viewBox='0 0 640 512' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath fill='%23fff' d='M524.531,69.836a1.5,1.5,0,0,0-.764-.7A485.065,485.065,0,0,0,404.081,32.03a1.816,1.816,0,0,0-1.923.91,337.461,337.461,0,0,0-14.9,30.6,447.848,447.848,0,0,0-134.426,0,309.541,309.541,0,0,0-15.135-30.6,1.89,1.89,0,0,0-1.924-.91A483.689,483.689,0,0,0,116.085,69.137a1.712,1.712,0,0,0-.788.676C39.068,183.651,18.186,294.69,28.43,404.354a2.016,2.016,0,0,0,.765,1.375A487.666,487.666,0,0,0,176.02,479.918a1.9,1.9,0,0,0,2.063-.676A348.2,348.2,0,0,0,208.12,430.4a1.86,1.86,0,0,0-1.019-2.588,321.173,321.173,0,0,1-45.868-21.853,1.885,1.885,0,0,1-.185-3.126c3.082-2.309,6.166-4.711,9.109-7.137a1.819,1.819,0,0,1,1.9-.256c96.229,43.917,200.41,43.917,295.5,0a1.812,1.812,0,0,1,1.924.233c2.944,2.426,6.027,4.851,9.132,7.16a1.884,1.884,0,0,1-.162,3.126,301.407,301.407,0,0,1-45.89,21.83,1.875,1.875,0,0,0-1,2.611,391.055,391.055,0,0,0,30.014,48.815,1.864,1.864,0,0,0,2.063.7A486.048,486.048,0,0,0,610.7,405.729a1.882,1.882,0,0,0,.765-1.352C623.729,277.594,590.933,167.465,524.531,69.836ZM222.491,337.58c-28.972,0-52.844-26.587-52.844-59.239S193.056,219.1,222.491,219.1c29.665,0,53.306,26.82,52.843,59.239C275.334,310.993,251.924,337.58,222.491,337.58Zm195.38,0c-28.971,0-52.843-26.587-52.843-59.239S388.437,219.1,417.871,219.1c29.667,0,53.307,26.82,52.844,59.239C470.715,310.993,447.538,337.58,417.871,337.58Z'/%3E%3C/svg%3E") + no-repeat center; +} +.header-discord-link:before { + background: url("data:image/svg+xml;charset=utf-8,%3Csvg viewBox='0 0 640 512' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath 
d='M524.531,69.836a1.5,1.5,0,0,0-.764-.7A485.065,485.065,0,0,0,404.081,32.03a1.816,1.816,0,0,0-1.923.91,337.461,337.461,0,0,0-14.9,30.6,447.848,447.848,0,0,0-134.426,0,309.541,309.541,0,0,0-15.135-30.6,1.89,1.89,0,0,0-1.924-.91A483.689,483.689,0,0,0,116.085,69.137a1.712,1.712,0,0,0-.788.676C39.068,183.651,18.186,294.69,28.43,404.354a2.016,2.016,0,0,0,.765,1.375A487.666,487.666,0,0,0,176.02,479.918a1.9,1.9,0,0,0,2.063-.676A348.2,348.2,0,0,0,208.12,430.4a1.86,1.86,0,0,0-1.019-2.588,321.173,321.173,0,0,1-45.868-21.853,1.885,1.885,0,0,1-.185-3.126c3.082-2.309,6.166-4.711,9.109-7.137a1.819,1.819,0,0,1,1.9-.256c96.229,43.917,200.41,43.917,295.5,0a1.812,1.812,0,0,1,1.924.233c2.944,2.426,6.027,4.851,9.132,7.16a1.884,1.884,0,0,1-.162,3.126,301.407,301.407,0,0,1-45.89,21.83,1.875,1.875,0,0,0-1,2.611,391.055,391.055,0,0,0,30.014,48.815,1.864,1.864,0,0,0,2.063.7A486.048,486.048,0,0,0,610.7,405.729a1.882,1.882,0,0,0,.765-1.352C623.729,277.594,590.933,167.465,524.531,69.836ZM222.491,337.58c-28.972,0-52.844-26.587-52.844-59.239S193.056,219.1,222.491,219.1c29.665,0,53.306,26.82,52.843,59.239C275.334,310.993,251.924,337.58,222.491,337.58Zm195.38,0c-28.971,0-52.843-26.587-52.843-59.239S388.437,219.1,417.871,219.1c29.667,0,53.307,26.82,52.844,59.239C470.715,310.993,447.538,337.58,417.871,337.58Z'/%3E%3C/svg%3E") + no-repeat center; +} diff --git a/docs/src/developing/clients/jsonrpc-api.md b/docs/src/developing/clients/jsonrpc-api.md index f8b3cf474e..7f91201f2d 100644 --- a/docs/src/developing/clients/jsonrpc-api.md +++ b/docs/src/developing/clients/jsonrpc-api.md @@ -205,6 +205,28 @@ Many methods that take a commitment parameter return an RpcResponse JSON object - `context` : An RpcResponseContext JSON structure including a `slot` field at which the operation was evaluated. - `value` : The value returned by the operation itself. +#### Parsed Responses + +Some methods support an `encoding` parameter, and can return account or +instruction data in parsed JSON format if `"encoding":"jsonParsed"` is requested +and the node has a parser for the owning program. Solana nodes currently support +JSON parsing for the following native and SPL programs: + +| Program | Account State | Instructions | +| --- | --- | --- | +| Address Lookup | v1.12.0 | | +| BPF Loader | n/a | stable | +| BPF Upgradeable Loader | stable | stable | +| Config | stable | | +| SPL Associated Token Account | n/a | stable | +| SPL Memo | n/a | stable | +| SPL Token | stable | stable | +| SPL Token 2022 | stable | stable | +| Stake | stable | stable | +| Vote | stable | stable | + +The list of account parsers can be found [here](https://github.com/solana-labs/solana/blob/master/account-decoder/src/parse_account_data.rs), and instruction parsers [here](https://github.com/solana-labs/solana/blob/master/transaction-status/src/parse_instruction.rs). + ## Health Check Although not a JSON RPC API, a `GET /health` at the RPC HTTP Endpoint provides a @@ -233,7 +255,7 @@ Returns all information associated with the account of provided Pubkey "base58" is limited to Account data of less than 129 bytes. "base64" will return base64 encoded data for Account data of any size. "base64+zstd" compresses the Account data using [Zstandard](https://facebook.github.io/zstd/) and base64-encodes the result. - "jsonParsed" encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. 
If "jsonParsed" is requested but a parser cannot be found, the field falls back to "base64" encoding, detectable when the `data` field is type ``. + ["jsonParsed" encoding](jsonrpc-api.md#parsed-responses) attempts to use program-specific state parsers to return more human-readable and explicit account state data. If "jsonParsed" is requested but a parser cannot be found, the field falls back to "base64" encoding, detectable when the `data` field is type ``. - (optional) `dataSlice: ` - limit the returned account data using the provided `offset: ` and `length: ` fields; only available for "base58", "base64" or "base64+zstd" encodings. - (optional) `minContextSlot: ` - set the minimum slot that the request can be evaluated at. @@ -388,7 +410,7 @@ Returns identity and transaction information about a confirmed block in the ledg - `` - slot, as u64 integer - `` - (optional) Configuration object containing the following optional fields: - (optional) `encoding: ` - encoding for each returned Transaction, either "json", "jsonParsed", "base58" (_slow_), "base64". If parameter not provided, the default encoding is "json". - "jsonParsed" encoding attempts to use program-specific instruction parsers to return more human-readable and explicit data in the `transaction.message.instructions` list. If "jsonParsed" is requested but a parser cannot be found, the instruction falls back to regular JSON encoding (`accounts`, `data`, and `programIdIndex` fields). + ["jsonParsed" encoding](jsonrpc-api.md#parsed-responses) attempts to use program-specific instruction parsers to return more human-readable and explicit data in the `transaction.message.instructions` list. If "jsonParsed" is requested but a parser cannot be found, the instruction falls back to regular JSON encoding (`accounts`, `data`, and `programIdIndex` fields). - (optional) `transactionDetails: ` - level of transaction detail to return, either "full", "signatures", or "none". If parameter not provided, the default detail level is "full". - (optional) `rewards: bool` - whether to populate the `rewards` array. If parameter not provided, the default includes rewards. - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment); "processed" is not supported. If parameter not provided, the default is "finalized". @@ -1753,7 +1775,7 @@ Returns the account information for a list of Pubkeys. "base58" is limited to Account data of less than 129 bytes. "base64" will return base64 encoded data for Account data of any size. "base64+zstd" compresses the Account data using [Zstandard](https://facebook.github.io/zstd/) and base64-encodes the result. - "jsonParsed" encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If "jsonParsed" is requested but a parser cannot be found, the field falls back to "base64" encoding, detectable when the `data` field is type ``. + ["jsonParsed" encoding](jsonrpc-api.md#parsed-responses) attempts to use program-specific state parsers to return more human-readable and explicit account state data. If "jsonParsed" is requested but a parser cannot be found, the field falls back to "base64" encoding, detectable when the `data` field is type ``. - (optional) `dataSlice: ` - limit the returned account data using the provided `offset: ` and `length: ` fields; only available for "base58", "base64" or "base64+zstd" encodings. - (optional) `minContextSlot: ` - set the minimum slot that the request can be evaluated at. 
@@ -1896,7 +1918,7 @@ Returns all accounts owned by the provided program Pubkey
   "base58" is limited to Account data of less than 129 bytes.
   "base64" will return base64 encoded data for Account data of any size.
   "base64+zstd" compresses the Account data using [Zstandard](https://facebook.github.io/zstd/) and base64-encodes the result.
-  "jsonParsed" encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If "jsonParsed" is requested but a parser cannot be found, the field falls back to "base64" encoding, detectable when the `data` field is type `<string>`.
+  ["jsonParsed" encoding](jsonrpc-api.md#parsed-responses) attempts to use program-specific state parsers to return more human-readable and explicit account state data. If "jsonParsed" is requested but a parser cannot be found, the field falls back to "base64" encoding, detectable when the `data` field is type `<string>`.
 - (optional) `dataSlice: <object>` - limit the returned account data using the provided `offset: <usize>` and `length: <usize>` fields; only available for "base58", "base64" or "base64+zstd" encodings.
 - (optional) `filters: <array>` - filter results using up to 4 [filter objects](jsonrpc-api.md#filters); account must meet all filter criteria to be included in results
 - (optional) `withContext: bool` - wrap the result in an RpcResponse JSON object.
@@ -2612,7 +2634,7 @@ Returns all SPL Token accounts by approved Delegate.
   "base58" is limited to Account data of less than 129 bytes.
   "base64" will return base64 encoded data for Account data of any size.
   "base64+zstd" compresses the Account data using [Zstandard](https://facebook.github.io/zstd/) and base64-encodes the result.
-  "jsonParsed" encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If "jsonParsed" is requested but a parser cannot be found, the field falls back to "base64" encoding, detectable when the `data` field is type `<string>`.
+  ["jsonParsed" encoding](jsonrpc-api.md#parsed-responses) attempts to use program-specific state parsers to return more human-readable and explicit account state data. If "jsonParsed" is requested but a parser cannot be found, the field falls back to "base64" encoding, detectable when the `data` field is type `<string>`.
 - (optional) `dataSlice: <object>` - limit the returned account data using the provided `offset: <usize>` and `length: <usize>` fields; only available for "base58", "base64" or "base64+zstd" encodings.
 - (optional) `minContextSlot: <number>` - set the minimum slot that the request can be evaluated at.
@@ -2718,7 +2740,7 @@ Returns all SPL Token accounts by token owner.
   "base58" is limited to Account data of less than 129 bytes.
   "base64" will return base64 encoded data for Account data of any size.
   "base64+zstd" compresses the Account data using [Zstandard](https://facebook.github.io/zstd/) and base64-encodes the result.
-  "jsonParsed" encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If "jsonParsed" is requested but a parser cannot be found, the field falls back to "base64" encoding, detectable when the `data` field is type `<string>`.
+  ["jsonParsed" encoding](jsonrpc-api.md#parsed-responses) attempts to use program-specific state parsers to return more human-readable and explicit account state data. If "jsonParsed" is requested but a parser cannot be found, the field falls back to "base64" encoding, detectable when the `data` field is type `<string>`.
 - (optional) `dataSlice: <object>` - limit the returned account data using the provided `offset: <usize>` and `length: <usize>` fields; only available for "base58", "base64" or "base64+zstd" encodings.
 - (optional) `minContextSlot: <number>` - set the minimum slot that the request can be evaluated at.
@@ -2921,7 +2943,7 @@ Returns transaction details for a confirmed transaction
 - `<string>` - transaction signature as base-58 encoded string
 - `<object>` - (optional) Configuration object containing the following optional fields:
   - (optional) `encoding: <string>` - encoding for each returned Transaction, either "json", "jsonParsed", "base58" (_slow_), "base64". If parameter not provided, the default encoding is "json".
-    "jsonParsed" encoding attempts to use program-specific instruction parsers to return more human-readable and explicit data in the `transaction.message.instructions` list. If "jsonParsed" is requested but a parser cannot be found, the instruction falls back to regular JSON encoding (`accounts`, `data`, and `programIdIndex` fields).
+    ["jsonParsed" encoding](jsonrpc-api.md#parsed-responses) attempts to use program-specific instruction parsers to return more human-readable and explicit data in the `transaction.message.instructions` list. If "jsonParsed" is requested but a parser cannot be found, the instruction falls back to regular JSON encoding (`accounts`, `data`, and `programIdIndex` fields).
   - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment); "processed" is not supported. If parameter not provided, the default is "finalized".
   - (optional) `maxSupportedTransactionVersion: <number>` - set the max transaction version to return in responses. If the requested transaction is a higher version, an error will be returned. If this parameter is omitted, only legacy transactions will be returned, and any versioned transaction will prompt the error.
@@ -3446,7 +3468,7 @@ Simulate sending a transaction
 (default: false, conflicts with `sigVerify`)
 - `accounts: <object>` - (optional) Accounts configuration object containing the following fields:
   - `encoding: <string>` - (optional) encoding for returned Account data, either "base64" (default), "base64+zstd" or "jsonParsed".
-    "jsonParsed" encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If "jsonParsed" is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`.
+    ["jsonParsed" encoding](jsonrpc-api.md#parsed-responses) attempts to use program-specific state parsers to return more human-readable and explicit account state data. If "jsonParsed" is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`.
   - `addresses: <array>` - An array of accounts to return, as base-58 encoded strings
 - (optional) `minContextSlot: <number>` - set the minimum slot that the request can be evaluated at.
@@ -3538,7 +3560,7 @@ Subscribe to an account to receive notifications when the lamports or data for a
 - `<object>` - (optional) Configuration object containing the following optional fields:
   - `<commitment>` - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
   - `encoding: <string>` - encoding for Account data, either "base58" (_slow_), "base64", "base64+zstd" or "jsonParsed".
-    "jsonParsed" encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If "jsonParsed" is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`.
+    ["jsonParsed" encoding](jsonrpc-api.md#parsed-responses) attempts to use program-specific state parsers to return more human-readable and explicit account state data. If "jsonParsed" is requested but a parser cannot be found, the field falls back to binary encoding, detectable when the `data` field is type `<string>`.

 #### Results:
@@ -3689,7 +3711,7 @@ Subscribe to receive notification anytime a new block is Confirmed or Finalized.
 - `<object>` - (optional) Configuration object containing the following optional fields:
   - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
   - (optional) `encoding: <string>` - encoding for Account data, either "base58" (_slow_), "base64", "base64+zstd" or "jsonParsed".
-    "jsonParsed" encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If "jsonParsed" is requested but a parser cannot be found, the field falls back to base64 encoding, detectable when the `data` field is type `<string>`. Default is "base64".
+    ["jsonParsed" encoding](jsonrpc-api.md#parsed-responses) attempts to use program-specific state parsers to return more human-readable and explicit account state data. If "jsonParsed" is requested but a parser cannot be found, the field falls back to base64 encoding, detectable when the `data` field is type `<string>`. Default is "base64".
   - (optional) `transactionDetails: <string>` - level of transaction detail to return, either "full", "signatures", or "none". If parameter not provided, the default detail level is "full".
   - (optional) `showRewards: bool` - whether to populate the `rewards` array. If parameter not provided, the default includes rewards.
@@ -4083,7 +4105,7 @@ Subscribe to a program to receive notifications when the lamports or data for a
 - `<object>` - (optional) Configuration object containing the following optional fields:
   - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment)
   - `encoding: <string>` - encoding for Account data, either "base58" (_slow_), "base64", "base64+zstd" or "jsonParsed".
-    "jsonParsed" encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If "jsonParsed" is requested but a parser cannot be found, the field falls back to base64 encoding, detectable when the `data` field is type `<string>`.
+    ["jsonParsed" encoding](jsonrpc-api.md#parsed-responses) attempts to use program-specific state parsers to return more human-readable and explicit account state data. If "jsonParsed" is requested but a parser cannot be found, the field falls back to base64 encoding, detectable when the `data` field is type `<string>`.
   - (optional) `filters: <array>` - filter results using various [filter objects](jsonrpc-api.md#filters); account must meet all filter criteria to be included in results

 #### Results:
@@ -4672,7 +4694,7 @@ Returns identity and transaction information about a confirmed block in the ledg
 - `<u64>` - slot, as u64 integer
 - `<object>` - (optional) Configuration object containing the following optional fields:
   - (optional) `encoding: <string>` - encoding for each returned Transaction, either "json", "jsonParsed", "base58" (_slow_), "base64". If parameter not provided, the default encoding is "json".
-    "jsonParsed" encoding attempts to use program-specific instruction parsers to return more human-readable and explicit data in the `transaction.message.instructions` list. If "jsonParsed" is requested but a parser cannot be found, the instruction falls back to regular JSON encoding (`accounts`, `data`, and `programIdIndex` fields).
+    ["jsonParsed" encoding](jsonrpc-api.md#parsed-responses) attempts to use program-specific instruction parsers to return more human-readable and explicit data in the `transaction.message.instructions` list. If "jsonParsed" is requested but a parser cannot be found, the instruction falls back to regular JSON encoding (`accounts`, `data`, and `programIdIndex` fields).
   - (optional) `transactionDetails: <string>` - level of transaction detail to return, either "full", "signatures", or "none". If parameter not provided, the default detail level is "full".
   - (optional) `rewards: bool` - whether to populate the `rewards` array. If parameter not provided, the default includes rewards.
   - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment); "processed" is not supported. If parameter not provided, the default is "finalized".
@@ -4980,7 +5002,7 @@ Returns transaction details for a confirmed transaction
 - `<string>` - transaction signature as base-58 encoded string
 - `<object>` - (optional) Configuration object containing the following optional fields:
   - (optional) `encoding: <string>` - encoding for each returned Transaction, either "json", "jsonParsed", "base58" (_slow_), "base64". If parameter not provided, the default encoding is "json".
-    "jsonParsed" encoding attempts to use program-specific instruction parsers to return more human-readable and explicit data in the `transaction.message.instructions` list. If "jsonParsed" is requested but a parser cannot be found, the instruction falls back to regular JSON encoding (`accounts`, `data`, and `programIdIndex` fields).
+    ["jsonParsed" encoding](jsonrpc-api.md#parsed-responses) attempts to use program-specific instruction parsers to return more human-readable and explicit data in the `transaction.message.instructions` list. If "jsonParsed" is requested but a parser cannot be found, the instruction falls back to regular JSON encoding (`accounts`, `data`, and `programIdIndex` fields).
   - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment); "processed" is not supported. If parameter not provided, the default is "finalized".

 #### Results:
diff --git a/docs/src/developing/on-chain-programs/debugging.md b/docs/src/developing/on-chain-programs/debugging.md
index a9ded2bd31..d673ab23a3 100644
--- a/docs/src/developing/on-chain-programs/debugging.md
+++ b/docs/src/developing/on-chain-programs/debugging.md
@@ -1,5 +1,5 @@
 ---
-title: "Debugging"
+title: "Debugging Programs"
 ---

 Solana programs run on-chain, so debugging them in the wild can be challenging.
diff --git a/docs/src/developing/on-chain-programs/deploying.md b/docs/src/developing/on-chain-programs/deploying.md
index 767d6d114b..852e8a195f 100644
--- a/docs/src/developing/on-chain-programs/deploying.md
+++ b/docs/src/developing/on-chain-programs/deploying.md
@@ -1,5 +1,5 @@
 ---
-title: "Deploying"
+title: "Deploying Programs"
 ---

 ![SDK tools](/img/sdk-tools.svg)
@@ -11,7 +11,7 @@ clients via a _program ID_. The program ID is an _address_ specified when
 deploying and is used to reference the program in subsequent transactions.

 Upon a successful deployment the account that holds the program is marked
-executable. If the program is marked "final", its account data become permanently
+executable. If the program is marked "final", its account data become permanently
If any changes are required to the finalized program (features, patches, etc...) the new program must be deployed to a new program ID. diff --git a/docs/src/developing/on-chain-programs/examples.md b/docs/src/developing/on-chain-programs/examples.md index d1d6adb4b0..ef07c7b8b4 100644 --- a/docs/src/developing/on-chain-programs/examples.md +++ b/docs/src/developing/on-chain-programs/examples.md @@ -1,5 +1,5 @@ --- -title: "Examples" +title: "Program Examples" --- ## Helloworld diff --git a/docs/src/developing/programming-model/runtime.md b/docs/src/developing/programming-model/runtime.md index f0d4025088..ac8284b723 100644 --- a/docs/src/developing/programming-model/runtime.md +++ b/docs/src/developing/programming-model/runtime.md @@ -49,7 +49,9 @@ To prevent abuse of computational resources, each transaction is allocated a compute budget. The budget specifies a maximum number of compute units that a transaction can consume, the costs associated with different types of operations the transaction may perform, and operational bounds the transaction must adhere -to. As the transaction is processed compute units are consumed by its +to. + +As the transaction is processed compute units are consumed by its instruction's programs performing operations such as executing BPF instructions, calling syscalls, etc... When the transaction consumes its entire budget, or exceeds a bound such as attempting a call stack that is too deep, the runtime @@ -71,11 +73,11 @@ budget, or exceeds a bound, the entire invocation chain and the top level transaction processing are halted. The current [compute -budget](https://github.com/solana-labs/solana/blob/090e11210aa7222d8295610a6ccac4acda711bb9/program-runtime/src/compute_budget.rs#L26-L87) +budget](https://github.com/solana-labs/solana/blob/090e11210aa7222d8295610a6ccac4acda711bb9/program-runtime/src/compute_budget.rs#L26-L87) can be found in the Solana Program Runtime. -can be found in the Solana Program Runtime. +#### Example Compute Budget -For example, if the current budget is: +For example, if the compute budget set in the Solana runtime is: ```rust max_units: 1,400,000, @@ -89,21 +91,23 @@ log_pubkey_units: 100, ... ``` -Then the transaction +Then any transaction: - Could execute 1,400,000 BPF instructions, if it did nothing else. - Cannot exceed 4k of stack usage. - Cannot exceed a BPF call depth of 64. - Cannot exceed 4 levels of cross-program invocations. -Since the compute budget is consumed incrementally as the transaction executes, -the total budget consumption will be a combination of the various costs of the -operations it performs. +> **NOTE:** Since the compute budget is consumed incrementally as the transaction executes, +> the total budget consumption will be a combination of the various costs of the +> operations it performs. At runtime a program may log how much of the compute budget remains. See [debugging](developing/on-chain-programs/debugging.md#monitoring-compute-budget-consumption) for more information. +### Prioritization fees + A transaction may set the maximum number of compute units it is allowed to consume and the compute unit price by including a `SetComputeUnitLimit` and a `SetComputeUnitPrice` @@ -112,20 +116,19 @@ respectively. 
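As a rough sketch, a client might construct those two instructions with the `solana-sdk` crate's `ComputeBudgetInstruction` helpers as below; the specific limit and price values are illustrative assumptions, not recommendations:

```rust
use solana_sdk::{compute_budget::ComputeBudgetInstruction, instruction::Instruction};

fn main() {
    // Hypothetical values: cap the transaction at 300k compute units and
    // offer 1_000 micro-lamports per compute unit; sensible values depend
    // on the actual workload of the transaction's other instructions.
    let budget_ixs: Vec<Instruction> = vec![
        ComputeBudgetInstruction::set_compute_unit_limit(300_000),
        ComputeBudgetInstruction::set_compute_unit_price(1_000),
        // ...the transaction's ordinary instructions would follow here.
    ];
    println!("prepared {} compute budget instructions", budget_ixs.len());
}
```

These are included in the transaction's instruction list alongside its ordinary instructions.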
If no `SetComputeUnitLimit` is provided, the limit will be calculated as the product of the number of instructions in the transaction (excluding the [Compute -budget -instructions](https://github.com/solana-labs/solana/blob/db32549c00a1b5370fcaf128981ad3323bbd9570/sdk/src/compute_budget.rs#L22)) -and the default per-instruction units, which is currently 200k. - -Note that a transaction's prioritization fee is calculated by multiplying the -number of compute units by the compute unit price (measured in micro-lamports) -set by the transaction via compute budget instructions. So transactions should -request the minimum amount of compute units required for execution to minimize +budget instructions](https://github.com/solana-labs/solana/blob/db32549c00a1b5370fcaf128981ad3323bbd9570/sdk/src/compute_budget.rs#L22)) and the default per-instruction units, which is currently 200k. + +> **NOTE:** A transaction's [prioritization fee](./../../terminology.md#prioritization-fee) is calculated by multiplying the +> number of _compute units_ by the _compute unit price_ (measured in micro-lamports) +> set by the transaction via compute budget instructions. + +Transactions should request the minimum amount of compute units required for execution to minimize fees. Also note that fees are not adjusted when the number of requested compute units exceeds the number of compute units actually consumed by an executed transaction. Compute Budget instructions don't require any accounts and don't consume any -compute units to process. Transactions can only contain one of each type of +compute units to process. Transactions can only contain one of each type of compute budget instruction; duplicate types will result in an error. The `ComputeBudgetInstruction::set_compute_unit_limit` and diff --git a/docs/src/implemented-proposals/implemented-proposals.md b/docs/src/implemented-proposals/implemented-proposals.md index 52ecd71ee3..5dd1bc16fe 100644 --- a/docs/src/implemented-proposals/implemented-proposals.md +++ b/docs/src/implemented-proposals/implemented-proposals.md @@ -1,9 +1,15 @@ --- title: Implemented Design Proposals +sidebar_position: 1 +sidebar_label: Overview --- -The following architectural proposals have been accepted and implemented -by the Solana team. Any designs that may be subject to future change are noted -in the specific proposal page. -Design proposals that have been accepted but not yet implemented are found in -[Accepted Proposals](../proposals/accepted-design-proposals.md). +These architectural proposals have been accepted and implemented by the Solana maintainers. Any designs that may be subject to future change are noted in the specific proposal page. + +## Not Yet Implemented + +Design proposals that have been accepted but not yet implemented are found in [Accepted Proposals](../proposals/accepted-design-proposals.md). + +## Submit a New Proposal + +To submit a new design proposal, consult this guide on [how to submit a design proposal](../proposals.md#submit-a-design-proposal). diff --git a/docs/src/pages/index.js b/docs/src/pages/index.js index ebf7b60a7a..4c6d4b98f8 100644 --- a/docs/src/pages/index.js +++ b/docs/src/pages/index.js @@ -26,7 +26,7 @@ function Home() {

- ⛏ Start Building + ⛏ Start Developing

@@ -34,7 +34,7 @@ function Home() {

Get started building your decentralized app or - marketplace. + marketplace with Solana.

@@ -129,7 +129,7 @@ function Home() {
- +

diff --git a/docs/src/proposals.md b/docs/src/proposals.md new file mode 100644 index 0000000000..d2a6e9a325 --- /dev/null +++ b/docs/src/proposals.md @@ -0,0 +1,49 @@ +--- +title: System Design Proposals +--- + +Changes to the Solana architecture are performed through a public proposal process (via pull requests) on the [Solana GitHub repository](https://github.com/solana-labs/solana). New proposals should be submitted with the "[Submit a Design Proposal](#submit-a-design-proposal)" guide below. + +There are currently two different states of these design proposals: + +1. [Accepted Proposals](./proposals/accepted-design-proposals.md) +2. [Implemented Proposals](./implemented-proposals/implemented-proposals.md) + +## Accepted Proposals + +These architectural proposals have been accepted by the Solana team, but are not yet fully implemented. + +Each proposal may be implemented as described, implemented differently as issues in the designs become evident, or not implemented at all. If implemented, the proposal will be moved to [Implemented Proposals](./implemented-proposals/implemented-proposals.md) and the details will be added to relevant sections of the docs. + +## Implemented Proposals + +These architectural proposals have been accepted and implemented by the Solana team. + +Any designs that may be subject to future change are noted in their specific proposal page. + +## Submit a Design Proposal + +To submit a new design proposal for Solana: + +1. Propose a design by creating a PR that adds a markdown document to the `docs/src/proposals` directory. +2. Add any relevant Solana maintainers to the PR review. +3. Publish the PR for community review and feedback. + +> **NOTE:** All people submitting PRs to the Solana repo should consult the [CONTRIBUTING](https://github.com/solana-labs/solana/blob/master/CONTRIBUTING.md) doc in the repo. + +### After Accepted + +Once a design proposal has been accepted, the PR will be merged into the `master` branch of the Solana repo. This also signifies that the maintainers support your plan of attack. + +> **NOTE:** The merging of the PR will **automatically** create a link in the "Accepted Proposals" table of contents sidebar. > Once approved, continue to submit PRs that implement the proposal. When the implementation reveals the need for tweaks to the proposal, be sure to update the "accepted proposal" document and have these changes reviewed by the same approving maintainers. + +### After Implemented + +After a proposal has been fully implemented into the Solana architecture, a PR should be created to perform the following: + +1. Move the newly implemented proposal file from `docs/src/proposals` to `docs/src/implemented-proposals` +2. Create a new redirect in the `publish-docs.sh` to redirect the old `accepted` proposal page to the new `implemented-proposal` page +3. Publish the PR + +> **NOTE:** Moving the proposal document into the `implemented-proposals` directory will **automatically** move the link in the "Accepted Proposals" table of contents sidebar to the "Implemented Proposals" sidebar.
diff --git a/docs/src/proposals/accepted-design-proposals.md b/docs/src/proposals/accepted-design-proposals.md index 98f197da4e..e2145d26b6 100644 --- a/docs/src/proposals/accepted-design-proposals.md +++ b/docs/src/proposals/accepted-design-proposals.md @@ -1,11 +1,15 @@ --- title: Accepted Design Proposals +sidebar_position: 1 +sidebar_label: Overview --- -The following architectural proposals have been accepted by the Solana team, -but are not yet fully implemented. -The proposals may be implemented as described, implemented differently as -issues in the designs become evident, or not implemented at all. -If implemented, the proposal will be moved to -[Implemented Proposals](../implemented-proposals/implemented-proposals.md) -and the details will be added to relevant sections of the docs. +These architectural proposals have been accepted by the Solana maintainers, but are not yet fully implemented. These proposals may be implemented as described, implemented differently as issues in the designs become evident, or not implemented at all. + +## After Implemented + +Once a proposal has been implemented, it will be moved to [Implemented Proposals](../implemented-proposals/implemented-proposals.md) and the details will be added to relevant sections of the docs. + +## Submit a New Proposal + +To submit a new design proposal, consult this guide on [how to submit a design proposal](../proposals.md#submit-a-design-proposal). diff --git a/docs/src/proposals/return-data.md b/docs/src/proposals/return-data.md index 3d37ee6fca..d62d4b9333 100644 --- a/docs/src/proposals/return-data.md +++ b/docs/src/proposals/return-data.md @@ -2,7 +2,7 @@ ## Problem -In the Solidity langauge it is permitted to return any number of values from a function, +In the Solidity language it is permitted to return any number of values from a function, for example a variable length string can be returned: ``` diff --git a/docs/src/terminology.md b/docs/src/terminology.md index 2c22efb2bb..038aa2d302 100644 --- a/docs/src/terminology.md +++ b/docs/src/terminology.md @@ -1,8 +1,10 @@ --- title: Terminology +description: "Learn the essential terminology used throughout the Solana blockchain and development models." +keywords: "terms, dictionary, definitions, define, programming models" --- -The following terms are used throughout the documentation. +The following terms are used throughout the Solana documentation and development ecosystem. ## account @@ -12,9 +14,9 @@ Like an account at a traditional bank, a Solana account may hold funds called [l The key may be one of: -* an ed25519 public key -* a program-derived account address (32byte value forced off the ed25519 curve) -* a hash of an ed25519 public key with a 32 character string +- an ed25519 public key +- a program-derived account address (32-byte value forced off the ed25519 curve) +- a hash of an ed25519 public key with a 32 character string ## account owner @@ -34,7 +36,7 @@ A contiguous set of [entries](#entry) on the ledger covered by a [vote](#ledger- ## blockhash -A unique value ([hash](#hash)) that identifies a record (block). Solana computes a blockhash from the last [entry id](#entry-id) of the block. +A unique value ([hash](#hash)) that identifies a record (block). Solana computes a blockhash from the last [entry id](#entry-id) of the block. ## block height @@ -56,6 +58,14 @@ A computer program that accesses the Solana server network [cluster](#cluster). A set of [validators](#validator) maintaining a single [ledger](#ledger).
+## compute budget + +The maximum number of [compute units](#compute-units) consumed per transaction. + +## compute units + +The smallest unit of measure for consumption of computational resources of the blockchain. + ## confirmation time The wallclock duration between a [leader](#leader) creating a [tick entry](#tick) and creating a [confirmed block](#confirmed-block). @@ -179,6 +189,12 @@ A [program](#program) with the ability to interpret the binary encoding of other The duration of time for which a [validator](#validator) is unable to [vote](#ledger-vote) on another [fork](#fork). +## message + +The structured contents of a [transaction](#transaction). It generally contains a header, an array of account addresses, a recent [blockhash](#blockhash), and an array of [instructions](#instruction). + +Learn more about the [message formatting inside of transactions](./developing/programming-model/transactions.md#message-format) here. + ## native token The [token](#token) used to track work done by [nodes](#node) in a cluster. @@ -221,7 +237,7 @@ A stack of proofs, each of which proves that some data existed before the proof ## prioritization fee -An additional fee user can specify in compute budget [instruction](#instruction) to prioritize their [transactions](#transaction). +An additional fee a user can specify in the compute budget [instruction](#instruction) to prioritize their [transactions](#transaction). The prioritization fee is calculated by multiplying the requested maximum compute units by the compute-unit price (specified in increments of 0.000001 lamports per compute unit) rounded up to the nearest lamport. @@ -287,7 +303,7 @@ Tokens forfeit to the [cluster](#cluster) if malicious [validator](#validator) b ## sysvar -A system [account](#account). [Sysvars](developing/runtime-facilities/sysvars.md) provide cluster state information such as current tick height, rewards [points](#point) values, etc. Programs can access Sysvars via a Sysvar account (pubkey) or by querying via a syscall. +A system [account](#account). [Sysvars](developing/runtime-facilities/sysvars.md) provide cluster state information such as current tick height, rewards [points](#point) values, etc. Programs can access Sysvars via a Sysvar account (pubkey) or by querying via a syscall. ## thin client @@ -327,7 +343,7 @@ A set of [transactions](#transaction) that may be executed in parallel. ## validator -A full participant in a Solana network [cluster](#cluster) that produces new [blocks](#block). A validator validates the transactions added to the [ledger](#ledger) +A full participant in a Solana network [cluster](#cluster) that produces new [blocks](#block). A validator validates the transactions added to the [ledger](#ledger). ## VDF diff --git a/docs/src/transaction_fees.md b/docs/src/transaction_fees.md index c28cb32543..ee9fdfa43e 100644 --- a/docs/src/transaction_fees.md +++ b/docs/src/transaction_fees.md @@ -1,21 +1,70 @@ --- title: Transaction Fees +description: "Transaction fees are the small fees paid to process instructions on the network. These fees are based on computation and an optional prioritization fee." +keywords: "instruction fee, processing fee, storage fee, low fee blockchain, gas, gwei, cheap network, affordable blockchain" --- -**Subject to change.** +The small fees paid to process [instructions](./terminology.md#instruction) on the Solana blockchain are known as "_transaction fees_".
-Each transaction sent through the network, to be processed by the current leader validation-client and confirmed as a global state transaction, contains a transaction fee. Transaction fees offer many benefits in the Solana economic design, for example they: +As each transaction (which contains one or more instructions) is sent through the network, it gets processed by the current leader validation-client. Once the transaction is confirmed as part of the global state, its _transaction fee_ is paid to the network to help support the [economic design](#basic-economic-design) of the Solana blockchain. -- provide unit compensation to the validator network for the CPU/GPU resources necessary to process the state transaction, +> **NOTE:** Transaction fees are different from [account rent](./terminology.md#rent)! +> While transaction fees are paid to process instructions on the Solana network, rent is paid to store data on the blockchain. + + + +## Why pay transaction fees? + +Transaction fees offer many benefits in the Solana [economic design](#basic-economic-design) described below. Mainly: + +- they provide compensation to the validator network for the CPU/GPU resources necessary to process transactions, - reduce network spam by introducing real cost to transactions, -- and provide potential long-term economic stability of the network through a protocol-captured minimum fee amount per transaction, as described below. +- and provide potential long-term economic stability of the network through a protocol-captured minimum fee amount per transaction + +> **NOTE:** Network consensus votes are sent as normal system transfers, which means that validators pay transaction fees to participate in consensus. + +## Basic economic design + +Many current blockchain economies \(e.g. Bitcoin, Ethereum\) rely on _protocol-based rewards_ to support the economy in the short term. When the protocol-derived rewards expire, it is predicted that the revenue generated through _transaction fees_ will support the economy in the long term. + +In an attempt to create a sustainable economy on Solana through _protocol-based rewards_ and _transaction fees_: + +- a fixed portion (initially 50%) of each transaction fee is _burned_ (aka destroyed), +- with the remaining fee going to the current [leader](./terminology.md#leader) processing the transaction. + +A scheduled global inflation rate provides a source for [rewards](./implemented-proposals/staking-rewards.md) distributed to [Solana Validators](../src/running-validator.md). + +### Why burn some fees? + +As mentioned above, a fixed proportion of each transaction fee is _burned_ (aka destroyed). The intent of this design is to retain leader incentive to include as many transactions as possible within the leader-slot time, while still providing an inflation-limiting mechanism that protects against "tax evasion" attacks \(i.e. side-channel fee payments\). + +Burnt fees can also help prevent malicious validators from censoring transactions by being considered in [fork](./terminology.md#fork) selection.
+ +#### Example of an attack: + +In the case of a [Proof of History (PoH)](./terminology.md#proof-of-history-poh) fork with a malicious, censoring leader: + +- due to the fees lost from censoring, we would expect the total fees destroyed to be **_less than_** a comparable honest fork +- if the censoring leader is to compensate for these lost protocol fees, they would have to replace the burnt fees on their fork themselves +- thus potentially reducing the incentive to censor in the first place + +## Calculating transaction fees + +Transaction fees are calculated based on two main parts: + +- a statically set base fee per signature, and +- the computational resources used during the transaction, measured in "[_compute units_](./terminology.md#compute-units)" + +Since each transaction may require a different amount of computational resources, each is allotted a maximum number of _compute units_ per transaction known as the "[_compute budget_](./terminology.md#compute-budget)". + +The execution of each instruction within a transaction consumes a different number of _compute units_. After the maximum number of _compute units_ has been consumed (aka compute budget exhaustion), the runtime will halt the transaction and return an error, resulting in a failed transaction. -Network consensus votes are sent as normal system transfers, which means that validators pay transaction fees to participate in consensus. +> **Learn more:** compute units and the [Compute Budget](./developing/programming-model/runtime#compute-budget) in the Runtime and [requesting a fee estimate](./developing/clients/jsonrpc-api.md#getfeeformessage) from the RPC. -Many current blockchain economies \(e.g. Bitcoin, Ethereum\), rely on protocol-based rewards to support the economy in the short term, with the assumption that the revenue generated through transaction fees will support the economy in the long term, when the protocol derived rewards expire. In an attempt to create a sustainable economy through protocol-based rewards and transaction fees, a fixed portion (initially 50%) of each transaction fee is destroyed, with the remaining fee going to the current leader processing the transaction. A scheduled global inflation rate provides a source for rewards distributed to validation-clients, through the process described above. +## Prioritization fee -Transaction fees are set by the network cluster based on recent historical throughput, see [Congestion Driven Fees](implemented-proposals/transaction-fees.md#congestion-driven-fees). This minimum portion of each transaction fee can be dynamically adjusted depending on historical _signatures-per-slot_. In this way, the protocol can use the minimum fee to target a desired hardware utilization. By monitoring a protocol specified _signatures-per-slot_ with respect to a desired, target usage amount, the minimum fee can be raised/lowered which should, in turn, lower/raise the actual _signature-per-slot_ per block until it reaches the target amount. This adjustment process can be thought of as similar to the difficulty adjustment algorithm in the Bitcoin protocol, however in this case it is adjusting the minimum transaction fee to guide the transaction processing hardware usage to a desired level. +Recently, Solana has introduced an optional fee called the "_[prioritization fee](./terminology.md#prioritization-fee)_". This additional fee can be paid to help boost how a transaction is prioritized against others, resulting in faster transaction execution times.
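To make the fee formula concrete before it is restated below, here is a minimal self-contained sketch of the calculation; the unit count and price are made-up inputs, not recommendations:

```rust
fn main() {
    // Hypothetical request: a 1_000_000 compute-unit limit at a price of
    // 2_500 micro-lamports per compute unit (1 lamport = 1_000_000
    // micro-lamports).
    let compute_unit_limit: u128 = 1_000_000;
    let micro_lamports_per_cu: u128 = 2_500;

    // Multiply, then round up to the nearest whole lamport.
    let total_micro_lamports = compute_unit_limit * micro_lamports_per_cu;
    let fee_lamports = (total_micro_lamports + 999_999) / 1_000_000;

    assert_eq!(fee_lamports, 2_500);
    println!("prioritization fee: {fee_lamports} lamports");
}
```

This fee is charged on top of the base per-signature fee described above.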
-As mentioned, a fixed-proportion of each transaction fee is to be destroyed. The intent of this design is to retain leader incentive to include as many transactions as possible within the leader-slot time, while providing an inflation limiting mechanism that protects against "tax evasion" attacks \(i.e. side-channel fee payments\). +The prioritization fee is calculated by multiplying the requested maximum _compute units_ by the compute-unit price (specified in increments of 0.000001 lamports per compute unit) rounded up to the nearest lamport. -Additionally, the burnt fees can be a consideration in fork selection. In the case of a PoH fork with a malicious, censoring leader, we would expect the total fees destroyed to be less than a comparable honest fork, due to the fees lost from censoring. If the censoring leader is to compensate for these lost protocol fees, they would have to replace the burnt fees on their fork themselves, thus potentially reducing the incentive to censor in the first place. +You can read more about the [compute budget instruction](./developing/programming-model/runtime.md#compute-budget) here. diff --git a/dos/Cargo.toml b/dos/Cargo.toml index cff134d343..edbeea1dae 100644 --- a/dos/Cargo.toml +++ b/dos/Cargo.toml @@ -12,11 +12,11 @@ description = "Tool to send various requests to cluster in order to evaluate the [dependencies] bincode = "1.3.3" clap = { version = "3.1.5", features = ["derive", "cargo"] } -crossbeam-channel = "0.5.4" +crossbeam-channel = "0.5.6" itertools = "0.10.3" log = "0.4.17" rand = "0.7.0" -serde = "1.0.138" +serde = "1.0.143" solana-bench-tps = { path = "../bench-tps", version = "=1.12.0" } solana-client = { path = "../client", version = "=1.12.0" } solana-core = { path = "../core", version = "=1.12.0" } @@ -35,5 +35,5 @@ solana-version = { path = "../version", version = "=1.12.0" } targets = ["x86_64-unknown-linux-gnu"] [dev-dependencies] -serial_test = "0.8.0" +serial_test = "0.9.0" solana-local-cluster = { path = "../local-cluster", version = "=1.12.0" } diff --git a/dos/src/cli.rs b/dos/src/cli.rs index 0fe3890af0..9e82c3f06e 100644 --- a/dos/src/cli.rs +++ b/dos/src/cli.rs @@ -248,6 +248,7 @@ mod tests { "--valid-signatures", "--num-signatures", "8", + "--tpu-use-quic", "--send-batch-size", "1", ]) diff --git a/dos/src/main.rs b/dos/src/main.rs index fa75fe90b7..baadc5c001 100644 --- a/dos/src/main.rs +++ b/dos/src/main.rs @@ -1185,11 +1185,13 @@ pub mod test { } #[test] + #[ignore] fn test_dos_with_blockhash_and_payer() { run_dos_with_blockhash_and_payer(/*tpu_use_quic*/ false) } #[test] + #[ignore] fn test_dos_with_blockhash_and_payer_and_quic() { run_dos_with_blockhash_and_payer(/*tpu_use_quic*/ true) } diff --git a/entry/Cargo.toml b/entry/Cargo.toml index 500cbacf58..f633874f46 100644 --- a/entry/Cargo.toml +++ b/entry/Cargo.toml @@ -18,7 +18,7 @@ lazy_static = "1.4.0" log = "0.4.17" rand = "0.7.0" rayon = "1.5.3" -serde = "1.0.138" +serde = "1.0.143" solana-measure = { path = "../measure", version = "=1.12.0" } solana-merkle-tree = { path = "../merkle-tree", version = "=1.12.0" } solana-metrics = { path = "../metrics", version = "=1.12.0" } diff --git a/entry/src/entry.rs b/entry/src/entry.rs index 8aba53f072..ff72d35eb1 100644 --- a/entry/src/entry.rs +++ b/entry/src/entry.rs @@ -46,7 +46,7 @@ use { lazy_static! 
{ static ref PAR_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new() .num_threads(get_max_thread_count()) - .thread_name(|ix| format!("entry_{}", ix)) + .thread_name(|ix| format!("solEntry{:02}", ix)) .build() .unwrap(); } @@ -525,21 +525,24 @@ pub fn start_verify_transactions( let tx_offset_recycler = verify_recyclers.tx_offset_recycler; let out_recycler = verify_recyclers.out_recycler; let num_packets = entry_txs.len(); - let gpu_verify_thread = thread::spawn(move || { - let mut verify_time = Measure::start("sigverify"); - sigverify::ed25519_verify( - &mut packet_batches, - &tx_offset_recycler, - &out_recycler, - false, - num_packets, - ); - let verified = packet_batches - .iter() - .all(|batch| batch.iter().all(|p| !p.meta.discard())); - verify_time.stop(); - (verified, verify_time.as_us()) - }); + let gpu_verify_thread = thread::Builder::new() + .name("solGpuSigVerify".into()) + .spawn(move || { + let mut verify_time = Measure::start("sigverify"); + sigverify::ed25519_verify( + &mut packet_batches, + &tx_offset_recycler, + &out_recycler, + false, + num_packets, + ); + let verified = packet_batches + .iter() + .all(|batch| batch.iter().all(|p| !p.meta.discard())); + verify_time.stop(); + (verified, verify_time.as_us()) + }) + .unwrap(); Ok(EntrySigVerificationState { verification_status: EntryVerificationStatus::Pending, entries: Some(entries), @@ -770,25 +773,28 @@ impl EntrySlice for [Entry] { let hashes = Arc::new(Mutex::new(hashes_pinned)); let hashes_clone = hashes.clone(); - let gpu_verify_thread = thread::spawn(move || { - let mut hashes = hashes_clone.lock().unwrap(); - let gpu_wait = Instant::now(); - let res; - unsafe { - res = (api.poh_verify_many)( - hashes.as_mut_ptr() as *mut u8, - num_hashes_vec.as_ptr(), - length, - 1, + let gpu_verify_thread = thread::Builder::new() + .name("solGpuPohVerify".into()) + .spawn(move || { + let mut hashes = hashes_clone.lock().unwrap(); + let gpu_wait = Instant::now(); + let res; + unsafe { + res = (api.poh_verify_many)( + hashes.as_mut_ptr() as *mut u8, + num_hashes_vec.as_ptr(), + length, + 1, + ); + } + assert!(res == 0, "GPU PoH verify many failed"); + inc_new_counter_info!( + "entry_verify-gpu_thread", + timing::duration_as_us(&gpu_wait.elapsed()) as usize ); - } - assert!(res == 0, "GPU PoH verify many failed"); - inc_new_counter_info!( - "entry_verify-gpu_thread", - timing::duration_as_us(&gpu_wait.elapsed()) as usize - ); - timing::duration_as_us(&gpu_wait.elapsed()) - }); + timing::duration_as_us(&gpu_wait.elapsed()) + }) + .unwrap(); let verifications = PAR_THREAD_POOL.install(|| { self.into_par_iter() diff --git a/explorer/package-lock.json b/explorer/package-lock.json index 920066b7df..be5371fc54 100644 --- a/explorer/package-lock.json +++ b/explorer/package-lock.json @@ -14,15 +14,14 @@ "@cloudflare/stream-react": "^1.2.0", "@metamask/jazzicon": "^2.0.0", "@metaplex/js": "4.12.0", - "@popperjs/core": "2.11.0", "@project-serum/anchor": "0.23.0", "@project-serum/serum": "^0.13.61", "@react-hook/debounce": "^4.0.0", "@sentry/react": "^7.6.0", "@solana/buffer-layout": "^3.0.0", "@solana/spl-token-registry": "^0.2.3736", - "@solana/web3.js": "^1.50.0", - "@testing-library/jest-dom": "^5.16.1", + "@solana/web3.js": "^1.53.0", + "@testing-library/jest-dom": "^5.16.5", "@testing-library/react": "^13.3.0", "@testing-library/user-event": "^14.2.3", "@types/bn.js": "^5.1.0", @@ -33,7 +32,7 @@ "@types/jest": "^28.1.4", "@types/node": "^18.0.3", "@types/react": "^18.0.8", - "@types/react-dom": "^18.0.3", + "@types/react-dom": 
"^18.0.6", "@types/react-router-dom": "^5.3.2", "@types/react-select": "^3.1.2", "@types/socket.io-client": "^3.0.0", @@ -47,12 +46,12 @@ "coingecko-api": "^1.0.10", "cross-fetch": "^3.1.4", "humanize-duration-ts": "^2.1.1", - "prettier": "^2.5.1", + "prettier": "^2.7.1", "react": "^18.1.0", "react-chartjs-2": "^2.11.2", "react-content-loader": "^6.1.0", "react-countup": "^6.1.0", - "react-dom": "^18.1.0", + "react-dom": "^18.2.0", "react-json-view": "^1.21.3", "react-moment": "^1.1.1", "react-router-dom": "^5.3.0", @@ -63,6 +62,11 @@ "typescript": "^4.7.4" } }, + "node_modules/@adobe/css-tools": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.0.1.tgz", + "integrity": "sha512-+u76oB43nOHrF4DDWRLWDCtci7f3QJoEBigemIdIeTi1ODqjx6Tad9NCVnPRwewWlKkVab5PlK8DCtPTyX7S8g==" + }, "node_modules/@babel/code-frame": { "version": "7.10.4", "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.10.4.tgz", @@ -4721,6 +4725,7 @@ "version": "2.11.0", "resolved": "https://registry.npmjs.org/@popperjs/core/-/core-2.11.0.tgz", "integrity": "sha512-zrsUxjLOKAzdewIDRWy9nsV1GQsKBCWaGwsZQlCgr6/q+vjyZhFgqedLfFBuI9anTPEUT4APq9Mu0SZBTzIcGQ==", + "peer": true, "funding": { "type": "opencollective", "url": "https://opencollective.com/popperjs" @@ -5171,9 +5176,9 @@ } }, "node_modules/@solana/web3.js": { - "version": "1.50.0", - "resolved": "https://registry.npmjs.org/@solana/web3.js/-/web3.js-1.50.0.tgz", - "integrity": "sha512-9KCuF3QLHd/dkYlffYPSHPbPFaBKqiwwDqWR+C/z1CZZbQybu+NCydWvwHHqrrlG50wn15L/6zYLb5Tjr6tOkA==", + "version": "1.53.0", + "resolved": "https://registry.npmjs.org/@solana/web3.js/-/web3.js-1.53.0.tgz", + "integrity": "sha512-QyQDA9U5b+AiTo1ANsj9WihWWECeLv6VRpiTE7xPe5hLYANXZYecnlLglNiEzVgRg/jLvR5DrCISXhHx/mAEJw==", "dependencies": { "@babel/runtime": "^7.12.5", "@ethersproject/sha2": "^5.5.0", @@ -5191,7 +5196,7 @@ "rpc-websockets": "^7.5.0", "secp256k1": "^4.0.2", "superstruct": "^0.14.2", - "tweetnacl": "^1.0.0" + "tweetnacl": "^1.0.3" }, "engines": { "node": ">=12.20.0" @@ -5636,15 +5641,15 @@ } }, "node_modules/@testing-library/jest-dom": { - "version": "5.16.1", - "resolved": "https://registry.npmjs.org/@testing-library/jest-dom/-/jest-dom-5.16.1.tgz", - "integrity": "sha512-ajUJdfDIuTCadB79ukO+0l8O+QwN0LiSxDaYUTI4LndbbUsGi6rWU1SCexXzBA2NSjlVB9/vbkasQIL3tmPBjw==", + "version": "5.16.5", + "resolved": "https://registry.npmjs.org/@testing-library/jest-dom/-/jest-dom-5.16.5.tgz", + "integrity": "sha512-N5ixQ2qKpi5OLYfwQmUb/5mSV9LneAcaUfp32pn4yCnpb8r/Yz0pXFPck21dIicKmi+ta5WRAknkZCfA8refMA==", "dependencies": { + "@adobe/css-tools": "^4.0.1", "@babel/runtime": "^7.9.2", "@types/testing-library__jest-dom": "^5.9.1", "aria-query": "^5.0.0", "chalk": "^3.0.0", - "css": "^3.0.0", "css.escape": "^1.5.1", "dom-accessibility-api": "^0.5.6", "lodash": "^4.17.15", @@ -6166,9 +6171,9 @@ } }, "node_modules/@types/react-dom": { - "version": "18.0.3", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.0.3.tgz", - "integrity": "sha512-1RRW9kst+67gveJRYPxGmVy8eVJ05O43hg77G2j5m76/RFJtMbcfAs2viQ2UNsvvDg8F7OfQZx8qQcl6ymygaQ==", + "version": "18.0.6", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.0.6.tgz", + "integrity": "sha512-/5OFZgfIPSwy+YuIBP/FgJnQnsxhZhjjrnxudMddeblOouIodEQ75X14Rr4wGSG/bknL+Omy9iWlLo1u/9GzAA==", "dependencies": { "@types/react": "*" } @@ -9377,16 +9382,6 @@ "node": ">=4" } }, - "node_modules/css": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/css/-/css-3.0.0.tgz", 
- "integrity": "sha512-DG9pFfwOrzc+hawpmqX/dHYHJG+Bsdb0klhyi1sDneOgGOXy9wQIC8hzyVp1e4NRYDBdxcylvywPkkXCHAzTyQ==", - "dependencies": { - "inherits": "^2.0.4", - "source-map": "^0.6.1", - "source-map-resolve": "^0.6.0" - } - }, "node_modules/css-blank-pseudo": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/css-blank-pseudo/-/css-blank-pseudo-0.1.4.tgz", @@ -21201,14 +21196,17 @@ } }, "node_modules/prettier": { - "version": "2.5.1", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.5.1.tgz", - "integrity": "sha512-vBZcPRUR5MZJwoyi3ZoyQlc1rXeEck8KgeC9AwwOn+exuxLxq5toTRDTSaVrXHxelDMHy9zlicw8u66yxoSUFg==", + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.7.1.tgz", + "integrity": "sha512-ujppO+MkdPqoVINuDFDRLClm7D78qbDt0/NR+wp5FqEZOoTNAjPHWj17QRhu7geIHJfcNhRk1XVQmF8Bp3ye+g==", "bin": { "prettier": "bin-prettier.js" }, "engines": { "node": ">=10.13.0" + }, + "funding": { + "url": "https://github.com/prettier/prettier?sponsor=1" } }, "node_modules/pretty-bytes": { @@ -21798,15 +21796,15 @@ } }, "node_modules/react-dom": { - "version": "18.1.0", - "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.1.0.tgz", - "integrity": "sha512-fU1Txz7Budmvamp7bshe4Zi32d0ll7ect+ccxNu9FlObT605GOEB8BfO4tmRJ39R5Zj831VCpvQ05QPBW5yb+w==", + "version": "18.2.0", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz", + "integrity": "sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==", "dependencies": { "loose-envify": "^1.1.0", - "scheduler": "^0.22.0" + "scheduler": "^0.23.0" }, "peerDependencies": { - "react": "^18.1.0" + "react": "^18.2.0" } }, "node_modules/react-error-overlay": { @@ -23110,9 +23108,9 @@ } }, "node_modules/scheduler": { - "version": "0.22.0", - "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.22.0.tgz", - "integrity": "sha512-6QAm1BgQI88NPYymgGQLCZgvep4FyePDWFpXVK+zNSUgHwlqpJy8VEh8Et0KxTACS4VWwMousBElAZOH9nkkoQ==", + "version": "0.23.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.0.tgz", + "integrity": "sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==", "dependencies": { "loose-envify": "^1.1.0" } @@ -23740,15 +23738,6 @@ "node": ">=0.10.0" } }, - "node_modules/source-map-resolve": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.6.0.tgz", - "integrity": "sha512-KXBr9d/fO/bWo97NXsPIAW1bFSBOuCnjbNTBMO7N59hsv5i9yzRDfcYwwt0l04+VqnKC+EwzvJZIP/qkuMgR/w==", - "dependencies": { - "atob": "^2.1.2", - "decode-uri-component": "^0.2.0" - } - }, "node_modules/source-map-support": { "version": "0.5.21", "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", @@ -27357,6 +27346,11 @@ } }, "dependencies": { + "@adobe/css-tools": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.0.1.tgz", + "integrity": "sha512-+u76oB43nOHrF4DDWRLWDCtci7f3QJoEBigemIdIeTi1ODqjx6Tad9NCVnPRwewWlKkVab5PlK8DCtPTyX7S8g==" + }, "@babel/code-frame": { "version": "7.10.4", "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.10.4.tgz", @@ -28888,7 +28882,8 @@ "@cloudflare/stream-react": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/@cloudflare/stream-react/-/stream-react-1.2.0.tgz", - "integrity": "sha512-uLsKstcNOrTH19UEFxHWUX6h8SM9Ytr1Qyo6dIObuDUQ40g/cJgNnNnFn4gcGWf3zPwXQN3LkRmVol5MU/rU2Q==" + "integrity": 
"sha512-uLsKstcNOrTH19UEFxHWUX6h8SM9Ytr1Qyo6dIObuDUQ40g/cJgNnNnFn4gcGWf3zPwXQN3LkRmVol5MU/rU2Q==", + "requires": {} }, "@cnakazawa/watch": { "version": "1.0.4", @@ -30809,7 +30804,8 @@ "@popperjs/core": { "version": "2.11.0", "resolved": "https://registry.npmjs.org/@popperjs/core/-/core-2.11.0.tgz", - "integrity": "sha512-zrsUxjLOKAzdewIDRWy9nsV1GQsKBCWaGwsZQlCgr6/q+vjyZhFgqedLfFBuI9anTPEUT4APq9Mu0SZBTzIcGQ==" + "integrity": "sha512-zrsUxjLOKAzdewIDRWy9nsV1GQsKBCWaGwsZQlCgr6/q+vjyZhFgqedLfFBuI9anTPEUT4APq9Mu0SZBTzIcGQ==", + "peer": true }, "@project-serum/anchor": { "version": "0.23.0", @@ -30915,7 +30911,8 @@ "@react-hook/latest": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/@react-hook/latest/-/latest-1.0.3.tgz", - "integrity": "sha512-dy6duzl+JnAZcDbNTfmaP3xHiKtbXYOaz3G51MGVljh548Y8MWzTr+PHLOfvpypEVW9zwvl+VyKjbWKEVbV1Rg==" + "integrity": "sha512-dy6duzl+JnAZcDbNTfmaP3xHiKtbXYOaz3G51MGVljh548Y8MWzTr+PHLOfvpypEVW9zwvl+VyKjbWKEVbV1Rg==", + "requires": {} }, "@rollup/plugin-node-resolve": { "version": "7.1.3", @@ -31141,9 +31138,9 @@ } }, "@solana/web3.js": { - "version": "1.50.0", - "resolved": "https://registry.npmjs.org/@solana/web3.js/-/web3.js-1.50.0.tgz", - "integrity": "sha512-9KCuF3QLHd/dkYlffYPSHPbPFaBKqiwwDqWR+C/z1CZZbQybu+NCydWvwHHqrrlG50wn15L/6zYLb5Tjr6tOkA==", + "version": "1.53.0", + "resolved": "https://registry.npmjs.org/@solana/web3.js/-/web3.js-1.53.0.tgz", + "integrity": "sha512-QyQDA9U5b+AiTo1ANsj9WihWWECeLv6VRpiTE7xPe5hLYANXZYecnlLglNiEzVgRg/jLvR5DrCISXhHx/mAEJw==", "requires": { "@babel/runtime": "^7.12.5", "@ethersproject/sha2": "^5.5.0", @@ -31161,7 +31158,7 @@ "rpc-websockets": "^7.5.0", "secp256k1": "^4.0.2", "superstruct": "^0.14.2", - "tweetnacl": "^1.0.0" + "tweetnacl": "^1.0.3" }, "dependencies": { "@babel/runtime": { @@ -31450,15 +31447,15 @@ } }, "@testing-library/jest-dom": { - "version": "5.16.1", - "resolved": "https://registry.npmjs.org/@testing-library/jest-dom/-/jest-dom-5.16.1.tgz", - "integrity": "sha512-ajUJdfDIuTCadB79ukO+0l8O+QwN0LiSxDaYUTI4LndbbUsGi6rWU1SCexXzBA2NSjlVB9/vbkasQIL3tmPBjw==", + "version": "5.16.5", + "resolved": "https://registry.npmjs.org/@testing-library/jest-dom/-/jest-dom-5.16.5.tgz", + "integrity": "sha512-N5ixQ2qKpi5OLYfwQmUb/5mSV9LneAcaUfp32pn4yCnpb8r/Yz0pXFPck21dIicKmi+ta5WRAknkZCfA8refMA==", "requires": { + "@adobe/css-tools": "^4.0.1", "@babel/runtime": "^7.9.2", "@types/testing-library__jest-dom": "^5.9.1", "aria-query": "^5.0.0", "chalk": "^3.0.0", - "css": "^3.0.0", "css.escape": "^1.5.1", "dom-accessibility-api": "^0.5.6", "lodash": "^4.17.15", @@ -31538,7 +31535,8 @@ "@testing-library/user-event": { "version": "14.2.3", "resolved": "https://registry.npmjs.org/@testing-library/user-event/-/user-event-14.2.3.tgz", - "integrity": "sha512-07Ly+NsFDTvjNdvl5bLBA5oHeGIIHCKc7CniGuKnHrjvqcTPVqPEo4z6a8iYydZ0WvDA6ZA0fnhYrqCLbsm0+A==" + "integrity": "sha512-07Ly+NsFDTvjNdvl5bLBA5oHeGIIHCKc7CniGuKnHrjvqcTPVqPEo4z6a8iYydZ0WvDA6ZA0fnhYrqCLbsm0+A==", + "requires": {} }, "@tootallnate/once": { "version": "1.1.2", @@ -31899,9 +31897,9 @@ } }, "@types/react-dom": { - "version": "18.0.3", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.0.3.tgz", - "integrity": "sha512-1RRW9kst+67gveJRYPxGmVy8eVJ05O43hg77G2j5m76/RFJtMbcfAs2viQ2UNsvvDg8F7OfQZx8qQcl6ymygaQ==", + "version": "18.0.6", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.0.6.tgz", + "integrity": "sha512-/5OFZgfIPSwy+YuIBP/FgJnQnsxhZhjjrnxudMddeblOouIodEQ75X14Rr4wGSG/bknL+Omy9iWlLo1u/9GzAA==", "requires": { 
"@types/react": "*" } @@ -32373,7 +32371,8 @@ "acorn-jsx": { "version": "5.3.1", "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.1.tgz", - "integrity": "sha512-K0Ptm/47OKfQRpNQ2J/oIN/3QYiK6FwW+eJbILhsdxh2WTLdl+30o8aGdTbm5JbffpFFAg/g+zi1E+jvJha5ng==" + "integrity": "sha512-K0Ptm/47OKfQRpNQ2J/oIN/3QYiK6FwW+eJbILhsdxh2WTLdl+30o8aGdTbm5JbffpFFAg/g+zi1E+jvJha5ng==", + "requires": {} }, "acorn-walk": { "version": "7.2.0", @@ -32445,12 +32444,14 @@ "ajv-errors": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/ajv-errors/-/ajv-errors-1.0.1.tgz", - "integrity": "sha512-DCRfO/4nQ+89p/RK43i8Ezd41EqdGIU4ld7nGF8OQ14oc/we5rEntLCUa7+jrn3nn83BosfwZA0wb4pon2o8iQ==" + "integrity": "sha512-DCRfO/4nQ+89p/RK43i8Ezd41EqdGIU4ld7nGF8OQ14oc/we5rEntLCUa7+jrn3nn83BosfwZA0wb4pon2o8iQ==", + "requires": {} }, "ajv-keywords": { "version": "3.5.2", "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz", - "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==" + "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==", + "requires": {} }, "alphanum-sort": { "version": "1.0.2", @@ -32952,7 +32953,8 @@ "babel-plugin-named-asset-import": { "version": "0.3.7", "resolved": "https://registry.npmjs.org/babel-plugin-named-asset-import/-/babel-plugin-named-asset-import-0.3.7.tgz", - "integrity": "sha512-squySRkf+6JGnvjoUtDEjSREJEBirnXi9NqP6rjSYsylxQxqBTz+pkmf395i9E2zsvmYUaI40BHo6SqZUdydlw==" + "integrity": "sha512-squySRkf+6JGnvjoUtDEjSREJEBirnXi9NqP6rjSYsylxQxqBTz+pkmf395i9E2zsvmYUaI40BHo6SqZUdydlw==", + "requires": {} }, "babel-plugin-polyfill-corejs2": { "version": "0.1.2", @@ -33433,7 +33435,8 @@ "bootstrap": { "version": "5.1.3", "resolved": "https://registry.npmjs.org/bootstrap/-/bootstrap-5.1.3.tgz", - "integrity": "sha512-fcQztozJ8jToQWXxVuEyXWW+dSo8AiXWKwiSSrKWsRB/Qt+Ewwza+JWoLKiTuQLaEPhdNAJ7+Dosc9DOIqNy7Q==" + "integrity": "sha512-fcQztozJ8jToQWXxVuEyXWW+dSo8AiXWKwiSSrKWsRB/Qt+Ewwza+JWoLKiTuQLaEPhdNAJ7+Dosc9DOIqNy7Q==", + "requires": {} }, "borsh": { "version": "0.4.0", @@ -34476,16 +34479,6 @@ "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-1.0.0.tgz", "integrity": "sha1-ojD2T1aDEOFJgAmUB5DsmVRbyn4=" }, - "css": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/css/-/css-3.0.0.tgz", - "integrity": "sha512-DG9pFfwOrzc+hawpmqX/dHYHJG+Bsdb0klhyi1sDneOgGOXy9wQIC8hzyVp1e4NRYDBdxcylvywPkkXCHAzTyQ==", - "requires": { - "inherits": "^2.0.4", - "source-map": "^0.6.1", - "source-map-resolve": "^0.6.0" - } - }, "css-blank-pseudo": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/css-blank-pseudo/-/css-blank-pseudo-0.1.4.tgz", @@ -36014,7 +36007,8 @@ "eslint-plugin-react-hooks": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.2.0.tgz", - "integrity": "sha512-623WEiZJqxR7VdxFCKLI6d6LLpwJkGPYKODnkH3D7WpOG5KM8yWueBd8TLsNAetEJNF5iJmolaAKO3F8yzyVBQ==" + "integrity": "sha512-623WEiZJqxR7VdxFCKLI6d6LLpwJkGPYKODnkH3D7WpOG5KM8yWueBd8TLsNAetEJNF5iJmolaAKO3F8yzyVBQ==", + "requires": {} }, "eslint-plugin-testing-library": { "version": "3.10.1", @@ -39801,7 +39795,8 @@ "jest-pnp-resolver": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/jest-pnp-resolver/-/jest-pnp-resolver-1.2.2.tgz", - "integrity": "sha512-olV41bKSMm8BdnuMsewT4jqlZ8+3TCARAXjZGT9jcoSnrfUnRCqnMoF9XEeoWjbzObpqF9dRhHQj0Xb9QdF6/w==" + "integrity": 
"sha512-olV41bKSMm8BdnuMsewT4jqlZ8+3TCARAXjZGT9jcoSnrfUnRCqnMoF9XEeoWjbzObpqF9dRhHQj0Xb9QdF6/w==", + "requires": {} }, "jest-regex-util": { "version": "26.0.0", @@ -43626,9 +43621,9 @@ "integrity": "sha1-1PRWKwzjaW5BrFLQ4ALlemNdxtw=" }, "prettier": { - "version": "2.5.1", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.5.1.tgz", - "integrity": "sha512-vBZcPRUR5MZJwoyi3ZoyQlc1rXeEck8KgeC9AwwOn+exuxLxq5toTRDTSaVrXHxelDMHy9zlicw8u66yxoSUFg==" + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.7.1.tgz", + "integrity": "sha512-ujppO+MkdPqoVINuDFDRLClm7D78qbDt0/NR+wp5FqEZOoTNAjPHWj17QRhu7geIHJfcNhRk1XVQmF8Bp3ye+g==" }, "pretty-bytes": { "version": "5.6.0", @@ -43935,7 +43930,8 @@ "react-content-loader": { "version": "6.1.0", "resolved": "https://registry.npmjs.org/react-content-loader/-/react-content-loader-6.1.0.tgz", - "integrity": "sha512-S4/+doQrNs0PGDgUYCGGfdFjGax8dMQzYkWcSSxfaUcUjFkbnikWARuX9lWkglocIVhxnn3lxNb6uEWFFUzNUw==" + "integrity": "sha512-S4/+doQrNs0PGDgUYCGGfdFjGax8dMQzYkWcSSxfaUcUjFkbnikWARuX9lWkglocIVhxnn3lxNb6uEWFFUzNUw==", + "requires": {} }, "react-countup": { "version": "6.1.0", @@ -44082,12 +44078,12 @@ } }, "react-dom": { - "version": "18.1.0", - "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.1.0.tgz", - "integrity": "sha512-fU1Txz7Budmvamp7bshe4Zi32d0ll7ect+ccxNu9FlObT605GOEB8BfO4tmRJ39R5Zj831VCpvQ05QPBW5yb+w==", + "version": "18.2.0", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz", + "integrity": "sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==", "requires": { "loose-envify": "^1.1.0", - "scheduler": "^0.22.0" + "scheduler": "^0.23.0" } }, "react-error-overlay": { @@ -44140,7 +44136,8 @@ "react-moment": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/react-moment/-/react-moment-1.1.1.tgz", - "integrity": "sha512-WjwvxBSnmLMRcU33do0KixDB+9vP3e84eCse+rd+HNklAMNWyRgZTDEQlay/qK6lcXFPRuEIASJTpEt6pyK7Ww==" + "integrity": "sha512-WjwvxBSnmLMRcU33do0KixDB+9vP3e84eCse+rd+HNklAMNWyRgZTDEQlay/qK6lcXFPRuEIASJTpEt6pyK7Ww==", + "requires": {} }, "react-native-url-polyfill": { "version": "1.3.0", @@ -44846,7 +44843,8 @@ "ws": { "version": "8.8.1", "resolved": "https://registry.npmjs.org/ws/-/ws-8.8.1.tgz", - "integrity": "sha512-bGy2JzvzkPowEJV++hF07hAD6niYSr0JzBNo/J29WsB57A2r7Wlc1UFcTR9IzrPvuNVO4B8LGqF8qcpsVOhJCA==" + "integrity": "sha512-bGy2JzvzkPowEJV++hF07hAD6niYSr0JzBNo/J29WsB57A2r7Wlc1UFcTR9IzrPvuNVO4B8LGqF8qcpsVOhJCA==", + "requires": {} } } }, @@ -45080,9 +45078,9 @@ } }, "scheduler": { - "version": "0.22.0", - "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.22.0.tgz", - "integrity": "sha512-6QAm1BgQI88NPYymgGQLCZgvep4FyePDWFpXVK+zNSUgHwlqpJy8VEh8Et0KxTACS4VWwMousBElAZOH9nkkoQ==", + "version": "0.23.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.0.tgz", + "integrity": "sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==", "requires": { "loose-envify": "^1.1.0" } @@ -45605,15 +45603,6 @@ "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-0.6.2.tgz", "integrity": "sha512-/3GptzWzu0+0MBQFrDKzw/DvvMTUORvgY6k6jd/VS6iCR4RDTKWH6v6WPwQoUO8667uQEf9Oe38DxAYWY5F/Ug==" }, - "source-map-resolve": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.6.0.tgz", - "integrity": "sha512-KXBr9d/fO/bWo97NXsPIAW1bFSBOuCnjbNTBMO7N59hsv5i9yzRDfcYwwt0l04+VqnKC+EwzvJZIP/qkuMgR/w==", - 
"requires": { - "atob": "^2.1.2", - "decode-uri-component": "^0.2.0" - } - }, "source-map-support": { "version": "0.5.21", "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", @@ -46949,12 +46938,14 @@ "use-composed-ref": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/use-composed-ref/-/use-composed-ref-1.3.0.tgz", - "integrity": "sha512-GLMG0Jc/jiKov/3Ulid1wbv3r54K9HlMW29IWcDFPEqFkSO2nS0MuefWgMJpeHQ9YJeXDL3ZUF+P3jdXlZX/cQ==" + "integrity": "sha512-GLMG0Jc/jiKov/3Ulid1wbv3r54K9HlMW29IWcDFPEqFkSO2nS0MuefWgMJpeHQ9YJeXDL3ZUF+P3jdXlZX/cQ==", + "requires": {} }, "use-isomorphic-layout-effect": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/use-isomorphic-layout-effect/-/use-isomorphic-layout-effect-1.1.2.tgz", - "integrity": "sha512-49L8yCO3iGT/ZF9QttjwLF/ZD9Iwto5LnH5LmEdk/6cFmXddqi2ulF0edxTwjj+7mqvpVVGQWvbXZdn32wRSHA==" + "integrity": "sha512-49L8yCO3iGT/ZF9QttjwLF/ZD9Iwto5LnH5LmEdk/6cFmXddqi2ulF0edxTwjj+7mqvpVVGQWvbXZdn32wRSHA==", + "requires": {} }, "use-latest": { "version": "1.2.1", @@ -48390,7 +48381,8 @@ "ws": { "version": "7.4.6", "resolved": "https://registry.npmjs.org/ws/-/ws-7.4.6.tgz", - "integrity": "sha512-YmhHDO4MzaDLB+M9ym/mDA5z0naX8j7SIlT8f8z+I0VtzsRbekxEutHSme7NPS2qE8StCYQNUnfWdXta/Yu85A==" + "integrity": "sha512-YmhHDO4MzaDLB+M9ym/mDA5z0naX8j7SIlT8f8z+I0VtzsRbekxEutHSme7NPS2qE8StCYQNUnfWdXta/Yu85A==", + "requires": {} }, "xml-name-validator": { "version": "3.0.0", diff --git a/explorer/package.json b/explorer/package.json index eae3497de9..6d6d229cd1 100644 --- a/explorer/package.json +++ b/explorer/package.json @@ -15,8 +15,8 @@ "@sentry/react": "^7.6.0", "@solana/buffer-layout": "^3.0.0", "@solana/spl-token-registry": "^0.2.3736", - "@solana/web3.js": "^1.50.0", - "@testing-library/jest-dom": "^5.16.1", + "@solana/web3.js": "^1.53.0", + "@testing-library/jest-dom": "^5.16.5", "@testing-library/react": "^13.3.0", "@testing-library/user-event": "^14.2.3", "@types/bn.js": "^5.1.0", @@ -27,7 +27,7 @@ "@types/jest": "^28.1.4", "@types/node": "^18.0.3", "@types/react": "^18.0.8", - "@types/react-dom": "^18.0.3", + "@types/react-dom": "^18.0.6", "@types/react-router-dom": "^5.3.2", "@types/react-select": "^3.1.2", "@types/socket.io-client": "^3.0.0", @@ -41,12 +41,12 @@ "coingecko-api": "^1.0.10", "cross-fetch": "^3.1.4", "humanize-duration-ts": "^2.1.1", - "prettier": "^2.5.1", + "prettier": "^2.7.1", "react": "^18.1.0", "react-chartjs-2": "^2.11.2", "react-content-loader": "^6.1.0", "react-countup": "^6.1.0", - "react-dom": "^18.1.0", + "react-dom": "^18.2.0", "react-json-view": "^1.21.3", "react-moment": "^1.1.1", "react-router-dom": "^5.3.0", diff --git a/explorer/src/components/account/TokenHistoryCard.tsx b/explorer/src/components/account/TokenHistoryCard.tsx index a2c0f719ce..a2d6e18bba 100644 --- a/explorer/src/components/account/TokenHistoryCard.tsx +++ b/explorer/src/components/account/TokenHistoryCard.tsx @@ -394,8 +394,8 @@ const TokenTransactionRow = React.memo( statusText = "Success"; } - const instructions = - details?.data?.transaction?.transaction.message.instructions; + const transactionWithMeta = details?.data?.transactionWithMeta; + const instructions = transactionWithMeta?.transaction.message.instructions; if (!instructions) return ( @@ -424,9 +424,7 @@ const TokenTransactionRow = React.memo( let tokenInstructionNames: InstructionType[] = []; - if (details?.data?.transaction) { - const transaction = details.data.transaction; - + if (transactionWithMeta) { tokenInstructionNames = instructions 
.map((ix, index): InstructionType | undefined => { let name = "Unknown"; @@ -437,11 +435,11 @@ const TokenTransactionRow = React.memo( )[] = []; if ( - transaction.meta?.innerInstructions && + transactionWithMeta.meta?.innerInstructions && (cluster !== Cluster.MainnetBeta || - transaction.slot >= INNER_INSTRUCTIONS_START_SLOT) + transactionWithMeta.slot >= INNER_INSTRUCTIONS_START_SLOT) ) { - transaction.meta.innerInstructions.forEach((ix) => { + transactionWithMeta.meta.innerInstructions.forEach((ix) => { if (ix.index === index) { ix.instructions.forEach((inner) => { innerInstructions.push(inner); @@ -451,9 +449,9 @@ const TokenTransactionRow = React.memo( } let transactionInstruction; - if (transaction?.transaction) { + if (transactionWithMeta?.transaction) { transactionInstruction = intoTransactionInstruction( - transaction.transaction, + transactionWithMeta.transaction, ix ); } diff --git a/explorer/src/components/account/address-lookup-table/AddressLookupTableAccountSection.tsx b/explorer/src/components/account/address-lookup-table/AddressLookupTableAccountSection.tsx new file mode 100644 index 0000000000..bffec1fa8b --- /dev/null +++ b/explorer/src/components/account/address-lookup-table/AddressLookupTableAccountSection.tsx @@ -0,0 +1,84 @@ +import React from "react"; +import { TableCardBody } from "components/common/TableCardBody"; +import { SolBalance } from "utils"; +import { Account, useFetchAccountInfo } from "providers/accounts"; +import { Address } from "components/common/Address"; +import { AddressLookupTableAccount } from "@solana/web3.js"; +import { Slot } from "components/common/Slot"; + +export function AddressLookupTableAccountSection({ + account, + data, +}: { + account: Account; + data: Uint8Array; +}) { + const lookupTableAccount = React.useMemo(() => { + return new AddressLookupTableAccount({ + key: account.pubkey, + state: AddressLookupTableAccount.deserialize(data), + }); + }, [account, data]); + const refresh = useFetchAccountInfo(); + return ( +
+
+

+ Address Lookup Table Account +

+ +
+ + + + Address + +
+ + + + Balance (SOL) + + + + + + Activation Status + + {lookupTableAccount.isActive() ? "Active" : "Deactivated"} + + + + Last Extended Slot + + {lookupTableAccount.state.lastExtendedSlot === 0 ? ( + "None (Empty)" + ) : ( + + )} + + + + Authority + + {lookupTableAccount.state.authority === undefined ? ( + "None (Frozen)" + ) : ( +
+ )} + + + +
+ ); +} diff --git a/explorer/src/components/account/address-lookup-table/LookupTableEntriesCard.tsx b/explorer/src/components/account/address-lookup-table/LookupTableEntriesCard.tsx new file mode 100644 index 0000000000..f20ff53b21 --- /dev/null +++ b/explorer/src/components/account/address-lookup-table/LookupTableEntriesCard.tsx @@ -0,0 +1,60 @@ +import React from "react"; + +import { AddressLookupTableAccount, PublicKey } from "@solana/web3.js"; +import { Address } from "components/common/Address"; + +export function LookupTableEntriesCard({ + lookupTableAccountData, +}: { + lookupTableAccountData: Uint8Array; +}) { + const lookupTableState = React.useMemo(() => { + return AddressLookupTableAccount.deserialize(lookupTableAccountData); + }, [lookupTableAccountData]); + + return ( +
+
+
+
+

Lookup Table Entries

+
+
+
+ +
+ + + + + + + + + {lookupTableState.addresses.length > 0 && + lookupTableState.addresses.map((entry: PublicKey, index) => { + return renderRow(entry, index); + })} + +
IndexAddress
+
+ + {lookupTableState.addresses.length === 0 && ( +
+
No entries found
+
+ )} +
+ ); +} + +const renderRow = (entry: PublicKey, index: number) => { + return ( + + {index} + +
+ + + ); +}; diff --git a/explorer/src/components/account/address-lookup-table/types.ts b/explorer/src/components/account/address-lookup-table/types.ts new file mode 100644 index 0000000000..136c412d50 --- /dev/null +++ b/explorer/src/components/account/address-lookup-table/types.ts @@ -0,0 +1,13 @@ +import { PublicKey } from "@solana/web3.js"; + +const PROGRAM_ID: string = "AddressLookupTab1e1111111111111111111111111"; + +export function isAddressLookupTableAccount( + accountOwner: PublicKey, + accountData: Uint8Array +): boolean { + if (accountOwner.toBase58() !== PROGRAM_ID) return false; + if (!accountData || accountData.length === 0) return false; + const LOOKUP_TABLE_ACCOUNT_TYPE = 1; + return accountData[0] === LOOKUP_TABLE_ACCOUNT_TYPE; +} diff --git a/explorer/src/components/account/history/TokenInstructionsCard.tsx b/explorer/src/components/account/history/TokenInstructionsCard.tsx index e64f04c711..af62281c06 100644 --- a/explorer/src/components/account/history/TokenInstructionsCard.tsx +++ b/explorer/src/components/account/history/TokenInstructionsCard.tsx @@ -1,6 +1,6 @@ import React from "react"; import { - ParsedConfirmedTransaction, + ParsedTransactionWithMeta, ParsedInstruction, PartiallyDecodedInstruction, PublicKey, @@ -47,23 +47,23 @@ export function TokenInstructionsCard({ pubkey }: { pubkey: PublicKey }) { const { hasTimestamps, detailsList } = React.useMemo(() => { const detailedHistoryMap = history?.data?.transactionMap || - new Map(); + new Map(); const hasTimestamps = transactionRows.some((element) => element.blockTime); const detailsList: React.ReactNode[] = []; const mintMap = new Map(); transactionRows.forEach( ({ signatureInfo, signature, blockTime, statusClass, statusText }) => { - const parsed = detailedHistoryMap.get(signature); - if (!parsed) return; + const transactionWithMeta = detailedHistoryMap.get(signature); + if (!transactionWithMeta) return; - extractMintDetails(parsed, mintMap); + extractMintDetails(transactionWithMeta, mintMap); let instructions: (ParsedInstruction | PartiallyDecodedInstruction)[] = []; - InstructionContainer.create(parsed).instructions.forEach( - ({ instruction, inner }, index) => { + InstructionContainer.create(transactionWithMeta).instructions.forEach( + ({ instruction, inner }) => { if (isRelevantInstruction(pubkey, address, mintMap, instruction)) { instructions.push(instruction); } @@ -79,7 +79,7 @@ export function TokenInstructionsCard({ pubkey }: { pubkey: PublicKey }) { const programId = ix.programId; const instructionName = getTokenInstructionName( - parsed, + transactionWithMeta, ix, signatureInfo ); diff --git a/explorer/src/components/account/history/TokenTransfersCard.tsx b/explorer/src/components/account/history/TokenTransfersCard.tsx index 42400ecd50..a2e15de1bd 100644 --- a/explorer/src/components/account/history/TokenTransfersCard.tsx +++ b/explorer/src/components/account/history/TokenTransfersCard.tsx @@ -1,6 +1,6 @@ import React from "react"; import { - ParsedConfirmedTransaction, + ParsedTransactionWithMeta, ParsedInstruction, PartiallyDecodedInstruction, PublicKey, @@ -68,23 +68,23 @@ export function TokenTransfersCard({ pubkey }: { pubkey: PublicKey }) { const { hasTimestamps, detailsList } = React.useMemo(() => { const detailedHistoryMap = history?.data?.transactionMap || - new Map(); + new Map(); const hasTimestamps = transactionRows.some((element) => element.blockTime); const detailsList: React.ReactNode[] = []; const mintMap = new Map(); transactionRows.forEach( ({ signature, blockTime, statusText, 
      statusClass }) => {
-        const parsed = detailedHistoryMap.get(signature);
-        if (!parsed) return;
+        const transactionWithMeta = detailedHistoryMap.get(signature);
+        if (!transactionWithMeta) return;
 
         // Extract mint information from token deltas
         // (used to filter out non-checked token transfers not belonging to this mint)
-        extractMintDetails(parsed, mintMap);
+        extractMintDetails(transactionWithMeta, mintMap);
 
         // Extract all transfers from transaction
         let transfers: IndexedTransfer[] = [];
-        InstructionContainer.create(parsed).instructions.forEach(
+        InstructionContainer.create(transactionWithMeta).instructions.forEach(
           ({ instruction, inner }, index) => {
             const transfer = getTransfer(instruction, cluster, signature);
             if (transfer) {
diff --git a/explorer/src/components/account/history/common.tsx b/explorer/src/components/account/history/common.tsx
index cfbd1d0ef7..78e4d55a05 100644
--- a/explorer/src/components/account/history/common.tsx
+++ b/explorer/src/components/account/history/common.tsx
@@ -1,4 +1,4 @@
-import { ParsedConfirmedTransaction } from "@solana/web3.js";
+import { ParsedTransactionWithMeta } from "@solana/web3.js";
 
 export type MintDetails = {
   decimals: number;
@@ -6,13 +6,15 @@
 };
 
 export function extractMintDetails(
-  parsedTransaction: ParsedConfirmedTransaction,
+  transactionWithMeta: ParsedTransactionWithMeta,
   mintMap: Map<string, MintDetails>
 ) {
-  if (parsedTransaction.meta?.preTokenBalances) {
-    parsedTransaction.meta.preTokenBalances.forEach((balance) => {
+  if (transactionWithMeta.meta?.preTokenBalances) {
+    transactionWithMeta.meta.preTokenBalances.forEach((balance) => {
       const account =
-        parsedTransaction.transaction.message.accountKeys[balance.accountIndex];
+        transactionWithMeta.transaction.message.accountKeys[
+          balance.accountIndex
+        ];
       mintMap.set(account.pubkey.toBase58(), {
         decimals: balance.uiTokenAmount.decimals,
         mint: balance.mint,
@@ -20,10 +22,12 @@
     });
   }
 
-  if (parsedTransaction.meta?.postTokenBalances) {
-    parsedTransaction.meta.postTokenBalances.forEach((balance) => {
+  if (transactionWithMeta.meta?.postTokenBalances) {
+    transactionWithMeta.meta.postTokenBalances.forEach((balance) => {
       const account =
-        parsedTransaction.transaction.message.accountKeys[balance.accountIndex];
+        transactionWithMeta.transaction.message.accountKeys[
+          balance.accountIndex
+        ];
       mintMap.set(account.pubkey.toBase58(), {
         decimals: balance.uiTokenAmount.decimals,
         mint: balance.mint,
diff --git a/explorer/src/components/instruction/AddressLookupTableDetailsCard.tsx b/explorer/src/components/instruction/AddressLookupTableDetailsCard.tsx
new file mode 100644
index 0000000000..c0e9052065
--- /dev/null
+++ b/explorer/src/components/instruction/AddressLookupTableDetailsCard.tsx
@@ -0,0 +1,46 @@
+import React from "react";
+import { TransactionInstruction, SignatureResult } from "@solana/web3.js";
+import { InstructionCard } from "./InstructionCard";
+import { useCluster } from "providers/cluster";
+import { reportError } from "utils/sentry";
+import { parseAddressLookupTableInstructionTitle } from "./address-lookup-table/types";
+
+export function AddressLookupTableDetailsCard({
+  ix,
+  index,
+  result,
+  signature,
+  innerCards,
+  childIndex,
+}: {
+  ix: TransactionInstruction;
+  index: number;
+  result: SignatureResult;
+  signature: string;
+  innerCards?: JSX.Element[];
+  childIndex?: number;
+}) {
+  const { url } = useCluster();
+
+  let title;
+  try {
+    title = parseAddressLookupTableInstructionTitle(ix);
+  } catch (error) {
reportError(error, { + url: url, + signature: signature, + }); + } + + return ( + + ); +} diff --git a/explorer/src/components/instruction/address-lookup-table/types.ts b/explorer/src/components/instruction/address-lookup-table/types.ts new file mode 100644 index 0000000000..9032b63d31 --- /dev/null +++ b/explorer/src/components/instruction/address-lookup-table/types.ts @@ -0,0 +1,31 @@ +import { TransactionInstruction } from "@solana/web3.js"; + +const PROGRAM_ID: string = "AddressLookupTab1e1111111111111111111111111"; + +const INSTRUCTION_LOOKUP: { [key: number]: string } = { + 0: "Create Lookup Table", + 1: "Freeze Lookup Table", + 2: "Extend Lookup Table", + 3: "Deactivate Lookup Table", + 4: "Close Lookup Table", +}; + +export function isAddressLookupTableInstruction( + instruction: TransactionInstruction +): boolean { + return PROGRAM_ID === instruction.programId.toBase58(); +} + +export function parseAddressLookupTableInstructionTitle( + instruction: TransactionInstruction +): string { + const code = instruction.data[0]; + + if (!(code in INSTRUCTION_LOOKUP)) { + throw new Error( + `Unrecognized Address Lookup Table instruction code: ${code}` + ); + } + + return INSTRUCTION_LOOKUP[code]; +} diff --git a/explorer/src/components/transaction/InstructionsSection.tsx b/explorer/src/components/transaction/InstructionsSection.tsx index c47e6dd8bc..7f284b8af1 100644 --- a/explorer/src/components/transaction/InstructionsSection.tsx +++ b/explorer/src/components/transaction/InstructionsSection.tsx @@ -25,6 +25,7 @@ import { SignatureProps, } from "pages/TransactionDetailsPage"; import { intoTransactionInstruction } from "utils/tx"; +import { isAddressLookupTableInstruction } from "components/instruction/address-lookup-table/types"; import { isSerumInstruction } from "components/instruction/serum/types"; import { isTokenLendingInstruction } from "components/instruction/token-lending/types"; import { isTokenSwapInstruction } from "components/instruction/token-swap/types"; @@ -48,6 +49,7 @@ import { useAnchorProgram } from "providers/anchor"; import { LoadingCard } from "components/common/LoadingCard"; import { ErrorBoundary } from "@sentry/react"; import { ComputeBudgetDetailsCard } from "components/instruction/ComputeBudgetDetailsCard"; +import { AddressLookupTableDetailsCard } from "components/instruction/AddressLookupTableDetailsCard"; export type InstructionDetailsProps = { tx: ParsedTransaction; @@ -66,11 +68,11 @@ export function InstructionsSection({ signature }: SignatureProps) { const refreshDetails = () => fetchDetails(signature); const result = status?.data?.info?.result; - if (!result || !details?.data?.transaction) { + const transactionWithMeta = details?.data?.transactionWithMeta; + if (!result || !transactionWithMeta) { return ; } - const { meta } = details.data.transaction; - const { transaction } = details.data?.transaction; + const { meta, transaction } = transactionWithMeta; if (transaction.message.instructions.length === 0) { return ; @@ -83,7 +85,7 @@ export function InstructionsSection({ signature }: SignatureProps) { if ( meta?.innerInstructions && (cluster !== Cluster.MainnetBeta || - details.data.transaction.slot >= INNER_INSTRUCTIONS_START_SLOT) + transactionWithMeta.slot >= INNER_INSTRUCTIONS_START_SLOT) ) { meta.innerInstructions.forEach((parsed: ParsedInnerInstruction) => { if (!innerInstructions[parsed.index]) { @@ -224,7 +226,9 @@ function InstructionCard({ childIndex, }; - if (isBonfidaBotInstruction(transactionIx)) { + if 
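A note on the instruction parser above: parseAddressLookupTableInstructionTitle reads only the first byte of the instruction data and throws on any code outside 0 through 4, which is why AddressLookupTableDetailsCard wraps the call in try/catch and reports the failure to Sentry before rendering. A short sketch of the expected behavior, assuming nothing beyond the two helpers defined in the new types.ts:

import { PublicKey, TransactionInstruction } from "@solana/web3.js";
import {
  isAddressLookupTableInstruction,
  parseAddressLookupTableInstructionTitle,
} from "components/instruction/address-lookup-table/types";

const ix = new TransactionInstruction({
  programId: new PublicKey("AddressLookupTab1e1111111111111111111111111"),
  keys: [],
  data: Buffer.from([2]), // instruction code 2
});

isAddressLookupTableInstruction(ix); // true
parseAddressLookupTableInstructionTitle(ix); // "Extend Lookup Table"
// Buffer.from([9]) in place of the data above would make the parser throw.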
(isAddressLookupTableInstruction(transactionIx)) { + return ; + } else if (isBonfidaBotInstruction(transactionIx)) { return ; } else if (isMangoInstruction(transactionIx)) { return ; diff --git a/explorer/src/components/transaction/ProgramLogSection.tsx b/explorer/src/components/transaction/ProgramLogSection.tsx index 0f8f44e85f..8142ea279d 100644 --- a/explorer/src/components/transaction/ProgramLogSection.tsx +++ b/explorer/src/components/transaction/ProgramLogSection.tsx @@ -9,12 +9,12 @@ export function ProgramLogSection({ signature }: SignatureProps) { const { cluster, url } = useCluster(); const details = useTransactionDetails(signature); - const transaction = details?.data?.transaction; - if (!transaction) return null; - const message = transaction.transaction.message; + const transactionWithMeta = details?.data?.transactionWithMeta; + if (!transactionWithMeta) return null; + const message = transactionWithMeta.transaction.message; - const logMessages = transaction.meta?.logMessages || null; - const err = transaction.meta?.err || null; + const logMessages = transactionWithMeta.meta?.logMessages || null; + const err = transactionWithMeta.meta?.err || null; let prettyLogs = null; if (logMessages !== null) { diff --git a/explorer/src/components/transaction/TokenBalancesCard.tsx b/explorer/src/components/transaction/TokenBalancesCard.tsx index ff1080b1b9..ef06af5540 100644 --- a/explorer/src/components/transaction/TokenBalancesCard.tsx +++ b/explorer/src/components/transaction/TokenBalancesCard.tsx @@ -28,11 +28,10 @@ export function TokenBalancesCard({ signature }: SignatureProps) { return null; } - const preTokenBalances = details.data?.transaction?.meta?.preTokenBalances; - const postTokenBalances = details.data?.transaction?.meta?.postTokenBalances; - - const accountKeys = - details.data?.transaction?.transaction.message.accountKeys; + const transactionWithMeta = details.data?.transactionWithMeta; + const preTokenBalances = transactionWithMeta?.meta?.preTokenBalances; + const postTokenBalances = transactionWithMeta?.meta?.postTokenBalances; + const accountKeys = transactionWithMeta?.transaction.message.accountKeys; if (!preTokenBalances || !postTokenBalances || !accountKeys) { return null; diff --git a/explorer/src/pages/AccountDetailsPage.tsx b/explorer/src/pages/AccountDetailsPage.tsx index b9c0fadf76..1e69cc3d39 100644 --- a/explorer/src/pages/AccountDetailsPage.tsx +++ b/explorer/src/pages/AccountDetailsPage.tsx @@ -44,6 +44,9 @@ import { SecurityCard } from "components/account/SecurityCard"; import { AnchorAccountCard } from "components/account/AnchorAccountCard"; import { AnchorProgramCard } from "components/account/AnchorProgramCard"; import { useAnchorProgram } from "providers/anchor"; +import { isAddressLookupTableAccount } from "components/account/address-lookup-table/types"; +import { AddressLookupTableAccountSection } from "components/account/address-lookup-table/AddressLookupTableAccountSection"; +import { LookupTableEntriesCard } from "components/account/address-lookup-table/LookupTableEntriesCard"; const IDENTICON_WIDTH = 64; @@ -124,6 +127,13 @@ const TABS_LOOKUP: { [id: string]: Tab[] } = { path: "/security", }, ], + "address-lookup-table": [ + { + slug: "entries", + title: "Table Entries", + path: "/entries", + }, + ], }; const TOKEN_TABS_HIDDEN = [ @@ -309,7 +319,8 @@ function DetailsSections({ } function InfoSection({ account }: { account: Account }) { - const data = account?.details?.data; + const details = account?.details; + const data = details?.data; if (data 
&& data.program === "bpf-upgradeable-loader") { return ( @@ -342,6 +353,16 @@ function InfoSection({ account }: { account: Account }) { return ( ); + } else if ( + details?.rawData && + isAddressLookupTableAccount(details.owner, details.rawData) + ) { + return ( + + ); } else { return ; } @@ -374,7 +395,8 @@ export type MoreTabs = | "domains" | "security" | "anchor-program" - | "anchor-account"; + | "anchor-account" + | "entries"; function MoreSection({ account, @@ -386,7 +408,8 @@ function MoreSection({ tabs: (JSX.Element | null)[]; }) { const pubkey = account.pubkey; - const data = account?.details?.data; + const details = account?.details; + const data = details?.data; return ( <> @@ -456,6 +479,11 @@ function MoreSection({ )} + {tab === "entries" && + details?.rawData && + isAddressLookupTableAccount(details.owner, details.rawData) && ( + + )} ); } @@ -484,6 +512,14 @@ function getTabs(pubkey: PublicKey, account: Account): TabComponent[] { tabs.push(...TABS_LOOKUP[programTypeKey]); } + // Add the key for address lookup tables + if ( + account.details?.rawData && + isAddressLookupTableAccount(account.details.owner, account.details.rawData) + ) { + tabs.push(...TABS_LOOKUP["address-lookup-table"]); + } + // Add the key for Metaplex NFTs if ( data && diff --git a/explorer/src/pages/TransactionDetailsPage.tsx b/explorer/src/pages/TransactionDetailsPage.tsx index e8404d0290..044c46d502 100644 --- a/explorer/src/pages/TransactionDetailsPage.tsx +++ b/explorer/src/pages/TransactionDetailsPage.tsx @@ -193,8 +193,9 @@ function StatusCard({ } } - const fee = details?.data?.transaction?.meta?.fee; - const transaction = details?.data?.transaction?.transaction; + const transactionWithMeta = details?.data?.transactionWithMeta; + const fee = transactionWithMeta?.meta?.fee; + const transaction = transactionWithMeta?.transaction; const blockhash = transaction?.message.recentBlockhash; const isNonce = (() => { if (!transaction || transaction.message.instructions.length < 1) { @@ -338,7 +339,8 @@ function DetailsSection({ signature }: SignatureProps) { const details = useTransactionDetails(signature); const fetchDetails = useFetchTransactionDetails(); const status = useTransactionStatus(signature); - const transaction = details?.data?.transaction?.transaction; + const transactionWithMeta = details?.data?.transactionWithMeta; + const transaction = transactionWithMeta?.transaction; const message = transaction?.message; const { status: clusterStatus } = useCluster(); const refreshDetails = () => fetchDetails(signature); @@ -360,7 +362,7 @@ function DetailsSection({ signature }: SignatureProps) { return ; } else if (details.status === FetchStatus.FetchFailed) { return ; - } else if (!details.data?.transaction || !message) { + } else if (!transactionWithMeta || !message) { return ; } @@ -377,11 +379,12 @@ function DetailsSection({ signature }: SignatureProps) { function AccountsCard({ signature }: SignatureProps) { const details = useTransactionDetails(signature); - if (!details?.data?.transaction) { + const transactionWithMeta = details?.data?.transactionWithMeta; + if (!transactionWithMeta) { return null; } - const { meta, transaction } = details.data.transaction; + const { meta, transaction } = transactionWithMeta; const { message } = transaction; if (!meta) { diff --git a/explorer/src/providers/accounts/history.tsx b/explorer/src/providers/accounts/history.tsx index 4d20df7dd4..618a426a15 100644 --- a/explorer/src/providers/accounts/history.tsx +++ b/explorer/src/providers/accounts/history.tsx @@ -4,7 
+4,7 @@ import {
   ConfirmedSignatureInfo,
   TransactionSignature,
   Connection,
-  ParsedConfirmedTransaction,
+  ParsedTransactionWithMeta,
 } from "@solana/web3.js";
 import { useCluster, Cluster } from "../cluster";
 import * as Cache from "providers/cache";
@@ -13,7 +13,7 @@ import { reportError } from "utils/sentry";
 
 const MAX_TRANSACTION_BATCH_SIZE = 10;
 
-type TransactionMap = Map<string, ParsedConfirmedTransaction>;
+type TransactionMap = Map<string, ParsedTransactionWithMeta>;
 
 type AccountHistory = {
   fetched: ConfirmedSignatureInfo[];
@@ -109,11 +109,14 @@ async function fetchParsedTransactions(
     0,
     MAX_TRANSACTION_BATCH_SIZE
   );
-  const fetched = await connection.getParsedConfirmedTransactions(signatures);
+  const fetched = await connection.getParsedTransactions(signatures);
   fetched.forEach(
-    (parsed: ParsedConfirmedTransaction | null, index: number) => {
-      if (parsed !== null) {
-        transactionMap.set(signatures[index], parsed);
+    (
+      transactionWithMeta: ParsedTransactionWithMeta | null,
+      index: number
+    ) => {
+      if (transactionWithMeta !== null) {
+        transactionMap.set(signatures[index], transactionWithMeta);
       }
     }
   );
diff --git a/explorer/src/providers/transactions/parsed.tsx b/explorer/src/providers/transactions/parsed.tsx
index 73c9d9a133..4ec8b2aaa0 100644
--- a/explorer/src/providers/transactions/parsed.tsx
+++ b/explorer/src/providers/transactions/parsed.tsx
@@ -2,7 +2,7 @@ import React from "react";
 import {
   Connection,
   TransactionSignature,
-  ParsedConfirmedTransaction,
+  ParsedTransactionWithMeta,
 } from "@solana/web3.js";
 import { useCluster, Cluster } from "../cluster";
 import * as Cache from "providers/cache";
@@ -10,7 +10,7 @@ import { ActionType, FetchStatus } from "providers/cache";
 import { reportError } from "utils/sentry";
 
 export interface Details {
-  transaction?: ParsedConfirmedTransaction | null;
+  transactionWithMeta?: ParsedTransactionWithMeta | null;
 }
 
 type State = Cache.State<Details>
; @@ -53,9 +53,9 @@ async function fetchDetails( }); let fetchStatus; - let transaction; + let transactionWithMeta; try { - transaction = await new Connection(url).getParsedConfirmedTransaction( + transactionWithMeta = await new Connection(url).getParsedTransaction( signature, "confirmed" ); @@ -70,7 +70,7 @@ async function fetchDetails( type: ActionType.Update, status: fetchStatus, key: signature, - data: { transaction }, + data: { transactionWithMeta }, url, }); } diff --git a/explorer/src/utils/instruction.ts b/explorer/src/utils/instruction.ts index 7618e1d7bf..429204218c 100644 --- a/explorer/src/utils/instruction.ts +++ b/explorer/src/utils/instruction.ts @@ -7,7 +7,7 @@ import { ParsedInfo } from "validators"; import { reportError } from "utils/sentry"; import { ConfirmedSignatureInfo, - ParsedConfirmedTransaction, + ParsedTransactionWithMeta, ParsedInstruction, PartiallyDecodedInstruction, } from "@solana/web3.js"; @@ -43,30 +43,31 @@ export interface InstructionItem { export class InstructionContainer { readonly instructions: InstructionItem[]; - static create(parsedTransaction: ParsedConfirmedTransaction) { - return new InstructionContainer(parsedTransaction); + static create(transactionWithMeta: ParsedTransactionWithMeta) { + return new InstructionContainer(transactionWithMeta); } - constructor(parsedTransaction: ParsedConfirmedTransaction) { - this.instructions = parsedTransaction.transaction.message.instructions.map( - (instruction) => { - if ("parsed" in instruction) { - if (typeof instruction.parsed === "object") { - instruction.parsed = create(instruction.parsed, ParsedInfo); - } else if (typeof instruction.parsed !== "string") { - throw new Error("Unexpected parsed response"); + constructor(transactionWithMeta: ParsedTransactionWithMeta) { + this.instructions = + transactionWithMeta.transaction.message.instructions.map( + (instruction) => { + if ("parsed" in instruction) { + if (typeof instruction.parsed === "object") { + instruction.parsed = create(instruction.parsed, ParsedInfo); + } else if (typeof instruction.parsed !== "string") { + throw new Error("Unexpected parsed response"); + } } - } - return { - instruction, - inner: [], - }; - } - ); + return { + instruction, + inner: [], + }; + } + ); - if (parsedTransaction.meta?.innerInstructions) { - for (let inner of parsedTransaction.meta.innerInstructions) { + if (transactionWithMeta.meta?.innerInstructions) { + for (let inner of transactionWithMeta.meta.innerInstructions) { this.instructions[inner.index].inner.push(...inner.instructions); } } @@ -89,16 +90,16 @@ export function getTokenProgramInstructionName( } export function getTokenInstructionName( - transaction: ParsedConfirmedTransaction, + transactionWithMeta: ParsedTransactionWithMeta, ix: ParsedInstruction | PartiallyDecodedInstruction, signatureInfo: ConfirmedSignatureInfo ) { let name = "Unknown"; let transactionInstruction; - if (transaction?.transaction) { + if (transactionWithMeta?.transaction) { transactionInstruction = intoTransactionInstruction( - transaction.transaction, + transactionWithMeta.transaction, ix ); } @@ -163,7 +164,7 @@ export function getTokenInstructionName( } export function getTokenInstructionType( - transaction: ParsedConfirmedTransaction, + transactionWithMeta: ParsedTransactionWithMeta, ix: ParsedInstruction | PartiallyDecodedInstruction, signatureInfo: ConfirmedSignatureInfo, index: number @@ -171,8 +172,8 @@ export function getTokenInstructionType( const innerInstructions: (ParsedInstruction | PartiallyDecodedInstruction)[] = []; - 
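The fetchDetails change above is the heart of this migration: the deprecated web3.js methods getParsedConfirmedTransaction / getParsedConfirmedTransactions are replaced by getParsedTransaction / getParsedTransactions, with results typed as ParsedTransactionWithMeta. A hedged sketch of the new single-transaction fetch path; the URL and signature arguments are placeholders:

import { Connection, ParsedTransactionWithMeta } from "@solana/web3.js";

async function fetchTransactionWithMeta(
  url: string,
  signature: string
): Promise<ParsedTransactionWithMeta | null> {
  const connection = new Connection(url);
  // Resolves to null if the transaction is not found at "confirmed" commitment.
  return await connection.getParsedTransaction(signature, "confirmed");
}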
if (transaction.meta?.innerInstructions) { - transaction.meta.innerInstructions.forEach((ix) => { + if (transactionWithMeta.meta?.innerInstructions) { + transactionWithMeta.meta.innerInstructions.forEach((ix) => { if (ix.index === index) { ix.instructions.forEach((inner) => { innerInstructions.push(inner); @@ -182,7 +183,8 @@ export function getTokenInstructionType( } let name = - getTokenInstructionName(transaction, ix, signatureInfo) || "Unknown"; + getTokenInstructionName(transactionWithMeta, ix, signatureInfo) || + "Unknown"; return { name, diff --git a/faucet/Cargo.toml b/faucet/Cargo.toml index 8d588ed6a8..248149bbda 100644 --- a/faucet/Cargo.toml +++ b/faucet/Cargo.toml @@ -15,7 +15,7 @@ byteorder = "1.4.3" clap = "2.33" crossbeam-channel = "0.5" log = "0.4.17" -serde = "1.0.138" +serde = "1.0.143" serde_derive = "1.0.103" solana-clap-utils = { path = "../clap-utils", version = "=1.12.0" } solana-cli-config = { path = "../cli-config", version = "=1.12.0" } diff --git a/frozen-abi/Cargo.toml b/frozen-abi/Cargo.toml index 1a44abf249..28c52f974e 100644 --- a/frozen-abi/Cargo.toml +++ b/frozen-abi/Cargo.toml @@ -14,7 +14,7 @@ bs58 = "0.4.0" bv = { version = "0.11.1", features = ["serde"] } lazy_static = "1.4.0" log = { version = "0.4.17", features = ["std"] } -once_cell = "1.12.0" +once_cell = "1.13.0" serde = { version = "1.0", features = ["derive", "rc"] } serde_bytes = "0.11" serde_derive = "1.0" @@ -38,7 +38,7 @@ getrandom = { version = "0.1", features = ["dummy"] } hashbrown = { version = "0.12", features = ["raw"] } im = { version = "15.1.0", features = ["rayon", "serde"] } memmap2 = "0.5.3" -once_cell = { version = "1.8", features = ["alloc", "default", "race", "std"] } +once_cell = { version = "1.13", features = ["alloc", "default", "race", "std"] } rand_core = { version = "0.6.3", features = ["alloc", "getrandom", "std"] } subtle = { version = "2.4.1", features = ["default", "i128", "std"] } diff --git a/frozen-abi/src/abi_example.rs b/frozen-abi/src/abi_example.rs index e0dfa50b8a..2e1bdbcac1 100644 --- a/frozen-abi/src/abi_example.rs +++ b/frozen-abi/src/abi_example.rs @@ -411,7 +411,7 @@ lazy_static! 
{ impl AbiExample for &Vec { fn example() -> Self { info!("AbiExample for (&Vec): {}", type_name::()); - &*VEC_U8 + &VEC_U8 } } diff --git a/genesis/Cargo.toml b/genesis/Cargo.toml index 0850052ccd..4eaef127d9 100644 --- a/genesis/Cargo.toml +++ b/genesis/Cargo.toml @@ -12,8 +12,8 @@ documentation = "https://docs.rs/solana-genesis" [dependencies] base64 = "0.13.0" clap = "2.33.1" -serde = "1.0.138" -serde_json = "1.0.81" +serde = "1.0.143" +serde_json = "1.0.83" serde_yaml = "0.8.26" solana-clap-utils = { path = "../clap-utils", version = "=1.12.0" } solana-cli-config = { path = "../cli-config", version = "=1.12.0" } diff --git a/geyser-plugin-manager/Cargo.toml b/geyser-plugin-manager/Cargo.toml index d92ce74488..88d41c06ff 100644 --- a/geyser-plugin-manager/Cargo.toml +++ b/geyser-plugin-manager/Cargo.toml @@ -15,7 +15,7 @@ crossbeam-channel = "0.5" json5 = "0.4.1" libloading = "0.7.3" log = "0.4.17" -serde_json = "1.0.81" +serde_json = "1.0.83" solana-geyser-plugin-interface = { path = "../geyser-plugin-interface", version = "=1.12.0" } solana-measure = { path = "../measure", version = "=1.12.0" } solana-metrics = { path = "../metrics", version = "=1.12.0" } diff --git a/geyser-plugin-manager/src/slot_status_observer.rs b/geyser-plugin-manager/src/slot_status_observer.rs index bad8fa90ec..b2f6bf5f79 100644 --- a/geyser-plugin-manager/src/slot_status_observer.rs +++ b/geyser-plugin-manager/src/slot_status_observer.rs @@ -48,7 +48,7 @@ impl SlotStatusObserver { slot_status_notifier: SlotStatusNotifier, ) -> JoinHandle<()> { Builder::new() - .name("bank_notification_receiver".to_string()) + .name("solBankNotif".to_string()) .spawn(move || { while !exit.load(Ordering::Relaxed) { if let Ok(slot) = bank_notification_receiver.recv() { diff --git a/gossip/Cargo.toml b/gossip/Cargo.toml index 5ac13ea772..74e01fcfae 100644 --- a/gossip/Cargo.toml +++ b/gossip/Cargo.toml @@ -24,7 +24,7 @@ num-traits = "0.2" rand = "0.7.0" rand_chacha = "0.2.2" rayon = "1.5.3" -serde = "1.0.138" +serde = "1.0.143" serde_bytes = "0.11" serde_derive = "1.0.103" solana-bloom = { path = "../bloom", version = "=1.12.0" } @@ -50,7 +50,7 @@ thiserror = "1.0" [dev-dependencies] num_cpus = "1.13.1" regex = "1" -serial_test = "0.8.0" +serial_test = "0.9.0" [build-dependencies] rustc_version = "0.4" diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 66c906d043..f95dffa0d5 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -48,7 +48,7 @@ use { solana_ledger::shred::Shred, solana_measure::measure::Measure, solana_net_utils::{ - bind_common, bind_common_in_range, bind_in_range, bind_two_consecutive_in_range, + bind_common, bind_common_in_range, bind_in_range, bind_two_in_range_with_offset, find_available_port_in_range, multi_bind_in_range, PortRange, }, solana_perf::{ @@ -92,6 +92,7 @@ use { thread::{sleep, Builder, JoinHandle}, time::{Duration, Instant}, }, + thiserror::Error, }; /// The Data plane fanout size, also used as the neighborhood size @@ -138,12 +139,17 @@ const MIN_STAKE_FOR_GOSSIP: u64 = solana_sdk::native_token::LAMPORTS_PER_SOL; /// Minimum number of staked nodes for enforcing stakes in gossip. 
const MIN_NUM_STAKED_NODES: usize = 500; -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Error)] pub enum ClusterInfoError { + #[error("NoPeers")] NoPeers, + #[error("NoLeader")] NoLeader, + #[error("BadContactInfo")] BadContactInfo, + #[error("BadGossipAddress")] BadGossipAddress, + #[error("TooManyIncrementalSnapshotHashes")] TooManyIncrementalSnapshotHashes, } @@ -1680,11 +1686,11 @@ impl ClusterInfo { ) -> JoinHandle<()> { let thread_pool = ThreadPoolBuilder::new() .num_threads(std::cmp::min(get_thread_count(), 8)) - .thread_name(|i| format!("ClusterInfo::gossip-{}", i)) + .thread_name(|i| format!("solRunGossip{:02}", i)) .build() .unwrap(); Builder::new() - .name("solana-gossip".to_string()) + .name("solGossip".to_string()) .spawn(move || { let mut last_push = timestamp(); let mut last_contact_info_trace = timestamp(); @@ -2168,14 +2174,18 @@ impl ClusterInfo { I: IntoIterator, { let keypair = self.keypair(); - let pongs_and_dests: Vec<_> = pings - .into_iter() - .filter_map(|(addr, ping)| { - let pong = Pong::new(&ping, &keypair).ok()?; - let pong = Protocol::PongMessage(pong); - Some((addr, pong)) - }) - .collect(); + let mut pongs_and_dests = Vec::new(); + for (addr, ping) in pings { + // Respond both with and without domain so that the other node will + // accept the response regardless of its upgrade status. + // TODO: remove domain = false once cluster is upgraded. + for domain in [false, true] { + if let Ok(pong) = Pong::new(domain, &ping, &keypair) { + let pong = Protocol::PongMessage(pong); + pongs_and_dests.push((addr, pong)); + } + } + } if pongs_and_dests.is_empty() { None } else { @@ -2554,7 +2564,7 @@ impl ClusterInfo { ) -> JoinHandle<()> { let thread_pool = ThreadPoolBuilder::new() .num_threads(get_thread_count().min(8)) - .thread_name(|i| format!("gossip-consume-{}", i)) + .thread_name(|i| format!("solGossipCons{:02}", i)) .build() .unwrap(); let run_consume = move || { @@ -2570,7 +2580,7 @@ impl ClusterInfo { } } }; - let thread_name = String::from("gossip-consume"); + let thread_name = String::from("solGossipConsum"); Builder::new().name(thread_name).spawn(run_consume).unwrap() } @@ -2586,11 +2596,11 @@ impl ClusterInfo { let recycler = PacketBatchRecycler::default(); let thread_pool = ThreadPoolBuilder::new() .num_threads(get_thread_count().min(8)) - .thread_name(|i| format!("sol-gossip-work-{}", i)) + .thread_name(|i| format!("solGossipWork{:02}", i)) .build() .unwrap(); Builder::new() - .name("solana-listen".to_string()) + .name("solGossipListen".to_string()) .spawn(move || { while !exit.load(Ordering::Relaxed) { if let Err(err) = self.run_listen( @@ -2750,20 +2760,21 @@ impl Node { } pub fn new_localhost_with_pubkey(pubkey: &Pubkey) -> Self { let bind_ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)); + let port_range = (1024, 65535); let ((_tpu_port, tpu), (_tpu_quic_port, tpu_quic)) = - bind_two_consecutive_in_range(bind_ip_addr, (1024, 65535)).unwrap(); + bind_two_in_range_with_offset(bind_ip_addr, port_range, QUIC_PORT_OFFSET).unwrap(); let (gossip_port, (gossip, ip_echo)) = - bind_common_in_range(bind_ip_addr, (1024, 65535)).unwrap(); + bind_common_in_range(bind_ip_addr, port_range).unwrap(); let gossip_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), gossip_port); let tvu = UdpSocket::bind("127.0.0.1:0").unwrap(); let tvu_forwards = UdpSocket::bind("127.0.0.1:0").unwrap(); let ((_tpu_forwards_port, tpu_forwards), (_tpu_forwards_quic_port, tpu_forwards_quic)) = - bind_two_consecutive_in_range(bind_ip_addr, (1024, 
65535)).unwrap(); + bind_two_in_range_with_offset(bind_ip_addr, port_range, QUIC_PORT_OFFSET).unwrap(); let tpu_vote = UdpSocket::bind("127.0.0.1:0").unwrap(); let repair = UdpSocket::bind("127.0.0.1:0").unwrap(); - let rpc_port = find_available_port_in_range(bind_ip_addr, (1024, 65535)).unwrap(); + let rpc_port = find_available_port_in_range(bind_ip_addr, port_range).unwrap(); let rpc_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), rpc_port); - let rpc_pubsub_port = find_available_port_in_range(bind_ip_addr, (1024, 65535)).unwrap(); + let rpc_pubsub_port = find_available_port_in_range(bind_ip_addr, port_range).unwrap(); let rpc_pubsub_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), rpc_pubsub_port); @@ -2839,9 +2850,9 @@ impl Node { let (tvu_port, tvu) = Self::bind(bind_ip_addr, port_range); let (tvu_forwards_port, tvu_forwards) = Self::bind(bind_ip_addr, port_range); let ((tpu_port, tpu), (_tpu_quic_port, tpu_quic)) = - bind_two_consecutive_in_range(bind_ip_addr, port_range).unwrap(); + bind_two_in_range_with_offset(bind_ip_addr, port_range, QUIC_PORT_OFFSET).unwrap(); let ((tpu_forwards_port, tpu_forwards), (_tpu_forwards_quic_port, tpu_forwards_quic)) = - bind_two_consecutive_in_range(bind_ip_addr, port_range).unwrap(); + bind_two_in_range_with_offset(bind_ip_addr, port_range, QUIC_PORT_OFFSET).unwrap(); let (tpu_vote_port, tpu_vote) = Self::bind(bind_ip_addr, port_range); let (_, retransmit_socket) = Self::bind(bind_ip_addr, port_range); let (repair_port, repair) = Self::bind(bind_ip_addr, port_range); @@ -3284,7 +3295,9 @@ RPC Enabled Nodes: 1"#; let pongs: Vec<(SocketAddr, Pong)> = pings .iter() .zip(&remote_nodes) - .map(|(ping, (keypair, socket))| (*socket, Pong::new(ping, keypair).unwrap())) + .map(|(ping, (keypair, socket))| { + (*socket, Pong::new(/*domain:*/ true, ping, keypair).unwrap()) + }) .collect(); let now = now + Duration::from_millis(1); cluster_info.handle_batch_pong_messages(pongs, now); @@ -3327,7 +3340,7 @@ RPC Enabled Nodes: 1"#; .collect(); let pongs: Vec<_> = pings .iter() - .map(|ping| Pong::new(ping, &this_node).unwrap()) + .map(|ping| Pong::new(/*domain:*/ false, ping, &this_node).unwrap()) .collect(); let recycler = PacketBatchRecycler::default(); let packets = cluster_info @@ -3339,9 +3352,9 @@ RPC Enabled Nodes: 1"#; &recycler, ) .unwrap(); - assert_eq!(remote_nodes.len(), packets.len()); + assert_eq!(remote_nodes.len() * 2, packets.len()); for (packet, (_, socket), pong) in izip!( - packets.into_iter(), + packets.into_iter().step_by(2), remote_nodes.into_iter(), pongs.into_iter() ) { @@ -3763,6 +3776,8 @@ RPC Enabled Nodes: 1"#; latest_refreshed_recent_blockhash, ); cluster_info.refresh_vote(latest_refresh_tx.clone(), refresh_slot); + // Sleep to avoid votes with same timestamp causing later vote to not override prior vote + std::thread::sleep(Duration::from_millis(1)); } // The diff since `max_ts` should only be the latest refreshed vote let votes = cluster_info.get_votes(&mut cursor); diff --git a/gossip/src/crds_gossip_pull.rs b/gossip/src/crds_gossip_pull.rs index 2780bf7dab..04df91227b 100644 --- a/gossip/src/crds_gossip_pull.rs +++ b/gossip/src/crds_gossip_pull.rs @@ -256,7 +256,7 @@ impl CrdsGossipPull { if let Some(ping) = ping { pings.push((peer.gossip, ping)); } - check.then(|| (weight, peer)) + check.then_some((weight, peer)) }) .unzip() }; diff --git a/gossip/src/gossip_service.rs b/gossip/src/gossip_service.rs index 9bc911b405..d487cf546e 100644 --- a/gossip/src/gossip_service.rs +++ 
b/gossip/src/gossip_service.rs
@@ -80,7 +80,7 @@ impl GossipService {
             exit.clone(),
         );
         let t_responder = streamer::responder(
-            "gossip",
+            "Gossip",
             gossip_socket,
             response_receiver,
             socket_addr_space,
diff --git a/gossip/src/ping_pong.rs b/gossip/src/ping_pong.rs
index 6c3a219cfd..16961f26f1 100644
--- a/gossip/src/ping_pong.rs
+++ b/gossip/src/ping_pong.rs
@@ -16,6 +16,8 @@ use {
     },
 };
 
+const PING_PONG_HASH_PREFIX: &[u8] = "SOLANA_PING_PONG".as_bytes();
+
 #[derive(AbiExample, Debug, Deserialize, Serialize)]
 pub struct Ping<T> {
     from: Pubkey,
@@ -100,8 +102,17 @@ impl<T: Serialize> Signable for Ping<T> {
 }
 
 impl Pong {
-    pub fn new<T: Serialize>(ping: &Ping<T>, keypair: &Keypair) -> Result<Self, Error> {
-        let hash = hash::hash(&serialize(&ping.token)?);
+    pub fn new<T: Serialize>(
+        domain: bool,
+        ping: &Ping<T>,
+        keypair: &Keypair,
+    ) -> Result<Self, Error> {
+        let token = serialize(&ping.token)?;
+        let hash = if domain {
+            hash::hashv(&[PING_PONG_HASH_PREFIX, &token])
+        } else {
+            hash::hash(&token)
+        };
         let pong = Pong {
             from: keypair.pubkey(),
             hash,
@@ -187,9 +198,15 @@ impl PingCache {
             Some(t) if now.saturating_duration_since(*t) < delay => None,
             _ => {
                 let ping = pingf()?;
-                let hash = hash::hash(&serialize(&ping.token).ok()?);
-                self.pings.put(node, now);
+                let token = serialize(&ping.token).ok()?;
+                // For backward compatibility, responses both with and
+                // without the domain prefix are accepted for now.
+                // TODO: remove the no-domain case once the cluster is upgraded.
+                let hash = hash::hash(&token);
+                self.pending_cache.put(hash, node);
+                let hash = hash::hashv(&[PING_PONG_HASH_PREFIX, &token]);
                 self.pending_cache.put(hash, node);
+                self.pings.put(node, now);
                 Some(ping)
             }
         }
@@ -281,10 +298,18 @@ mod tests {
         assert!(ping.verify());
         assert!(ping.sanitize().is_ok());
 
-        let pong = Pong::new(&ping, &keypair).unwrap();
+        let pong = Pong::new(/*domain:*/ false, &ping, &keypair).unwrap();
         assert!(pong.verify());
         assert!(pong.sanitize().is_ok());
         assert_eq!(hash::hash(&ping.token), pong.hash);
+
+        let pong = Pong::new(/*domain:*/ true, &ping, &keypair).unwrap();
+        assert!(pong.verify());
+        assert!(pong.sanitize().is_ok());
+        assert_eq!(
+            hash::hashv(&[PING_PONG_HASH_PREFIX, &ping.token]),
+            pong.hash
+        );
     }
 
     #[test]
@@ -339,7 +364,10 @@ mod tests {
                     assert!(ping.is_none());
                 }
                 Some(ping) => {
-                    let pong = Pong::new(ping, keypair).unwrap();
+                    let domain = rng.gen_ratio(1, 2);
+                    let pong = Pong::new(domain, ping, keypair).unwrap();
+                    assert!(cache.add(&pong, *socket, now));
+                    let pong = Pong::new(!domain, ping, keypair).unwrap();
                     assert!(cache.add(&pong, *socket, now));
                 }
             }
diff --git a/gossip/tests/crds_gossip.rs b/gossip/tests/crds_gossip.rs
index 7095465c38..5eff0147b5 100644
--- a/gossip/tests/crds_gossip.rs
+++ b/gossip/tests/crds_gossip.rs
@@ -608,7 +608,7 @@ fn network_run_pull(
 
 fn build_gossip_thread_pool() -> ThreadPool {
     ThreadPoolBuilder::new()
         .num_threads(get_thread_count().min(2))
-        .thread_name(|i| format!("crds_gossip_test_{}", i))
+        .thread_name(|i| format!("gossipTest{:02}", i))
         .build()
         .unwrap()
 }
diff --git a/gossip/tests/gossip.rs b/gossip/tests/gossip.rs
index f3e136cdba..066bdb24f7 100644
--- a/gossip/tests/gossip.rs
+++ b/gossip/tests/gossip.rs
@@ -109,7 +109,7 @@ where
         } else {
             trace!("not converged {} {} {}", i, total + num, num * num);
         }
-        sleep(Duration::new(1, 0));
+        sleep(Duration::from_secs(1));
     }
     exit.store(true, Ordering::Relaxed);
     for (_, dr, _) in listen {
@@ -251,7 +251,7 @@ pub fn cluster_info_retransmit() {
         if done {
             break;
         }
-        sleep(Duration::new(1, 0));
+        sleep(Duration::from_secs(1));
     }
     assert!(done);
     let mut p = Packet::default();
@@ -269,7
+269,7 @@ pub fn cluster_info_retransmit() { .into_par_iter() .map(|s| { let mut p = Packet::default(); - s.set_read_timeout(Some(Duration::new(1, 0))).unwrap(); + s.set_read_timeout(Some(Duration::from_secs(1))).unwrap(); let res = s.recv_from(p.buffer_mut()); res.is_err() //true if failed to receive the retransmit packet }) diff --git a/install/Cargo.toml b/install/Cargo.toml index f75ff2d011..af4cffd7de 100644 --- a/install/Cargo.toml +++ b/install/Cargo.toml @@ -13,7 +13,7 @@ documentation = "https://docs.rs/solana-install" atty = "0.2.11" bincode = "1.3.3" bzip2 = "0.4.3" -chrono = { version = "0.4.11", features = ["serde"] } +chrono = { version = "0.4.21", features = ["serde"] } clap = { version = "2.33.1" } console = "0.15.0" crossbeam-channel = "0.5" @@ -21,10 +21,10 @@ ctrlc = { version = "3.2.2", features = ["termination"] } dirs-next = "2.0.0" indicatif = "0.17.0" lazy_static = "1.4.0" -nix = "0.24.2" +nix = "0.25.0" reqwest = { version = "0.11.11", default-features = false, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] } -semver = "1.0.10" -serde = { version = "1.0.138", features = ["derive"] } +semver = "1.0.13" +serde = { version = "1.0.143", features = ["derive"] } serde_yaml = "0.8.26" solana-clap-utils = { path = "../clap-utils", version = "=1.12.0" } solana-client = { path = "../client", version = "=1.12.0" } diff --git a/ledger-tool/Cargo.toml b/ledger-tool/Cargo.toml index a6586aa8c7..6c978a2e54 100644 --- a/ledger-tool/Cargo.toml +++ b/ledger-tool/Cargo.toml @@ -11,7 +11,7 @@ documentation = "https://docs.rs/solana-ledger-tool" [dependencies] bs58 = "0.4.0" -chrono = "0.4.11" +chrono = "0.4.21" clap = "2.33.1" crossbeam-channel = "0.5" csv = "1.1.6" @@ -21,7 +21,7 @@ itertools = "0.10.3" log = { version = "0.4.17" } regex = "1" serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0.81" +serde_json = "1.0.83" solana-clap-utils = { path = "../clap-utils", version = "=1.12.0" } solana-cli-output = { path = "../cli-output", version = "=1.12.0" } solana-core = { path = "../core", version = "=1.12.0" } diff --git a/ledger-tool/src/bigtable.rs b/ledger-tool/src/bigtable.rs index 8dcce92534..1b93f15b60 100644 --- a/ledger-tool/src/bigtable.rs +++ b/ledger-tool/src/bigtable.rs @@ -624,6 +624,7 @@ pub fn bigtable_process_command( let runtime = tokio::runtime::Runtime::new().unwrap(); let verbose = matches.is_present("verbose"); + let force_update_to_open = matches.is_present("force_update_to_open"); let output_format = OutputFormat::from_matches(matches, "output_format", verbose); let (subcommand, sub_matches) = matches.subcommand(); @@ -650,6 +651,7 @@ pub fn bigtable_process_command( AccessType::Secondary, None, shred_storage_type, + force_update_to_open, ); let config = solana_storage_bigtable::LedgerStorageConfig { read_only: false, diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index e885d5eda8..9484e1b691 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -25,7 +25,7 @@ use { solana_ledger::{ ancestor_iterator::AncestorIterator, bank_forks_utils, - blockstore::{create_new_ledger, Blockstore, PurgeType}, + blockstore::{create_new_ledger, Blockstore, BlockstoreError, PurgeType}, blockstore_db::{self, Database}, blockstore_options::{ AccessType, BlockstoreOptions, BlockstoreRecoveryMode, LedgerColumnOptions, @@ -805,17 +805,56 @@ fn analyze_storage(database: &Database) { analyze_column::(database, "OptimisticSlots"); } +/// Open blockstore with temporary primary access to allow necessary, +/// 
persistent changes to be made to the blockstore (such as creation of new +/// column family(s)). Then, continue opening with `original_access_type` +fn open_blockstore_with_temporary_primary_access( + ledger_path: &Path, + original_access_type: AccessType, + wal_recovery_mode: Option, +) -> Result { + // Open with Primary will allow any configuration that automatically + // updates to take effect + info!("Attempting to temporarily open blockstore with Primary access in order to update"); + { + let _ = Blockstore::open_with_options( + ledger_path, + BlockstoreOptions { + access_type: AccessType::PrimaryForMaintenance, + recovery_mode: wal_recovery_mode.clone(), + enforce_ulimit_nofile: true, + ..BlockstoreOptions::default() + }, + )?; + } + // Now, attempt to open the blockstore with original AccessType + info!( + "Blockstore forced open succeeded, retrying with original access: {:?}", + original_access_type + ); + Blockstore::open_with_options( + ledger_path, + BlockstoreOptions { + access_type: original_access_type, + recovery_mode: wal_recovery_mode, + enforce_ulimit_nofile: true, + ..BlockstoreOptions::default() + }, + ) +} + fn open_blockstore( ledger_path: &Path, access_type: AccessType, wal_recovery_mode: Option, shred_storage_type: &ShredStorageType, + force_update_to_open: bool, ) -> Blockstore { match Blockstore::open_with_options( ledger_path, BlockstoreOptions { - access_type, - recovery_mode: wal_recovery_mode, + access_type: access_type.clone(), + recovery_mode: wal_recovery_mode.clone(), enforce_ulimit_nofile: true, column_options: LedgerColumnOptions { shred_storage_type: shred_storage_type.clone(), @@ -824,8 +863,37 @@ fn open_blockstore( }, ) { Ok(blockstore) => blockstore, + Err(BlockstoreError::RocksDb(err)) + if (err + .to_string() + // Missing column family + .starts_with("Invalid argument: Column family not found:") + || err + .to_string() + // Missing essential file, indicative of blockstore not existing + .starts_with("IO error: No such file or directory:")) + && access_type == AccessType::Secondary => + { + error!("Blockstore is incompatible with current software and requires updates"); + if !force_update_to_open { + error!("Use --force-update-to-open to allow blockstore to update"); + exit(1); + } + open_blockstore_with_temporary_primary_access( + ledger_path, + access_type, + wal_recovery_mode, + ) + .unwrap_or_else(|err| { + error!( + "Failed to open blockstore (with --force-update-to-open) at {:?}: {:?}", + ledger_path, err + ); + exit(1); + }) + } Err(err) => { - eprintln!("Failed to open ledger at {:?}: {:?}", ledger_path, err); + eprintln!("Failed to open blockstore at {:?}: {:?}", ledger_path, err); exit(1); } } @@ -887,9 +955,9 @@ fn load_bank_forks( }; if let Some(halt_slot) = process_options.halt_at_slot { - // Check if we have the slot data necessary to replay from starting_slot to halt_slot. + // Check if we have the slot data necessary to replay from starting_slot to >= halt_slot. // - This will not catch the case when loading from genesis without a full slot 0. 
- if !blockstore.slots_connected(starting_slot, halt_slot) { + if !blockstore.slot_range_connected(starting_slot, halt_slot) { eprintln!( "Unable to load bank forks at slot {} due to disconnected blocks.", halt_slot, @@ -1345,6 +1413,14 @@ fn main() { "Mode to recovery the ledger db write ahead log" ), ) + .arg( + Arg::with_name("force_update_to_open") + .long("force-update-to-open") + .takes_value(false) + .global(true) + .help("Allow commands that would otherwise not alter the \ + blockstore to make necessary updates in order to open it"), + ) .arg( Arg::with_name("snapshot_archive_path") .long("snapshot-archive-path") @@ -2039,6 +2115,7 @@ fn main() { let wal_recovery_mode = matches .value_of("wal_recovery_mode") .map(BlockstoreRecoveryMode::from); + let force_update_to_open = matches.is_present("force_update_to_open"); let verbose_level = matches.occurrences_of("verbose"); // TODO: the following shred_storage_type inference must be updated once the @@ -2074,6 +2151,7 @@ fn main() { AccessType::Secondary, wal_recovery_mode, &shred_storage_type, + force_update_to_open, ), starting_slot, ending_slot, @@ -2093,9 +2171,16 @@ fn main() { AccessType::Secondary, None, &shred_storage_type, + force_update_to_open, ); - let target = - open_blockstore(&target_db, AccessType::Primary, None, &shred_storage_type); + let target = open_blockstore( + &target_db, + AccessType::Primary, + None, + &shred_storage_type, + force_update_to_open, + ); + for (slot, _meta) in source.slot_meta_iterator(starting_slot).unwrap() { if slot > ending_slot { break; @@ -2173,6 +2258,7 @@ fn main() { AccessType::Secondary, wal_recovery_mode, &shred_storage_type, + force_update_to_open, ); match load_bank_forks( arg_matches, @@ -2225,6 +2311,7 @@ fn main() { AccessType::Secondary, None, &shred_storage_type, + force_update_to_open, ); for (slot, _meta) in ledger .slot_meta_iterator(starting_slot) @@ -2264,6 +2351,7 @@ fn main() { AccessType::Secondary, wal_recovery_mode, &shred_storage_type, + force_update_to_open, ); match load_bank_forks( arg_matches, @@ -2290,6 +2378,7 @@ fn main() { AccessType::Secondary, wal_recovery_mode, &shred_storage_type, + force_update_to_open, ); for slot in slots { println!("Slot {}", slot); @@ -2314,6 +2403,7 @@ fn main() { AccessType::Secondary, wal_recovery_mode, &shred_storage_type, + force_update_to_open, ), starting_slot, Slot::MAX, @@ -2330,6 +2420,7 @@ fn main() { AccessType::Secondary, wal_recovery_mode, &shred_storage_type, + force_update_to_open, ); let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); for slot in blockstore.dead_slots_iterator(starting_slot).unwrap() { @@ -2342,6 +2433,7 @@ fn main() { AccessType::Secondary, wal_recovery_mode, &shred_storage_type, + force_update_to_open, ); let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); for slot in blockstore.duplicate_slots_iterator(starting_slot).unwrap() { @@ -2355,6 +2447,7 @@ fn main() { AccessType::Primary, wal_recovery_mode, &shred_storage_type, + force_update_to_open, ); for slot in slots { match blockstore.set_dead_slot(slot) { @@ -2370,6 +2463,7 @@ fn main() { AccessType::Primary, wal_recovery_mode, &shred_storage_type, + force_update_to_open, ); for slot in slots { match blockstore.remove_dead_slot(slot) { @@ -2388,6 +2482,7 @@ fn main() { AccessType::Secondary, wal_recovery_mode, &shred_storage_type, + force_update_to_open, ); let mut ancestors = BTreeSet::new(); assert!( @@ -2549,6 +2644,7 @@ fn main() { AccessType::Secondary, wal_recovery_mode, &shred_storage_type, + 
force_update_to_open, ); let (bank_forks, ..) = load_bank_forks( arg_matches, @@ -2593,6 +2689,7 @@ fn main() { AccessType::Secondary, wal_recovery_mode, &shred_storage_type, + force_update_to_open, ); match load_bank_forks( arg_matches, @@ -2707,6 +2804,7 @@ fn main() { AccessType::Secondary, wal_recovery_mode, &shred_storage_type, + force_update_to_open, ); let snapshot_slot = if Some("ROOT") == arg_matches.value_of("snapshot_slot") { @@ -2719,6 +2817,11 @@ fn main() { value_t_or_exit!(arg_matches, "snapshot_slot", Slot) }; + assert!( + blockstore.meta(snapshot_slot).unwrap().is_some(), + "snapshot slot doesn't exist" + ); + let ending_slot = if is_minimized { let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot); if ending_slot <= snapshot_slot { @@ -3095,6 +3198,7 @@ fn main() { AccessType::Secondary, wal_recovery_mode, &shred_storage_type, + force_update_to_open, ); let (bank_forks, ..) = load_bank_forks( arg_matches, @@ -3159,6 +3263,7 @@ fn main() { AccessType::Secondary, wal_recovery_mode, &shred_storage_type, + force_update_to_open, ); match load_bank_forks( arg_matches, @@ -3694,6 +3799,7 @@ fn main() { access_type, wal_recovery_mode, &shred_storage_type, + force_update_to_open, ); let end_slot = match end_slot { @@ -3770,6 +3876,7 @@ fn main() { AccessType::Secondary, wal_recovery_mode, &shred_storage_type, + force_update_to_open, ); let max_height = if let Some(height) = arg_matches.value_of("max_height") { usize::from_str(height).expect("Maximum height must be a number") @@ -3837,6 +3944,7 @@ fn main() { AccessType::Secondary, wal_recovery_mode, &shred_storage_type, + force_update_to_open, ); let num_slots = value_t_or_exit!(arg_matches, "num_slots", usize); let slots = blockstore @@ -3861,6 +3969,7 @@ fn main() { AccessType::Primary, wal_recovery_mode, &shred_storage_type, + force_update_to_open, ); let start_root = if let Some(root) = arg_matches.value_of("start_root") { Slot::from_str(root).expect("Before root must be a number") @@ -3914,6 +4023,7 @@ fn main() { AccessType::Secondary, wal_recovery_mode, &shred_storage_type, + force_update_to_open, ); match blockstore.slot_meta_iterator(0) { Ok(metas) => { @@ -3980,6 +4090,7 @@ fn main() { AccessType::Secondary, wal_recovery_mode, &shred_storage_type, + force_update_to_open, ) .db(), ); @@ -3991,6 +4102,7 @@ fn main() { AccessType::Secondary, wal_recovery_mode, &shred_storage_type, + force_update_to_open, ); let mut slots: Vec = vec![]; diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index be056714bf..915bf2038d 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -13,15 +13,15 @@ edition = "2021" bincode = "1.3.3" bitflags = "1.3.1" byteorder = "1.4.3" -chrono = { version = "0.4.11", features = ["serde"] } +chrono = { version = "0.4.21", features = ["serde"] } chrono-humanize = "0.2.1" crossbeam-channel = "0.5" dashmap = { version = "4.0.2", features = ["rayon", "raw-api"] } fs_extra = "1.2.0" -futures = "0.3.21" +futures = "0.3.23" itertools = "0.10.3" lazy_static = "1.4.0" -libc = "0.2.126" +libc = "0.2.131" log = { version = "0.4.17" } lru = "0.7.7" num_cpus = "1.13.1" @@ -31,8 +31,8 @@ rand = "0.7.0" rand_chacha = "0.2.2" rayon = "1.5.3" reed-solomon-erasure = { version = "5.0.3", features = ["simd-accel"] } -serde = "1.0.138" -serde_bytes = "0.11.6" +serde = "1.0.143" +serde_bytes = "0.11.7" sha2 = "0.10.2" solana-account-decoder = { path = "../account-decoder", version = "=1.12.0" } solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.12.0" } @@ -52,7 +52,7 @@ 
solana-storage-proto = { path = "../storage-proto", version = "=1.12.0" } solana-transaction-status = { path = "../transaction-status", version = "=1.12.0" } solana-vote-program = { path = "../programs/vote", version = "=1.12.0" } spl-token = { version = "=3.5.0", features = ["no-entrypoint"] } -spl-token-2022 = { version = "=0.4.2", features = ["no-entrypoint"] } +spl-token-2022 = { version = "=0.4.3", features = ["no-entrypoint"] } static_assertions = "1.1.0" tempfile = "3.3.0" thiserror = "1.0" @@ -63,7 +63,7 @@ trees = "0.4.2" [dependencies.rocksdb] # Avoid the vendored bzip2 within rocksdb-sys that can cause linker conflicts # when also using the bzip2 crate -version = "0.18.0" +version = "0.19.0" default-features = false features = ["lz4"] @@ -73,6 +73,7 @@ bs58 = "0.4.0" matches = "0.1.9" solana-account-decoder = { path = "../account-decoder", version = "=1.12.0" } solana-logger = { path = "../logger", version = "=1.12.0" } +test-case = "2.1.0" [build-dependencies] rustc_version = "0.4" diff --git a/ledger/src/bigtable_upload.rs b/ledger/src/bigtable_upload.rs index f43b07db12..4881cedc17 100644 --- a/ledger/src/bigtable_upload.rs +++ b/ledger/src/bigtable_upload.rs @@ -60,7 +60,7 @@ pub async fn upload_confirmed_blocks( starting_slot, err ) })? - .map_while(|slot| (slot <= ending_slot).then(|| slot)) + .map_while(|slot| (slot <= ending_slot).then_some(slot)) .collect(); if blockstore_slots.is_empty() { @@ -164,35 +164,37 @@ pub async fn upload_confirmed_blocks( let sender = sender.clone(); let slot_receiver = slot_receiver.clone(); let exit = exit.clone(); + std::thread::Builder::new() + .name("solBigTGetBlk".into()) + .spawn(move || { + let start = Instant::now(); + let mut num_blocks_read = 0; + + while let Ok(slot) = slot_receiver.recv() { + if exit.load(Ordering::Relaxed) { + break; + } - std::thread::spawn(move || { - let start = Instant::now(); - let mut num_blocks_read = 0; - - while let Ok(slot) = slot_receiver.recv() { - if exit.load(Ordering::Relaxed) { - break; + let _ = match blockstore.get_rooted_block(slot, true) { + Ok(confirmed_block) => { + num_blocks_read += 1; + sender.send((slot, Some(confirmed_block))) + } + Err(err) => { + warn!( + "Failed to get load confirmed block from slot {}: {:?}", + slot, err + ); + sender.send((slot, None)) + } + }; } - - let _ = match blockstore.get_rooted_block(slot, true) { - Ok(confirmed_block) => { - num_blocks_read += 1; - sender.send((slot, Some(confirmed_block))) - } - Err(err) => { - warn!( - "Failed to get load confirmed block from slot {}: {:?}", - slot, err - ); - sender.send((slot, None)) - } - }; - } - BlockstoreLoadStats { - num_blocks_read, - elapsed: start.elapsed(), - } - }) + BlockstoreLoadStats { + num_blocks_read, + elapsed: start.elapsed(), + } + }) + .unwrap() }) .collect(), receiver, diff --git a/ledger/src/bigtable_upload_service.rs b/ledger/src/bigtable_upload_service.rs index 812f87cf87..857190a47d 100644 --- a/ledger/src/bigtable_upload_service.rs +++ b/ledger/src/bigtable_upload_service.rs @@ -50,7 +50,7 @@ impl BigTableUploadService { ) -> Self { info!("Starting BigTable upload service"); let thread = Builder::new() - .name("bigtable-upload".to_string()) + .name("solBigTUpload".to_string()) .spawn(move || { Self::run( runtime, diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 403f6105b5..80db8354a0 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -90,12 +90,12 @@ pub use { lazy_static! 
{ static ref PAR_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new() .num_threads(get_max_thread_count()) - .thread_name(|ix| format!("blockstore_{}", ix)) + .thread_name(|ix| format!("solBstore{:02}", ix)) .build() .unwrap(); static ref PAR_THREAD_POOL_ALL_CPUS: ThreadPool = rayon::ThreadPoolBuilder::new() .num_threads(num_cpus::get()) - .thread_name(|ix| format!("blockstore_{}", ix)) + .thread_name(|ix| format!("solBstoreAll{:02}", ix)) .build() .unwrap(); } @@ -546,9 +546,9 @@ impl Blockstore { self.prepare_rooted_slot_iterator(slot, IteratorDirection::Reverse) } - /// Determines if `starting_slot` and `ending_slot` are connected by full slots + /// Determines if we can iterate from `starting_slot` to >= `ending_slot` by full slots /// `starting_slot` is excluded from the `is_full()` check - pub fn slots_connected(&self, starting_slot: Slot, ending_slot: Slot) -> bool { + pub fn slot_range_connected(&self, starting_slot: Slot, ending_slot: Slot) -> bool { if starting_slot == ending_slot { return true; } @@ -562,8 +562,7 @@ impl Blockstore { if slot_meta.is_full() { match slot.cmp(&ending_slot) { cmp::Ordering::Less => next_slots.extend(slot_meta.next_slots), - cmp::Ordering::Equal => return true, - cmp::Ordering::Greater => {} // slot is greater than the ending slot, so all its children would be as well + _ => return true, } } } @@ -626,7 +625,7 @@ impl Blockstore { index: &mut Index, erasure_meta: &ErasureMeta, prev_inserted_shreds: &HashMap, - recovered_data_shreds: &mut Vec, + recovered_shreds: &mut Vec, data_cf: &LedgerColumn, code_cf: &LedgerColumn, ) { @@ -647,9 +646,9 @@ impl Blockstore { code_cf, )) .collect(); - if let Ok(mut result) = Shredder::try_recovery(available_shreds) { + if let Ok(mut result) = shred::recover(available_shreds) { Self::submit_metrics(slot, erasure_meta, true, "complete".into(), result.len()); - recovered_data_shreds.append(&mut result); + recovered_shreds.append(&mut result); } else { Self::submit_metrics(slot, erasure_meta, true, "incomplete".into(), 0); } @@ -710,7 +709,7 @@ impl Blockstore { ) -> Vec { let data_cf = db.column::(); let code_cf = db.column::(); - let mut recovered_data_shreds = vec![]; + let mut recovered_shreds = vec![]; // Recovery rules: // 1. Only try recovery around indexes for which new data or coding shreds are received // 2. For new data shreds, check if an erasure set exists. 
If not, don't try recovery @@ -726,7 +725,7 @@ impl Blockstore { index, erasure_meta, prev_inserted_shreds, - &mut recovered_data_shreds, + &mut recovered_shreds, &data_cf, &code_cf, ); @@ -745,7 +744,7 @@ impl Blockstore { } }; } - recovered_data_shreds + recovered_shreds } /// The main helper function that performs the shred insertion logic @@ -889,15 +888,18 @@ impl Blockstore { metrics.insert_shreds_elapsed_us += start.as_us(); let mut start = Measure::start("Shred recovery"); if let Some(leader_schedule_cache) = leader_schedule { - let recovered_data_shreds = Self::try_shred_recovery( + let recovered_shreds = Self::try_shred_recovery( db, &erasure_metas, &mut index_working_set, &just_inserted_shreds, ); - metrics.num_recovered += recovered_data_shreds.len(); - let recovered_data_shreds: Vec<_> = recovered_data_shreds + metrics.num_recovered += recovered_shreds + .iter() + .filter(|shred| shred.is_data()) + .count(); + let recovered_shreds: Vec<_> = recovered_shreds .into_iter() .filter_map(|shred| { let leader = @@ -906,6 +908,12 @@ impl Blockstore { metrics.num_recovered_failed_sig += 1; return None; } + // Since the data shreds are fully recovered from the + // erasure batch, no need to store coding shreds in + // blockstore. + if shred.is_code() { + return Some(shred); + } match self.check_insert_data_shred( shred.clone(), &mut erasure_metas, @@ -942,10 +950,10 @@ impl Blockstore { // Always collect recovered-shreds so that above insert code is // executed even if retransmit-sender is None. .collect(); - if !recovered_data_shreds.is_empty() { + if !recovered_shreds.is_empty() { if let Some(retransmit_sender) = retransmit_sender { let _ = retransmit_sender.send( - recovered_data_shreds + recovered_shreds .into_iter() .map(Shred::into_payload) .collect(), @@ -2872,28 +2880,29 @@ impl Blockstore { .and_then(|serialized_shred| { if serialized_shred.is_none() { if let Some(slot_meta) = slot_meta { - panic!( - "Shred with - slot: {}, - index: {}, - consumed: {}, - completed_indexes: {:?} - must exist if shred index was included in a range: {} {}", - slot, - i, - slot_meta.consumed, - slot_meta.completed_data_indexes, - start_index, - end_index - ); - } else { - return Err(BlockstoreError::InvalidShredData(Box::new( - bincode::ErrorKind::Custom(format!( - "Missing shred for slot {}, index {}", - slot, i - )), - ))); + if slot > self.lowest_cleanup_slot() { + panic!( + "Shred with + slot: {}, + index: {}, + consumed: {}, + completed_indexes: {:?} + must exist if shred index was included in a range: {} {}", + slot, + i, + slot_meta.consumed, + slot_meta.completed_data_indexes, + start_index, + end_index + ); + } } + return Err(BlockstoreError::InvalidShredData(Box::new( + bincode::ErrorKind::Custom(format!( + "Missing shred for slot {}, index {}", + slot, i + )), + ))); } Shred::new_from_serialized_shred(serialized_shred.unwrap()).map_err(|err| { @@ -3145,7 +3154,7 @@ impl Blockstore { } .expect("fetch from DuplicateSlots column family failed")?; let new_shred = Shred::new_from_serialized_shred(payload).unwrap(); - (existing_shred != *new_shred.payload()).then(|| existing_shred) + (existing_shred != *new_shred.payload()).then_some(existing_shred) } pub fn has_duplicate_shreds_in_slot(&self, slot: Slot) -> bool { @@ -5004,7 +5013,7 @@ pub mod tests { blockstore .insert_shreds(vec![shreds.remove(1)], None, false) .unwrap(); - let timer = Duration::new(1, 0); + let timer = Duration::from_secs(1); assert!(recvr.recv_timeout(timer).is_err()); // Insert first shred, now we've made a consecutive 
block blockstore @@ -5501,7 +5510,7 @@ pub mod tests { } */ #[test] - fn test_slots_connected_chain() { + fn test_slot_range_connected_chain() { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); @@ -5510,12 +5519,12 @@ pub mod tests { make_and_insert_slot(&blockstore, slot, slot.saturating_sub(1)); } - assert!(blockstore.slots_connected(1, 3)); - assert!(!blockstore.slots_connected(1, 4)); // slot 4 does not exist + assert!(blockstore.slot_range_connected(1, 3)); + assert!(!blockstore.slot_range_connected(1, 4)); // slot 4 does not exist } #[test] - fn test_slots_connected_disconnected() { + fn test_slot_range_connected_disconnected() { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); @@ -5523,20 +5532,20 @@ pub mod tests { make_and_insert_slot(&blockstore, 2, 1); make_and_insert_slot(&blockstore, 4, 2); - assert!(!blockstore.slots_connected(1, 3)); // Slot 3 does not exit - assert!(blockstore.slots_connected(1, 4)); + assert!(blockstore.slot_range_connected(1, 3)); // Slot 3 does not exist, but we can still replay this range to slot 4 + assert!(blockstore.slot_range_connected(1, 4)); } #[test] - fn test_slots_connected_same_slot() { + fn test_slot_range_connected_same_slot() { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); - assert!(blockstore.slots_connected(54, 54)); + assert!(blockstore.slot_range_connected(54, 54)); } #[test] - fn test_slots_connected_starting_slot_not_full() { + fn test_slot_range_connected_starting_slot_not_full() { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); @@ -5544,7 +5553,7 @@ pub mod tests { make_and_insert_slot(&blockstore, 6, 5); assert!(!blockstore.meta(4).unwrap().unwrap().is_full()); - assert!(blockstore.slots_connected(4, 6)); + assert!(blockstore.slot_range_connected(4, 6)); } #[test] @@ -8969,14 +8978,8 @@ pub mod tests { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); - let coding1 = Shredder::generate_coding_shreds( - &shreds, false, // is_last_in_slot - 0, // next_code_index - ); - let coding2 = Shredder::generate_coding_shreds( - &shreds, true, // is_last_in_slot - 0, // next_code_index - ); + let coding1 = Shredder::generate_coding_shreds(&shreds, /*next_code_index:*/ 0); + let coding2 = Shredder::generate_coding_shreds(&shreds, /*next_code_index:*/ 1); for shred in &shreds { info!("shred {:?}", shred); } diff --git a/ledger/src/blockstore_db.rs b/ledger/src/blockstore_db.rs index 10ccf3f945..ed5b4ab900 100644 --- a/ledger/src/blockstore_db.rs +++ b/ledger/src/blockstore_db.rs @@ -519,7 +519,7 @@ impl Rocks { /// /// Full list of properties that return int values could be found /// [here](https://github.com/facebook/rocksdb/blob/08809f5e6cd9cc4bc3958dd4d59457ae78c76660/include/rocksdb/db.h#L654-L689). 
- fn get_int_property_cf(&self, cf: &ColumnFamily, name: &str) -> Result { + fn get_int_property_cf(&self, cf: &ColumnFamily, name: &'static std::ffi::CStr) -> Result { match self.db.property_int_value_cf(cf, name) { Ok(Some(value)) => Ok(value.try_into().unwrap()), Ok(None) => Ok(0), @@ -1069,7 +1069,10 @@ impl Database { { let cf = self.cf_handle::(); let iter = self.backend.iterator_cf::(cf, iterator_mode); - Ok(iter.map(|(key, value)| (C::index(&key), value))) + Ok(iter.map(|pair| { + let (key, value) = pair.unwrap(); + (C::index(&key), value) + })) } #[inline] @@ -1174,7 +1177,10 @@ where ) -> Result)> + '_> { let cf = self.handle(); let iter = self.backend.iterator_cf::(cf, iterator_mode); - Ok(iter.map(|(key, value)| (C::index(&key), value))) + Ok(iter.map(|pair| { + let (key, value) = pair.unwrap(); + (C::index(&key), value) + })) } pub fn delete_slot( @@ -1256,7 +1262,7 @@ where /// /// Full list of properties that return int values could be found /// [here](https://github.com/facebook/rocksdb/blob/08809f5e6cd9cc4bc3958dd4d59457ae78c76660/include/rocksdb/db.h#L654-L689). - pub fn get_int_property(&self, name: &str) -> Result { + pub fn get_int_property(&self, name: &'static std::ffi::CStr) -> Result { self.backend.get_int_property_cf(self.handle(), name) } } diff --git a/ledger/src/blockstore_meta.rs b/ledger/src/blockstore_meta.rs index 65101fe983..5cacf78198 100644 --- a/ledger/src/blockstore_meta.rs +++ b/ledger/src/blockstore_meta.rs @@ -61,7 +61,7 @@ mod serde_compat { D: Deserializer<'de>, { let val = u64::deserialize(deserializer)?; - Ok((val != u64::MAX).then(|| val)) + Ok((val != u64::MAX).then_some(val)) } } diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 83c5ae1cd6..f515d9b532 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -105,7 +105,7 @@ struct ReplayEntry { lazy_static! { static ref PAR_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new() .num_threads(get_max_thread_count()) - .thread_name(|ix| format!("blockstore_processor_{}", ix)) + .thread_name(|ix| format!("solBstoreProc{:02}", ix)) .build() .unwrap(); } diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index dda68ff093..4c20924c96 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -49,6 +49,8 @@ //! So, given a) - c), we must restrict data shred's payload length such that the entire coding //! payload can fit into one coding shred / packet. 
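The size restriction above is easiest to see numerically. The sketch below is illustrative only, not part of this patch; it re-derives the legacy constants from the header sizes that appear in this file (1228-byte payload, 88-byte data headers, 89-byte coding headers):

// Standalone sketch: the constant values are copied from this patch, not imported.
const SIZE_OF_PAYLOAD: usize = 1228; // data and coding shreds are the same size
const SIZE_OF_DATA_SHRED_HEADERS: usize = 88; // common + data headers
const SIZE_OF_CODING_SHRED_HEADERS: usize = 89; // common + coding headers

fn main() {
    // Data shreds keep a coding-header-sized tail of zero padding so that both
    // shred types erasure-code slices of equal length.
    let legacy_data_capacity =
        SIZE_OF_PAYLOAD - SIZE_OF_DATA_SHRED_HEADERS - SIZE_OF_CODING_SHRED_HEADERS;
    assert_eq!(legacy_data_capacity, 1051); // matches LEGACY_SHRED_DATA_CAPACITY
    // Everything in a coding shred past its headers is the erasure-encoded slice.
    let erasure_encoded_slice = SIZE_OF_PAYLOAD - SIZE_OF_CODING_SHRED_HEADERS;
    assert_eq!(erasure_encoded_slice, 1139); // matches SIZE_OF_ERASURE_ENCODED_SLICE
}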
+#[cfg(test)] +pub(crate) use shred_code::MAX_CODE_SHREDS_PER_SLOT; pub(crate) use shred_data::ShredData; pub use { self::stats::{ProcessShredsStats, ShredFetchStats}, @@ -59,6 +61,7 @@ use { crate::blockstore::{self, MAX_DATA_SHREDS_PER_SLOT}, bitflags::bitflags, num_enum::{IntoPrimitive, TryFromPrimitive}, + reed_solomon_erasure::Error::TooFewShardsPresent, serde::{Deserialize, Serialize}, solana_entry::entry::{create_ticks, Entry}, solana_perf::packet::Packet, @@ -66,7 +69,7 @@ use { clock::Slot, hash::{hashv, Hash}, pubkey::Pubkey, - signature::{Keypair, Signature, Signer}, + signature::{Keypair, Signature, Signer, SIGNATURE_BYTES}, }, static_assertions::const_assert_eq, std::fmt::Debug, @@ -90,7 +93,7 @@ pub const SIZE_OF_NONCE: usize = 4; const SIZE_OF_COMMON_SHRED_HEADER: usize = 83; const SIZE_OF_DATA_SHRED_HEADERS: usize = 88; const SIZE_OF_CODING_SHRED_HEADERS: usize = 89; -const SIZE_OF_SIGNATURE: usize = 64; +const SIZE_OF_SIGNATURE: usize = SIGNATURE_BYTES; const SIZE_OF_SHRED_VARIANT: usize = 1; const SIZE_OF_SHRED_SLOT: usize = 8; const SIZE_OF_SHRED_INDEX: usize = 4; @@ -99,7 +102,11 @@ const OFFSET_OF_SHRED_VARIANT: usize = SIZE_OF_SIGNATURE; const OFFSET_OF_SHRED_SLOT: usize = SIZE_OF_SIGNATURE + SIZE_OF_SHRED_VARIANT; const OFFSET_OF_SHRED_INDEX: usize = OFFSET_OF_SHRED_SLOT + SIZE_OF_SHRED_SLOT; -pub const MAX_DATA_SHREDS_PER_FEC_BLOCK: u32 = 32; +// Shreds are uniformly split into erasure batches with a "target" number of +// data shreds per each batch as below. The actual number of data shreds in +// each erasure batch depends on the number of shreds obtained from serializing +// a &[Entry]. +pub const DATA_SHREDS_PER_FEC_BLOCK: usize = 32; // For legacy tests and benchmarks. const_assert_eq!(LEGACY_SHRED_DATA_CAPACITY, 1051); @@ -138,6 +145,10 @@ pub enum Error { InvalidPayloadSize(/*payload size:*/ usize), #[error("Invalid proof size: {0}")] InvalidProofSize(/*proof_size:*/ u8), + #[error("Invalid recovered shred")] + InvalidRecoveredShred, + #[error("Invalid shard size: {0}")] + InvalidShardSize(/*shard_size:*/ usize), #[error("Invalid shred flags: {0}")] InvalidShredFlags(u8), #[error("Invalid {0:?} shred index: {1}")] @@ -205,7 +216,7 @@ struct DataShredHeader { struct CodingShredHeader { num_data_shreds: u16, num_coding_shreds: u16, - position: u16, + position: u16, // [0..num_coding_shreds) } #[derive(Clone, Debug, PartialEq, Eq)] @@ -288,6 +299,8 @@ macro_rules! dispatch { } } +use dispatch; + impl Shred { dispatch!(fn common_header(&self) -> &ShredCommonHeader); dispatch!(fn set_signature(&mut self, signature: Signature)); @@ -488,6 +501,7 @@ impl Shred { } } + #[must_use] pub fn verify(&self, pubkey: &Pubkey) -> bool { let message = self.signed_message(); self.signature().verify(pubkey.as_ref(), message) @@ -607,7 +621,7 @@ pub mod layout { merkle::ShredData::get_signed_message_range(proof_size)? 
} }; - (shred.len() <= range.end).then(|| range) + (shred.len() <= range.end).then_some(range) } pub(crate) fn get_reference_tick(shred: &[u8]) -> Result<u8, Error> { @@ -636,6 +650,28 @@ impl From for Shred { } } +impl From<merkle::Shred> for Shred { + fn from(shred: merkle::Shred) -> Self { + match shred { + merkle::Shred::ShredCode(shred) => Self::ShredCode(ShredCode::Merkle(shred)), + merkle::Shred::ShredData(shred) => Self::ShredData(ShredData::Merkle(shred)), + } + } +} + +impl TryFrom<Shred> for merkle::Shred { + type Error = Error; + + fn try_from(shred: Shred) -> Result<Self, Self::Error> { + match shred { + Shred::ShredCode(ShredCode::Legacy(_)) => Err(Error::InvalidShredVariant), + Shred::ShredCode(ShredCode::Merkle(shred)) => Ok(Self::ShredCode(shred)), + Shred::ShredData(ShredData::Legacy(_)) => Err(Error::InvalidShredVariant), + Shred::ShredData(ShredData::Merkle(shred)) => Ok(Self::ShredData(shred)), + } + } +} + impl From<ShredVariant> for ShredType { #[inline] fn from(shred_variant: ShredVariant) -> Self { @@ -676,6 +712,27 @@ impl TryFrom<u8> for ShredVariant { } } +pub(crate) fn recover(shreds: Vec<Shred>) -> Result<Vec<Shred>, Error> { + match shreds + .first() + .ok_or(TooFewShardsPresent)? + .common_header() + .shred_variant + { + ShredVariant::LegacyData | ShredVariant::LegacyCode => Shredder::try_recovery(shreds), + ShredVariant::MerkleCode(_) | ShredVariant::MerkleData(_) => { + let shreds = shreds + .into_iter() + .map(merkle::Shred::try_from) + .collect::<Result<_, _>>()?; + Ok(merkle::recover(shreds)? + .into_iter() + .map(Shred::from) + .collect()) + } + } +} + // Accepts shreds in the slot range [root + 1, max_slot]. #[must_use] pub fn should_discard_shred( diff --git a/ledger/src/shred/legacy.rs b/ledger/src/shred/legacy.rs index 71c4203785..1096ef1f54 100644 --- a/ledger/src/shred/legacy.rs +++ b/ledger/src/shred/legacy.rs @@ -20,15 +20,15 @@ const_assert_eq!(ShredData::SIZE_OF_PAYLOAD, ShredCode::SIZE_OF_PAYLOAD); const_assert_eq!(ShredData::SIZE_OF_PAYLOAD, 1228); const_assert_eq!(ShredData::CAPACITY, 1051); -// SIZE_OF_CODING_SHRED_HEADERS bytes at the end of data shreds +// ShredCode::SIZE_OF_HEADERS bytes at the end of data shreds // is never used and is not part of erasure coding. const_assert_eq!(SIZE_OF_ERASURE_ENCODED_SLICE, 1139); pub(super) const SIZE_OF_ERASURE_ENCODED_SLICE: usize = - ShredCode::SIZE_OF_PAYLOAD - SIZE_OF_CODING_SHRED_HEADERS; + ShredCode::SIZE_OF_PAYLOAD - ShredCode::SIZE_OF_HEADERS; // Layout: {common, data} headers | data | zero padding -// Everything up to SIZE_OF_CODING_SHRED_HEADERS bytes at the end (which is -// part of zero padding) is erasure coded. +// Everything up to ShredCode::SIZE_OF_HEADERS bytes at the end (which is part +// of zero padding) is erasure coded. // All payload past signature, including the entirety of zero paddings, is // signed. #[derive(Clone, Debug, Eq, PartialEq)] @@ -52,6 +52,7 @@ impl Shred for ShredData { // Legacy data shreds are always zero padded and // the same size as coding shreds.
const SIZE_OF_PAYLOAD: usize = shred_code::ShredCode::SIZE_OF_PAYLOAD; + const SIZE_OF_HEADERS: usize = SIZE_OF_DATA_SHRED_HEADERS; fn from_payload(mut payload: Vec) -> Result { let mut cursor = Cursor::new(&payload[..]); @@ -64,7 +65,7 @@ impl Shred for ShredData { // Repair packets have nonce at the end of packet payload; see: // https://github.com/solana-labs/solana/pull/10109 // https://github.com/solana-labs/solana/pull/16602 - if payload.len() < SIZE_OF_DATA_SHRED_HEADERS { + if payload.len() < Self::SIZE_OF_HEADERS { return Err(Error::InvalidPayloadSize(payload.len())); } payload.resize(Self::SIZE_OF_PAYLOAD, 0u8); @@ -116,6 +117,7 @@ impl Shred for ShredData { impl Shred for ShredCode { impl_shred_common!(); const SIZE_OF_PAYLOAD: usize = shred_code::ShredCode::SIZE_OF_PAYLOAD; + const SIZE_OF_HEADERS: usize = SIZE_OF_CODING_SHRED_HEADERS; fn from_payload(mut payload: Vec) -> Result { let mut cursor = Cursor::new(&payload[..]); @@ -147,10 +149,9 @@ impl Shred for ShredCode { return Err(Error::InvalidPayloadSize(self.payload.len())); } let mut shard = self.payload; - // SIZE_OF_CODING_SHRED_HEADERS bytes at the beginning of the - // coding shreds contains the header and is not part of erasure - // coding. - shard.drain(..SIZE_OF_CODING_SHRED_HEADERS); + // ShredCode::SIZE_OF_HEADERS bytes at the beginning of the coding + // shreds contains the header and is not part of erasure coding. + shard.drain(..Self::SIZE_OF_HEADERS); Ok(shard) } @@ -158,7 +159,7 @@ impl Shred for ShredCode { if self.payload.len() != Self::SIZE_OF_PAYLOAD { return Err(Error::InvalidPayloadSize(self.payload.len())); } - Ok(&self.payload[SIZE_OF_CODING_SHRED_HEADERS..]) + Ok(&self.payload[Self::SIZE_OF_HEADERS..]) } fn sanitize(&self) -> Result<(), Error> { @@ -185,15 +186,15 @@ impl ShredDataTrait for ShredData { let size = usize::from(self.data_header.size); #[allow(clippy::manual_range_contains)] if size > self.payload.len() - || size < SIZE_OF_DATA_SHRED_HEADERS - || size > SIZE_OF_DATA_SHRED_HEADERS + Self::CAPACITY + || size < Self::SIZE_OF_HEADERS + || size > Self::SIZE_OF_HEADERS + Self::CAPACITY { return Err(Error::InvalidDataSize { size: self.data_header.size, payload: self.payload.len(), }); } - Ok(&self.payload[SIZE_OF_DATA_SHRED_HEADERS..size]) + Ok(&self.payload[Self::SIZE_OF_HEADERS..size]) } // Only for tests. @@ -214,7 +215,7 @@ impl ShredCodeTrait for ShredCode { impl ShredData { // Maximum size of ledger data that can be embedded in a data-shred. pub(super) const CAPACITY: usize = - Self::SIZE_OF_PAYLOAD - SIZE_OF_DATA_SHRED_HEADERS - SIZE_OF_CODING_SHRED_HEADERS; + Self::SIZE_OF_PAYLOAD - Self::SIZE_OF_HEADERS - ShredCode::SIZE_OF_HEADERS; pub(super) fn new_from_data( slot: Slot, @@ -235,7 +236,7 @@ impl ShredData { version, fec_set_index, }; - let size = (data.len() + SIZE_OF_DATA_SHRED_HEADERS) as u16; + let size = (data.len() + Self::SIZE_OF_HEADERS) as u16; let flags = flags | unsafe { ShredFlags::from_bits_unchecked( @@ -254,7 +255,7 @@ impl ShredData { bincode::serialize_into(&mut cursor, &data_header).unwrap(); // TODO: Need to check if data is too large! let offset = cursor.position() as usize; - debug_assert_eq!(offset, SIZE_OF_DATA_SHRED_HEADERS); + debug_assert_eq!(offset, Self::SIZE_OF_HEADERS); payload[offset..offset + data.len()].copy_from_slice(data); Self { common_header, @@ -271,7 +272,7 @@ impl ShredData { pub(super) fn resize_stored_shred(mut shred: Vec) -> Result, Error> { // Old shreds might have been extra zero padded. 
- if !(SIZE_OF_DATA_SHRED_HEADERS..=ShredCode::SIZE_OF_PAYLOAD).contains(&shred.len()) { + if !(Self::SIZE_OF_HEADERS..=Self::SIZE_OF_PAYLOAD).contains(&shred.len()) { return Err(Error::InvalidPayloadSize(shred.len())); } shred.resize(Self::SIZE_OF_PAYLOAD, 0u8); @@ -310,7 +311,7 @@ impl ShredCode { // Tests may have an empty parity_shard. if !parity_shard.is_empty() { let offset = cursor.position() as usize; - debug_assert_eq!(offset, SIZE_OF_CODING_SHRED_HEADERS); + debug_assert_eq!(offset, Self::SIZE_OF_HEADERS); payload[offset..].copy_from_slice(parity_shard); } Self { @@ -325,7 +326,7 @@ impl ShredCode { mod test { use { super::*, - crate::shred::{ShredType, MAX_DATA_SHREDS_PER_SLOT}, + crate::shred::{shred_code::MAX_CODE_SHREDS_PER_SLOT, ShredType, MAX_DATA_SHREDS_PER_SLOT}, matches::assert_matches, }; @@ -433,10 +434,10 @@ mod test { } { let mut shred = shred.clone(); - shred.common_header.index = MAX_DATA_SHREDS_PER_SLOT as u32; + shred.common_header.index = MAX_CODE_SHREDS_PER_SLOT as u32; assert_matches!( shred.sanitize(), - Err(Error::InvalidShredIndex(ShredType::Code, 32768)) + Err(Error::InvalidShredIndex(ShredType::Code, 557_056)) ); } // pos >= num_coding is invalid. @@ -454,7 +455,7 @@ mod test { { let mut shred = shred.clone(); shred.common_header.fec_set_index = MAX_DATA_SHREDS_PER_SLOT as u32 - 2; - shred.coding_header.num_data_shreds = 2; + shred.coding_header.num_data_shreds = 3; shred.coding_header.num_coding_shreds = 4; shred.coding_header.position = 1; shred.common_header.index = MAX_DATA_SHREDS_PER_SLOT as u32 - 2; @@ -463,6 +464,7 @@ mod test { Err(Error::InvalidErasureShardIndex { .. }) ); + shred.coding_header.num_data_shreds = 2; shred.coding_header.num_coding_shreds = 2000; assert_matches!(shred.sanitize(), Err(Error::InvalidNumCodingShreds(2000))); diff --git a/ledger/src/shred/merkle.rs b/ledger/src/shred/merkle.rs index 7eb3bf2d69..9d0482b953 100644 --- a/ledger/src/shred/merkle.rs +++ b/ledger/src/shred/merkle.rs @@ -1,12 +1,20 @@ +#[cfg(test)] +use {crate::shred::ShredType, solana_sdk::pubkey::Pubkey}; use { - crate::shred::{ - common::impl_shred_common, - shred_code, shred_data, - traits::{Shred, ShredCode as ShredCodeTrait, ShredData as ShredDataTrait}, - CodingShredHeader, DataShredHeader, Error, ShredCommonHeader, ShredFlags, ShredVariant, - SIZE_OF_CODING_SHRED_HEADERS, SIZE_OF_COMMON_SHRED_HEADER, SIZE_OF_DATA_SHRED_HEADERS, - SIZE_OF_SIGNATURE, + crate::{ + shred::{ + common::impl_shred_common, + dispatch, shred_code, shred_data, + traits::{ + Shred as ShredTrait, ShredCode as ShredCodeTrait, ShredData as ShredDataTrait, + }, + CodingShredHeader, DataShredHeader, Error, ShredCommonHeader, ShredFlags, ShredVariant, + SIZE_OF_CODING_SHRED_HEADERS, SIZE_OF_COMMON_SHRED_HEADER, SIZE_OF_DATA_SHRED_HEADERS, + SIZE_OF_SIGNATURE, + }, + shredder::ReedSolomon, }, + reed_solomon_erasure::Error::{InvalidIndex, TooFewParityShards, TooFewShards}, solana_perf::packet::deserialize_from_with_limit, solana_sdk::{ clock::Slot, @@ -58,12 +66,58 @@ pub struct ShredCode { payload: Vec, } +#[derive(Clone, Debug, Eq, PartialEq)] +pub(super) enum Shred { + ShredCode(ShredCode), + ShredData(ShredData), +} + #[derive(Clone, Debug, Eq, PartialEq)] struct MerkleBranch { root: MerkleRoot, proof: Vec, } +impl Shred { + dispatch!(fn common_header(&self) -> &ShredCommonHeader); + dispatch!(fn erasure_shard_as_slice(&self) -> Result<&[u8], Error>); + dispatch!(fn erasure_shard_index(&self) -> Result); + dispatch!(fn merkle_tree_node(&self) -> Result); + dispatch!(fn 
sanitize(&self) -> Result<(), Error>); + dispatch!(fn set_merkle_branch(&mut self, merkle_branch: MerkleBranch) -> Result<(), Error>); + + fn merkle_root(&self) -> &MerkleRoot { + match self { + Self::ShredCode(shred) => &shred.merkle_branch.root, + Self::ShredData(shred) => &shred.merkle_branch.root, + } + } +} + +#[cfg(test)] +impl Shred { + dispatch!(fn set_signature(&mut self, signature: Signature)); + dispatch!(fn signed_message(&self) -> &[u8]); + + fn index(&self) -> u32 { + self.common_header().index + } + + fn shred_type(&self) -> ShredType { + ShredType::from(self.common_header().shred_variant) + } + + fn signature(&self) -> Signature { + self.common_header().signature + } + + #[must_use] + fn verify(&self, pubkey: &Pubkey) -> bool { + let message = self.signed_message(); + self.signature().verify(pubkey.as_ref(), message) + } +} + impl ShredData { // proof_size is the number of proof entries in the merkle tree branch. fn proof_size(&self) -> Result { @@ -75,13 +129,13 @@ impl ShredData { // Maximum size of ledger data that can be embedded in a data-shred. // Also equal to: - // ShredCode::size_of_erasure_encoded_slice(proof_size).unwrap() - // - SIZE_OF_DATA_SHRED_HEADERS + // ShredCode::capacity(proof_size).unwrap() + // - ShredData::SIZE_OF_HEADERS // + SIZE_OF_SIGNATURE pub(super) fn capacity(proof_size: u8) -> Result { Self::SIZE_OF_PAYLOAD .checked_sub( - SIZE_OF_DATA_SHRED_HEADERS + Self::SIZE_OF_HEADERS + SIZE_OF_MERKLE_ROOT + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY, ) @@ -90,7 +144,7 @@ impl ShredData { pub(super) fn get_signed_message_range(proof_size: u8) -> Option> { let data_buffer_size = Self::capacity(proof_size).ok()?; - let offset = SIZE_OF_DATA_SHRED_HEADERS + data_buffer_size; + let offset = Self::SIZE_OF_HEADERS + data_buffer_size; Some(offset..offset + SIZE_OF_MERKLE_ROOT) } @@ -104,6 +158,52 @@ impl ShredData { let index = self.erasure_shard_index()?; Ok(verify_merkle_proof(index, node, &self.merkle_branch)) } + + fn from_recovered_shard(signature: &Signature, mut shard: Vec) -> Result { + let shard_size = shard.len(); + if shard_size + SIZE_OF_SIGNATURE > Self::SIZE_OF_PAYLOAD { + return Err(Error::InvalidShardSize(shard_size)); + } + shard.resize(Self::SIZE_OF_PAYLOAD, 0u8); + shard.copy_within(0..shard_size, SIZE_OF_SIGNATURE); + shard[0..SIZE_OF_SIGNATURE].copy_from_slice(signature.as_ref()); + // Deserialize headers. + let mut cursor = Cursor::new(&shard[..]); + let common_header: ShredCommonHeader = deserialize_from_with_limit(&mut cursor)?; + let proof_size = match common_header.shred_variant { + ShredVariant::MerkleData(proof_size) => proof_size, + _ => return Err(Error::InvalidShredVariant), + }; + if ShredCode::capacity(proof_size)? != shard_size { + return Err(Error::InvalidShardSize(shard_size)); + } + let data_header = deserialize_from_with_limit(&mut cursor)?; + Ok(Self { + common_header, + data_header, + merkle_branch: MerkleBranch::new_zeroed(proof_size), + payload: shard, + }) + } + + fn set_merkle_branch(&mut self, merkle_branch: MerkleBranch) -> Result<(), Error> { + let proof_size = self.proof_size()?; + if merkle_branch.proof.len() != usize::from(proof_size) { + return Err(Error::InvalidMerkleProof); + } + let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size)?; + let mut cursor = Cursor::new( + self.payload + .get_mut(offset..) 
+ .ok_or(Error::InvalidProofSize(proof_size))?, + ); + bincode::serialize_into(&mut cursor, &merkle_branch.root)?; + for entry in &merkle_branch.proof { + bincode::serialize_into(&mut cursor, entry)?; + } + self.merkle_branch = merkle_branch; + Ok(()) + } } impl ShredCode { @@ -115,13 +215,13 @@ impl ShredCode { } } - // Size of the chunk of payload which will be erasure coded. - fn size_of_erasure_encoded_slice(proof_size: u8) -> Result { + // Size of buffer embedding erasure codes. + fn capacity(proof_size: u8) -> Result { // Merkle branch is generated and signed after coding shreds are // generated. Coding shred headers cannot be erasure coded either. Self::SIZE_OF_PAYLOAD .checked_sub( - SIZE_OF_CODING_SHRED_HEADERS + Self::SIZE_OF_HEADERS + SIZE_OF_MERKLE_ROOT + SIZE_OF_MERKLE_PROOF_ENTRY * usize::from(proof_size), ) @@ -130,10 +230,10 @@ impl ShredCode { fn merkle_tree_node(&self) -> Result { let proof_size = self.proof_size()?; - let shard_size = Self::size_of_erasure_encoded_slice(proof_size)?; + let shard_size = Self::capacity(proof_size)?; let chunk = self .payload - .get(SIZE_OF_SIGNATURE..SIZE_OF_CODING_SHRED_HEADERS + shard_size) + .get(SIZE_OF_SIGNATURE..Self::SIZE_OF_HEADERS + shard_size) .ok_or(Error::InvalidPayloadSize(self.payload.len()))?; Ok(hashv(&[MERKLE_HASH_PREFIX_LEAF, chunk])) } @@ -145,8 +245,7 @@ impl ShredCode { } pub(super) fn get_signed_message_range(proof_size: u8) -> Option> { - let offset = - SIZE_OF_CODING_SHRED_HEADERS + Self::size_of_erasure_encoded_slice(proof_size).ok()?; + let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size).ok()?; Some(offset..offset + SIZE_OF_MERKLE_ROOT) } @@ -155,18 +254,76 @@ impl ShredCode { || self.merkle_branch.root != other.merkle_branch.root || self.common_header.signature != other.common_header.signature } + + fn from_recovered_shard( + common_header: ShredCommonHeader, + coding_header: CodingShredHeader, + mut shard: Vec, + ) -> Result { + let proof_size = match common_header.shred_variant { + ShredVariant::MerkleCode(proof_size) => proof_size, + _ => return Err(Error::InvalidShredVariant), + }; + let shard_size = shard.len(); + if Self::capacity(proof_size)? != shard_size { + return Err(Error::InvalidShardSize(shard_size)); + } + if shard_size + Self::SIZE_OF_HEADERS > Self::SIZE_OF_PAYLOAD { + return Err(Error::InvalidShardSize(shard_size)); + } + shard.resize(Self::SIZE_OF_PAYLOAD, 0u8); + shard.copy_within(0..shard_size, Self::SIZE_OF_HEADERS); + let mut cursor = Cursor::new(&mut shard[..]); + bincode::serialize_into(&mut cursor, &common_header)?; + bincode::serialize_into(&mut cursor, &coding_header)?; + Ok(Self { + common_header, + coding_header, + merkle_branch: MerkleBranch::new_zeroed(proof_size), + payload: shard, + }) + } + + fn set_merkle_branch(&mut self, merkle_branch: MerkleBranch) -> Result<(), Error> { + let proof_size = self.proof_size()?; + if merkle_branch.proof.len() != usize::from(proof_size) { + return Err(Error::InvalidMerkleProof); + } + let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size)?; + let mut cursor = Cursor::new( + self.payload + .get_mut(offset..) 
+ .ok_or(Error::InvalidProofSize(proof_size))?, + ); + bincode::serialize_into(&mut cursor, &merkle_branch.root)?; + for entry in &merkle_branch.proof { + bincode::serialize_into(&mut cursor, entry)?; + } + self.merkle_branch = merkle_branch; + Ok(()) + } } -impl Shred for ShredData { +impl MerkleBranch { + fn new_zeroed(proof_size: u8) -> Self { + Self { + root: MerkleRoot::default(), + proof: vec![MerkleProofEntry::default(); usize::from(proof_size)], + } + } +} + +impl ShredTrait for ShredData { impl_shred_common!(); // Also equal to: - // SIZE_OF_DATA_SHRED_HEADERS + // ShredData::SIZE_OF_HEADERS // + ShredData::capacity(proof_size).unwrap() // + SIZE_OF_MERKLE_ROOT // + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY const SIZE_OF_PAYLOAD: usize = - ShredCode::SIZE_OF_PAYLOAD - SIZE_OF_CODING_SHRED_HEADERS + SIZE_OF_SIGNATURE; + ShredCode::SIZE_OF_PAYLOAD - ShredCode::SIZE_OF_HEADERS + SIZE_OF_SIGNATURE; + const SIZE_OF_HEADERS: usize = SIZE_OF_DATA_SHRED_HEADERS; fn from_payload(mut payload: Vec) -> Result { if payload.len() < Self::SIZE_OF_PAYLOAD { @@ -213,7 +370,7 @@ impl Shred for ShredData { let proof_size = self.proof_size()?; let data_buffer_size = Self::capacity(proof_size)?; let mut shard = self.payload; - shard.truncate(SIZE_OF_DATA_SHRED_HEADERS + data_buffer_size); + shard.truncate(Self::SIZE_OF_HEADERS + data_buffer_size); shard.drain(0..SIZE_OF_SIGNATURE); Ok(shard) } @@ -225,7 +382,7 @@ impl Shred for ShredData { let proof_size = self.proof_size()?; let data_buffer_size = Self::capacity(proof_size)?; self.payload - .get(SIZE_OF_SIGNATURE..SIZE_OF_DATA_SHRED_HEADERS + data_buffer_size) + .get(SIZE_OF_SIGNATURE..Self::SIZE_OF_HEADERS + data_buffer_size) .ok_or(Error::InvalidPayloadSize(self.payload.len())) } @@ -249,9 +406,10 @@ impl Shred for ShredData { } } -impl Shred for ShredCode { +impl ShredTrait for ShredCode { impl_shred_common!(); const SIZE_OF_PAYLOAD: usize = shred_code::ShredCode::SIZE_OF_PAYLOAD; + const SIZE_OF_HEADERS: usize = SIZE_OF_CODING_SHRED_HEADERS; fn from_payload(mut payload: Vec) -> Result { let mut cursor = Cursor::new(&payload[..]); @@ -262,7 +420,7 @@ impl Shred for ShredCode { }; let coding_header = deserialize_from_with_limit(&mut cursor)?; // Skip erasure code shard. - let shard_size = Self::size_of_erasure_encoded_slice(proof_size)?; + let shard_size = Self::capacity(proof_size)?; let shard_size = i64::try_from(shard_size).unwrap(); cursor.seek(SeekFrom::Current(shard_size))?; // Deserialize merkle branch. 
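The merkle recover() function in the following hunks rebuilds missing shreds with the reed-solomon-erasure crate: present shreds become Some(shard) entries, missing ones None, and reconstruct() fills the gaps. A minimal standalone sketch of that pattern (assuming the crate's galois_8::ReedSolomon re-export; not code from this patch):

use reed_solomon_erasure::galois_8::ReedSolomon;

fn demo_reconstruct() -> Result<(), reed_solomon_erasure::Error> {
    // 3 data shards + 2 parity shards, mirroring ReedSolomon::new(num_data, num_coding).
    let rs = ReedSolomon::new(3, 2)?;
    let data: Vec<Vec<u8>> = vec![vec![1, 2], vec![3, 4], vec![5, 6]];
    let mut parity = vec![vec![0u8; 2]; 2];
    rs.encode_sep(&data, &mut parity)?; // same call the shredder tests use
    // Present shards are Some(..), missing ones None; any 3 of the 5 suffice.
    let mut shards: Vec<Option<Vec<u8>>> = vec![
        Some(data[0].clone()),
        None, // lost data shard, to be reconstructed
        Some(data[2].clone()),
        None, // lost parity shard
        Some(parity[1].clone()),
    ];
    rs.reconstruct(&mut shards)?;
    assert_eq!(shards[1].as_deref(), Some(&[3u8, 4][..]));
    Ok(())
}

recover() then turns reconstructed shards back into shreds via from_recovered_shard and attaches fresh merkle branches from the recomputed tree, as the hunks below show.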
@@ -294,9 +452,9 @@ impl Shred for ShredCode { return Err(Error::InvalidPayloadSize(self.payload.len())); } let proof_size = self.proof_size()?; - let shard_size = Self::size_of_erasure_encoded_slice(proof_size)?; + let shard_size = Self::capacity(proof_size)?; let mut shard = self.payload; - shard.drain(..SIZE_OF_CODING_SHRED_HEADERS); + shard.drain(..Self::SIZE_OF_HEADERS); shard.truncate(shard_size); Ok(shard) } @@ -306,9 +464,9 @@ impl Shred for ShredCode { return Err(Error::InvalidPayloadSize(self.payload.len())); } let proof_size = self.proof_size()?; - let shard_size = Self::size_of_erasure_encoded_slice(proof_size)?; + let shard_size = Self::capacity(proof_size)?; self.payload - .get(SIZE_OF_CODING_SHRED_HEADERS..SIZE_OF_CODING_SHRED_HEADERS + shard_size) + .get(Self::SIZE_OF_HEADERS..Self::SIZE_OF_HEADERS + shard_size) .ok_or(Error::InvalidPayloadSize(self.payload.len())) } @@ -343,15 +501,15 @@ impl ShredDataTrait for ShredData { let data_buffer_size = Self::capacity(proof_size)?; let size = usize::from(self.data_header.size); if size > self.payload.len() - || size < SIZE_OF_DATA_SHRED_HEADERS - || size > SIZE_OF_DATA_SHRED_HEADERS + data_buffer_size + || size < Self::SIZE_OF_HEADERS + || size > Self::SIZE_OF_HEADERS + data_buffer_size { return Err(Error::InvalidDataSize { size: self.data_header.size, payload: self.payload.len(), }); } - Ok(&self.payload[SIZE_OF_DATA_SHRED_HEADERS..size]) + Ok(&self.payload[Self::SIZE_OF_HEADERS..size]) } // Only for tests. @@ -390,7 +548,6 @@ fn verify_merkle_proof(index: usize, node: Hash, merkle_branch: &MerkleBranch) - (index, root) == (0usize, &merkle_branch.root[..]) } -#[cfg(test)] fn make_merkle_tree(mut nodes: Vec) -> Vec { let mut size = nodes.len(); while size > 1 { @@ -406,7 +563,6 @@ fn make_merkle_tree(mut nodes: Vec) -> Vec { nodes } -#[cfg(test)] fn make_merkle_branch( mut index: usize, // leaf index ~ shred's erasure shard index. mut size: usize, // number of leaves ~ erasure batch size. @@ -433,13 +589,174 @@ fn make_merkle_branch( Some(MerkleBranch { root, proof }) } +pub(super) fn recover(mut shreds: Vec) -> Result, Error> { + // Grab {common, coding} headers from first coding shred. + let headers = shreds.iter().find_map(|shred| { + let shred = match shred { + Shred::ShredCode(shred) => shred, + Shred::ShredData(_) => return None, + }; + let position = u32::from(shred.coding_header.position); + let common_header = ShredCommonHeader { + index: shred.common_header.index.checked_sub(position)?, + ..shred.common_header + }; + let coding_header = CodingShredHeader { + position: 0u16, + ..shred.coding_header + }; + Some((common_header, coding_header)) + }); + let (common_header, coding_header) = headers.ok_or(TooFewParityShards)?; + debug_assert!(matches!( + common_header.shred_variant, + ShredVariant::MerkleCode(_) + )); + let proof_size = match common_header.shred_variant { + ShredVariant::MerkleCode(proof_size) => proof_size, + ShredVariant::MerkleData(_) | ShredVariant::LegacyCode | ShredVariant::LegacyData => { + return Err(Error::InvalidShredVariant); + } + }; + // Verify that shreds belong to the same erasure batch + // and have consistent headers. 
+ debug_assert!(shreds.iter().all(|shred| { + let ShredCommonHeader { + signature, + shred_variant, + slot, + index: _, + version, + fec_set_index, + } = shred.common_header(); + signature == &common_header.signature + && slot == &common_header.slot + && version == &common_header.version + && fec_set_index == &common_header.fec_set_index + && match shred { + Shred::ShredData(_) => shred_variant == &ShredVariant::MerkleData(proof_size), + Shred::ShredCode(shred) => { + let CodingShredHeader { + num_data_shreds, + num_coding_shreds, + position: _, + } = shred.coding_header; + shred_variant == &ShredVariant::MerkleCode(proof_size) + && num_data_shreds == coding_header.num_data_shreds + && num_coding_shreds == coding_header.num_coding_shreds + } + } + })); + let num_data_shreds = usize::from(coding_header.num_data_shreds); + let num_coding_shreds = usize::from(coding_header.num_coding_shreds); + let num_shards = num_data_shreds + num_coding_shreds; + // Obtain erasure encoded shards from shreds. + let shreds = { + let mut batch = vec![None; num_shards]; + while let Some(shred) = shreds.pop() { + let index = match shred.erasure_shard_index() { + Ok(index) if index < batch.len() => index, + _ => return Err(Error::from(InvalidIndex)), + }; + batch[index] = Some(shred); + } + batch + }; + let mut shards: Vec>> = shreds + .iter() + .map(|shred| Some(shred.as_ref()?.erasure_shard_as_slice().ok()?.to_vec())) + .collect(); + ReedSolomon::new(num_data_shreds, num_coding_shreds)?.reconstruct(&mut shards)?; + let mask: Vec<_> = shreds.iter().map(Option::is_some).collect(); + // Reconstruct code and data shreds from erasure encoded shards. + let mut shreds: Vec<_> = shreds + .into_iter() + .zip(shards) + .enumerate() + .map(|(index, (shred, shard))| { + if let Some(shred) = shred { + return Ok(shred); + } + let shard = shard.ok_or(TooFewShards)?; + if index < num_data_shreds { + let shred = ShredData::from_recovered_shard(&common_header.signature, shard)?; + let ShredCommonHeader { + signature: _, + shred_variant, + slot, + index: _, + version, + fec_set_index, + } = shred.common_header; + if shred_variant != ShredVariant::MerkleData(proof_size) + || common_header.slot != slot + || common_header.version != version + || common_header.fec_set_index != fec_set_index + { + return Err(Error::InvalidRecoveredShred); + } + Ok(Shred::ShredData(shred)) + } else { + let offset = index - num_data_shreds; + let coding_header = CodingShredHeader { + position: offset as u16, + ..coding_header + }; + let common_header = ShredCommonHeader { + index: common_header.index + offset as u32, + ..common_header + }; + let shred = ShredCode::from_recovered_shard(common_header, coding_header, shard)?; + Ok(Shred::ShredCode(shred)) + } + }) + .collect::>()?; + // Compute merkle tree and set the merkle branch on the recovered shreds. 
+ let nodes: Vec<_> = shreds + .iter() + .map(Shred::merkle_tree_node) + .collect::>()?; + let tree = make_merkle_tree(nodes); + let merkle_root = &tree.last().unwrap().as_ref()[..SIZE_OF_MERKLE_ROOT]; + let merkle_root = MerkleRoot::try_from(merkle_root).unwrap(); + for (index, (shred, mask)) in shreds.iter_mut().zip(&mask).enumerate() { + if *mask { + if shred.merkle_root() != &merkle_root { + return Err(Error::InvalidMerkleProof); + } + } else { + let merkle_branch = + make_merkle_branch(index, num_shards, &tree).ok_or(Error::InvalidMerkleProof)?; + if merkle_branch.proof.len() != usize::from(proof_size) { + return Err(Error::InvalidMerkleProof); + } + shred.set_merkle_branch(merkle_branch)?; + } + } + // TODO: No need to verify merkle proof in sanitize here. + shreds + .into_iter() + .zip(mask) + .filter(|(_, mask)| !mask) + .map(|(shred, _)| shred.sanitize().map(|_| shred)) + .collect() +} + #[cfg(test)] mod test { - use {super::*, rand::Rng, std::iter::repeat_with}; + use { + super::*, + itertools::Itertools, + matches::assert_matches, + rand::{seq::SliceRandom, CryptoRng, Rng}, + solana_sdk::signature::{Keypair, Signer}, + std::{cmp::Ordering, iter::repeat_with}, + test_case::test_case, + }; // Total size of a data shred including headers and merkle branch. fn shred_data_size_of_payload(proof_size: u8) -> usize { - SIZE_OF_DATA_SHRED_HEADERS + ShredData::SIZE_OF_HEADERS + ShredData::capacity(proof_size).unwrap() + SIZE_OF_MERKLE_ROOT + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY @@ -451,9 +768,8 @@ mod test { // size of erasure encoded header. fn shred_data_capacity(proof_size: u8) -> usize { const SIZE_OF_ERASURE_ENCODED_HEADER: usize = - SIZE_OF_DATA_SHRED_HEADERS - SIZE_OF_SIGNATURE; - ShredCode::size_of_erasure_encoded_slice(proof_size).unwrap() - - SIZE_OF_ERASURE_ENCODED_HEADER + ShredData::SIZE_OF_HEADERS - SIZE_OF_SIGNATURE; + ShredCode::capacity(proof_size).unwrap() - SIZE_OF_ERASURE_ENCODED_HEADER } fn shred_data_size_of_erasure_encoded_slice(proof_size: u8) -> usize { @@ -484,10 +800,10 @@ mod test { } #[test] - fn test_size_of_erasure_encoded_slice() { + fn test_shred_code_capacity() { for proof_size in 0..0x15 { assert_eq!( - ShredCode::size_of_erasure_encoded_slice(proof_size).unwrap(), + ShredCode::capacity(proof_size).unwrap(), shred_data_size_of_erasure_encoded_slice(proof_size), ); } @@ -525,4 +841,153 @@ mod test { run_merkle_tree_round_trip(size); } } + + #[test_case(37)] + #[test_case(64)] + #[test_case(73)] + fn test_recover_merkle_shreds(num_shreds: usize) { + let mut rng = rand::thread_rng(); + for num_data_shreds in 1..num_shreds { + let num_coding_shreds = num_shreds - num_data_shreds; + run_recover_merkle_shreds(&mut rng, num_data_shreds, num_coding_shreds); + } + } + + fn run_recover_merkle_shreds( + rng: &mut R, + num_data_shreds: usize, + num_coding_shreds: usize, + ) { + let keypair = Keypair::generate(rng); + let num_shreds = num_data_shreds + num_coding_shreds; + let proof_size = (num_shreds as f64).log2().ceil() as u8; + let capacity = ShredData::capacity(proof_size).unwrap(); + let common_header = ShredCommonHeader { + signature: Signature::default(), + shred_variant: ShredVariant::MerkleData(proof_size), + slot: 145865705, + index: 1835, + version: 4978, + fec_set_index: 1835, + }; + let data_header = DataShredHeader { + parent_offset: 25, + flags: unsafe { ShredFlags::from_bits_unchecked(0b0010_1010) }, + size: 0, + }; + let coding_header = CodingShredHeader { + num_data_shreds: num_data_shreds as u16, + num_coding_shreds: 
num_coding_shreds as u16, + position: 0, + }; + let mut shreds = Vec::with_capacity(num_shreds); + for i in 0..num_data_shreds { + let common_header = ShredCommonHeader { + index: common_header.index + i as u32, + ..common_header + }; + let size = ShredData::SIZE_OF_HEADERS + rng.gen_range(0, capacity); + let data_header = DataShredHeader { + size: size as u16, + ..data_header + }; + let mut payload = vec![0u8; ShredData::SIZE_OF_PAYLOAD]; + let mut cursor = Cursor::new(&mut payload[..]); + bincode::serialize_into(&mut cursor, &common_header).unwrap(); + bincode::serialize_into(&mut cursor, &data_header).unwrap(); + rng.fill(&mut payload[ShredData::SIZE_OF_HEADERS..size]); + let shred = ShredData { + common_header, + data_header, + merkle_branch: MerkleBranch::new_zeroed(proof_size), + payload, + }; + shreds.push(Shred::ShredData(shred)); + } + let data: Vec<_> = shreds + .iter() + .map(Shred::erasure_shard_as_slice) + .collect::>() + .unwrap(); + let mut parity = vec![vec![0u8; data[0].len()]; num_coding_shreds]; + ReedSolomon::new(num_data_shreds, num_coding_shreds) + .unwrap() + .encode_sep(&data, &mut parity[..]) + .unwrap(); + for (i, code) in parity.into_iter().enumerate() { + let common_header = ShredCommonHeader { + shred_variant: ShredVariant::MerkleCode(proof_size), + index: common_header.index + i as u32 + 7, + ..common_header + }; + let coding_header = CodingShredHeader { + position: i as u16, + ..coding_header + }; + let mut payload = vec![0u8; ShredCode::SIZE_OF_PAYLOAD]; + let mut cursor = Cursor::new(&mut payload[..]); + bincode::serialize_into(&mut cursor, &common_header).unwrap(); + bincode::serialize_into(&mut cursor, &coding_header).unwrap(); + payload[ShredCode::SIZE_OF_HEADERS..ShredCode::SIZE_OF_HEADERS + code.len()] + .copy_from_slice(&code); + let shred = ShredCode { + common_header, + coding_header, + merkle_branch: MerkleBranch::new_zeroed(proof_size), + payload, + }; + shreds.push(Shred::ShredCode(shred)); + } + let nodes: Vec<_> = shreds + .iter() + .map(Shred::merkle_tree_node) + .collect::>() + .unwrap(); + let tree = make_merkle_tree(nodes); + for (index, shred) in shreds.iter_mut().enumerate() { + let merkle_branch = make_merkle_branch(index, num_shreds, &tree).unwrap(); + assert_eq!(merkle_branch.proof.len(), usize::from(proof_size)); + shred.set_merkle_branch(merkle_branch).unwrap(); + let signature = keypair.sign_message(shred.signed_message()); + shred.set_signature(signature); + assert!(shred.verify(&keypair.pubkey())); + assert_matches!(shred.sanitize(), Ok(())); + } + assert_eq!(shreds.iter().map(Shred::signature).dedup().count(), 1); + for size in num_data_shreds..num_shreds { + let mut shreds = shreds.clone(); + let mut removed_shreds = Vec::new(); + while shreds.len() > size { + let index = rng.gen_range(0, shreds.len()); + removed_shreds.push(shreds.swap_remove(index)); + } + shreds.shuffle(rng); + // Should at least contain one coding shred. 
+ if shreds.iter().all(|shred| { + matches!( + shred.common_header().shred_variant, + ShredVariant::MerkleData(_) + ) + }) { + assert_matches!( + recover(shreds), + Err(Error::ErasureError(TooFewParityShards)) + ); + continue; + } + let recovered_shreds = recover(shreds).unwrap(); + assert_eq!(size + recovered_shreds.len(), num_shreds); + assert_eq!(recovered_shreds.len(), removed_shreds.len()); + removed_shreds.sort_by(|a, b| { + if a.shred_type() == b.shred_type() { + a.index().cmp(&b.index()) + } else if a.shred_type() == ShredType::Data { + Ordering::Less + } else { + Ordering::Greater + } + }); + assert_eq!(recovered_shreds, removed_shreds); + } + } } diff --git a/ledger/src/shred/shred_code.rs b/ledger/src/shred/shred_code.rs index 25ce8a2385..1fe3fef026 100644 --- a/ledger/src/shred/shred_code.rs +++ b/ledger/src/shred/shred_code.rs @@ -3,14 +3,16 @@ use { common::dispatch, legacy, merkle, traits::{Shred, ShredCode as ShredCodeTrait}, - CodingShredHeader, Error, ShredCommonHeader, ShredType, MAX_DATA_SHREDS_PER_FEC_BLOCK, + CodingShredHeader, Error, ShredCommonHeader, ShredType, DATA_SHREDS_PER_FEC_BLOCK, MAX_DATA_SHREDS_PER_SLOT, SIZE_OF_NONCE, }, solana_sdk::{clock::Slot, packet::PACKET_DATA_SIZE, signature::Signature}, static_assertions::const_assert_eq, }; -pub(super) const MAX_CODE_SHREDS_PER_SLOT: usize = MAX_DATA_SHREDS_PER_SLOT; +// See ERASURE_BATCH_SIZE. +const_assert_eq!(MAX_CODE_SHREDS_PER_SLOT, 32_768 * 17); +pub(crate) const MAX_CODE_SHREDS_PER_SLOT: usize = MAX_DATA_SHREDS_PER_SLOT * 17; const_assert_eq!(ShredCode::SIZE_OF_PAYLOAD, 1228); @@ -117,7 +119,7 @@ pub(super) fn erasure_shard_index(shred: &T) -> Option let position = usize::from(coding_header.position); let fec_set_size = num_data_shreds.checked_add(num_coding_shreds)?; let index = position.checked_add(num_data_shreds)?; - (index < fec_set_size).then(|| index) + (index < fec_set_size).then_some(index) } pub(super) fn sanitize(shred: &T) -> Result<(), Error> { @@ -132,8 +134,8 @@ pub(super) fn sanitize(shred: &T) -> Result<(), Error> { common_header.index, )); } - let num_coding_shreds = u32::from(coding_header.num_coding_shreds); - if num_coding_shreds > 8 * MAX_DATA_SHREDS_PER_FEC_BLOCK { + let num_coding_shreds = usize::from(coding_header.num_coding_shreds); + if num_coding_shreds > 8 * DATA_SHREDS_PER_FEC_BLOCK { return Err(Error::InvalidNumCodingShreds( coding_header.num_coding_shreds, )); diff --git a/ledger/src/shred/traits.rs b/ledger/src/shred/traits.rs index 70e049113d..b5326b62a7 100644 --- a/ledger/src/shred/traits.rs +++ b/ledger/src/shred/traits.rs @@ -7,6 +7,8 @@ pub(super) trait Shred: Sized { // Total size of payload including headers, merkle // branches (if any), zero paddings, etc. const SIZE_OF_PAYLOAD: usize; + // Size of common and code/data headers. + const SIZE_OF_HEADERS: usize; fn from_payload(shred: Vec) -> Result; fn common_header(&self) -> &ShredCommonHeader; diff --git a/ledger/src/shredder.rs b/ledger/src/shredder.rs index 22df6b87d7..1b1db2f977 100644 --- a/ledger/src/shredder.rs +++ b/ledger/src/shredder.rs @@ -1,6 +1,6 @@ use { crate::shred::{ - Error, ProcessShredsStats, Shred, ShredData, ShredFlags, MAX_DATA_SHREDS_PER_FEC_BLOCK, + Error, ProcessShredsStats, Shred, ShredData, ShredFlags, DATA_SHREDS_PER_FEC_BLOCK, }, itertools::Itertools, lazy_static::lazy_static, @@ -19,12 +19,21 @@ use { lazy_static! 
{ static ref PAR_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new() .num_threads(get_thread_count()) - .thread_name(|ix| format!("shredder_{}", ix)) + .thread_name(|ix| format!("solShredder{:02}", ix)) .build() .unwrap(); } -type ReedSolomon = reed_solomon_erasure::ReedSolomon<Field>; +// Maps number of data shreds to the optimal erasure batch size which has the +// same recovery probabilities as a 32:32 erasure batch. +const ERASURE_BATCH_SIZE: [usize; 33] = [ + 0, 18, 20, 22, 23, 25, 27, 28, 30, // 8 + 32, 33, 35, 36, 38, 39, 41, 42, // 16 + 43, 45, 46, 48, 49, 51, 52, 53, // 24 + 55, 56, 58, 59, 60, 62, 63, 64, // 32 +]; + +pub(crate) type ReedSolomon = reed_solomon_erasure::ReedSolomon<Field>; #[derive(Debug)] pub struct Shredder { @@ -65,45 +74,20 @@ impl Shredder { Vec<Shred>, // data shreds Vec<Shred>, // coding shreds ) { - let data_shreds = self.entries_to_data_shreds( - keypair, - entries, - is_last_in_slot, - next_shred_index, - next_shred_index, // fec_set_offset - stats, - ); - let coding_shreds = Self::data_shreds_to_coding_shreds( - keypair, - &data_shreds, - is_last_in_slot, - next_code_index, - stats, - ) - .unwrap(); + let data_shreds = + self.entries_to_data_shreds(keypair, entries, is_last_in_slot, next_shred_index, stats); + let coding_shreds = + Self::data_shreds_to_coding_shreds(keypair, &data_shreds, next_code_index, stats) + .unwrap(); (data_shreds, coding_shreds) } - /// Each FEC block has maximum MAX_DATA_SHREDS_PER_FEC_BLOCK shreds. - /// "FEC set index" is the index of first data shred in that FEC block. - /// **Data** shreds with the same value of: - /// (data_shred.index() - fec_set_offset) / MAX_DATA_SHREDS_PER_FEC_BLOCK - /// belong to the same FEC set. - /// Coding shreds inherit their fec_set_index from the data shreds that - /// they are generated from. - pub fn fec_set_index(data_shred_index: u32, fec_set_offset: u32) -> Option<u32> { - let diff = data_shred_index.checked_sub(fec_set_offset)?; - Some(data_shred_index - diff % MAX_DATA_SHREDS_PER_FEC_BLOCK) - } - - pub fn entries_to_data_shreds( + fn entries_to_data_shreds( &self, keypair: &Keypair, entries: &[Entry], is_last_in_slot: bool, next_shred_index: u32, - // Shred index offset at which FEC sets are generated.
- fec_set_offset: u32, process_stats: &mut ProcessShredsStats, ) -> Vec { let mut serialize_time = Measure::start("shred_serialize"); @@ -119,7 +103,7 @@ impl Shredder { let num_shreds = (serialized_shreds.len() + data_buffer_size - 1) / data_buffer_size; let last_shred_index = next_shred_index + num_shreds as u32 - 1; // 1) Generate data shreds - let make_data_shred = |shred_index: u32, data| { + let make_data_shred = |data, shred_index: u32, fec_set_index: u32| { let flags = if shred_index != last_shred_index { ShredFlags::empty() } else if is_last_in_slot { @@ -129,7 +113,6 @@ impl Shredder { ShredFlags::DATA_COMPLETE_SHRED }; let parent_offset = self.slot - self.parent_slot; - let fec_set_index = Self::fec_set_index(shred_index, fec_set_offset); let mut shred = Shred::new_from_data( self.slot, shred_index, @@ -138,18 +121,24 @@ impl Shredder { flags, self.reference_tick, self.version, - fec_set_index.unwrap(), + fec_set_index, ); shred.sign(keypair); shred }; - let data_shreds: Vec = PAR_THREAD_POOL.install(|| { - serialized_shreds - .par_chunks(data_buffer_size) + let shreds: Vec<&[u8]> = serialized_shreds.chunks(data_buffer_size).collect(); + let fec_set_offsets: Vec = + get_fec_set_offsets(shreds.len(), DATA_SHREDS_PER_FEC_BLOCK).collect(); + assert_eq!(shreds.len(), fec_set_offsets.len()); + let shreds: Vec = PAR_THREAD_POOL.install(|| { + shreds + .into_par_iter() + .zip(fec_set_offsets) .enumerate() - .map(|(i, shred_data)| { + .map(|(i, (shred, offset))| { let shred_index = next_shred_index + i as u32; - make_data_shred(shred_index, shred_data) + let fec_set_index = next_shred_index + offset as u32; + make_data_shred(shred, shred_index, fec_set_index) }) .collect() }); @@ -157,15 +146,14 @@ impl Shredder { process_stats.serialize_elapsed += serialize_time.as_us(); process_stats.gen_data_elapsed += gen_data_time.as_us(); - process_stats.record_num_data_shreds(data_shreds.len()); + process_stats.record_num_data_shreds(shreds.len()); - data_shreds + shreds } - pub fn data_shreds_to_coding_shreds( + fn data_shreds_to_coding_shreds( keypair: &Keypair, data_shreds: &[Shred], - is_last_in_slot: bool, next_code_index: u32, process_stats: &mut ProcessShredsStats, ) -> Result, Error> { @@ -185,8 +173,7 @@ impl Shredder { .iter() .scan(next_code_index, |next_code_index, chunk| { let num_data_shreds = chunk.len(); - let erasure_batch_size = - get_erasure_batch_size(num_data_shreds, is_last_in_slot); + let erasure_batch_size = get_erasure_batch_size(num_data_shreds); *next_code_index += (erasure_batch_size - num_data_shreds) as u32; Some(*next_code_index) }), @@ -198,7 +185,7 @@ impl Shredder { .into_par_iter() .zip(next_code_index) .flat_map(|(shreds, next_code_index)| { - Shredder::generate_coding_shreds(&shreds, is_last_in_slot, next_code_index) + Shredder::generate_coding_shreds(&shreds, next_code_index) }) .collect() }); @@ -221,7 +208,6 @@ impl Shredder { /// Generates coding shreds for the data shreds in the current FEC set pub fn generate_coding_shreds>( data: &[T], - is_last_in_slot: bool, next_code_index: u32, ) -> Vec { let (slot, index, version, fec_set_index) = { @@ -241,9 +227,10 @@ impl Shredder { && shred.version() == version && shred.fec_set_index() == fec_set_index)); let num_data = data.len(); - let num_coding = get_erasure_batch_size(num_data, is_last_in_slot) + let num_coding = get_erasure_batch_size(num_data) .checked_sub(num_data) .unwrap(); + assert!(num_coding > 0); let data: Vec<_> = data .iter() .map(Borrow::borrow) @@ -360,21 +347,43 @@ impl Shredder { } /// Maps 
number of data shreds in each batch to the erasure batch size. -fn get_erasure_batch_size(num_data_shreds: usize, is_last_in_slot: bool) -> usize { - if is_last_in_slot { - 2 * num_data_shreds.max(MAX_DATA_SHREDS_PER_FEC_BLOCK as usize) - } else { - 2 * num_data_shreds - } +fn get_erasure_batch_size(num_data_shreds: usize) -> usize { + ERASURE_BATCH_SIZE + .get(num_data_shreds) + .copied() + .unwrap_or(2 * num_data_shreds) +} + +// Returns offsets to fec_set_index when splitting shreds into erasure batches. +fn get_fec_set_offsets( + mut num_shreds: usize, + min_chunk_size: usize, +) -> impl Iterator<Item = usize> { + let mut offset = 0; + std::iter::from_fn(move || { + if num_shreds == 0 { + return None; + } + let num_chunks = (num_shreds / min_chunk_size).max(1); + let chunk_size = (num_shreds + num_chunks - 1) / num_chunks; + let offsets = std::iter::repeat(offset).take(chunk_size); + num_shreds -= chunk_size; + offset += chunk_size; + Some(offsets) + }) + .flatten() } #[cfg(test)] mod tests { use { super::*, - crate::shred::{ - self, max_entries_per_n_shred, max_ticks_per_n_shreds, verify_test_data_shred, - ShredType, + crate::{ + blockstore::MAX_DATA_SHREDS_PER_SLOT, + shred::{ + self, max_entries_per_n_shred, max_ticks_per_n_shreds, verify_test_data_shred, + ShredType, MAX_CODE_SHREDS_PER_SLOT, + }, }, bincode::serialized_size, matches::assert_matches, @@ -427,8 +436,7 @@ mod tests { let data_buffer_size = ShredData::capacity(/*merkle_proof_size:*/ None).unwrap(); let num_expected_data_shreds = (size + data_buffer_size - 1) / data_buffer_size; let num_expected_coding_shreds = - get_erasure_batch_size(num_expected_data_shreds, /*is_last_in_slot:*/ true) - - num_expected_data_shreds; + get_erasure_batch_size(num_expected_data_shreds) - num_expected_data_shreds; let start_index = 0; let (data_shreds, coding_shreds) = shredder.entries_to_shreds( &keypair, @@ -684,7 +692,7 @@ mod tests { assert_eq!(data_shreds.len(), num_data_shreds); assert_eq!( num_coding_shreds, - get_erasure_batch_size(num_data_shreds, is_last_in_slot) - num_data_shreds + get_erasure_batch_size(num_data_shreds) - num_data_shreds ); let all_shreds = data_shreds @@ -989,19 +997,34 @@ mod tests { start_index, // next_code_index &mut ProcessShredsStats::default(), ); - let max_per_block = MAX_DATA_SHREDS_PER_FEC_BLOCK as usize; - data_shreds.iter().enumerate().for_each(|(i, s)| { - let expected_fec_set_index = start_index + (i - i % max_per_block) as u32; - assert_eq!(s.fec_set_index(), expected_fec_set_index); - }); - - coding_shreds.iter().enumerate().for_each(|(i, s)| { - let mut expected_fec_set_index = start_index + (i - i % max_per_block) as u32; - while expected_fec_set_index as usize - start_index as usize > data_shreds.len() { - expected_fec_set_index -= max_per_block as u32; - } - assert_eq!(s.fec_set_index(), expected_fec_set_index); - }); + const MIN_CHUNK_SIZE: usize = DATA_SHREDS_PER_FEC_BLOCK; + let chunks: Vec<_> = data_shreds + .iter() + .group_by(|shred| shred.fec_set_index()) + .into_iter() + .map(|(fec_set_index, chunk)| (fec_set_index, chunk.count())) + .collect(); + assert!(chunks + .iter() + .all(|(_, chunk_size)| *chunk_size >= MIN_CHUNK_SIZE)); + assert!(chunks + .iter() + .all(|(_, chunk_size)| *chunk_size < 2 * MIN_CHUNK_SIZE)); + assert_eq!(chunks[0].0, start_index); + assert!(chunks.iter().tuple_windows().all( + |((fec_set_index, chunk_size), (next_fec_set_index, _chunk_size))| fec_set_index + + *chunk_size as u32 + == *next_fec_set_index + )); + assert!(coding_shreds.len() >= data_shreds.len()); + 
assert!(coding_shreds + .iter() + .zip(&data_shreds) + .all(|(code, data)| code.fec_set_index() == data.fec_set_index())); + assert_eq!( + coding_shreds.last().unwrap().fec_set_index(), + data_shreds.last().unwrap().fec_set_index() + ); } #[test] @@ -1028,42 +1051,73 @@ mod tests { &entries, true, // is_last_in_slot start_index, - start_index, // fec_set_offset &mut stats, ); - assert!(data_shreds.len() > MAX_DATA_SHREDS_PER_FEC_BLOCK as usize); let next_code_index = data_shreds[0].index(); - (1..=MAX_DATA_SHREDS_PER_FEC_BLOCK as usize).for_each(|count| { - for is_last_in_slot in [false, true] { - let coding_shreds = Shredder::data_shreds_to_coding_shreds( - &keypair, - &data_shreds[..count], - is_last_in_slot, - next_code_index, - &mut stats, - ) - .unwrap(); - let num_coding_shreds = get_erasure_batch_size(count, is_last_in_slot) - count; - assert_eq!(coding_shreds.len(), num_coding_shreds); - } - }); - for is_last_in_slot in [false, true] { + for size in (1..data_shreds.len()).step_by(5) { + let data_shreds = &data_shreds[..size]; let coding_shreds = Shredder::data_shreds_to_coding_shreds( &keypair, - &data_shreds[..MAX_DATA_SHREDS_PER_FEC_BLOCK as usize + 1], - is_last_in_slot, + data_shreds, next_code_index, &mut stats, ) .unwrap(); - let num_shreds = - get_erasure_batch_size(MAX_DATA_SHREDS_PER_FEC_BLOCK as usize, is_last_in_slot) - + get_erasure_batch_size(1, is_last_in_slot); + let num_shreds: usize = data_shreds + .iter() + .group_by(|shred| shred.fec_set_index()) + .into_iter() + .map(|(_, chunk)| get_erasure_batch_size(chunk.count())) + .sum(); + assert_eq!(coding_shreds.len(), num_shreds - data_shreds.len()); + } + } + + #[test] + fn test_get_fec_set_offsets() { + const MIN_CHUNK_SIZE: usize = 32usize; + for num_shreds in 0usize..MIN_CHUNK_SIZE { + let offsets: Vec<_> = get_fec_set_offsets(num_shreds, MIN_CHUNK_SIZE).collect(); + assert_eq!(offsets, vec![0usize; num_shreds]); + } + for num_shreds in MIN_CHUNK_SIZE..MIN_CHUNK_SIZE * 8 { + let chunks: Vec<_> = get_fec_set_offsets(num_shreds, MIN_CHUNK_SIZE) + .group_by(|offset| *offset) + .into_iter() + .map(|(offset, chunk)| (offset, chunk.count())) + .collect(); assert_eq!( - coding_shreds.len(), - num_shreds - MAX_DATA_SHREDS_PER_FEC_BLOCK as usize - 1 + chunks + .iter() + .map(|(_offset, chunk_size)| chunk_size) + .sum::(), + num_shreds + ); + assert!(chunks + .iter() + .all(|(_offset, chunk_size)| *chunk_size >= MIN_CHUNK_SIZE)); + assert!(chunks + .iter() + .all(|(_offset, chunk_size)| *chunk_size < 2 * MIN_CHUNK_SIZE)); + assert_eq!(chunks[0].0, 0); + assert!(chunks.iter().tuple_windows().all( + |((offset, chunk_size), (next_offset, _chunk_size))| offset + chunk_size + == *next_offset + )); + } + } + + #[test] + fn test_max_shreds_per_slot() { + for num_data_shreds in 0..128 { + let num_coding_shreds = get_erasure_batch_size(num_data_shreds) + .checked_sub(num_data_shreds) + .unwrap(); + assert!( + MAX_DATA_SHREDS_PER_SLOT * num_coding_shreds + <= MAX_CODE_SHREDS_PER_SLOT * num_data_shreds ); } } diff --git a/ledger/src/sigverify_shreds.rs b/ledger/src/sigverify_shreds.rs index aecad26aa6..87bffcb004 100644 --- a/ledger/src/sigverify_shreds.rs +++ b/ledger/src/sigverify_shreds.rs @@ -26,7 +26,7 @@ const SIGN_SHRED_GPU_MIN: usize = 256; lazy_static! 
{ static ref SIGVERIFY_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new() .num_threads(get_thread_count()) - .thread_name(|ix| format!("sigverify_shreds_{}", ix)) + .thread_name(|ix| format!("solSvrfyShred{:02}", ix)) .build() .unwrap(); } diff --git a/ledger/src/token_balances.rs b/ledger/src/token_balances.rs index 174ae8e78e..586bbd8526 100644 --- a/ledger/src/token_balances.rs +++ b/ledger/src/token_balances.rs @@ -6,9 +6,7 @@ use { solana_measure::measure::Measure, solana_metrics::datapoint_debug, solana_runtime::{ - account_overrides::AccountOverrides, - bank::{Bank, TransactionBalances}, - transaction_batch::TransactionBatch, + account_overrides::AccountOverrides, bank::Bank, transaction_batch::TransactionBatch, }, solana_sdk::{account::ReadableAccount, pubkey::Pubkey}, solana_transaction_status::{ @@ -18,7 +16,7 @@ use { extension::StateWithExtensions, state::{Account as TokenAccount, Mint}, }, - std::{collections::HashMap, sync::Arc}, + std::collections::HashMap, }; fn get_mint_decimals(bank: &Bank, mint: &Pubkey) -> Option { @@ -39,34 +37,6 @@ fn get_mint_decimals(bank: &Bank, mint: &Pubkey) -> Option { } } -pub fn collect_balances_with_cache( - batch: &TransactionBatch, - bank: &Arc, - account_overrides: Option<&AccountOverrides>, -) -> TransactionBalances { - let mut balances: TransactionBalances = vec![]; - for transaction in batch.sanitized_transactions() { - let mut transaction_balances: Vec = vec![]; - for account_key in transaction.message().account_keys().iter() { - let balance = { - if let Some(account_override) = - account_overrides.and_then(|overrides| overrides.get(account_key)) - { - account_override.lamports() - } else { - bank.get_account(account_key) - .map(|a| a.lamports()) - .unwrap_or(0) - } - }; - - transaction_balances.push(balance); - } - balances.push(transaction_balances); - } - balances -} - pub fn collect_token_balances( bank: &Bank, batch: &TransactionBatch, diff --git a/ledger/tests/shred.rs b/ledger/tests/shred.rs index 192e36ecf0..23a1cf83f9 100644 --- a/ledger/tests/shred.rs +++ b/ledger/tests/shred.rs @@ -3,7 +3,7 @@ use { solana_entry::entry::Entry, solana_ledger::shred::{ max_entries_per_n_shred, verify_test_data_shred, ProcessShredsStats, Shred, Shredder, - LEGACY_SHRED_DATA_CAPACITY, MAX_DATA_SHREDS_PER_FEC_BLOCK, + DATA_SHREDS_PER_FEC_BLOCK, LEGACY_SHRED_DATA_CAPACITY, }, solana_sdk::{ clock::Slot, @@ -26,7 +26,7 @@ fn test_multi_fec_block_coding() { let slot = 0x1234_5678_9abc_def0; let shredder = Shredder::new(slot, slot - 5, 0, 0).unwrap(); let num_fec_sets = 100; - let num_data_shreds = (MAX_DATA_SHREDS_PER_FEC_BLOCK * num_fec_sets) as usize; + let num_data_shreds = DATA_SHREDS_PER_FEC_BLOCK * num_fec_sets; let keypair0 = Keypair::new(); let keypair1 = Keypair::new(); let tx0 = system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default()); @@ -67,8 +67,8 @@ fn test_multi_fec_block_coding() { let mut all_shreds = vec![]; for i in 0..num_fec_sets { - let shred_start_index = (MAX_DATA_SHREDS_PER_FEC_BLOCK * i) as usize; - let end_index = shred_start_index + MAX_DATA_SHREDS_PER_FEC_BLOCK as usize - 1; + let shred_start_index = DATA_SHREDS_PER_FEC_BLOCK * i; + let end_index = shred_start_index + DATA_SHREDS_PER_FEC_BLOCK - 1; let fec_set_shreds = data_shreds[shred_start_index..=end_index] .iter() .cloned() @@ -99,11 +99,7 @@ fn test_multi_fec_block_coding() { shred_info.insert(i * 2, recovered_shred); } - all_shreds.extend( - shred_info - .into_iter() - .take(MAX_DATA_SHREDS_PER_FEC_BLOCK as usize), - ); + 
all_shreds.extend(shred_info.into_iter().take(DATA_SHREDS_PER_FEC_BLOCK)); } let result = Shredder::deshred(&all_shreds[..]).unwrap(); @@ -193,11 +189,11 @@ fn setup_different_sized_fec_blocks( let tx0 = system_transaction::transfer(&keypair0, &keypair1.pubkey(), 1, Hash::default()); let entry = Entry::new(&Hash::default(), 1, vec![tx0]); - // Make enough entries for `MAX_DATA_SHREDS_PER_FEC_BLOCK + 2` shreds so one - // fec set will have `MAX_DATA_SHREDS_PER_FEC_BLOCK` shreds and the next + // Make enough entries for `DATA_SHREDS_PER_FEC_BLOCK + 2` shreds so one + // fec set will have `DATA_SHREDS_PER_FEC_BLOCK` shreds and the next // will have 2 shreds. - assert!(MAX_DATA_SHREDS_PER_FEC_BLOCK > 2); - let num_shreds_per_iter = MAX_DATA_SHREDS_PER_FEC_BLOCK as usize + 2; + assert!(DATA_SHREDS_PER_FEC_BLOCK > 2); + let num_shreds_per_iter = DATA_SHREDS_PER_FEC_BLOCK + 2; let num_entries = max_entries_per_n_shred( &entry, num_shreds_per_iter as u64, diff --git a/local-cluster/Cargo.toml b/local-cluster/Cargo.toml index 9a7a72ad8a..fadf50500f 100644 --- a/local-cluster/Cargo.toml +++ b/local-cluster/Cargo.toml @@ -32,7 +32,7 @@ tempfile = "3.3.0" [dev-dependencies] assert_matches = "1.5.0" gag = "1.0.0" -serial_test = "0.8.0" +serial_test = "0.9.0" solana-download-utils = { path = "../download-utils", version = "=1.12.0" } solana-logger = { path = "../logger", version = "=1.12.0" } diff --git a/local-cluster/src/cluster_tests.rs b/local-cluster/src/cluster_tests.rs index e0ad36f300..7f8c4331a9 100644 --- a/local-cluster/src/cluster_tests.rs +++ b/local-cluster/src/cluster_tests.rs @@ -35,7 +35,7 @@ use { solana_vote_program::vote_transaction, std::{ collections::{HashMap, HashSet}, - net::SocketAddr, + net::{IpAddr, Ipv4Addr, SocketAddr}, path::Path, sync::{Arc, RwLock}, thread::sleep, @@ -43,6 +43,14 @@ use { }, }; +pub fn get_client_facing_addr(contact_info: &ContactInfo) -> (SocketAddr, SocketAddr) { + let (rpc, mut tpu) = contact_info.client_facing_addr(); + // QUIC certificate authentication requires the IP Address to match. ContactInfo might have + // 0.0.0.0 as the IP instead of 127.0.0.1. 
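+    // Illustrative note (hypothetical addresses, not from the original patch): a
+    // ContactInfo advertising tpu = 0.0.0.0:8003 is rewritten below to
+    // tpu = 127.0.0.1:8003, so the client's QUIC handshake can validate the
+    // server certificate against the address it actually dials.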
+ tpu.set_ip(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))); + (rpc, tpu) +} + /// Spend and verify from every node in the network pub fn spend_and_verify_all_nodes( entry_point_info: &ContactInfo, @@ -61,7 +69,7 @@ pub fn spend_and_verify_all_nodes( return; } let random_keypair = Keypair::new(); - let (rpc, tpu) = ingress_node.client_facing_addr(); + let (rpc, tpu) = get_client_facing_addr(ingress_node); let client = ThinClient::new(rpc, tpu, connection_cache.clone()); let bal = client .poll_get_balance_with_commitment( @@ -83,7 +91,7 @@ pub fn spend_and_verify_all_nodes( if ignore_nodes.contains(&validator.id) { continue; } - let (rpc, tpu) = validator.client_facing_addr(); + let (rpc, tpu) = get_client_facing_addr(validator); let client = ThinClient::new(rpc, tpu, connection_cache.clone()); client.poll_for_signature_confirmation(&sig, confs).unwrap(); } @@ -95,7 +103,7 @@ pub fn verify_balances( node: &ContactInfo, connection_cache: Arc<ConnectionCache>, ) { - let (rpc, tpu) = node.client_facing_addr(); + let (rpc, tpu) = get_client_facing_addr(node); let client = ThinClient::new(rpc, tpu, connection_cache); for (pk, b) in expected_balances { let bal = client @@ -112,7 +120,7 @@ pub fn send_many_transactions( max_tokens_per_transfer: u64, num_txs: u64, ) -> HashMap<Pubkey, u64> { - let (rpc, tpu) = node.client_facing_addr(); + let (rpc, tpu) = get_client_facing_addr(node); let client = ThinClient::new(rpc, tpu, connection_cache.clone()); let mut expected_balances = HashMap::new(); for _ in 0..num_txs { @@ -205,7 +213,7 @@ pub fn kill_entry_and_spend_and_verify_rest( let cluster_nodes = discover_cluster(&entry_point_info.gossip, nodes, socket_addr_space).unwrap(); assert!(cluster_nodes.len() >= nodes); - let (rpc, tpu) = entry_point_info.client_facing_addr(); + let (rpc, tpu) = get_client_facing_addr(entry_point_info); let client = ThinClient::new(rpc, tpu, connection_cache.clone()); // sleep long enough to make sure we are in epoch 3 @@ -235,7 +243,7 @@ pub fn kill_entry_and_spend_and_verify_rest( continue; } - let (rpc, tpu) = ingress_node.client_facing_addr(); + let (rpc, tpu) = get_client_facing_addr(ingress_node); let client = ThinClient::new(rpc, tpu, connection_cache.clone()); let balance = client .poll_get_balance_with_commitment( @@ -318,7 +326,7 @@ pub fn check_for_new_roots( assert!(loop_start.elapsed() < loop_timeout); for (i, ingress_node) in contact_infos.iter().enumerate() { - let (rpc, tpu) = ingress_node.client_facing_addr(); + let (rpc, tpu) = get_client_facing_addr(ingress_node); let client = ThinClient::new(rpc, tpu, connection_cache.clone()); let root_slot = client .get_slot_with_commitment(CommitmentConfig::finalized()) @@ -351,7 +359,7 @@ pub fn check_no_new_roots( .iter() .enumerate() .map(|(i, ingress_node)| { - let (rpc, tpu) = ingress_node.client_facing_addr(); + let (rpc, tpu) = get_client_facing_addr(ingress_node); let client = ThinClient::new(rpc, tpu, connection_cache.clone()); let initial_root = client .get_slot() @@ -370,7 +378,7 @@ pub fn check_no_new_roots( let mut reached_end_slot = false; loop { for contact_info in contact_infos { - let (rpc, tpu) = contact_info.client_facing_addr(); + let (rpc, tpu) = get_client_facing_addr(contact_info); let client = ThinClient::new(rpc, tpu, connection_cache.clone()); current_slot = client .get_slot_with_commitment(CommitmentConfig::processed()) @@ -393,7 +401,7 @@ pub fn check_no_new_roots( } for (i, ingress_node) in contact_infos.iter().enumerate() { - let (rpc, tpu) = ingress_node.client_facing_addr(); + let (rpc, tpu) =
get_client_facing_addr(ingress_node); let client = ThinClient::new(rpc, tpu, connection_cache.clone()); assert_eq!( client @@ -415,7 +423,7 @@ fn poll_all_nodes_for_signature( if validator.id == entry_point_info.id { continue; } - let (rpc, tpu) = validator.client_facing_addr(); + let (rpc, tpu) = get_client_facing_addr(validator); let client = ThinClient::new(rpc, tpu, connection_cache.clone()); client.poll_for_signature_confirmation(sig, confs)?; } diff --git a/local-cluster/src/local_cluster.rs b/local-cluster/src/local_cluster.rs index aaf54bb6bb..0f1ca19f87 100644 --- a/local-cluster/src/local_cluster.rs +++ b/local-cluster/src/local_cluster.rs @@ -49,7 +49,7 @@ use { solana_streamer::socket::SocketAddrSpace, solana_vote_program::{ vote_instruction, - vote_state::{VoteInit, VoteState}, + vote_state::{self, VoteInit}, }, std::{ collections::HashMap, @@ -280,7 +280,8 @@ impl LocalCluster { socket_addr_space, DEFAULT_TPU_USE_QUIC, DEFAULT_TPU_CONNECTION_POOL_SIZE, - ); + ) + .expect("assume successful validator start"); let mut validators = HashMap::new(); let leader_info = ValidatorInfo { @@ -318,7 +319,7 @@ impl LocalCluster { }) .collect(); for (stake, validator_config, (key, _)) in izip!( - (&config.node_stakes[1..]).iter(), + config.node_stakes[1..].iter(), config.validator_configs[1..].iter(), validator_keys[1..].iter(), ) { @@ -424,7 +425,7 @@ impl LocalCluster { mut voting_keypair: Option<Arc<Keypair>>, socket_addr_space: SocketAddrSpace, ) -> Pubkey { - let (rpc, tpu) = self.entry_point_info.client_facing_addr(); + let (rpc, tpu) = cluster_tests::get_client_facing_addr(&self.entry_point_info); let client = ThinClient::new(rpc, tpu, self.connection_cache.clone()); // Must have enough tokens to fund vote account and set delegate @@ -478,7 +479,8 @@ impl LocalCluster { socket_addr_space, DEFAULT_TPU_USE_QUIC, DEFAULT_TPU_CONNECTION_POOL_SIZE, - ); + ) + .expect("assume successful validator start"); let validator_pubkey = validator_keypair.pubkey(); let validator_info = ClusterValidatorInfo::new( @@ -510,7 +512,7 @@ impl LocalCluster { } pub fn transfer(&self, source_keypair: &Keypair, dest_pubkey: &Pubkey, lamports: u64) -> u64 { - let (rpc, tpu) = self.entry_point_info.client_facing_addr(); + let (rpc, tpu) = cluster_tests::get_client_facing_addr(&self.entry_point_info); let client = ThinClient::new(rpc, tpu, self.connection_cache.clone()); Self::transfer_with_client(&client, source_keypair, dest_pubkey, lamports) } @@ -706,7 +708,7 @@ impl LocalCluster { (Ok(Some(stake_account)), Ok(Some(vote_account))) => { match ( stake_state::stake_from(&stake_account), - VoteState::from(&vote_account), + vote_state::from(&vote_account), ) { (Some(stake_state), Some(vote_state)) => { if stake_state.delegation.voter_pubkey != vote_account_pubkey @@ -757,7 +759,7 @@ impl Cluster for LocalCluster { fn get_validator_client(&self, pubkey: &Pubkey) -> Option<ThinClient> { self.validators.get(pubkey).map(|f| { - let (rpc, tpu) = f.info.contact_info.client_facing_addr(); + let (rpc, tpu) = cluster_tests::get_client_facing_addr(&f.info.contact_info); ThinClient::new(rpc, tpu, self.connection_cache.clone()) }) } @@ -839,7 +841,8 @@ impl Cluster for LocalCluster { socket_addr_space, DEFAULT_TPU_USE_QUIC, DEFAULT_TPU_CONNECTION_POOL_SIZE, - ); + ) + .expect("assume successful validator start"); cluster_validator_info.validator = Some(restarted_node); cluster_validator_info } diff --git a/local-cluster/src/validator_configs.rs b/local-cluster/src/validator_configs.rs index 68b7ae25ac..119eaa8ee8 100644 ---
a/local-cluster/src/validator_configs.rs +++ b/local-cluster/src/validator_configs.rs @@ -56,6 +56,7 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig { accounts_db_test_hash_calculation: config.accounts_db_test_hash_calculation, accounts_db_skip_shrink: config.accounts_db_skip_shrink, tpu_coalesce_ms: config.tpu_coalesce_ms, + staked_nodes_overrides: config.staked_nodes_overrides.clone(), validator_exit: Arc::new(RwLock::new(Exit::default())), poh_hashes_per_batch: config.poh_hashes_per_batch, no_wait_for_vote_to_start_leader: config.no_wait_for_vote_to_start_leader, @@ -64,7 +65,6 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig { wait_to_vote_slot: config.wait_to_vote_slot, ledger_column_options: config.ledger_column_options.clone(), runtime_config: config.runtime_config.clone(), - enable_quic_servers: config.enable_quic_servers, maybe_relayer_config: config.maybe_relayer_config.clone(), shred_receiver_address: config.shred_receiver_address, tip_manager_config: config.tip_manager_config.clone(), diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index d3956d7adc..6fad4c541c 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -173,6 +173,7 @@ fn test_spend_and_verify_all_nodes_3() { #[test] #[serial] +#[ignore] fn test_local_cluster_signature_subscribe() { solana_logger::setup_with_default(RUST_LOG_FILTER); let num_nodes = 2; @@ -191,7 +192,7 @@ fn test_local_cluster_signature_subscribe() { .unwrap(); let non_bootstrap_info = cluster.get_contact_info(&non_bootstrap_id).unwrap(); - let (rpc, tpu) = non_bootstrap_info.client_facing_addr(); + let (rpc, tpu) = cluster_tests::get_client_facing_addr(non_bootstrap_info); let tx_client = ThinClient::new(rpc, tpu, cluster.connection_cache.clone()); let (blockhash, _) = tx_client @@ -311,6 +312,7 @@ fn test_two_unbalanced_stakes() { #[test] #[serial] +#[ignore] fn test_forwarding() { solana_logger::setup_with_default(RUST_LOG_FILTER); // Set up a cluster where one node is never the leader, so all txs sent to this node @@ -418,7 +420,7 @@ fn test_mainnet_beta_cluster_type() { .unwrap(); assert_eq!(cluster_nodes.len(), 1); - let (rpc, tpu) = cluster.entry_point_info.client_facing_addr(); + let (rpc, tpu) = cluster_tests::get_client_facing_addr(&cluster.entry_point_info); let client = ThinClient::new(rpc, tpu, cluster.connection_cache.clone()); // Programs that are available at epoch 0 @@ -1228,6 +1230,7 @@ fn test_incremental_snapshot_download_with_crossing_full_snapshot_interval_at_st #[allow(unused_attributes)] #[test] #[serial] +#[ignore] fn test_snapshot_restart_tower() { solana_logger::setup_with_default(RUST_LOG_FILTER); // First set up the cluster with 2 nodes @@ -2520,6 +2523,7 @@ fn run_test_load_program_accounts_partition(scan_commitment: CommitmentConfig) { #[test] #[serial] +#[ignore] fn test_votes_land_in_fork_during_long_partition() { let total_stake = 3 * DEFAULT_NODE_STAKE; // Make `lighter_stake` insufficient for switching threshold diff --git a/local-cluster/tests/local_cluster_slow_1.rs b/local-cluster/tests/local_cluster_slow_1.rs index 29a5f314c4..2faf69f1e5 100644 --- a/local-cluster/tests/local_cluster_slow_1.rs +++ b/local-cluster/tests/local_cluster_slow_1.rs @@ -50,6 +50,7 @@ mod common; #[test] #[serial] +#[ignore] // Steps in this test: // We want to create a situation like: /* @@ -588,6 +589,7 @@ fn test_duplicate_shreds_broadcast_leader() { #[test] #[serial] +#[ignore] fn 
test_switch_threshold_uses_gossip_votes() { solana_logger::setup_with_default(RUST_LOG_FILTER); let total_stake = 100 * DEFAULT_NODE_STAKE; diff --git a/local-cluster/tests/local_cluster_slow_2.rs b/local-cluster/tests/local_cluster_slow_2.rs index 6488ddea1e..d6d315ed0d 100644 --- a/local-cluster/tests/local_cluster_slow_2.rs +++ b/local-cluster/tests/local_cluster_slow_2.rs @@ -201,6 +201,7 @@ fn test_leader_failure_4() { #[test] #[serial] +#[ignore] fn test_ledger_cleanup_service() { solana_logger::setup_with_default(RUST_LOG_FILTER); error!("test_ledger_cleanup_service"); diff --git a/log-analyzer/Cargo.toml b/log-analyzer/Cargo.toml index 0d1c26e5fc..e6033a4535 100644 --- a/log-analyzer/Cargo.toml +++ b/log-analyzer/Cargo.toml @@ -12,8 +12,8 @@ publish = false [dependencies] byte-unit = "4.0.14" clap = { version = "3.1.5", features = ["cargo"] } -serde = "1.0.138" -serde_json = "1.0.81" +serde = "1.0.143" +serde_json = "1.0.83" solana-logger = { path = "../logger", version = "=1.12.0" } solana-version = { path = "../version", version = "=1.12.0" } diff --git a/metrics/Cargo.toml b/metrics/Cargo.toml index 8e815408d2..ac14099d8c 100644 --- a/metrics/Cargo.toml +++ b/metrics/Cargo.toml @@ -20,7 +20,7 @@ solana-sdk = { path = "../sdk", version = "=1.12.0" } [dev-dependencies] env_logger = "0.9.0" rand = "0.7.0" -serial_test = "0.8.0" +serial_test = "0.9.0" [lib] name = "solana_metrics" diff --git a/metrics/src/metrics.rs b/metrics/src/metrics.rs index 99f1183648..4b9b183de9 100644 --- a/metrics/src/metrics.rs +++ b/metrics/src/metrics.rs @@ -173,7 +173,11 @@ impl MetricsAgent { max_points_per_sec: usize, ) -> Self { let (sender, receiver) = unbounded::<MetricsCommand>(); - thread::spawn(move || Self::run(&receiver, &writer, write_frequency, max_points_per_sec)); + + thread::Builder::new() + .name("solMetricsAgent".into()) + .spawn(move || Self::run(&receiver, &writer, write_frequency, max_points_per_sec)) + .unwrap(); Self { sender } } diff --git a/net-shaper/Cargo.toml b/net-shaper/Cargo.toml index 7b2b7d5453..b41c716a8b 100644 --- a/net-shaper/Cargo.toml +++ b/net-shaper/Cargo.toml @@ -12,8 +12,8 @@ publish = false [dependencies] clap = { version = "3.1.5", features = ["cargo"] } rand = "0.7.0" -serde = { version = "1.0.138", features = ["derive"] } -serde_json = "1.0.81" +serde = { version = "1.0.143", features = ["derive"] } +serde_json = "1.0.83" solana-logger = { path = "../logger", version = "=1.12.0" } [[bin]] diff --git a/net-utils/Cargo.toml b/net-utils/Cargo.toml index 23ac5bae30..a26760134e 100644 --- a/net-utils/Cargo.toml +++ b/net-utils/Cargo.toml @@ -14,9 +14,9 @@ bincode = "1.3.3" clap = { version = "3.1.5", features = ["cargo"] } crossbeam-channel = "0.5" log = "0.4.17" -nix = "0.24.2" +nix = "0.25.0" rand = "0.7.0" -serde = "1.0.138" +serde = "1.0.143" serde_derive = "1.0.103" socket2 = "0.4.4" solana-logger = { path = "../logger", version = "=1.12.0" } diff --git a/net-utils/src/lib.rs b/net-utils/src/lib.rs index 9f60bb9b57..f690a05f6b 100644 --- a/net-utils/src/lib.rs +++ b/net-utils/src/lib.rs @@ -144,15 +144,18 @@ fn do_verify_reachable_ports( for (port, tcp_listener) in tcp_listeners { let (sender, receiver) = unbounded(); let listening_addr = tcp_listener.local_addr().unwrap(); - let thread_handle = std::thread::spawn(move || { - debug!("Waiting for incoming connection on tcp/{}", port); - match tcp_listener.incoming().next() { - Some(_) => sender - .send(()) - .unwrap_or_else(|err| warn!("send failure: {}", err)), - None => warn!("tcp incoming failed"), - } - }); + let
thread_handle = std::thread::Builder::new() + .name(format!("solVrfyTcp{:05}", port)) + .spawn(move || { + debug!("Waiting for incoming connection on tcp/{}", port); + match tcp_listener.incoming().next() { + Some(_) => sender + .send(()) + .unwrap_or_else(|err| warn!("send failure: {}", err)), + None => warn!("tcp incoming failed"), + } + }) + .unwrap(); match receiver.recv_timeout(timeout) { Ok(_) => { info!("tcp/{} is reachable", port); @@ -222,33 +225,37 @@ fn do_verify_reachable_ports( let port = udp_socket.local_addr().unwrap().port(); let udp_socket = udp_socket.try_clone().expect("Unable to clone udp socket"); let reachable_ports = reachable_ports.clone(); - std::thread::spawn(move || { - let start = Instant::now(); - - let original_read_timeout = udp_socket.read_timeout().unwrap(); - udp_socket - .set_read_timeout(Some(Duration::from_millis(250))) - .unwrap(); - loop { - if reachable_ports.read().unwrap().contains(&port) - || Instant::now().duration_since(start) >= timeout - { - break; - } - let recv_result = udp_socket.recv(&mut [0; 1]); - debug!( - "Waited for incoming datagram on udp/{}: {:?}", - port, recv_result - ); - - if recv_result.is_ok() { - reachable_ports.write().unwrap().insert(port); - break; + std::thread::Builder::new() + .name(format!("solVrfyUdp{:05}", port)) + .spawn(move || { + let start = Instant::now(); + + let original_read_timeout = udp_socket.read_timeout().unwrap(); + udp_socket + .set_read_timeout(Some(Duration::from_millis(250))) + .unwrap(); + loop { + if reachable_ports.read().unwrap().contains(&port) + || Instant::now().duration_since(start) >= timeout + { + break; + } + + let recv_result = udp_socket.recv(&mut [0; 1]); + debug!( + "Waited for incoming datagram on udp/{}: {:?}", + port, recv_result + ); + + if recv_result.is_ok() { + reachable_ports.write().unwrap().insert(port); + break; + } } - } - udp_socket.set_read_timeout(original_read_timeout).unwrap(); - }) + udp_socket.set_read_timeout(original_read_timeout).unwrap(); + }) + .unwrap() }) .collect(); @@ -519,31 +526,34 @@ pub fn bind_common( .and_then(|_| TcpListener::bind(&addr).map(|listener| (sock.into(), listener))) } -pub fn bind_two_consecutive_in_range( +pub fn bind_two_in_range_with_offset( ip_addr: IpAddr, range: PortRange, + offset: u16, ) -> io::Result<((u16, UdpSocket), (u16, UdpSocket))> { - let mut first: Option<UdpSocket> = None; + if range.1.saturating_sub(range.0) < offset { + return Err(io::Error::new( + io::ErrorKind::Other, + "range too small to find two ports with the correct offset".to_string(), + )); + } for port in range.0..range.1 { - if let Ok(bind) = bind_to(ip_addr, port, false) { - match first { - Some(first_bind) => { + if let Ok(first_bind) = bind_to(ip_addr, port, false) { + if range.1.saturating_sub(port) >= offset { + if let Ok(second_bind) = bind_to(ip_addr, port + offset, false) { return Ok(( (first_bind.local_addr().unwrap().port(), first_bind), - (bind.local_addr().unwrap().port(), bind), + (second_bind.local_addr().unwrap().port(), second_bind), )); } - None => { - first = Some(bind); - } + } else { + break; } - } else { - first = None; } } Err(io::Error::new( io::ErrorKind::Other, - "couldn't find two consecutive ports in range".to_string(), + "couldn't find two ports with the correct offset in range".to_string(), )) } @@ -818,12 +828,21 @@ mod tests { } #[test] - fn test_bind_two_consecutive_in_range() { + fn test_bind_two_in_range_with_offset() { solana_logger::setup(); let ip_addr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)); - if let Ok(((port1, _), (port2, _))) =
bind_two_consecutive_in_range(ip_addr, (1024, 65535)) + let offset = 6; + if let Ok(((port1, _), (port2, _))) = + bind_two_in_range_with_offset(ip_addr, (1024, 65535), offset) + { + assert!(port2 == port1 + offset); + } + let offset = 42; + if let Ok(((port1, _), (port2, _))) = + bind_two_in_range_with_offset(ip_addr, (1024, 65535), offset) { - assert!(port2 == port1 + 1); + assert!(port2 == port1 + offset); } + assert!(bind_two_in_range_with_offset(ip_addr, (1024, 1044), offset).is_err()); } } diff --git a/perf/Cargo.toml b/perf/Cargo.toml index 31f3cfb2cc..3380f2ac89 100644 --- a/perf/Cargo.toml +++ b/perf/Cargo.toml @@ -21,7 +21,7 @@ lazy_static = "1.4.0" log = "0.4.17" rand = "0.7.0" rayon = "1.5.3" -serde = "1.0.138" +serde = "1.0.143" solana-metrics = { path = "../metrics", version = "=1.12.0" } solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.12.0" } solana-sdk = { path = "../sdk", version = "=1.12.0" } @@ -29,8 +29,8 @@ solana-vote-program = { path = "../programs/vote", version = "=1.12.0" } [target."cfg(target_os = \"linux\")".dependencies] caps = "0.5.3" -libc = "0.2.126" -nix = "0.24.2" +libc = "0.2.131" +nix = "0.25.0" [lib] name = "solana_perf" diff --git a/perf/src/sigverify.rs b/perf/src/sigverify.rs index b2f02fa152..0b0fdb0857 100644 --- a/perf/src/sigverify.rs +++ b/perf/src/sigverify.rs @@ -45,7 +45,7 @@ pub const VERIFY_MIN_PACKETS_PER_THREAD: usize = 128; lazy_static! { static ref PAR_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new() .num_threads(get_thread_count()) - .thread_name(|ix| format!("sigverify_{}", ix)) + .thread_name(|ix| format!("solSigVerify{:02}", ix)) .build() .unwrap(); } @@ -830,12 +830,7 @@ mod tests { pub fn memfind<A: PartialEq>(a: &[A], b: &[A]) -> Option<usize> { assert!(a.len() >= b.len()); let end = a.len() - b.len() + 1; - for i in 0..end { - if a[i..i + b.len()] == b[..]
{ - return Some(i); - } - } - None + (0..end).find(|&i| a[i..i + b.len()] == b[..]) } #[test] diff --git a/poh/src/poh_recorder.rs b/poh/src/poh_recorder.rs index 184e6f2341..f6a58332f4 100644 --- a/poh/src/poh_recorder.rs +++ b/poh/src/poh_recorder.rs @@ -508,7 +508,7 @@ impl PohRecorder { start: Arc::new(Instant::now()), min_tick_height: bank.tick_height(), max_tick_height: bank.max_tick_height(), - transaction_index: track_transaction_indexes.then(|| 0), + transaction_index: track_transaction_indexes.then_some(0), }; trace!("new working bank"); assert_eq!(working_bank.bank.ticks_per_slot(), self.ticks_per_slot()); diff --git a/poh/src/poh_service.rs b/poh/src/poh_service.rs index 5d102e408b..d6d809f13c 100644 --- a/poh/src/poh_service.rs +++ b/poh/src/poh_service.rs @@ -106,7 +106,7 @@ impl PohService { let poh_exit_ = poh_exit.clone(); let poh_config = poh_config.clone(); let tick_producer = Builder::new() - .name("solana-poh-service-tick_producer".to_string()) + .name("solPohTickProd".to_string()) .spawn(move || { solana_sys_tuner::request_realtime_poh(); if poh_config.hashes_per_tick.is_none() { @@ -450,7 +450,7 @@ mod tests { let exit = exit.clone(); Builder::new() - .name("solana-poh-service-entry_producer".to_string()) + .name("solPohEntryProd".to_string()) .spawn(move || { let now = Instant::now(); let mut total_us = 0; diff --git a/program-runtime/src/compute_budget.rs b/program-runtime/src/compute_budget.rs index 31432f387f..60e21b2e99 100644 --- a/program-runtime/src/compute_budget.rs +++ b/program-runtime/src/compute_budget.rs @@ -127,7 +127,6 @@ impl ComputeBudget { &mut self, instructions: impl Iterator<Item = (&'a Pubkey, &'a CompiledInstruction)>, default_units_per_instruction: bool, - support_set_compute_unit_price_ix: bool, ) -> Result<PrioritizationFeeDetails, TransactionError> { let mut num_non_compute_budget_instructions: usize = 0; let mut updated_compute_unit_limit = None; @@ -136,70 +135,47 @@ for (i, (program_id, instruction)) in instructions.enumerate() { if compute_budget::check_id(program_id) { - if support_set_compute_unit_price_ix { - let invalid_instruction_data_error = TransactionError::InstructionError( - i as u8, - InstructionError::InvalidInstructionData, - ); - let duplicate_instruction_error = - TransactionError::DuplicateInstruction(i as u8); + let invalid_instruction_data_error = TransactionError::InstructionError( + i as u8, + InstructionError::InvalidInstructionData, + ); + let duplicate_instruction_error = TransactionError::DuplicateInstruction(i as u8); - match try_from_slice_unchecked(&instruction.data) { - Ok(ComputeBudgetInstruction::RequestUnitsDeprecated { - units: compute_unit_limit, - additional_fee, - }) => { - if updated_compute_unit_limit.is_some() { - return Err(duplicate_instruction_error); - } - if prioritization_fee.is_some() { - return Err(duplicate_instruction_error); - } - updated_compute_unit_limit = Some(compute_unit_limit); - prioritization_fee = - Some(PrioritizationFeeType::Deprecated(additional_fee as u64)); - } - Ok(ComputeBudgetInstruction::RequestHeapFrame(bytes)) => { - if requested_heap_size.is_some() { - return Err(duplicate_instruction_error); - } - requested_heap_size = Some((bytes, i as u8)); + match try_from_slice_unchecked(&instruction.data) { + Ok(ComputeBudgetInstruction::RequestUnitsDeprecated { + units: compute_unit_limit, + additional_fee, + }) => { + if updated_compute_unit_limit.is_some() { + return Err(duplicate_instruction_error); } - Ok(ComputeBudgetInstruction::SetComputeUnitLimit(compute_unit_limit)) => { - if updated_compute_unit_limit.is_some() { - return
Err(duplicate_instruction_error); - } - updated_compute_unit_limit = Some(compute_unit_limit); + if prioritization_fee.is_some() { + return Err(duplicate_instruction_error); } - Ok(ComputeBudgetInstruction::SetComputeUnitPrice(micro_lamports)) => { - if prioritization_fee.is_some() { - return Err(duplicate_instruction_error); - } - prioritization_fee = - Some(PrioritizationFeeType::ComputeUnitPrice(micro_lamports)); - } - _ => return Err(invalid_instruction_data_error), + updated_compute_unit_limit = Some(compute_unit_limit); + prioritization_fee = + Some(PrioritizationFeeType::Deprecated(additional_fee as u64)); } - } else if i < 3 { - match try_from_slice_unchecked(&instruction.data) { - Ok(ComputeBudgetInstruction::RequestUnitsDeprecated { - units: compute_unit_limit, - additional_fee, - }) => { - updated_compute_unit_limit = Some(compute_unit_limit); - prioritization_fee = - Some(PrioritizationFeeType::Deprecated(additional_fee as u64)); + Ok(ComputeBudgetInstruction::RequestHeapFrame(bytes)) => { + if requested_heap_size.is_some() { + return Err(duplicate_instruction_error); } - Ok(ComputeBudgetInstruction::RequestHeapFrame(bytes)) => { - requested_heap_size = Some((bytes, 0)); + requested_heap_size = Some((bytes, i as u8)); + } + Ok(ComputeBudgetInstruction::SetComputeUnitLimit(compute_unit_limit)) => { + if updated_compute_unit_limit.is_some() { + return Err(duplicate_instruction_error); } - _ => { - return Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData, - )) + updated_compute_unit_limit = Some(compute_unit_limit); + } + Ok(ComputeBudgetInstruction::SetComputeUnitPrice(micro_lamports)) => { + if prioritization_fee.is_some() { + return Err(duplicate_instruction_error); } + prioritization_fee = + Some(PrioritizationFeeType::ComputeUnitPrice(micro_lamports)); } + _ => return Err(invalid_instruction_data_error), } } else { // only include non-request instructions in default max calc @@ -255,19 +231,8 @@ mod tests { }, }; - fn request_units_deprecated(units: u32, additional_fee: u32) -> Instruction { - Instruction::new_with_borsh( - compute_budget::id(), - &ComputeBudgetInstruction::RequestUnitsDeprecated { - units, - additional_fee, - }, - vec![], - ) - } - macro_rules! 
test { - ( $instructions: expr, $expected_result: expr, $expected_budget: expr, $type_change: expr ) => { + ( $instructions: expr, $expected_result: expr, $expected_budget: expr ) => { let payer_keypair = Keypair::new(); let tx = SanitizedTransaction::from_transaction_for_tests(Transaction::new( &[&payer_keypair], @@ -275,16 +240,13 @@ mod tests { Hash::default(), )); let mut compute_budget = ComputeBudget::default(); - let result = compute_budget.process_instructions( - tx.message().program_instructions_iter(), - true, - $type_change, - ); + let result = + compute_budget.process_instructions(tx.message().program_instructions_iter(), true); assert_eq!($expected_result, result); assert_eq!(compute_budget, $expected_budget); }; ( $instructions: expr, $expected_result: expr, $expected_budget: expr) => { - test!($instructions, $expected_result, $expected_budget, true); + test!($instructions, $expected_result, $expected_budget); }; } @@ -346,35 +308,6 @@ mod tests { } ); - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::set_compute_unit_limit(1), // ignored - ], - Ok(PrioritizationFeeDetails::default()), - ComputeBudget { - compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64 * 3, - ..ComputeBudget::default() - }, - false - ); - - // Prioritization fee - test!( - &[request_units_deprecated(1, 42)], - Ok(PrioritizationFeeDetails::new( - PrioritizationFeeType::Deprecated(42), - 1, - )), - ComputeBudget { - compute_unit_limit: 1, - ..ComputeBudget::default() - }, - false - ); - test!( &[ ComputeBudgetInstruction::set_compute_unit_limit(1), @@ -390,19 +323,6 @@ mod tests { } ); - test!( - &[request_units_deprecated(1, u32::MAX)], - Ok(PrioritizationFeeDetails::new( - PrioritizationFeeType::Deprecated(u32::MAX as u64), - 1 - )), - ComputeBudget { - compute_unit_limit: 1, - ..ComputeBudget::default() - }, - false - ); - // HeapFrame test!( &[], @@ -520,21 +440,6 @@ mod tests { } ); - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - ComputeBudgetInstruction::request_heap_frame(MAX_HEAP_FRAME_BYTES), - ComputeBudgetInstruction::set_compute_unit_limit(MAX_COMPUTE_UNIT_LIMIT), - ComputeBudgetInstruction::set_compute_unit_price(u64::MAX), - ], - Err(TransactionError::InstructionError( - 0, - InstructionError::InvalidInstructionData, - )), - ComputeBudget::default(), - false - ); - test!( &[ Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), @@ -553,24 +458,6 @@ mod tests { } ); - test!( - &[ - Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]), - request_units_deprecated(MAX_COMPUTE_UNIT_LIMIT, u32::MAX), - ComputeBudgetInstruction::request_heap_frame(MIN_HEAP_FRAME_BYTES as u32), - ], - Ok(PrioritizationFeeDetails::new( - PrioritizationFeeType::Deprecated(u32::MAX as u64), - MAX_COMPUTE_UNIT_LIMIT as u64, - )), - ComputeBudget { - compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT as u64, - heap_size: Some(MIN_HEAP_FRAME_BYTES as usize), - ..ComputeBudget::default() - }, - false - ); - // Duplicates test!( &[ diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs index a827cc51da..ca3582c7ae 100644 --- a/program-runtime/src/invoke_context.rs +++ b/program-runtime/src/invoke_context.rs @@ -1,5 +1,3 @@ -#[allow(deprecated)] -use solana_sdk::keyed_account::{create_keyed_accounts_unified, KeyedAccount}; 
use { crate::{ accounts_data_meter::AccountsDataMeter, @@ -175,38 +173,6 @@ impl fmt::Display for AllocErr { } } -#[deprecated( - since = "1.11.0", - note = "Please use InstructionContext instead of StackFrame" -)] -#[allow(deprecated)] -pub struct StackFrame<'a> { - pub number_of_program_accounts: usize, - pub keyed_accounts: Vec<KeyedAccount<'a>>, - pub keyed_accounts_range: std::ops::Range<usize>, -} - -#[allow(deprecated)] -impl<'a> StackFrame<'a> { - pub fn new(number_of_program_accounts: usize, keyed_accounts: Vec<KeyedAccount<'a>>) -> Self { - let keyed_accounts_range = std::ops::Range { - start: 0, - end: keyed_accounts.len(), - }; - Self { - number_of_program_accounts, - keyed_accounts, - keyed_accounts_range, - } - } - - pub fn program_id(&self) -> Option<&Pubkey> { - self.keyed_accounts - .get(self.number_of_program_accounts.saturating_sub(1)) - .map(|keyed_account| keyed_account.unsigned_key()) - } -} - struct SyscallContext { check_aligned: bool, check_size: bool, @@ -216,8 +182,6 @@ struct SyscallContext { pub struct InvokeContext<'a> { pub transaction_context: &'a mut TransactionContext, - #[allow(deprecated)] - invoke_stack: Vec<StackFrame<'a>>, rent: Rent, pre_accounts: Vec<PreAccount>, builtin_programs: &'a [BuiltinProgram], @@ -252,7 +216,6 @@ impl<'a> InvokeContext<'a> { ) -> Self { Self { transaction_context, - invoke_stack: Vec::with_capacity(compute_budget.max_invoke_depth), rent, pre_accounts: Vec::new(), builtin_programs, @@ -362,7 +325,7 @@ impl<'a> InvokeContext<'a> { .get_instruction_context_stack_height()) .any(|level| { self.transaction_context - .get_instruction_context_at(level) + .get_instruction_context_at_nesting_level(level) .and_then(|instruction_context| { instruction_context .try_borrow_last_program_account(self.transaction_context) @@ -384,40 +347,6 @@ impl<'a> InvokeContext<'a> { } } - // Create the KeyedAccounts that will be passed to the program - #[allow(deprecated)] - let keyed_accounts = program_indices - .iter() - .map(|account_index| { - Ok(( - false, - false, - self.transaction_context - .get_key_of_account_at_index(*account_index)?, - self.transaction_context - .get_account_at_index(*account_index)?, - )) - }) - .chain(instruction_accounts.iter().map(|instruction_account| { - Ok(( - instruction_account.is_signer, - instruction_account.is_writable, - self.transaction_context - .get_key_of_account_at_index(instruction_account.index_in_transaction)?, - self.transaction_context - .get_account_at_index(instruction_account.index_in_transaction)?, - )) - })) - .collect::<Result<Vec<_>, InstructionError>>()?; - - // Unsafe will be removed together with the keyed_accounts - #[allow(deprecated)] - self.invoke_stack.push(StackFrame::new( - program_indices.len(), - create_keyed_accounts_unified(unsafe { - std::mem::transmute(keyed_accounts.as_slice()) - }), - )); self.syscall_context.push(None); self.transaction_context .push(program_indices, instruction_accounts, instruction_data) @@ -426,7 +355,6 @@ impl<'a> InvokeContext<'a> { /// Pop a stack frame from the invocation stack pub fn pop(&mut self) -> Result<(), InstructionError> { self.syscall_context.pop(); - self.invoke_stack.pop(); self.transaction_context.pop() } @@ -915,19 +843,6 @@ impl<'a> InvokeContext<'a> { Err(InstructionError::UnsupportedProgramId) } - #[deprecated( - since = "1.11.0", - note = "Please use BorrowedAccount instead of KeyedAccount" - )] - #[allow(deprecated)] - /// Get the list of keyed accounts including the chain of program accounts - pub fn get_keyed_accounts(&self) -> Result<&[KeyedAccount], InstructionError> { - self.invoke_stack - .last() -
.and_then(|frame| frame.keyed_accounts.get(frame.keyed_accounts_range.clone())) - .ok_or(InstructionError::CallDepth) - } - /// Get this invocation's LogCollector pub fn get_log_collector(&self) -> Option<Rc<RefCell<LogCollector>>> { self.log_collector.clone() diff --git a/program-runtime/src/pre_account.rs b/program-runtime/src/pre_account.rs index e01c35fbc0..2ca91ba090 100644 --- a/program-runtime/src/pre_account.rs +++ b/program-runtime/src/pre_account.rs @@ -120,7 +120,6 @@ impl PreAccount { if outermost_call { timings.total_account_count = timings.total_account_count.saturating_add(1); - timings.total_data_size = timings.total_data_size.saturating_add(post.data().len()); if owner_changed || lamports_changed || data_len_changed @@ -129,8 +128,6 @@ impl PreAccount { || self.changed { timings.changed_account_count = timings.changed_account_count.saturating_add(1); - timings.data_size_changed = - timings.data_size_changed.saturating_add(post.data().len()); } } diff --git a/program-runtime/src/timings.rs b/program-runtime/src/timings.rs index 41a850f31d..8e63dfabf5 100644 --- a/program-runtime/src/timings.rs +++ b/program-runtime/src/timings.rs @@ -190,16 +190,6 @@ eager_macro_rules! { $eager_1 $self.details.total_account_count, i64 ), - ( - "execute_details_total_data_size", - $self.details.total_data_size, - i64 - ), - ( - "execute_details_data_size_changed", - $self.details.data_size_changed, - i64 - ), ( "execute_details_create_executor_register_syscalls_us", $self @@ -409,8 +399,6 @@ pub struct ExecuteDetailsTimings { pub get_or_create_executor_us: u64, pub changed_account_count: u64, pub total_account_count: u64, - pub total_data_size: usize, - pub data_size_changed: usize, pub create_executor_register_syscalls_us: u64, pub create_executor_load_elf_us: u64, pub create_executor_verify_code_us: u64, @@ -430,8 +418,6 @@ impl ExecuteDetailsTimings { ); saturating_add_assign!(self.changed_account_count, other.changed_account_count); saturating_add_assign!(self.total_account_count, other.total_account_count); - saturating_add_assign!(self.total_data_size, other.total_data_size); - saturating_add_assign!(self.data_size_changed, other.data_size_changed); saturating_add_assign!( self.create_executor_register_syscalls_us, other.create_executor_register_syscalls_us @@ -547,15 +533,12 @@ mod tests { let mut other_execute_details_timings = construct_execute_timings_with_program(&program_id, us, compute_units_consumed); let account_count = 1; - let data_size_changed = 1; other_execute_details_timings.serialize_us = us; other_execute_details_timings.create_vm_us = us; other_execute_details_timings.execute_us = us; other_execute_details_timings.deserialize_us = us; other_execute_details_timings.changed_account_count = account_count; other_execute_details_timings.total_account_count = account_count; - other_execute_details_timings.total_data_size = data_size_changed; - other_execute_details_timings.data_size_changed = data_size_changed; // Accumulate the other instance into the current instance execute_details_timings.accumulate(&other_execute_details_timings); diff --git a/program-test/Cargo.toml b/program-test/Cargo.toml index ac27c5b5bb..f4a97751c9 100644 --- a/program-test/Cargo.toml +++ b/program-test/Cargo.toml @@ -14,7 +14,7 @@ base64 = "0.13.0" bincode = "1.3.3" chrono-humanize = "0.2.1" log = "0.4.17" -serde = "1.0.138" +serde = "1.0.143" solana-banks-client = { path = "../banks-client", version = "=1.12.0" } solana-banks-server = { path = "../banks-server", version = "=1.12.0" } solana-bpf-loader-program = { path
= "../programs/bpf_loader", version = "=1.12.0" } diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index 8780b874e2..bdab4f1458 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -23,7 +23,7 @@ use { runtime_config::RuntimeConfig, }, solana_sdk::{ - account::{Account, AccountSharedData, ReadableAccount}, + account::{Account, AccountSharedData}, account_info::AccountInfo, clock::Slot, entrypoint::{deserialize, ProgramResult, SUCCESS}, @@ -40,7 +40,7 @@ use { signature::{Keypair, Signer}, sysvar::{Sysvar, SysvarId}, }, - solana_vote_program::vote_state::{VoteState, VoteStateVersions}, + solana_vote_program::vote_state::{self, VoteState, VoteStateVersions}, std::{ cell::RefCell, collections::{HashMap, HashSet}, @@ -291,23 +291,12 @@ impl solana_sdk::program_stubs::SyscallStubs for SyscallStubs { } _ => {} } - if borrowed_account.is_executable() != account_info.executable { - borrowed_account - .set_executable(account_info.executable) - .unwrap(); - } // Change the owner at the end so that we are allowed to change the lamports and data before if borrowed_account.get_owner() != account_info.owner { borrowed_account .set_owner(account_info.owner.as_ref()) .unwrap(); } - drop(borrowed_account); - let account = transaction_context - .get_account_at_index(instruction_account.index_in_transaction) - .unwrap() - .borrow(); - assert_eq!(account.rent_epoch(), account_info.rent_epoch); if instruction_account.is_writable { account_indices.push((instruction_account.index_in_caller, account_info_index)); } @@ -447,6 +436,7 @@ pub struct ProgramTest { prefer_bpf: bool, use_bpf_jit: bool, deactivate_feature_set: HashSet, + transaction_account_lock_limit: Option, } impl Default for ProgramTest { @@ -479,6 +469,7 @@ impl Default for ProgramTest { prefer_bpf, use_bpf_jit: false, deactivate_feature_set: HashSet::default(), + transaction_account_lock_limit: None, } } } @@ -511,6 +502,11 @@ impl ProgramTest { self.compute_max_units = Some(compute_max_units); } + /// Override the default transaction account lock limit + pub fn set_transaction_account_lock_limit(&mut self, transaction_account_lock_limit: usize) { + self.transaction_account_lock_limit = Some(transaction_account_lock_limit); + } + /// Override the BPF compute budget #[allow(deprecated)] #[deprecated(since = "1.8.0", note = "please use `set_compute_max_units` instead")] @@ -790,6 +786,7 @@ impl ProgramTest { compute_unit_limit: max_units, ..ComputeBudget::default() }), + transaction_account_lock_limit: self.transaction_account_lock_limit, ..RuntimeConfig::default() }), ); @@ -1065,14 +1062,14 @@ impl ProgramTestContext { // generate some vote activity for rewards let mut vote_account = bank.get_account(vote_account_address).unwrap(); - let mut vote_state = VoteState::from(&vote_account).unwrap(); + let mut vote_state = vote_state::from(&vote_account).unwrap(); let epoch = bank.epoch(); for _ in 0..number_of_credits { vote_state.increment_credits(epoch, 1); } let versioned = VoteStateVersions::new_current(vote_state); - VoteState::to(&versioned, &mut vote_account).unwrap(); + vote_state::to(&versioned, &mut vote_account).unwrap(); bank.store_account(vote_account_address, &vote_account); } diff --git a/programs/address-lookup-table-tests/tests/create_lookup_table_ix.rs b/programs/address-lookup-table-tests/tests/create_lookup_table_ix.rs index b91a318028..4066484859 100644 --- a/programs/address-lookup-table-tests/tests/create_lookup_table_ix.rs +++ b/programs/address-lookup-table-tests/tests/create_lookup_table_ix.rs @@ 
-3,20 +3,29 @@ use { common::{assert_ix_error, overwrite_slot_hashes_with_slots, setup_test_context}, solana_address_lookup_table_program::{ id, - instruction::create_lookup_table, + instruction::{create_lookup_table, create_lookup_table_signed}, + processor::process_instruction, state::{AddressLookupTable, LOOKUP_TABLE_META_SIZE}, }, solana_program_test::*, solana_sdk::{ - clock::Slot, instruction::InstructionError, pubkey::Pubkey, rent::Rent, signature::Signer, - signer::keypair::Keypair, transaction::Transaction, + clock::Slot, feature_set, instruction::InstructionError, pubkey::Pubkey, rent::Rent, + signature::Signer, signer::keypair::Keypair, transaction::Transaction, }, }; mod common; +pub async fn setup_test_context_without_authority_feature() -> ProgramTestContext { + let mut program_test = ProgramTest::new("", id(), Some(process_instruction)); + program_test.deactivate_feature( + feature_set::relax_authority_signer_check_for_lookup_table_creation::id(), + ); + program_test.start_with_context().await +} + #[tokio::test] -async fn test_create_lookup_table() { +async fn test_create_lookup_table_idempotent() { let mut context = setup_test_context().await; let test_recent_slot = 123; @@ -25,8 +34,7 @@ async fn test_create_lookup_table() { let client = &mut context.banks_client; let payer = &context.payer; let recent_blockhash = context.last_blockhash; - let authority_keypair = Keypair::new(); - let authority_address = authority_keypair.pubkey(); + let authority_address = Pubkey::new_unique(); let (create_lookup_table_ix, lookup_table_address) = create_lookup_table(authority_address, payer.pubkey(), test_recent_slot); @@ -35,7 +43,7 @@ async fn test_create_lookup_table() { let transaction = Transaction::new_signed_with_payer( &[create_lookup_table_ix.clone()], Some(&payer.pubkey()), - &[payer, &authority_keypair], + &[payer], recent_blockhash, ); @@ -45,7 +53,7 @@ async fn test_create_lookup_table() { .await .unwrap() .unwrap(); - assert_eq!(lookup_table_account.owner, crate::id()); + assert_eq!(lookup_table_account.owner, id()); assert_eq!(lookup_table_account.data.len(), LOOKUP_TABLE_META_SIZE); assert_eq!( lookup_table_account.lamports, @@ -59,6 +67,47 @@ async fn test_create_lookup_table() { assert_eq!(lookup_table.addresses.len(), 0); } + // Second create should succeed too + { + let recent_blockhash = client + .get_new_latest_blockhash(&recent_blockhash) + .await + .unwrap(); + let transaction = Transaction::new_signed_with_payer( + &[create_lookup_table_ix], + Some(&payer.pubkey()), + &[payer], + recent_blockhash, + ); + + assert_matches!(client.process_transaction(transaction).await, Ok(())); + } +} + +#[tokio::test] +async fn test_create_lookup_table_not_idempotent() { + let mut context = setup_test_context_without_authority_feature().await; + + let test_recent_slot = 123; + overwrite_slot_hashes_with_slots(&mut context, &[test_recent_slot]); + + let client = &mut context.banks_client; + let payer = &context.payer; + let recent_blockhash = context.last_blockhash; + let authority_keypair = Keypair::new(); + let authority_address = authority_keypair.pubkey(); + let (create_lookup_table_ix, ..) 
= + create_lookup_table_signed(authority_address, payer.pubkey(), test_recent_slot); + + let transaction = Transaction::new_signed_with_payer( + &[create_lookup_table_ix.clone()], + Some(&payer.pubkey()), + &[payer, &authority_keypair], + recent_blockhash, + ); + + assert_matches!(client.process_transaction(transaction).await, Ok(())); + // Second create should fail { context.last_blockhash = client @@ -97,11 +146,11 @@ async fn test_create_lookup_table_use_payer_as_authority() { } #[tokio::test] -async fn test_create_lookup_table_without_signer() { - let mut context = setup_test_context().await; +async fn test_create_lookup_table_missing_signer() { + let mut context = setup_test_context_without_authority_feature().await; let unsigned_authority_address = Pubkey::new_unique(); - let mut ix = create_lookup_table( + let mut ix = create_lookup_table_signed( unsigned_authority_address, context.payer.pubkey(), Slot::MAX, @@ -122,15 +171,14 @@ async fn test_create_lookup_table_without_signer() { async fn test_create_lookup_table_not_recent_slot() { let mut context = setup_test_context().await; let payer = &context.payer; - let authority_keypair = Keypair::new(); - let authority_address = authority_keypair.pubkey(); + let authority_address = Pubkey::new_unique(); let ix = create_lookup_table(authority_address, payer.pubkey(), Slot::MAX).0; assert_ix_error( &mut context, ix, - Some(&authority_keypair), + None, InstructionError::InvalidInstructionData, ) .await; @@ -142,17 +190,10 @@ async fn test_create_lookup_table_pda_mismatch() { let test_recent_slot = 123; overwrite_slot_hashes_with_slots(&mut context, &[test_recent_slot]); let payer = &context.payer; - let authority_keypair = Keypair::new(); - let authority_address = authority_keypair.pubkey(); + let authority_address = Pubkey::new_unique(); let mut ix = create_lookup_table(authority_address, payer.pubkey(), test_recent_slot).0; ix.accounts[0].pubkey = Pubkey::new_unique(); - assert_ix_error( - &mut context, - ix, - Some(&authority_keypair), - InstructionError::InvalidArgument, - ) - .await; + assert_ix_error(&mut context, ix, None, InstructionError::InvalidArgument).await; } diff --git a/programs/address-lookup-table/Cargo.toml b/programs/address-lookup-table/Cargo.toml index bb9534be47..76c459d23d 100644 --- a/programs/address-lookup-table/Cargo.toml +++ b/programs/address-lookup-table/Cargo.toml @@ -15,7 +15,7 @@ bytemuck = "1.11.0" log = "0.4.17" num-derive = "0.3" num-traits = "0.2" -serde = { version = "1.0.138", features = ["derive"] } +serde = { version = "1.0.143", features = ["derive"] } solana-frozen-abi = { path = "../../frozen-abi", version = "=1.12.0" } solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.12.0" } solana-program = { path = "../../sdk/program", version = "=1.12.0" } diff --git a/programs/address-lookup-table/src/instruction.rs b/programs/address-lookup-table/src/instruction.rs index 80a6ddb7a9..573dbe561a 100644 --- a/programs/address-lookup-table/src/instruction.rs +++ b/programs/address-lookup-table/src/instruction.rs @@ -80,10 +80,43 @@ pub fn derive_lookup_table_address( /// Constructs an instruction to create a table account and returns /// the instruction and the table account's derived address. +/// +/// # Note +/// +/// This instruction requires the authority to be a signer but +/// in v1.12 the address lookup table program will no longer require +/// the authority to sign the transaction. 
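+///
+/// Hypothetical usage sketch (the `authority`, `payer`, and `recent_slot`
+/// bindings are assumed for illustration):
+///
+/// ```ignore
+/// let (instruction, table_address) =
+///     create_lookup_table_signed(authority.pubkey(), payer.pubkey(), recent_slot);
+/// // The transaction carrying `instruction` must then be signed by both
+/// // `payer` and `authority`.
+/// ```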
+pub fn create_lookup_table_signed( + authority_address: Pubkey, + payer_address: Pubkey, + recent_slot: Slot, +) -> (Instruction, Pubkey) { + create_lookup_table_common(authority_address, payer_address, recent_slot, true) +} + +/// Constructs an instruction to create a table account and returns +/// the instruction and the table account's derived address. +/// +/// # Note +/// +/// This instruction doesn't require the authority to be a signer but +/// until v1.12 the address lookup table program still requires the +/// authority to sign the transaction. pub fn create_lookup_table( authority_address: Pubkey, payer_address: Pubkey, recent_slot: Slot, +) -> (Instruction, Pubkey) { + create_lookup_table_common(authority_address, payer_address, recent_slot, false) +} + +/// Constructs an instruction to create a table account and returns +/// the instruction and the table account's derived address. +fn create_lookup_table_common( + authority_address: Pubkey, + payer_address: Pubkey, + recent_slot: Slot, + authority_is_signer: bool, ) -> (Instruction, Pubkey) { let (lookup_table_address, bump_seed) = derive_lookup_table_address(&authority_address, recent_slot); @@ -95,7 +128,7 @@ pub fn create_lookup_table( }, vec![ AccountMeta::new(lookup_table_address, false), - AccountMeta::new_readonly(authority_address, true), + AccountMeta::new_readonly(authority_address, authority_is_signer), AccountMeta::new(payer_address, true), AccountMeta::new_readonly(system_program::id(), false), ], diff --git a/programs/address-lookup-table/src/processor.rs b/programs/address-lookup-table/src/processor.rs index 39d36b988c..faf2dc0554 100644 --- a/programs/address-lookup-table/src/processor.rs +++ b/programs/address-lookup-table/src/processor.rs @@ -9,6 +9,7 @@ use { solana_program_runtime::{ic_msg, invoke_context::InvokeContext}, solana_sdk::{ clock::Slot, + feature_set, instruction::InstructionError, program_utils::limited_deserialize, pubkey::{Pubkey, PUBKEY_BYTES}, @@ -58,7 +59,12 @@ impl Processor { instruction_context.try_borrow_instruction_account(transaction_context, 0)?; let lookup_table_lamports = lookup_table_account.get_lamports(); let table_key = *lookup_table_account.get_key(); - if !lookup_table_account.get_data().is_empty() { + let lookup_table_owner = *lookup_table_account.get_owner(); + if !invoke_context + .feature_set + .is_active(&feature_set::relax_authority_signer_check_for_lookup_table_creation::id()) + && !lookup_table_account.get_data().is_empty() + { ic_msg!(invoke_context, "Table account must not be allocated"); return Err(InstructionError::AccountAlreadyInitialized); } @@ -67,7 +73,11 @@ impl Processor { let authority_account = instruction_context.try_borrow_instruction_account(transaction_context, 1)?; let authority_key = *authority_account.get_key(); - if !authority_account.is_signer() { + if !invoke_context + .feature_set + .is_active(&feature_set::relax_authority_signer_check_for_lookup_table_creation::id()) + && !authority_account.is_signer() + { ic_msg!(invoke_context, "Authority account must be a signer"); return Err(InstructionError::MissingRequiredSignature); } @@ -116,6 +126,14 @@ impl Processor { return Err(InstructionError::InvalidArgument); } + if invoke_context + .feature_set + .is_active(&feature_set::relax_authority_signer_check_for_lookup_table_creation::id()) + && crate::check_id(&lookup_table_owner) + { + return Ok(()); + } + let table_account_data_len = LOOKUP_TABLE_META_SIZE; let rent = invoke_context.get_sysvar_cache().get_rent()?; let required_lamports = rent diff 
--git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index 3f7ca6f672..e533007115 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -494,9 +494,9 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.59.2" +version = "0.60.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" +checksum = "062dddbc1ba4aca46de6338e2bf87771414c335f7b2f2036e8f3e9befebf88e6" dependencies = [ "bitflags", "cexpr", @@ -971,9 +971,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1079fb8528d9f9c888b1e8aa651e6e079ade467323d58f75faf1d30b1808f540" +checksum = "dc948ebb96241bb40ab73effeb80d9f93afaad49359d159a5e61be51619fe813" dependencies = [ "libc", ] @@ -2326,9 +2326,9 @@ checksum = "292a948cd991e376cf75541fe5b97a1081d713c618b4f1b9500f8844e49eb565" [[package]] name = "librocksdb-sys" -version = "0.6.1+6.28.2" +version = "0.8.0+7.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bc587013734dadb7cf23468e531aa120788b87243648be42e2d3a072186291" +checksum = "611804e4666a25136fcc5f8cf425ab4d26c7f74ea245ffe92ea23b85b6420b5d" dependencies = [ "bindgen", "bzip2-sys", @@ -2657,14 +2657,16 @@ dependencies = [ [[package]] name = "nix" -version = "0.24.2" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "195cdbc1741b8134346d515b3a56a1c94b0912758009cfd53f99ea0f57b065fc" +checksum = "e322c04a9e3440c327fca7b6c8a63e6890a32fa2ad689db972425f07e0d22abb" dependencies = [ + "autocfg", "bitflags", "cfg-if 1.0.0", "libc", "memoffset", + "pin-utils", ] [[package]] @@ -2934,9 +2936,9 @@ checksum = "9ff7415e9ae3fff1225851df9e0d9e4e5479f947619774677a63572e55e80eff" [[package]] name = "ouroboros" -version = "0.15.2" +version = "0.15.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7425ea87a1e31df63a27b6d31e21a35a9003268032a876465e8d43c2364b0de2" +checksum = "55190d158a4c09a30bdb5e3b2c50a37f299b8dd9f59d0e1510782732e8bf8877" dependencies = [ "aliasable", "ouroboros_macro", @@ -2944,9 +2946,9 @@ dependencies = [ [[package]] name = "ouroboros_macro" -version = "0.15.2" +version = "0.15.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "734aa7a4a6390b162112523cac2923a18e4f23b917880a68c826bf6e8bf48f06" +checksum = "816c4556bb87c05aad7710d02e88ed50a93f837d73dfe417ec5e890a9e1bbec7" dependencies = [ "Inflector", "proc-macro-error", @@ -3476,9 +3478,9 @@ dependencies = [ [[package]] name = "quinn" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21afdc492bf2a8688cb386be6605d1163b6ace89afa5e3b529037d2b4334b860" +checksum = "5b435e71d9bfa0d8889927231970c51fb89c58fa63bffcab117c9c7a41e5ef8f" dependencies = [ "bytes", "futures-channel", @@ -3782,9 +3784,9 @@ dependencies = [ [[package]] name = "rocksdb" -version = "0.18.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "620f4129485ff1a7128d184bc687470c21c7951b64779ebc9cfdad3dcd920290" +checksum = "7e9562ea1d70c0cc63a34a22d977753b50cca91cc6b6527750463bd5dd8697bc" dependencies = [ "libc", "librocksdb-sys", @@ -4038,9 +4040,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.83" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"38dd04e3c8279e75b31ef29dbdceebfe5ad89f4d0937213c53f7d49d01b3d5a7" +checksum = "e55a28e3aaef9d5ce0506d0a14dbba8054ddc7e499ef522dd8b26859ec9d4a44" dependencies = [ "itoa", "ryu", @@ -4849,6 +4851,7 @@ dependencies = [ "log", "quinn", "quinn-proto", + "quinn-udp", "rand 0.7.3", "rand_chacha 0.2.2", "rayon", @@ -5865,6 +5868,8 @@ dependencies = [ "percentage", "pkcs8", "quinn", + "quinn-proto", + "quinn-udp", "rand 0.7.3", "rcgen", "rustls 0.20.6", @@ -5961,8 +5966,10 @@ dependencies = [ "log", "num_cpus", "rand 0.7.3", + "rayon", "serde", "serde_json", + "serde_yaml", "signal-hook", "solana-clap-utils", "solana-cli-config", @@ -6020,6 +6027,7 @@ dependencies = [ "solana-frozen-abi 1.12.0", "solana-frozen-abi-macro 1.12.0", "solana-metrics", + "solana-program 1.12.0", "solana-program-runtime", "solana-sdk 1.12.0", "thiserror", @@ -6098,9 +6106,9 @@ dependencies = [ [[package]] name = "solana_rbpf" -version = "0.2.31" +version = "0.2.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80a28c5dfe7e8af38daa39d6561c8e8b9ed7a2f900951ebe7362ad6348d36c73" +checksum = "fe055100805e9069715acf73529ec563ad987a4d042da9defe9b7554560f2df4" dependencies = [ "byteorder 1.4.3", "combine", @@ -6178,9 +6186,9 @@ dependencies = [ [[package]] name = "spl-token-2022" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0a97cbf60b91b610c846ccf8eecca96d92a24a19ffbf9fe06cd0c84e76ec45e" +checksum = "e4c0ebca4740cc4c892aa31e07d0b4dc1a24cac4748376d4b34f8eb0fee9ff46" dependencies = [ "arrayref", "bytemuck", diff --git a/programs/bpf/Cargo.toml b/programs/bpf/Cargo.toml index 03669280ae..fd0410271e 100644 --- a/programs/bpf/Cargo.toml +++ b/programs/bpf/Cargo.toml @@ -38,7 +38,7 @@ solana-program-runtime = { path = "../../program-runtime", version = "=1.12.0" } solana-runtime = { path = "../../runtime", version = "=1.12.0" } solana-sdk = { path = "../../sdk", version = "=1.12.0" } solana-transaction-status = { path = "../../transaction-status", version = "=1.12.0" } -solana_rbpf = "=0.2.31" +solana_rbpf = "=0.2.32" [dev-dependencies] solana-ledger = { path = "../../ledger", version = "=1.12.0" } diff --git a/programs/bpf/c/src/invoke/invoke.c b/programs/bpf/c/src/invoke/invoke.c index cfc8075cef..f37ff8a1d2 100644 --- a/programs/bpf/c/src/invoke/invoke.c +++ b/programs/bpf/c/src/invoke/invoke.c @@ -25,13 +25,12 @@ static const uint8_t TEST_PRIVILEGE_DEESCALATION_ESCALATION_SIGNER = 12; static const uint8_t TEST_PRIVILEGE_DEESCALATION_ESCALATION_WRITABLE = 13; static const uint8_t TEST_WRITABLE_DEESCALATION_WRITABLE = 14; static const uint8_t TEST_NESTED_INVOKE_TOO_DEEP = 15; -static const uint8_t TEST_EXECUTABLE_LAMPORTS = 16; -static const uint8_t TEST_CALL_PRECOMPILE = 17; -static const uint8_t ADD_LAMPORTS = 18; -static const uint8_t TEST_RETURN_DATA_TOO_LARGE = 19; -static const uint8_t TEST_DUPLICATE_PRIVILEGE_ESCALATION_SIGNER = 20; -static const uint8_t TEST_DUPLICATE_PRIVILEGE_ESCALATION_WRITABLE = 21; -static const uint8_t TEST_MAX_ACCOUNT_INFOS_EXCEEDED = 22; +static const uint8_t TEST_CALL_PRECOMPILE = 16; +static const uint8_t ADD_LAMPORTS = 17; +static const uint8_t TEST_RETURN_DATA_TOO_LARGE = 18; +static const uint8_t TEST_DUPLICATE_PRIVILEGE_ESCALATION_SIGNER = 19; +static const uint8_t TEST_DUPLICATE_PRIVILEGE_ESCALATION_WRITABLE = 20; +static const uint8_t TEST_MAX_ACCOUNT_INFOS_EXCEEDED = 21; static const int MINT_INDEX = 0; static const int ARGUMENT_INDEX = 1; @@ -320,6 +319,21 @@ extern uint64_t 
entrypoint(const uint8_t *input) { sol_assert(accounts[ARGUMENT_INDEX].data[i] == 0); } } + + sol_log("Test that is_executable and rent_epoch are ignored"); + { + accounts[INVOKED_ARGUMENT_INDEX].executable = true; + accounts[INVOKED_ARGUMENT_INDEX].rent_epoch += 1; + SolAccountMeta arguments[] = { + {accounts[INVOKED_ARGUMENT_INDEX].key, true, false}}; + uint8_t data[] = {RETURN_OK}; + const SolInstruction instruction = {accounts[INVOKED_PROGRAM_INDEX].key, + arguments, SOL_ARRAY_SIZE(arguments), + data, SOL_ARRAY_SIZE(data)}; + + sol_assert(SUCCESS == + sol_invoke(&instruction, accounts, SOL_ARRAY_SIZE(accounts))); + } break; } case TEST_PRIVILEGE_ESCALATION_SIGNER: { @@ -593,25 +607,6 @@ extern uint64_t entrypoint(const uint8_t *input) { do_nested_invokes(5, accounts, params.ka_num); break; } - case TEST_EXECUTABLE_LAMPORTS: { - sol_log("Test executable lamports"); - accounts[ARGUMENT_INDEX].executable = true; - *accounts[ARGUMENT_INDEX].lamports -= 1; - *accounts[DERIVED_KEY1_INDEX].lamports +=1; - SolAccountMeta arguments[] = { - {accounts[ARGUMENT_INDEX].key, true, false}, - {accounts[DERIVED_KEY1_INDEX].key, true, false}, - }; - uint8_t data[] = {ADD_LAMPORTS, 0, 0, 0}; - SolPubkey program_id; - sol_memcpy(&program_id, params.program_id, sizeof(SolPubkey)); - const SolInstruction instruction = {&program_id, - arguments, SOL_ARRAY_SIZE(arguments), - data, SOL_ARRAY_SIZE(data)}; - sol_invoke(&instruction, accounts, SOL_ARRAY_SIZE(accounts)); - *accounts[ARGUMENT_INDEX].lamports += 1; - break; - } case TEST_CALL_PRECOMPILE: { sol_log("Test calling precompile from cpi"); SolAccountMeta arguments[] = {}; diff --git a/programs/bpf/rust/invoke/src/instructions.rs b/programs/bpf/rust/invoke/src/instructions.rs index 08a1fa3216..464ec39281 100644 --- a/programs/bpf/rust/invoke/src/instructions.rs +++ b/programs/bpf/rust/invoke/src/instructions.rs @@ -15,13 +15,12 @@ pub const TEST_PRIVILEGE_DEESCALATION_ESCALATION_SIGNER: u8 = 12; pub const TEST_PRIVILEGE_DEESCALATION_ESCALATION_WRITABLE: u8 = 13; pub const TEST_WRITABLE_DEESCALATION_WRITABLE: u8 = 14; pub const TEST_NESTED_INVOKE_TOO_DEEP: u8 = 15; -pub const TEST_EXECUTABLE_LAMPORTS: u8 = 16; -pub const TEST_CALL_PRECOMPILE: u8 = 17; -pub const ADD_LAMPORTS: u8 = 18; -pub const TEST_RETURN_DATA_TOO_LARGE: u8 = 19; -pub const TEST_DUPLICATE_PRIVILEGE_ESCALATION_SIGNER: u8 = 20; -pub const TEST_DUPLICATE_PRIVILEGE_ESCALATION_WRITABLE: u8 = 21; -pub const TEST_MAX_ACCOUNT_INFOS_EXCEEDED: u8 = 22; +pub const TEST_CALL_PRECOMPILE: u8 = 16; +pub const ADD_LAMPORTS: u8 = 17; +pub const TEST_RETURN_DATA_TOO_LARGE: u8 = 18; +pub const TEST_DUPLICATE_PRIVILEGE_ESCALATION_SIGNER: u8 = 19; +pub const TEST_DUPLICATE_PRIVILEGE_ESCALATION_WRITABLE: u8 = 20; +pub const TEST_MAX_ACCOUNT_INFOS_EXCEEDED: u8 = 21; pub const MINT_INDEX: usize = 0; pub const ARGUMENT_INDEX: usize = 1; diff --git a/programs/bpf/rust/invoke/src/processor.rs b/programs/bpf/rust/invoke/src/processor.rs index c3c92cf2f1..c519b7353b 100644 --- a/programs/bpf/rust/invoke/src/processor.rs +++ b/programs/bpf/rust/invoke/src/processor.rs @@ -639,38 +639,6 @@ fn process_instruction( TEST_NESTED_INVOKE_TOO_DEEP => { let _ = do_nested_invokes(5, accounts); } - TEST_EXECUTABLE_LAMPORTS => { - msg!("Test executable lamports"); - let mut accounts = accounts.to_vec(); - - // set account to executable and subtract lamports - accounts[ARGUMENT_INDEX].executable = true; - { - let mut lamports = (*accounts[ARGUMENT_INDEX].lamports).borrow_mut(); - **lamports = (*lamports).saturating_sub(1); - } - 
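
Both the C table above and the mirrored Rust constants in instructions.rs have to be renumbered by hand whenever a case such as TEST_EXECUTABLE_LAMPORTS is dropped. A hypothetical alternative (not part of this patch) is to let the compiler own the numbering through an enum with explicit discriminants:

    // Sketch only: the tag values mirror the renumbered tables above.
    #[allow(dead_code)]
    #[repr(u8)]
    #[derive(Clone, Copy, Debug)]
    enum TestInstruction {
        NestedInvokeTooDeep = 15,
        CallPrecompile = 16,
        AddLamports = 17,
        ReturnDataTooLarge = 18,
        DuplicatePrivilegeEscalationSigner = 19,
        DuplicatePrivilegeEscalationWritable = 20,
        MaxAccountInfosExceeded = 21,
    }

    fn main() {
        // The discriminant still matches the raw instruction-data byte.
        assert_eq!(TestInstruction::CallPrecompile as u8, 16);
    }
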
// add lamports to dest account - { - let mut lamports = (*accounts[DERIVED_KEY1_INDEX].lamports).borrow_mut(); - **lamports = (*lamports).saturating_add(1); - } - - let instruction = create_instruction( - *program_id, - &[ - (accounts[ARGUMENT_INDEX].key, true, false), - (accounts[DERIVED_KEY1_INDEX].key, true, false), - ], - vec![ADD_LAMPORTS, 0, 0, 0], - ); - let _ = invoke(&instruction, &accounts); - - // reset executable account - { - let mut lamports = (*accounts[ARGUMENT_INDEX].lamports).borrow_mut(); - **lamports = (*lamports).saturating_add(1); - } - } TEST_CALL_PRECOMPILE => { msg!("Test calling precompiled program from cpi"); let instruction = diff --git a/programs/bpf/tests/programs.rs b/programs/bpf/tests/programs.rs index 23fa296d29..a509f3334d 100644 --- a/programs/bpf/tests/programs.rs +++ b/programs/bpf/tests/programs.rs @@ -1125,6 +1125,7 @@ fn test_program_bpf_invoke_sanity() { invoked_program_id.clone(), invoked_program_id.clone(), invoked_program_id.clone(), + invoked_program_id.clone(), ], Languages::Rust => vec![ system_program::id(), @@ -1333,13 +1334,6 @@ fn test_program_bpf_invoke_sanity() { None, ); - do_invoke_failure_test_local( - TEST_EXECUTABLE_LAMPORTS, - TransactionError::InstructionError(0, InstructionError::ExecutableLamportChange), - &[invoke_program_id.clone()], - None, - ); - do_invoke_failure_test_local( TEST_CALL_PRECOMPILE, TransactionError::InstructionError(0, InstructionError::ProgramFailedToComplete), @@ -3804,7 +3798,6 @@ fn test_program_fees() { congestion_multiplier, &fee_structure, true, - true, ); bank_client .send_and_confirm_message(&[&mint_keypair], message) @@ -3826,7 +3819,6 @@ fn test_program_fees() { congestion_multiplier, &fee_structure, true, - true, ); assert!(expected_normal_fee < expected_prioritized_fee); diff --git a/programs/bpf_loader/Cargo.toml b/programs/bpf_loader/Cargo.toml index 49167df6d4..8366265ef6 100644 --- a/programs/bpf_loader/Cargo.toml +++ b/programs/bpf_loader/Cargo.toml @@ -19,7 +19,7 @@ solana-metrics = { path = "../../metrics", version = "=1.12.0" } solana-program-runtime = { path = "../../program-runtime", version = "=1.12.0" } solana-sdk = { path = "../../sdk", version = "=1.12.0" } solana-zk-token-sdk = { path = "../../zk-token-sdk", version = "=1.12.0" } -solana_rbpf = "=0.2.31" +solana_rbpf = "=0.2.32" thiserror = "1.0" [dev-dependencies] diff --git a/programs/bpf_loader/src/allocator_bump.rs b/programs/bpf_loader/src/allocator_bump.rs index 7100723f97..00819a9ae0 100644 --- a/programs/bpf_loader/src/allocator_bump.rs +++ b/programs/bpf_loader/src/allocator_bump.rs @@ -2,21 +2,21 @@ use { solana_program_runtime::invoke_context::{Alloc, AllocErr}, - solana_rbpf::aligned_memory::AlignedMemory, + solana_rbpf::{aligned_memory::AlignedMemory, ebpf::HOST_ALIGN}, std::alloc::Layout, }; #[derive(Debug)] pub struct BpfAllocator { #[allow(dead_code)] - heap: AlignedMemory, + heap: AlignedMemory, start: u64, len: u64, pos: u64, } impl BpfAllocator { - pub fn new(heap: AlignedMemory, virtual_address: u64) -> Self { + pub fn new(heap: AlignedMemory, virtual_address: u64) -> Self { let len = heap.len() as u64; Self { heap, diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index 741f5ea553..ee629bc915 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -42,7 +42,6 @@ use { entrypoint::{HEAP_LENGTH, SUCCESS}, feature_set::{ cap_accounts_data_len, cap_bpf_program_instruction_accounts, - disable_bpf_deprecated_load_instructions, 
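
The BpfAllocator in allocator_bump.rs above only tracks a cursor into its pre-allocated AlignedMemory heap. A minimal, self-contained sketch of that bump scheme follows; the start/len/pos fields mirror the struct shown above, while the power-of-two alignment policy is an assumption made for this example, not taken from the real implementation:

    struct BumpAlloc {
        start: u64, // virtual address the heap is mapped at
        len: u64,   // heap size in bytes
        pos: u64,   // bytes handed out so far
    }

    impl BumpAlloc {
        /// `align` is assumed to be a nonzero power of two.
        fn alloc(&mut self, size: u64, align: u64) -> Option<u64> {
            // Round the cursor up to the requested alignment.
            let pos = self.pos.checked_add(align - 1)? & !(align - 1);
            let end = pos.checked_add(size)?;
            if end > self.len {
                return None; // out of heap; the VM surfaces an allocation error
            }
            self.pos = end;
            Some(self.start + pos)
        }
    }

    fn main() {
        let mut heap = BumpAlloc { start: 0x3000_0000, len: 32 * 1024, pos: 0 };
        assert_eq!(heap.alloc(10, 8), Some(0x3000_0000));
        assert_eq!(heap.alloc(1, 8), Some(0x3000_0010)); // cursor rounded up to 16
    }
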
            disable_bpf_unresolved_symbols_at_runtime, disable_deploy_of_alloc_free_syscall,
            disable_deprecated_loader, enable_bpf_loader_extend_program_data_ix,
            error_on_syscall_bpf_function_hash_collisions, reject_callx_r10,
@@ -167,17 +166,10 @@ pub fn create_executor(
         enable_instruction_meter: true,
         enable_instruction_tracing: log_enabled!(Trace),
         enable_symbol_and_section_labels: false,
-        disable_unresolved_symbols_at_runtime: invoke_context
-            .feature_set
-            .is_active(&disable_bpf_unresolved_symbols_at_runtime::id()),
         reject_broken_elfs: reject_deployment_of_broken_elfs,
         noop_instruction_rate: 256,
         sanitize_user_provided_values: true,
         encrypt_environment_registers: true,
-        disable_deprecated_load_instructions: reject_deployment_of_broken_elfs
-            && invoke_context
-                .feature_set
-                .is_active(&disable_bpf_deprecated_load_instructions::id()),
         syscall_bpf_function_hash_collision: invoke_context
             .feature_set
             .is_active(&error_on_syscall_bpf_function_hash_collisions::id()),
@@ -189,6 +181,8 @@ pub fn create_executor(
         optimize_rodata: false,
         static_syscalls: false,
         enable_elf_vaddr: false,
+        reject_rodata_stack_overlap: false,
+        new_elf_parser: false,
         // Warning, do not use `Config::default()` so that configuration here is explicit.
     };
     let mut create_executor_metrics = executor_metrics::CreateMetrics::default();
@@ -303,7 +297,7 @@ pub fn create_vm<'a, 'b>(
             .saturating_mul(compute_budget.heap_cost),
     );
     let mut heap =
-        AlignedMemory::new_with_size(compute_budget.heap_size.unwrap_or(HEAP_LENGTH), HOST_ALIGN);
+        AlignedMemory::<HOST_ALIGN>::zero_filled(compute_budget.heap_size.unwrap_or(HEAP_LENGTH));
     let parameter_region = MemoryRegion::new_writable(parameter_bytes, MM_INPUT_START);
     let mut vm = EbpfVm::new(program, heap.as_slice_mut(), vec![parameter_region])?;
     syscalls::bind_syscall_context_objects(&mut vm, invoke_context, heap, orig_account_lengths)?;
diff --git a/programs/bpf_loader/src/serialization.rs b/programs/bpf_loader/src/serialization.rs
index 0508746b2a..cb02226953 100644
--- a/programs/bpf_loader/src/serialization.rs
+++ b/programs/bpf_loader/src/serialization.rs
@@ -22,7 +22,7 @@ pub fn serialize_parameters(
     transaction_context: &TransactionContext,
     instruction_context: &InstructionContext,
     should_cap_ix_accounts: bool,
-) -> Result<(AlignedMemory, Vec<usize>), InstructionError> {
+) -> Result<(AlignedMemory<HOST_ALIGN>, Vec<usize>), InstructionError> {
     let num_ix_accounts = instruction_context.get_number_of_instruction_accounts();
     if should_cap_ix_accounts && num_ix_accounts > usize::from(MAX_INSTRUCTION_ACCOUNTS) {
         return Err(InstructionError::MaxAccountsExceeded);
@@ -80,7 +80,7 @@ pub fn deserialize_parameters(
 pub fn serialize_parameters_unaligned(
     transaction_context: &TransactionContext,
     instruction_context: &InstructionContext,
-) -> Result<AlignedMemory, InstructionError> {
+) -> Result<AlignedMemory<HOST_ALIGN>, InstructionError> {
     // Calculate size in order to alloc once
     let mut size = size_of::<u64>();
     for instruction_account_index in 0..instruction_context.get_number_of_instruction_accounts() {
@@ -106,7 +106,7 @@ pub fn serialize_parameters_unaligned(
     size += size_of::<u64>() // instruction data len
         + instruction_context.get_instruction_data().len() // instruction data
         + size_of::<Pubkey>(); // program id
-    let mut v = AlignedMemory::new(size, HOST_ALIGN);
+    let mut v = AlignedMemory::<HOST_ALIGN>::with_capacity(size);
     v.write_u64::<LittleEndian>(instruction_context.get_number_of_instruction_accounts() as u64)
         .map_err(|_| InstructionError::InvalidArgument)?;
@@ -208,7 +208,7 @@ pub fn deserialize_parameters_unaligned(
 pub fn serialize_parameters_aligned(
     transaction_context: &TransactionContext,
     instruction_context: &InstructionContext,
-) -> Result<AlignedMemory, InstructionError> {
+) -> Result<AlignedMemory<HOST_ALIGN>, InstructionError> {
     // Calculate size in order to alloc once
     let mut size = size_of::<u64>();
     for instruction_account_index in 0..instruction_context.get_number_of_instruction_accounts() {
@@ -239,7 +239,7 @@ pub fn serialize_parameters_aligned(
     size += size_of::<u64>() // data len
         + instruction_context.get_instruction_data().len()
         + size_of::<Pubkey>(); // program id;
-    let mut v = AlignedMemory::new(size, HOST_ALIGN);
+    let mut v = AlignedMemory::<HOST_ALIGN>::with_capacity(size);

     // Serialize into the buffer
     v.write_u64::<LittleEndian>(instruction_context.get_number_of_instruction_accounts() as u64)
         .map_err(|_| InstructionError::InvalidArgument)?;
@@ -275,7 +275,7 @@ pub fn serialize_parameters_aligned(
             .map_err(|_| InstructionError::InvalidArgument)?;
         v.write_all(borrowed_account.get_data())
             .map_err(|_| InstructionError::InvalidArgument)?;
-        v.resize(
+        v.fill_write(
             MAX_PERMITTED_DATA_INCREASE
                 + (v.write_index() as *const u8).align_offset(BPF_ALIGN_OF_U128),
             0,
@@ -497,21 +497,20 @@ mod tests {
             &program_indices,
         )
         .instruction_accounts;
+        let instruction_data = vec![];

-        let transaction_context =
+        let mut transaction_context =
             TransactionContext::new(transaction_accounts, Some(Rent::default()), 1, 1);
-        let instruction_data = vec![];
-        let instruction_context = InstructionContext::new(
-            0,
-            0,
-            &program_indices,
-            &instruction_accounts,
-            &instruction_data,
-        );
+        transaction_context
+            .push(&program_indices, &instruction_accounts, &instruction_data)
+            .unwrap();
+        let instruction_context = transaction_context
+            .get_instruction_context_at_index_in_trace(0)
+            .unwrap();

         let serialization_result = serialize_parameters(
             &transaction_context,
-            &instruction_context,
+            instruction_context,
             should_cap_ix_accounts,
         );
         assert_eq!(
diff --git a/programs/bpf_loader/src/syscalls/cpi.rs b/programs/bpf_loader/src/syscalls/cpi.rs
index f569812565..215f7267fb 100644
--- a/programs/bpf_loader/src/syscalls/cpi.rs
+++ b/programs/bpf_loader/src/syscalls/cpi.rs
@@ -619,6 +619,9 @@ where
         .get_current_instruction_context()
         .map_err(SyscallError::InstructionError)?;
     let mut accounts = Vec::with_capacity(instruction_accounts.len().saturating_add(1));
+    let is_disable_cpi_setting_executable_and_rent_epoch_active = invoke_context
+        .feature_set
+        .is_active(&disable_cpi_setting_executable_and_rent_epoch::id());

     let program_account_index = program_indices
         .last()
@@ -680,7 +683,9 @@
             }
             _ => {}
         }
-        if callee_account.is_executable() != caller_account.executable {
+        if !is_disable_cpi_setting_executable_and_rent_epoch_active
+            && callee_account.is_executable() != caller_account.executable
+        {
             callee_account
                 .set_executable(caller_account.executable)
                 .map_err(SyscallError::InstructionError)?;
@@ -696,7 +701,9 @@
             .transaction_context
             .get_account_at_index(instruction_account.index_in_transaction)
             .map_err(SyscallError::InstructionError)?;
-        if callee_account.borrow().rent_epoch() != caller_account.rent_epoch {
+        if !is_disable_cpi_setting_executable_and_rent_epoch_active
+            && callee_account.borrow().rent_epoch() != caller_account.rent_epoch
+        {
             if invoke_context
                 .feature_set
                 .is_active(&enable_early_verification_of_account_modifications::id())
@@ -827,7 +834,6 @@ fn check_authorized_program(
     instruction_data: &[u8],
     invoke_context: &InvokeContext,
 ) -> Result<(), EbpfError> {
-    #[allow(clippy::blocks_in_if_conditions)]
     if native_loader::check_id(program_id)
         || bpf_loader::check_id(program_id)
         || (bpf_loader_deprecated::check_id(program_id)
            &&
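
The fill_write call in the serialization hunks above pads each account's data region with MAX_PERMITTED_DATA_INCREASE bytes plus whatever keeps the next field 16-byte aligned. The padding computation in isolation, with both constants inlined so the snippet stands alone (values match the SDK definitions at this version):

    const BPF_ALIGN_OF_U128: usize = 16;
    const MAX_PERMITTED_DATA_INCREASE: usize = 10 * 1024;

    fn padding_after(write_index: usize) -> usize {
        MAX_PERMITTED_DATA_INCREASE + (write_index as *const u8).align_offset(BPF_ALIGN_OF_U128)
    }

    fn main() {
        assert_eq!(padding_after(32), MAX_PERMITTED_DATA_INCREASE); // already aligned
        assert_eq!(padding_after(33), MAX_PERMITTED_DATA_INCREASE + 15); // rounded up to 48
    }
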
!(bpf_loader_upgradeable::is_upgrade_instruction(instruction_data) || bpf_loader_upgradeable::is_set_authority_instruction(instruction_data) || bpf_loader_upgradeable::is_close_instruction(instruction_data))) - || (invoke_context - .feature_set - .is_active(&prevent_calling_precompiles_as_programs::id()) - && is_precompile(program_id, |feature_id: &Pubkey| { - invoke_context.feature_set.is_active(feature_id) - })) + || is_precompile(program_id, |feature_id: &Pubkey| { + invoke_context.feature_set.is_active(feature_id) + }) { return Err(SyscallError::ProgramNotSupported(*program_id).into()); } diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs index 651b714396..2893849a9f 100644 --- a/programs/bpf_loader/src/syscalls/mod.rs +++ b/programs/bpf_loader/src/syscalls/mod.rs @@ -20,7 +20,7 @@ use { }, solana_rbpf::{ aligned_memory::AlignedMemory, - ebpf, + ebpf::{self, HOST_ALIGN}, error::EbpfError, memory_region::{AccessType, MemoryMapping}, question_mark, @@ -34,10 +34,9 @@ use { entrypoint::{BPF_ALIGN_OF_U128, MAX_PERMITTED_DATA_INCREASE, SUCCESS}, feature_set::{ self, blake3_syscall_enabled, check_physical_overlapping, check_slice_translation_size, - curve25519_syscall_enabled, disable_fees_sysvar, - enable_early_verification_of_account_modifications, libsecp256k1_0_5_upgrade_enabled, - limit_secp256k1_recovery_id, prevent_calling_precompiles_as_programs, - syscall_saturated_math, + curve25519_syscall_enabled, disable_cpi_setting_executable_and_rent_epoch, + disable_fees_sysvar, enable_early_verification_of_account_modifications, + libsecp256k1_0_5_upgrade_enabled, limit_secp256k1_recovery_id, syscall_saturated_math, }, hash::{Hasher, HASH_BYTES}, instruction::{ @@ -362,7 +361,7 @@ pub fn register_syscalls( pub fn bind_syscall_context_objects<'a, 'b>( vm: &mut EbpfVm<'a, RequisiteVerifier, BpfError, crate::ThisInstructionMeter>, invoke_context: &'a mut InvokeContext<'b>, - heap: AlignedMemory, + heap: AlignedMemory, orig_account_lengths: Vec, ) -> Result<(), EbpfError> { let check_aligned = bpf_loader_deprecated::id() @@ -1710,34 +1709,37 @@ declare_syscall!( result ); + // Reverse iterate through the instruction trace, + // ignoring anything except instructions on the same level let stack_height = invoke_context.get_stack_height(); - let instruction_trace = invoke_context.transaction_context.get_instruction_trace(); - let instruction_context = if stack_height == TRANSACTION_LEVEL_STACK_HEIGHT { - // pick one of the top-level instructions - instruction_trace - .len() - .checked_sub(2) - .and_then(|result| result.checked_sub(index as usize)) - .and_then(|index| instruction_trace.get(index)) - .and_then(|instruction_list| instruction_list.first()) - } else { - // Walk the last list of inner instructions - instruction_trace.last().and_then(|inners| { - let mut current_index = 0; - inners.iter().rev().skip(1).find(|instruction_context| { - if stack_height == instruction_context.get_stack_height() { - if index == current_index { - return true; - } else { - current_index = current_index.saturating_add(1); - } - } - false - }) - }) - }; + let instruction_trace_length = invoke_context + .transaction_context + .get_instruction_trace_length(); + let mut reverse_index_at_stack_height = 0; + let mut found_instruction_context = None; + for index_in_trace in (0..instruction_trace_length).rev() { + let instruction_context = question_mark!( + invoke_context + .transaction_context + .get_instruction_context_at_index_in_trace(index_in_trace) + 
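
One detail of the cpi.rs hunks worth calling out: the feature check is resolved once, before the per-account loop, instead of calling is_active() on every iteration. The shape of that hoist, reduced to a sketch with a stand-in feature set (every name here is invented):

    use std::collections::HashSet;

    struct FeatureSet {
        active: HashSet<u32>,
    }

    impl FeatureSet {
        fn is_active(&self, id: &u32) -> bool {
            self.active.contains(id)
        }
    }

    // Each entry is (callee_flag, caller_flag), standing in for the executable bits.
    fn sync_flags(feature_set: &FeatureSet, accounts: &mut [(bool, bool)]) {
        const DISABLE_SYNC: u32 = 42; // stand-in for the real feature id
        let disable_sync_active = feature_set.is_active(&DISABLE_SYNC); // hoisted
        for (callee_flag, caller_flag) in accounts.iter_mut() {
            if !disable_sync_active && *callee_flag != *caller_flag {
                *callee_flag = *caller_flag;
            }
        }
    }

    fn main() {
        let feature_set = FeatureSet { active: HashSet::new() };
        let mut accounts = vec![(false, true)];
        sync_flags(&feature_set, &mut accounts);
        assert_eq!(accounts[0].0, true); // feature inactive, so the sync still runs
    }
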
.map_err(SyscallError::InstructionError), + result + ); + if instruction_context.get_stack_height() == TRANSACTION_LEVEL_STACK_HEIGHT + && stack_height > TRANSACTION_LEVEL_STACK_HEIGHT + { + break; + } + if instruction_context.get_stack_height() == stack_height { + if index.saturating_add(1) == reverse_index_at_stack_height { + found_instruction_context = Some(instruction_context); + break; + } + reverse_index_at_stack_height = reverse_index_at_stack_height.saturating_add(1); + } + } - if let Some(instruction_context) = instruction_context { + if let Some(instruction_context) = found_instruction_context { let ProcessedSiblingInstruction { data_len, accounts_len, @@ -2488,7 +2490,7 @@ mod tests { program_id, bpf_loader::id(), ); - let mut heap = AlignedMemory::new_with_size(100, HOST_ALIGN); + let mut heap = AlignedMemory::::zero_filled(100); let mut memory_mapping = MemoryMapping::new::( vec![ MemoryRegion::default(), @@ -2530,7 +2532,7 @@ mod tests { program_id, bpf_loader::id(), ); - let mut heap = AlignedMemory::new_with_size(100, HOST_ALIGN); + let mut heap = AlignedMemory::::zero_filled(100); let mut memory_mapping = MemoryMapping::new::( vec![ MemoryRegion::default(), @@ -2571,7 +2573,7 @@ mod tests { program_id, bpf_loader::id(), ); - let mut heap = AlignedMemory::new_with_size(100, HOST_ALIGN); + let mut heap = AlignedMemory::::zero_filled(100); let mut memory_mapping = MemoryMapping::new::( vec![ MemoryRegion::default(), @@ -2613,7 +2615,7 @@ mod tests { program_id, bpf_loader::id(), ); - let mut heap = AlignedMemory::new_with_size(100, HOST_ALIGN); + let mut heap = AlignedMemory::::zero_filled(100); let config = Config::default(); let mut memory_mapping = MemoryMapping::new::( vec![ diff --git a/programs/config/Cargo.toml b/programs/config/Cargo.toml index 1ad731836e..09a00af042 100644 --- a/programs/config/Cargo.toml +++ b/programs/config/Cargo.toml @@ -11,8 +11,8 @@ edition = "2021" [dependencies] bincode = "1.3.3" -chrono = { version = "0.4.11", features = ["serde"] } -serde = "1.0.138" +chrono = { version = "0.4.21", features = ["serde"] } +serde = "1.0.143" serde_derive = "1.0.103" solana-program-runtime = { path = "../../program-runtime", version = "=1.12.0" } solana-sdk = { path = "../../sdk", version = "=1.12.0" } diff --git a/programs/ed25519-tests/tests/process_transaction.rs b/programs/ed25519-tests/tests/process_transaction.rs index 0ef08e42fd..ac786a5e70 100644 --- a/programs/ed25519-tests/tests/process_transaction.rs +++ b/programs/ed25519-tests/tests/process_transaction.rs @@ -4,7 +4,6 @@ use { solana_program_test::*, solana_sdk::{ ed25519_instruction::new_ed25519_instruction, - feature_set, signature::Signer, transaction::{Transaction, TransactionError}, }, @@ -60,27 +59,3 @@ async fn test_failure() { )) ); } - -#[tokio::test] -async fn test_success_call_builtin_program() { - let mut program_test = ProgramTest::default(); - program_test.deactivate_feature(feature_set::prevent_calling_precompiles_as_programs::id()); - let mut context = program_test.start_with_context().await; - - let client = &mut context.banks_client; - let payer = &context.payer; - let recent_blockhash = context.last_blockhash; - - let privkey = ed25519_dalek::Keypair::generate(&mut thread_rng()); - let message_arr = b"hello"; - let instruction = new_ed25519_instruction(&privkey, message_arr); - - let transaction = Transaction::new_signed_with_payer( - &[instruction], - Some(&payer.pubkey()), - &[payer], - recent_blockhash, - ); - - assert_matches!(client.process_transaction(transaction).await, 
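
The rewritten sol_get_processed_sibling_instruction above walks the instruction trace from newest to oldest, counting entries at the caller's stack height and stopping at a transaction-level entry once inside a CPI. A standalone model of that search over bare stack heights (toy data, no transaction context):

    const TRANSACTION_LEVEL_STACK_HEIGHT: usize = 1;

    fn find_sibling(trace_heights: &[usize], stack_height: usize, index: usize) -> Option<usize> {
        let mut reverse_index_at_stack_height = 0;
        for (i, height) in trace_heights.iter().enumerate().rev() {
            if *height == TRANSACTION_LEVEL_STACK_HEIGHT
                && stack_height > TRANSACTION_LEVEL_STACK_HEIGHT
            {
                break; // never cross into an earlier top-level instruction
            }
            if *height == stack_height {
                // Entry 0 at this height is the currently executing instruction.
                if index.saturating_add(1) == reverse_index_at_stack_height {
                    return Some(i);
                }
                reverse_index_at_stack_height += 1;
            }
        }
        None
    }

    fn main() {
        // Heights: one top-level instruction, then three inner instructions.
        assert_eq!(find_sibling(&[1, 2, 2, 2], 2, 0), Some(2));
        assert_eq!(find_sibling(&[1, 2, 2, 2], 2, 1), Some(1));
        assert_eq!(find_sibling(&[1, 2, 2, 2], 2, 2), None); // blocked at the top level
    }
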
Ok(())); -} diff --git a/programs/stake/Cargo.toml b/programs/stake/Cargo.toml index 4ec998618f..50f28c1155 100644 --- a/programs/stake/Cargo.toml +++ b/programs/stake/Cargo.toml @@ -14,7 +14,7 @@ bincode = "1.3.3" log = "0.4.17" num-derive = "0.3" num-traits = "0.2" -serde = "1.0.138" +serde = "1.0.143" serde_derive = "1.0.103" solana-config-program = { path = "../config", version = "=1.12.0" } solana-frozen-abi = { path = "../../frozen-abi", version = "=1.12.0" } diff --git a/programs/stake/src/stake_instruction.rs b/programs/stake/src/stake_instruction.rs index 84cd9a4f96..cb47d19e6b 100644 --- a/programs/stake/src/stake_instruction.rs +++ b/programs/stake/src/stake_instruction.rs @@ -2267,7 +2267,7 @@ mod tests { fn test_stake_delegate(feature_set: FeatureSet) { let mut vote_state = VoteState::default(); for i in 0..1000 { - vote_state.process_slot_vote_unchecked(i); + vote_state::process_slot_vote_unchecked(&mut vote_state, i); } let vote_state_credits = vote_state.credits(); let vote_address = solana_sdk::pubkey::new_rand(); diff --git a/programs/stake/src/stake_state.rs b/programs/stake/src/stake_state.rs index a3d55e3d10..00e45fd48b 100644 --- a/programs/stake/src/stake_state.rs +++ b/programs/stake/src/stake_state.rs @@ -30,7 +30,7 @@ use { stake_history::{StakeHistory, StakeHistoryEntry}, transaction_context::{BorrowedAccount, InstructionContext, TransactionContext}, }, - solana_vote_program::vote_state::{VoteState, VoteStateVersions}, + solana_vote_program::vote_state::{self, VoteState, VoteStateVersions}, std::{collections::HashSet, convert::TryFrom}, }; @@ -1750,7 +1750,7 @@ fn do_create_account( ) -> AccountSharedData { let mut stake_account = AccountSharedData::new(lamports, StakeState::size_of(), &id()); - let vote_state = VoteState::from(vote_account).expect("vote_state"); + let vote_state = vote_state::from(vote_account).expect("vote_state"); let rent_exempt_reserve = rent.minimum_balance(stake_account.data().len()); diff --git a/programs/vote/Cargo.toml b/programs/vote/Cargo.toml index 8981ac8b7d..87ea0578a1 100644 --- a/programs/vote/Cargo.toml +++ b/programs/vote/Cargo.toml @@ -14,11 +14,12 @@ bincode = "1.3.3" log = "0.4.17" num-derive = "0.3" num-traits = "0.2" -serde = "1.0.138" +serde = "1.0.143" serde_derive = "1.0.103" solana-frozen-abi = { path = "../../frozen-abi", version = "=1.12.0" } solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.12.0" } solana-metrics = { path = "../../metrics", version = "=1.12.0" } +solana-program = { path = "../../sdk/program", version = "=1.12.0" } solana-program-runtime = { path = "../../program-runtime", version = "=1.12.0" } solana-sdk = { path = "../../sdk", version = "=1.12.0" } thiserror = "1.0" diff --git a/programs/vote/src/lib.rs b/programs/vote/src/lib.rs index 1b55f96b42..d6f4bcb6a0 100644 --- a/programs/vote/src/lib.rs +++ b/programs/vote/src/lib.rs @@ -1,9 +1,6 @@ #![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] #![allow(clippy::integer_arithmetic)] -pub mod authorized_voters; -pub mod vote_error; -pub mod vote_instruction; pub mod vote_processor; pub mod vote_state; pub mod vote_transaction; @@ -14,4 +11,7 @@ extern crate solana_metrics; #[macro_use] extern crate solana_frozen_abi_macro; -pub use solana_sdk::vote::program::{check_id, id}; +pub use solana_sdk::vote::{ + authorized_voters, error as vote_error, instruction as vote_instruction, + program::{check_id, id}, +}; diff --git a/programs/vote/src/vote_processor.rs b/programs/vote/src/vote_processor.rs index 
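
Several vote-state helpers in this series turn from methods into module-level functions (vote_state::from, vote_state::to, vote_state::process_slot_vote_unchecked above). The call-site migration has this shape, reduced to a toy (names kept, types and body are placeholders):

    struct VoteState {
        credits: u64,
    }

    // old: vote_state.process_slot_vote_unchecked(slot)
    // new: a free function taking the state explicitly
    fn process_slot_vote_unchecked(vote_state: &mut VoteState, _slot: u64) {
        vote_state.credits += 1; // placeholder body, not the real vote logic
    }

    fn main() {
        let mut vote_state = VoteState { credits: 0 };
        for slot in 0..1000 {
            process_slot_vote_unchecked(&mut vote_state, slot);
        }
        assert_eq!(vote_state.credits, 1000);
    }
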
dae06c4d04..856f8f4467 100644 --- a/programs/vote/src/vote_processor.rs +++ b/programs/vote/src/vote_processor.rs @@ -1,12 +1,13 @@ //! Vote program processor use { - crate::{ - id, - vote_instruction::VoteInstruction, - vote_state::{self, VoteAuthorize, VoteStateUpdate}, - }, + crate::vote_state, log::*, + solana_program::vote::{ + instruction::VoteInstruction, + program::id, + state::{VoteAuthorize, VoteStateUpdate}, + }, solana_program_runtime::{ invoke_context::InvokeContext, sysvar_cache::get_sysvar_with_account_check, }, @@ -143,7 +144,7 @@ pub fn process_instruction( get_sysvar_with_account_check::slot_hashes(invoke_context, instruction_context, 1)?; let clock = get_sysvar_with_account_check::clock(invoke_context, instruction_context, 2)?; - vote_state::process_vote( + vote_state::process_vote_with_account( &mut me, &slot_hashes, &clock, @@ -264,7 +265,7 @@ mod tests { vote_switch, withdraw, VoteInstruction, }, vote_state::{ - Lockout, Vote, VoteAuthorize, VoteAuthorizeCheckedWithSeedArgs, + self, Lockout, Vote, VoteAuthorize, VoteAuthorizeCheckedWithSeedArgs, VoteAuthorizeWithSeedArgs, VoteInit, VoteState, VoteStateUpdate, VoteStateVersions, }, }, @@ -462,7 +463,7 @@ mod tests { let (vote_pubkey, vote_account) = create_test_account(); let vote_account_space = vote_account.data().len(); - let mut vote_state = VoteState::from(&vote_account).unwrap(); + let mut vote_state = vote_state::from(&vote_account).unwrap(); vote_state.authorized_withdrawer = vote_pubkey; vote_state.epoch_credits = Vec::new(); @@ -482,7 +483,7 @@ mod tests { let mut vote_account_with_epoch_credits = AccountSharedData::new(lamports, vote_account_space, &id()); let versioned = VoteStateVersions::new_current(vote_state); - VoteState::to(&versioned, &mut vote_account_with_epoch_credits); + vote_state::to(&versioned, &mut vote_account_with_epoch_credits); (vote_pubkey, vote_account_with_epoch_credits) } diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index 48fcedbe84..5dffa9f918 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -1,13 +1,11 @@ //! Vote state, vote program //! 
Receive and processes votes from validators -#[cfg(test)] -use solana_sdk::epoch_schedule::MAX_LEADER_SCHEDULE_EPOCH_OFFSET; +pub use solana_program::vote::state::{vote_state_versions::*, *}; use { - crate::{authorized_voters::AuthorizedVoters, id, vote_error::VoteError}, - bincode::{deserialize, serialize_into, ErrorKind}, log::*, serde_derive::{Deserialize, Serialize}, solana_metrics::datapoint_debug, + solana_program::vote::{error::VoteError, program::id}, solana_sdk::{ account::{AccountSharedData, ReadableAccount, WritableAccount}, clock::{Epoch, Slot, UnixTimestamp}, @@ -16,7 +14,6 @@ use { instruction::InstructionError, pubkey::Pubkey, rent::Rent, - short_vec, slot_hashes::SlotHash, sysvar::clock::Clock, transaction_context::{BorrowedAccount, InstructionContext, TransactionContext}, @@ -28,21 +25,7 @@ use { }, }; -mod vote_state_0_23_5; -pub mod vote_state_versions; -pub use vote_state_versions::*; - -// Maximum number of votes to keep around, tightly coupled with epoch_schedule::MINIMUM_SLOTS_PER_EPOCH -pub const MAX_LOCKOUT_HISTORY: usize = 31; -pub const INITIAL_LOCKOUT: usize = 2; - -// Maximum number of credits history to keep around -pub const MAX_EPOCH_CREDITS_HISTORY: usize = 64; - -// Offset of VoteState::prior_voters, for determining initialization status without deserialization -const DEFAULT_PRIOR_VOTERS_OFFSET: usize = 82; - -#[frozen_abi(digest = "EYPXjH9Zn2vLzxyjHejkRkoTh4Tg4sirvb4FX9ye25qF")] +#[frozen_abi(digest = "8Xa47j7LCp99Q7CQeTz4KPWU8sZgGFpAJw2K4VbPgGh8")] #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize, AbiEnumVisitor, AbiExample)] pub enum VoteTransaction { Vote(Vote), @@ -160,1286 +143,550 @@ impl From for VoteTransaction { } } -#[frozen_abi(digest = "Ch2vVEwos2EjAVqSHCyJjnN2MNX1yrpapZTGhMSCjWUH")] -#[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Clone, AbiExample)] -pub struct Vote { - /// A stack of votes starting with the oldest vote - pub slots: Vec, - /// signature of the bank's state at the last slot - pub hash: Hash, - /// processing timestamp of last slot - pub timestamp: Option, -} - -impl Vote { - pub fn new(slots: Vec, hash: Hash) -> Self { - Self { - slots, - hash, - timestamp: None, - } - } +// utility function, used by Stakes, tests +pub fn from(account: &T) -> Option { + VoteState::deserialize(account.data()).ok() } -#[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Copy, Clone, AbiExample)] -pub struct Lockout { - pub slot: Slot, - pub confirmation_count: u32, +// utility function, used by Stakes, tests +pub fn to(versioned: &VoteStateVersions, account: &mut T) -> Option<()> { + VoteState::serialize(versioned, account.data_as_mut_slice()).ok() } -impl Lockout { - pub fn new(slot: Slot) -> Self { - Self { - slot, - confirmation_count: 1, - } - } - - // The number of slots for which this vote is locked - pub fn lockout(&self) -> u64 { - (INITIAL_LOCKOUT as u64).pow(self.confirmation_count) +fn check_update_vote_state_slots_are_valid( + vote_state: &VoteState, + vote_state_update: &mut VoteStateUpdate, + slot_hashes: &[(Slot, Hash)], +) -> Result<(), VoteError> { + if vote_state_update.lockouts.is_empty() { + return Err(VoteError::EmptySlots); } - // The last slot at which a vote is still locked out. Validators should not - // vote on a slot in another fork which is less than or equal to this slot - // to avoid having their stake slashed. 
- pub fn last_locked_out_slot(&self) -> Slot { - self.slot + self.lockout() - } - - pub fn is_locked_out_at_slot(&self, slot: Slot) -> bool { - self.last_locked_out_slot() >= slot - } -} - -#[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Copy, Clone, AbiExample)] -pub struct CompactLockout { - // Offset to the next vote, 0 if this is the last vote in the tower - pub offset: T, - // Confirmation count, guarenteed to be < 32 - pub confirmation_count: u8, -} - -impl CompactLockout { - pub fn new(offset: T) -> Self { - Self { - offset, - confirmation_count: 1, + // If the vote state update is not new enough, return + if let Some(last_vote_slot) = vote_state.votes.back().map(|lockout| lockout.slot) { + if vote_state_update.lockouts.back().unwrap().slot <= last_vote_slot { + return Err(VoteError::VoteTooOld); } } - // The number of slots for which this vote is locked - pub fn lockout(&self) -> u64 { - (INITIAL_LOCKOUT as u64).pow(self.confirmation_count.into()) - } -} + let last_vote_state_update_slot = vote_state_update + .lockouts + .back() + .expect("must be nonempty, checked above") + .slot; -#[frozen_abi(digest = "BctadFJjUKbvPJzr6TszbX6rBfQUNSRKpKKngkzgXgeY")] -#[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Clone, AbiExample)] -pub struct VoteStateUpdate { - /// The proposed tower - pub lockouts: VecDeque, - /// The proposed root - pub root: Option, - /// signature of the bank's state at the last slot - pub hash: Hash, - /// processing timestamp of last slot - pub timestamp: Option, -} + if slot_hashes.is_empty() { + return Err(VoteError::SlotsMismatch); + } + let earliest_slot_hash_in_history = slot_hashes.last().unwrap().0; -impl From> for VoteStateUpdate { - fn from(recent_slots: Vec<(Slot, u32)>) -> Self { - let lockouts: VecDeque = recent_slots - .into_iter() - .map(|(slot, confirmation_count)| Lockout { - slot, - confirmation_count, - }) - .collect(); - Self { - lockouts, - root: None, - hash: Hash::default(), - timestamp: None, - } + // Check if the proposed vote is too old to be in the SlotHash history + if last_vote_state_update_slot < earliest_slot_hash_in_history { + // If this is the last slot in the vote update, it must be in SlotHashes, + // otherwise we have no way of confirming if the hash matches + return Err(VoteError::VoteTooOld); } -} -impl VoteStateUpdate { - pub fn new(lockouts: VecDeque, root: Option, hash: Hash) -> Self { - Self { - lockouts, - root, - hash, - timestamp: None, + // Check if the proposed root is too old + if let Some(new_proposed_root) = vote_state_update.root { + // If the root is less than the earliest slot hash in the history such that we + // cannot verify whether the slot was actually was on this fork, set the root + // to the current vote state root for safety. + if earliest_slot_hash_in_history > new_proposed_root { + vote_state_update.root = vote_state.root_slot; } } - pub fn slots(&self) -> Vec { - self.lockouts.iter().map(|lockout| lockout.slot).collect() - } -} + // index into the new proposed vote state's slots, starting with the root if it exists then + // we use this mutable root to fold the root slot case into this loop for performance + let mut check_root = vote_state_update.root; + let mut vote_state_update_index = 0; -/// Ignoring overhead, in a full `VoteStateUpdate` the lockouts take up -/// 31 * (64 + 32) = 2976 bits. -/// -/// In this schema we separate the votes into 3 separate lockout structures -/// and store offsets rather than slot number, allowing us to use smaller fields. 
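
Worked example of the lockout rule from the relocated Lockout impl above: each additional confirmation doubles the lockout, since INITIAL_LOCKOUT is 2.

    const INITIAL_LOCKOUT: u64 = 2;

    fn lockout(confirmation_count: u32) -> u64 {
        INITIAL_LOCKOUT.pow(confirmation_count)
    }

    fn last_locked_out_slot(slot: u64, confirmation_count: u32) -> u64 {
        slot + lockout(confirmation_count)
    }

    fn main() {
        assert_eq!(lockout(1), 2); // a fresh vote
        assert_eq!(lockout(5), 32);
        // A vote on slot 100 with 3 confirmations is locked out through slot 108:
        assert_eq!(last_locked_out_slot(100, 3), 108);
    }
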
-/// -/// In a full `CompactVoteStateUpdate` the lockouts take up -/// 64 + (32 + 8) * 16 + (16 + 8) * 8 + (8 + 8) * 6 = 992 bits -/// allowing us to greatly reduce block size. -#[frozen_abi(digest = "C8ZrdXqqF3VxgsoCxnqNaYJggV6rr9PC3rtmVudJFmqG")] -#[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Clone, AbiExample)] -pub struct CompactVoteStateUpdate { - /// The proposed root, u64::MAX if there is no root - pub root: Slot, - /// The offset from the root (or 0 if no root) to the first vote - pub root_to_first_vote_offset: u64, - /// Part of the proposed tower, votes with confirmation_count > 15 - #[serde(with = "short_vec")] - pub lockouts_32: Vec>, - /// Part of the proposed tower, votes with 15 >= confirmation_count > 7 - #[serde(with = "short_vec")] - pub lockouts_16: Vec>, - /// Part of the proposed tower, votes with 7 >= confirmation_count - #[serde(with = "short_vec")] - pub lockouts_8: Vec>, - - /// Signature of the bank's state at the last slot - pub hash: Hash, - /// Processing timestamp of last slot - pub timestamp: Option, -} + // index into the slot_hashes, starting at the oldest known + // slot hash + let mut slot_hashes_index = slot_hashes.len(); -impl From> for CompactVoteStateUpdate { - fn from(recent_slots: Vec<(Slot, u32)>) -> Self { - let lockouts: VecDeque = recent_slots - .into_iter() - .map(|(slot, confirmation_count)| Lockout { - slot, - confirmation_count, - }) - .collect(); - Self::new(lockouts, None, Hash::default()) - } -} + let mut vote_state_update_indexes_to_filter = vec![]; -impl CompactVoteStateUpdate { - pub fn new(mut lockouts: VecDeque, root: Option, hash: Hash) -> Self { - if lockouts.is_empty() { - return Self::default(); - } - let mut cur_slot = root.unwrap_or(0u64); - let mut cur_confirmation_count = 0; - let offset = lockouts - .pop_front() - .map( - |Lockout { - slot, - confirmation_count, - }| { - assert!(confirmation_count < 32); - - let offset = slot - cur_slot; - cur_slot = slot; - cur_confirmation_count = confirmation_count; - offset - }, - ) - .expect("Tower should not be empty"); - let mut lockouts_32 = Vec::new(); - let mut lockouts_16 = Vec::new(); - let mut lockouts_8 = Vec::new(); - - for Lockout { - slot, - confirmation_count, - } in lockouts + // Note: + // + // 1) `vote_state_update.lockouts` is sorted from oldest/smallest vote to newest/largest + // vote, due to the way votes are applied to the vote state (newest votes + // pushed to the back). 
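
The size arithmetic in the removed CompactVoteStateUpdate docs checks out: a full tower is 31 lockouts of (64 + 32) bits, while the compact form stores a u64 offset to the first vote plus the remaining 30 entries split across three width classes.

    fn main() {
        let full_bits = 31 * (64 + 32);
        assert_eq!(full_bits, 2976);

        let compact_bits = 64 + (32 + 8) * 16 + (16 + 8) * 8 + (8 + 8) * 6;
        assert_eq!(compact_bits, 992); // roughly a 3x reduction
    }
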
+    //
+    // 2) Conversely, `slot_hashes` is sorted from newest/largest vote to
+    // the oldest/smallest vote
+    //
+    // Unlike for vote updates, vote state updates here can't only check votes older than the last vote
+    // because we have to ensure that every slot is actually part of the history, not just the most
+    // recent ones
+    while vote_state_update_index < vote_state_update.lockouts.len() && slot_hashes_index > 0 {
+        let proposed_vote_slot = if let Some(root) = check_root {
+            root
+        } else {
+            vote_state_update.lockouts[vote_state_update_index].slot
+        };
+        if check_root.is_none()
+            && vote_state_update_index > 0
+            && proposed_vote_slot <= vote_state_update.lockouts[vote_state_update_index - 1].slot
+        {
+            return Err(VoteError::SlotsNotOrdered);
+        }
+        let ancestor_slot = slot_hashes[slot_hashes_index - 1].0;
+
+        // Find if this slot in the proposed vote state exists in the SlotHashes history
+        // to confirm if it was a valid ancestor on this fork
+        match proposed_vote_slot.cmp(&ancestor_slot) {
+            Ordering::Less => {
+                if slot_hashes_index == slot_hashes.len() {
+                    // The vote slot does not exist in the SlotHashes history because it's too old,
+                    // i.e. older than the oldest slot in the history.
+                    assert!(proposed_vote_slot < earliest_slot_hash_in_history);
+                    if !vote_state.contains_slot(proposed_vote_slot) && check_root.is_none() {
+                        // If the vote slot is both:
+                        // 1) Too old
+                        // 2) Doesn't already exist in vote state
+                        //
+                        // Then filter it out
+                        vote_state_update_indexes_to_filter.push(vote_state_update_index);
+                    }
+                    if check_root.is_some() {
+                        // If the vote state update has a root < earliest_slot_hash_in_history
+                        // then we use the current root. The only case where this can happen
+                        // is if the current root itself is not in slot hashes.
+                        assert!(vote_state.root_slot.unwrap() < earliest_slot_hash_in_history);
+                        check_root = None;
+                    } else {
+                        vote_state_update_index += 1;
+                    }
+                    continue;
+                } else {
+                    // If the vote slot is new enough to be in the slot history,
+                    // but is not part of the slot history, then it must belong to another fork,
+                    // which means this vote state update is invalid.
+                    if check_root.is_some() {
+                        return Err(VoteError::RootOnDifferentFork);
+                    } else {
+                        return Err(VoteError::SlotsMismatch);
+                    }
+                }
+            }
+            Ordering::Greater => {
+                // Decrement `slot_hashes_index` to find newer slots in the SlotHashes history
+                slot_hashes_index -= 1;
+                continue;
+            }
+            Ordering::Equal => {
+                // Once the slot in `vote_state_update.lockouts` is found, bump to the next slot
+                // in `vote_state_update.lockouts` and continue. If we were checking the root,
+                // start checking the vote state instead.
+ if check_root.is_some() { + check_root = None; + } else { + vote_state_update_index += 1; + slot_hashes_index -= 1; + } } - - cur_slot = slot; - cur_confirmation_count = confirmation_count; - } - // Last vote should be at the top of tower, so we don't have to explicitly store it - assert!(cur_confirmation_count == 1); - Self { - root: root.unwrap_or(u64::MAX), - root_to_first_vote_offset: offset, - lockouts_32, - lockouts_16, - lockouts_8, - hash, - timestamp: None, } } - pub fn root(&self) -> Option { - if self.root == u64::MAX { - None - } else { - Some(self.root) - } + if vote_state_update_index != vote_state_update.lockouts.len() { + // The last vote slot in the update did not exist in SlotHashes + return Err(VoteError::SlotsMismatch); } - pub fn slots(&self) -> Vec { - std::iter::once(self.root_to_first_vote_offset) - .chain(self.lockouts_32.iter().map(|lockout| lockout.offset.into())) - .chain(self.lockouts_16.iter().map(|lockout| lockout.offset.into())) - .chain(self.lockouts_8.iter().map(|lockout| lockout.offset.into())) - .scan(self.root().unwrap_or(0), |prev_slot, offset| { - let slot = *prev_slot + offset; - *prev_slot = slot; - Some(slot) - }) - .collect() - } -} - -impl From for VoteStateUpdate { - fn from(vote_state_update: CompactVoteStateUpdate) -> Self { - let lockouts = vote_state_update - .lockouts_32 - .iter() - .map(|lockout| (lockout.offset.into(), lockout.confirmation_count)) - .chain( - vote_state_update - .lockouts_16 - .iter() - .map(|lockout| (lockout.offset.into(), lockout.confirmation_count)), - ) - .chain( - vote_state_update - .lockouts_8 - .iter() - .map(|lockout| (lockout.offset.into(), lockout.confirmation_count)), - ) - .chain( - // To pick up the last element - std::iter::once((0, 1)), - ) - .scan( - vote_state_update.root().unwrap_or(0) + vote_state_update.root_to_first_vote_offset, - |slot, (offset, confirmation_count): (u64, u8)| { - let cur_slot = *slot; - *slot += offset; - Some(Lockout { - slot: cur_slot, - confirmation_count: confirmation_count.into(), - }) - }, - ) - .collect(); - Self { - lockouts, - root: vote_state_update.root(), - hash: vote_state_update.hash, - timestamp: vote_state_update.timestamp, - } - } -} + // This assertion must be true at this point because we can assume by now: + // 1) vote_state_update_index == vote_state_update.lockouts.len() + // 2) last_vote_state_update_slot >= earliest_slot_hash_in_history + // 3) !vote_state_update.lockouts.is_empty() + // + // 1) implies that during the last iteration of the loop above, + // `vote_state_update_index` was equal to `vote_state_update.lockouts.len() - 1`, + // and was then incremented to `vote_state_update.lockouts.len()`. + // This means in that last loop iteration, + // `proposed_vote_slot == + // vote_state_update.lockouts[vote_state_update.lockouts.len() - 1] == + // last_vote_state_update_slot`. + // + // Then we know the last comparison `match proposed_vote_slot.cmp(&ancestor_slot)` + // is equivalent to `match last_vote_state_update_slot.cmp(&ancestor_slot)`. The result + // of this match to increment `vote_state_update_index` must have been either: + // + // 1) The Equal case ran, in which case then we know this assertion must be true + // 2) The Less case ran, and more specifically the case + // `proposed_vote_slot < earliest_slot_hash_in_history` ran, which is equivalent to + // `last_vote_state_update_slot < earliest_slot_hash_in_history`, but this is impossible + // due to assumption 3) above. 
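
The loop above is a two-pointer walk: the proposed slots ascend while slot_hashes is stored newest first, so the code scans the history from its oldest entry. A reduced model that keeps only the ordering logic (the too-old filtering and root handling are dropped):

    use std::cmp::Ordering;

    fn all_on_fork(proposed: &[u64], slot_hashes_newest_first: &[u64]) -> bool {
        let mut i = 0; // into proposed, ascending
        let mut j = slot_hashes_newest_first.len(); // walks history oldest -> newest
        while i < proposed.len() && j > 0 {
            match proposed[i].cmp(&slot_hashes_newest_first[j - 1]) {
                Ordering::Less => return false, // older than the oldest remaining ancestor
                Ordering::Greater => j -= 1,    // advance to newer history entries
                Ordering::Equal => {
                    i += 1;
                    j -= 1;
                }
            }
        }
        i == proposed.len() // every proposed slot was matched
    }

    fn main() {
        assert!(all_on_fork(&[3, 5], &[6, 5, 4, 3]));
        assert!(!all_on_fork(&[3, 7], &[6, 5, 4, 3])); // 7 is not in the history
    }
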
+ assert_eq!( + last_vote_state_update_slot, + slot_hashes[slot_hashes_index].0 + ); -impl From for CompactVoteStateUpdate { - fn from(vote_state_update: VoteStateUpdate) -> Self { - CompactVoteStateUpdate::new( - vote_state_update.lockouts, - vote_state_update.root, + if slot_hashes[slot_hashes_index].1 != vote_state_update.hash { + // This means the newest vote in the slot has a match that + // doesn't match the expected hash for that slot on this + // fork + warn!( + "{} dropped vote {:?} failed to match hash {} {}", + vote_state.node_pubkey, + vote_state_update, vote_state_update.hash, - ) - } -} - -#[derive(Default, Serialize, Deserialize, Debug, PartialEq, Eq, Clone, Copy)] -pub struct VoteInit { - pub node_pubkey: Pubkey, - pub authorized_voter: Pubkey, - pub authorized_withdrawer: Pubkey, - pub commission: u8, -} - -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone, Copy)] -pub enum VoteAuthorize { - Voter, - Withdrawer, -} - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] -pub struct VoteAuthorizeWithSeedArgs { - pub authorization_type: VoteAuthorize, - pub current_authority_derived_key_owner: Pubkey, - pub current_authority_derived_key_seed: String, - pub new_authority: Pubkey, -} - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] -pub struct VoteAuthorizeCheckedWithSeedArgs { - pub authorization_type: VoteAuthorize, - pub current_authority_derived_key_owner: Pubkey, - pub current_authority_derived_key_seed: String, -} - -#[derive(Debug, Default, Serialize, Deserialize, PartialEq, Eq, Clone, AbiExample)] -pub struct BlockTimestamp { - pub slot: Slot, - pub timestamp: UnixTimestamp, -} - -// this is how many epochs a voter can be remembered for slashing -const MAX_ITEMS: usize = 32; - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, AbiExample)] -pub struct CircBuf { - buf: [I; MAX_ITEMS], - /// next pointer - idx: usize, - is_empty: bool, -} - -impl Default for CircBuf { - fn default() -> Self { - Self { - buf: [I::default(); MAX_ITEMS], - idx: MAX_ITEMS - 1, - is_empty: true, - } - } -} - -impl CircBuf { - pub fn append(&mut self, item: I) { - // remember prior delegate and when we switched, to support later slashing - self.idx += 1; - self.idx %= MAX_ITEMS; - - self.buf[self.idx] = item; - self.is_empty = false; - } - - pub fn buf(&self) -> &[I; MAX_ITEMS] { - &self.buf + slot_hashes[slot_hashes_index].1 + ); + inc_new_counter_info!("dropped-vote-hash", 1); + return Err(VoteError::SlotHashMismatch); } - pub fn last(&self) -> Option<&I> { - if !self.is_empty { - Some(&self.buf[self.idx]) + // Filter out the irrelevant votes + let mut vote_state_update_index = 0; + let mut filter_votes_index = 0; + vote_state_update.lockouts.retain(|_lockout| { + let should_retain = if filter_votes_index == vote_state_update_indexes_to_filter.len() { + true + } else if vote_state_update_index == vote_state_update_indexes_to_filter[filter_votes_index] + { + filter_votes_index += 1; + false } else { - None - } - } -} - -#[frozen_abi(digest = "331ZmXrmsUcwbKhzR3C1UEU6uNwZr48ExE54JDKGWA4w")] -#[derive(Debug, Default, Serialize, Deserialize, PartialEq, Eq, Clone, AbiExample)] -pub struct VoteState { - /// the node that votes in this account - pub node_pubkey: Pubkey, - - /// the signer for withdrawals - pub authorized_withdrawer: Pubkey, - /// percentage (0-100) that represents what part of a rewards - /// payout should be given to this VoteAccount - pub commission: u8, - - pub votes: VecDeque, - - // This usually the last Lockout which was 
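
The retain() above filters lockouts by position rather than by value; that works because Vec and VecDeque visit elements in order, so a running index stays in sync with the element being tested. The same pattern in isolation:

    fn filter_indexes(values: &mut Vec<u64>, indexes_to_filter: &[usize]) {
        // indexes_to_filter must be ascending, as in the vote-state code above
        let mut current_index = 0;
        let mut filter_index = 0;
        values.retain(|_value| {
            let should_retain = if filter_index < indexes_to_filter.len()
                && current_index == indexes_to_filter[filter_index]
            {
                filter_index += 1;
                false
            } else {
                true
            };
            current_index += 1;
            should_retain
        });
    }

    fn main() {
        let mut values = vec![10, 20, 30, 40];
        filter_indexes(&mut values, &[1, 3]);
        assert_eq!(values, vec![10, 30]);
    }
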
popped from self.votes. - // However, it can be arbitrary slot, when being used inside Tower - pub root_slot: Option, - - /// the signer for vote transactions - authorized_voters: AuthorizedVoters, - - /// history of prior authorized voters and the epochs for which - /// they were set, the bottom end of the range is inclusive, - /// the top of the range is exclusive - prior_voters: CircBuf<(Pubkey, Epoch, Epoch)>, + true + }; - /// history of how many credits earned by the end of each epoch - /// each tuple is (Epoch, credits, prev_credits) - pub epoch_credits: Vec<(Epoch, u64, u64)>, + vote_state_update_index += 1; + should_retain + }); - /// most recent timestamp submitted with a vote - pub last_timestamp: BlockTimestamp, + Ok(()) } -impl VoteState { - pub fn new(vote_init: &VoteInit, clock: &Clock) -> Self { - Self { - node_pubkey: vote_init.node_pubkey, - authorized_voters: AuthorizedVoters::new(clock.epoch, vote_init.authorized_voter), - authorized_withdrawer: vote_init.authorized_withdrawer, - commission: vote_init.commission, - ..VoteState::default() +fn check_slots_are_valid( + vote_state: &VoteState, + vote_slots: &[Slot], + vote_hash: &Hash, + slot_hashes: &[(Slot, Hash)], +) -> Result<(), VoteError> { + // index into the vote's slots, starting at the oldest + // slot + let mut i = 0; + + // index into the slot_hashes, starting at the oldest known + // slot hash + let mut j = slot_hashes.len(); + + // Note: + // + // 1) `vote_slots` is sorted from oldest/smallest vote to newest/largest + // vote, due to the way votes are applied to the vote state (newest votes + // pushed to the back). + // + // 2) Conversely, `slot_hashes` is sorted from newest/largest vote to + // the oldest/smallest vote + while i < vote_slots.len() && j > 0 { + // 1) increment `i` to find the smallest slot `s` in `vote_slots` + // where `s` >= `last_voted_slot` + if vote_state + .last_voted_slot() + .map_or(false, |last_voted_slot| vote_slots[i] <= last_voted_slot) + { + i += 1; + continue; } - } - pub fn get_authorized_voter(&self, epoch: Epoch) -> Option { - self.authorized_voters.get_authorized_voter(epoch) - } - - pub fn authorized_voters(&self) -> &AuthorizedVoters { - &self.authorized_voters - } - - pub fn prior_voters(&mut self) -> &CircBuf<(Pubkey, Epoch, Epoch)> { - &self.prior_voters - } - - pub fn get_rent_exempt_reserve(rent: &Rent) -> u64 { - rent.minimum_balance(VoteState::size_of()) - } - - /// Upper limit on the size of the Vote State - /// when votes.len() is MAX_LOCKOUT_HISTORY. - pub const fn size_of() -> usize { - 3731 // see test_vote_state_size_of. 
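
The fixed VoteState::size_of() above (3731 bytes) is what feeds get_rent_exempt_reserve. Plugging in the default rent parameters gives the familiar vote-account reserve; the rent constants below are assumed defaults for illustration, not values taken from this patch:

    fn main() {
        const LAMPORTS_PER_BYTE_YEAR: u64 = 3480; // assumed default
        const EXEMPTION_THRESHOLD_YEARS: u64 = 2; // assumed default
        const ACCOUNT_STORAGE_OVERHEAD: u64 = 128;
        const VOTE_STATE_SIZE: u64 = 3731; // VoteState::size_of()

        let reserve = (ACCOUNT_STORAGE_OVERHEAD + VOTE_STATE_SIZE)
            * LAMPORTS_PER_BYTE_YEAR
            * EXEMPTION_THRESHOLD_YEARS;
        assert_eq!(reserve, 26_858_640); // ~0.0269 SOL
    }
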
- } - - // utility function, used by Stakes, tests - pub fn from(account: &T) -> Option { - Self::deserialize(account.data()).ok() - } - - // utility function, used by Stakes, tests - pub fn to(versioned: &VoteStateVersions, account: &mut T) -> Option<()> { - Self::serialize(versioned, account.data_as_mut_slice()).ok() - } - - pub fn deserialize(input: &[u8]) -> Result { - deserialize::(input) - .map(|versioned| versioned.convert_to_current()) - .map_err(|_| InstructionError::InvalidAccountData) - } - - pub fn serialize( - versioned: &VoteStateVersions, - output: &mut [u8], - ) -> Result<(), InstructionError> { - serialize_into(output, versioned).map_err(|err| match *err { - ErrorKind::SizeLimit => InstructionError::AccountDataTooSmall, - _ => InstructionError::GenericError, - }) - } - - pub fn credits_from(account: &T) -> Option { - Self::from(account).map(|state| state.credits()) - } - - /// returns commission split as (voter_portion, staker_portion, was_split) tuple - /// - /// if commission calculation is 100% one way or other, - /// indicate with false for was_split - pub fn commission_split(&self, on: u64) -> (u64, u64, bool) { - match self.commission.min(100) { - 0 => (0, on, false), - 100 => (on, 0, false), - split => { - let on = u128::from(on); - // Calculate mine and theirs independently and symmetrically instead of - // using the remainder of the other to treat them strictly equally. - // This is also to cancel the rewarding if either of the parties - // should receive only fractional lamports, resulting in not being rewarded at all. - // Thus, note that we intentionally discard any residual fractional lamports. - let mine = on * u128::from(split) / 100u128; - let theirs = on * u128::from(100 - split) / 100u128; - - (mine as u64, theirs as u64, true) - } + // 2) Find the hash for this slot `s`. + if vote_slots[i] != slot_hashes[j - 1].0 { + // Decrement `j` to find newer slots + j -= 1; + continue; } - } - /// Returns if the vote state contains a slot `candidate_slot` - pub fn contains_slot(&self, candidate_slot: Slot) -> bool { - self.votes - .binary_search_by(|lockout| lockout.slot.cmp(&candidate_slot)) - .is_ok() + // 3) Once the hash for `s` is found, bump `s` to the next slot + // in `vote_slots` and continue. + i += 1; + j -= 1; } - #[cfg(test)] - fn get_max_sized_vote_state() -> VoteState { - let mut authorized_voters = AuthorizedVoters::default(); - for i in 0..=MAX_LEADER_SCHEDULE_EPOCH_OFFSET { - authorized_voters.insert(i, solana_sdk::pubkey::new_rand()); - } - - VoteState { - votes: VecDeque::from(vec![Lockout::default(); MAX_LOCKOUT_HISTORY]), - root_slot: Some(std::u64::MAX), - epoch_credits: vec![(0, 0, 0); MAX_EPOCH_CREDITS_HISTORY], - authorized_voters, - ..Self::default() - } + if j == slot_hashes.len() { + // This means we never made it to steps 2) or 3) above, otherwise + // `j` would have been decremented at least once. 
This means + // there are not slots in `vote_slots` greater than `last_voted_slot` + debug!( + "{} dropped vote slots {:?}, vote hash: {:?} slot hashes:SlotHash {:?}, too old ", + vote_state.node_pubkey, vote_slots, vote_hash, slot_hashes + ); + return Err(VoteError::VoteTooOld); } - - fn check_update_vote_state_slots_are_valid( - &self, - vote_state_update: &mut VoteStateUpdate, - slot_hashes: &[(Slot, Hash)], - ) -> Result<(), VoteError> { - if vote_state_update.lockouts.is_empty() { - return Err(VoteError::EmptySlots); - } - - // If the vote state update is not new enough, return - if let Some(last_vote_slot) = self.votes.back().map(|lockout| lockout.slot) { - if vote_state_update.lockouts.back().unwrap().slot <= last_vote_slot { - return Err(VoteError::VoteTooOld); - } - } - - let last_vote_state_update_slot = vote_state_update - .lockouts - .back() - .expect("must be nonempty, checked above") - .slot; - - if slot_hashes.is_empty() { - return Err(VoteError::SlotsMismatch); - } - let earliest_slot_hash_in_history = slot_hashes.last().unwrap().0; - - // Check if the proposed vote is too old to be in the SlotHash history - if last_vote_state_update_slot < earliest_slot_hash_in_history { - // If this is the last slot in the vote update, it must be in SlotHashes, - // otherwise we have no way of confirming if the hash matches - return Err(VoteError::VoteTooOld); - } - - // Check if the proposed root is too old - if let Some(new_proposed_root) = vote_state_update.root { - // If the root is less than the earliest slot hash in the history such that we - // cannot verify whether the slot was actually was on this fork, set the root - // to the current vote state root for safety. - if earliest_slot_hash_in_history > new_proposed_root { - vote_state_update.root = self.root_slot; - } - } - - // index into the new proposed vote state's slots, starting with the root if it exists then - // we use this mutable root to fold the root slot case into this loop for performance - let mut check_root = vote_state_update.root; - let mut vote_state_update_index = 0; - - // index into the slot_hashes, starting at the oldest known - // slot hash - let mut slot_hashes_index = slot_hashes.len(); - - let mut vote_state_update_indexes_to_filter = vec![]; - - // Note: - // - // 1) `vote_state_update.lockouts` is sorted from oldest/smallest vote to newest/largest - // vote, due to the way votes are applied to the vote state (newest votes - // pushed to the back). 
- // - // 2) Conversely, `slot_hashes` is sorted from newest/largest vote to - // the oldest/smallest vote - // - // Unlike for vote updates, vote state updates here can't only check votes older than the last vote - // because have to ensure that every slot is actually part of the history, not just the most - // recent ones - while vote_state_update_index < vote_state_update.lockouts.len() && slot_hashes_index > 0 { - let proposed_vote_slot = if let Some(root) = check_root { - root - } else { - vote_state_update.lockouts[vote_state_update_index].slot - }; - if check_root.is_none() - && vote_state_update_index > 0 - && proposed_vote_slot - <= vote_state_update.lockouts[vote_state_update_index - 1].slot - { - return Err(VoteError::SlotsNotOrdered); - } - let ancestor_slot = slot_hashes[slot_hashes_index - 1].0; - - // Find if this slot in the proposed vote state exists in the SlotHashes history - // to confirm if it was a valid ancestor on this fork - match proposed_vote_slot.cmp(&ancestor_slot) { - Ordering::Less => { - if slot_hashes_index == slot_hashes.len() { - // The vote slot does not exist in the SlotHashes history because it's too old, - // i.e. older than the oldest slot in the history. - assert!(proposed_vote_slot < earliest_slot_hash_in_history); - if !self.contains_slot(proposed_vote_slot) && check_root.is_none() { - // If the vote slot is both: - // 1) Too old - // 2) Doesn't already exist in vote state - // - // Then filter it out - vote_state_update_indexes_to_filter.push(vote_state_update_index); - } - if check_root.is_some() { - // If the vote state update has a root < earliest_slot_hash_in_history - // then we use the current root. The only case where this can happen - // is if the current root itself is not in slot hashes. - assert!(self.root_slot.unwrap() < earliest_slot_hash_in_history); - check_root = None; - } else { - vote_state_update_index += 1; - } - continue; - } else { - // If the vote slot is new enough to be in the slot history, - // but is not part of the slot history, then it must belong to another fork, - // which means this vote state update is invalid. - if check_root.is_some() { - return Err(VoteError::RootOnDifferentFork); - } else { - return Err(VoteError::SlotsMismatch); - } - } - } - Ordering::Greater => { - // Decrement `slot_hashes_index` to find newer slots in the SlotHashes history - slot_hashes_index -= 1; - continue; - } - Ordering::Equal => { - // Once the slot in `vote_state_update.lockouts` is found, bump to the next slot - // in `vote_state_update.lockouts` and continue. If we were checking the root, - // start checking the vote state instead. - if check_root.is_some() { - check_root = None; - } else { - vote_state_update_index += 1; - slot_hashes_index -= 1; - } - } - } - } - - if vote_state_update_index != vote_state_update.lockouts.len() { - // The last vote slot in the update did not exist in SlotHashes - return Err(VoteError::SlotsMismatch); - } - - // This assertion must be true at this point because we can assume by now: - // 1) vote_state_update_index == vote_state_update.lockouts.len() - // 2) last_vote_state_update_slot >= earliest_slot_hash_in_history - // 3) !vote_state_update.lockouts.is_empty() - // - // 1) implies that during the last iteration of the loop above, - // `vote_state_update_index` was equal to `vote_state_update.lockouts.len() - 1`, - // and was then incremented to `vote_state_update.lockouts.len()`. 
- // This means in that last loop iteration, - // `proposed_vote_slot == - // vote_state_update.lockouts[vote_state_update.lockouts.len() - 1] == - // last_vote_state_update_slot`. - // - // Then we know the last comparison `match proposed_vote_slot.cmp(&ancestor_slot)` - // is equivalent to `match last_vote_state_update_slot.cmp(&ancestor_slot)`. The result - // of this match to increment `vote_state_update_index` must have been either: - // - // 1) The Equal case ran, in which case then we know this assertion must be true - // 2) The Less case ran, and more specifically the case - // `proposed_vote_slot < earliest_slot_hash_in_history` ran, which is equivalent to - // `last_vote_state_update_slot < earliest_slot_hash_in_history`, but this is impossible - // due to assumption 3) above. - assert_eq!( - last_vote_state_update_slot, - slot_hashes[slot_hashes_index].0 + if i != vote_slots.len() { + // This means there existed some slot for which we couldn't find + // a matching slot hash in step 2) + info!( + "{} dropped vote slots {:?} failed to match slot hashes: {:?}", + vote_state.node_pubkey, vote_slots, slot_hashes, ); - - if slot_hashes[slot_hashes_index].1 != vote_state_update.hash { - // This means the newest vote in the slot has a match that - // doesn't match the expected hash for that slot on this - // fork - warn!( - "{} dropped vote {:?} failed to match hash {} {}", - self.node_pubkey, - vote_state_update, - vote_state_update.hash, - slot_hashes[slot_hashes_index].1 - ); - inc_new_counter_info!("dropped-vote-hash", 1); - return Err(VoteError::SlotHashMismatch); - } - - // Filter out the irrelevant votes - let mut vote_state_update_index = 0; - let mut filter_votes_index = 0; - vote_state_update.lockouts.retain(|_lockout| { - let should_retain = if filter_votes_index == vote_state_update_indexes_to_filter.len() { - true - } else if vote_state_update_index - == vote_state_update_indexes_to_filter[filter_votes_index] - { - filter_votes_index += 1; - false - } else { - true - }; - - vote_state_update_index += 1; - should_retain - }); - - Ok(()) + inc_new_counter_info!("dropped-vote-slot", 1); + return Err(VoteError::SlotsMismatch); } + if &slot_hashes[j].1 != vote_hash { + // This means the newest slot in the `vote_slots` has a match that + // doesn't match the expected hash for that slot on this + // fork + warn!( + "{} dropped vote slots {:?} failed to match hash {} {}", + vote_state.node_pubkey, vote_slots, vote_hash, slot_hashes[j].1 + ); + inc_new_counter_info!("dropped-vote-hash", 1); + return Err(VoteError::SlotHashMismatch); + } + Ok(()) +} - fn check_slots_are_valid( - &self, - vote_slots: &[Slot], - vote_hash: &Hash, - slot_hashes: &[(Slot, Hash)], - ) -> Result<(), VoteError> { - // index into the vote's slots, starting at the oldest - // slot - let mut i = 0; - - // index into the slot_hashes, starting at the oldest known - // slot hash - let mut j = slot_hashes.len(); - - // Note: - // - // 1) `vote_slots` is sorted from oldest/smallest vote to newest/largest - // vote, due to the way votes are applied to the vote state (newest votes - // pushed to the back). 
- //
- // 2) Conversely, `slot_hashes` is sorted from newest/largest vote to
- // the oldest/smallest vote
- while i < vote_slots.len() && j > 0 {
- // 1) increment `i` to find the smallest slot `s` in `vote_slots`
- // where `s` >= `last_voted_slot`
- if self
- .last_voted_slot()
- .map_or(false, |last_voted_slot| vote_slots[i] <= last_voted_slot)
- {
- i += 1;
- continue;
- }
-
- // 2) Find the hash for this slot `s`.
- if vote_slots[i] != slot_hashes[j - 1].0 {
- // Decrement `j` to find newer slots
- j -= 1;
- continue;
+// Ensure `check_update_vote_state_slots_are_valid(&)` runs on the slots in `new_state`
+// before `process_new_vote_state()` is called
+
+// This function should guarantee the following about `new_state`:
+//
+// 1) It's well ordered, i.e. the slots are sorted from smallest to largest,
+// and the confirmations sorted from largest to smallest.
+// 2) Confirmations `c` on any vote slot satisfy `0 < c <= MAX_LOCKOUT_HISTORY`
+// 3) Lockouts are not expired by consecutive votes, i.e. for every consecutive
+// `v_i`, `v_{i + 1}` satisfy `v_i.last_locked_out_slot() >= v_{i + 1}`.
+
+// We also guarantee that compared to the current vote state, `new_state`
+// introduces no rollback. This means:
+//
+// 1) The last slot in `new_state` is always greater than any slot in the
+// current vote state.
+//
+// 2) From 1), this means that for every vote `s` in the current state:
+// a) If there exists an `s'` in `new_state` where `s.slot == s'.slot`, then
+// we must guarantee `s.confirmations <= s'.confirmations`
+//
+// b) If there does not exist any such `s'` in `new_state`, then there exists
+// some `t` that is the smallest vote in `new_state` where `t.slot > s.slot`.
+// `t` must have expired/popped off s', so it must be guaranteed that
+// `s.last_locked_out_slot() < t`.
+
+// Note these two above checks do not guarantee that the vote state being submitted
+// is a vote state that could have been created by iteratively building a tower
+// by processing one vote at a time. For instance, the tower:
+//
+// { slot 0, confirmations: 31 }
+// { slot 1, confirmations: 30 }
+//
+// is a legal tower that could be submitted on top of a previously empty tower. However,
+// there is no way to create this tower from the iterative process, because slot 1 would
+// have to have at least one other slot on top of it, even if the first 30 votes were all
+// popped off.
+pub fn process_new_vote_state(
+ vote_state: &mut VoteState,
+ new_state: VecDeque<Lockout>,
+ new_root: Option<Slot>,
+ timestamp: Option<UnixTimestamp>,
+ epoch: Epoch,
+ feature_set: Option<&FeatureSet>,
+) -> Result<(), VoteError> {
+ assert!(!new_state.is_empty());
+ if new_state.len() > MAX_LOCKOUT_HISTORY {
+ return Err(VoteError::TooManyVotes);
+ }
+
+ match (new_root, vote_state.root_slot) {
+ (Some(new_root), Some(current_root)) => {
+ if new_root < current_root {
+ return Err(VoteError::RootRollBack);
}
-
- // 3) Once the hash for `s` is found, bump `s` to the next slot
- // in `vote_slots` and continue.
- i += 1;
- j -= 1;
}
-
- if j == slot_hashes.len() {
- // This means we never made it to steps 2) or 3) above, otherwise
- // `j` would have been decremented at least once.
This means - // there are not slots in `vote_slots` greater than `last_voted_slot` - debug!( - "{} dropped vote slots {:?}, vote hash: {:?} slot hashes:SlotHash {:?}, too old ", - self.node_pubkey, vote_slots, vote_hash, slot_hashes - ); - return Err(VoteError::VoteTooOld); + (None, Some(_)) => { + return Err(VoteError::RootRollBack); } - if i != vote_slots.len() { - // This means there existed some slot for which we couldn't find - // a matching slot hash in step 2) - info!( - "{} dropped vote slots {:?} failed to match slot hashes: {:?}", - self.node_pubkey, vote_slots, slot_hashes, - ); - inc_new_counter_info!("dropped-vote-slot", 1); - return Err(VoteError::SlotsMismatch); - } - if &slot_hashes[j].1 != vote_hash { - // This means the newest slot in the `vote_slots` has a match that - // doesn't match the expected hash for that slot on this - // fork - warn!( - "{} dropped vote slots {:?} failed to match hash {} {}", - self.node_pubkey, vote_slots, vote_hash, slot_hashes[j].1 - ); - inc_new_counter_info!("dropped-vote-hash", 1); - return Err(VoteError::SlotHashMismatch); - } - Ok(()) + _ => (), } - //`Ensure check_update_vote_state_slots_are_valid()` runs on the slots in `new_state` - // before `process_new_vote_state()` is called + let mut previous_vote: Option<&Lockout> = None; - // This function should guarantee the following about `new_state`: - // - // 1) It's well ordered, i.e. the slots are sorted from smallest to largest, - // and the confirmations sorted from largest to smallest. - // 2) Confirmations `c` on any vote slot satisfy `0 < c <= MAX_LOCKOUT_HISTORY` - // 3) Lockouts are not expired by consecutive votes, i.e. for every consecutive - // `v_i`, `v_{i + 1}` satisfy `v_i.last_locked_out_slot() >= v_{i + 1}`. - - // We also guarantee that compared to the current vote state, `new_state` - // introduces no rollback. This means: - // - // 1) The last slot in `new_state` is always greater than any slot in the - // current vote state. - // - // 2) From 1), this means that for every vote `s` in the current state: - // a) If there exists an `s'` in `new_state` where `s.slot == s'.slot`, then - // we must guarantee `s.confirmations <= s'.confirmations` - // - // b) If there does not exist any such `s'` in `new_state`, then there exists - // some `t` that is the smallest vote in `new_state` where `t.slot > s.slot`. - // `t` must have expired/popped off s', so it must be guaranteed that - // `s.last_locked_out_slot() < t`. - - // Note these two above checks do not guarantee that the vote state being submitted - // is a vote state that could have been created by iteratively building a tower - // by processing one vote at a time. For instance, the tower: - // - // { slot 0, confirmations: 31 } - // { slot 1, confirmations: 30 } - // - // is a legal tower that could be submitted on top of a previously empty tower. However, - // there is no way to create this tower from the iterative process, because slot 1 would - // have to have at least one other slot on top of it, even if the first 30 votes were all - // popped off. 
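Reviewer note: the comment block above packs several invariants into prose. Below is a small, self-contained sketch of the structural checks (strictly increasing slots, strictly decreasing confirmations, bounded confirmation counts, unexpired consecutive lockouts) and of the "legal but non-iterative" tower it describes. The `Lockout` stand-in, `validate_new_state`, and the error strings are illustrative only, not part of this patch.

```rust
// Sketch only: simplified stand-ins for the real `Lockout` and `VoteError`.
const MAX_LOCKOUT_HISTORY: usize = 31;
const INITIAL_LOCKOUT: u64 = 2;

struct Lockout {
    slot: u64,
    confirmation_count: u32,
}

impl Lockout {
    // A vote locks out 2^confirmation_count slots past the voted slot.
    fn last_locked_out_slot(&self) -> u64 {
        self.slot + INITIAL_LOCKOUT.pow(self.confirmation_count)
    }
}

// Mirrors the structural checks described above; reports the first violation.
fn validate_new_state(new_state: &[Lockout]) -> Result<(), &'static str> {
    if new_state.is_empty() || new_state.len() > MAX_LOCKOUT_HISTORY {
        return Err("empty tower or too many votes");
    }
    for vote in new_state {
        if vote.confirmation_count == 0 || vote.confirmation_count > MAX_LOCKOUT_HISTORY as u32 {
            return Err("confirmation count out of range");
        }
    }
    for pair in new_state.windows(2) {
        let (prev, next) = (&pair[0], &pair[1]);
        if prev.slot >= next.slot {
            return Err("slots not strictly increasing");
        }
        if prev.confirmation_count <= next.confirmation_count {
            return Err("confirmations not strictly decreasing");
        }
        if next.slot > prev.last_locked_out_slot() {
            return Err("next vote should have expired the previous lockout");
        }
    }
    Ok(())
}

fn main() {
    // The tower from the comment above: structurally legal, yet impossible
    // to build by applying one vote at a time.
    let tower = [
        Lockout { slot: 0, confirmation_count: 31 },
        Lockout { slot: 1, confirmation_count: 30 },
    ];
    assert!(validate_new_state(&tower).is_ok());
}
```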
- pub fn process_new_vote_state( - &mut self, - new_state: VecDeque, - new_root: Option, - timestamp: Option, - epoch: Epoch, - feature_set: Option<&FeatureSet>, - ) -> Result<(), VoteError> { - assert!(!new_state.is_empty()); - if new_state.len() > MAX_LOCKOUT_HISTORY { - return Err(VoteError::TooManyVotes); - } - - match (new_root, self.root_slot) { - (Some(new_root), Some(current_root)) => { - if new_root < current_root { - return Err(VoteError::RootRollBack); - } - } - (None, Some(_)) => { - return Err(VoteError::RootRollBack); - } - _ => (), - } - - let mut previous_vote: Option<&Lockout> = None; - - // Check that all the votes in the new proposed state are: - // 1) Strictly sorted from oldest to newest vote - // 2) The confirmations are strictly decreasing - // 3) Not zero confirmation votes - for vote in &new_state { - if vote.confirmation_count == 0 { - return Err(VoteError::ZeroConfirmations); - } else if vote.confirmation_count > MAX_LOCKOUT_HISTORY as u32 { - return Err(VoteError::ConfirmationTooLarge); - } else if let Some(new_root) = new_root { - if vote.slot <= new_root + // Check that all the votes in the new proposed state are: + // 1) Strictly sorted from oldest to newest vote + // 2) The confirmations are strictly decreasing + // 3) Not zero confirmation votes + for vote in &new_state { + if vote.confirmation_count == 0 { + return Err(VoteError::ZeroConfirmations); + } else if vote.confirmation_count > MAX_LOCKOUT_HISTORY as u32 { + return Err(VoteError::ConfirmationTooLarge); + } else if let Some(new_root) = new_root { + if vote.slot <= new_root && // This check is necessary because // https://github.com/ryoqun/solana/blob/df55bfb46af039cbc597cd60042d49b9d90b5961/core/src/consensus.rs#L120 // always sets a root for even empty towers, which is then hard unwrapped here // https://github.com/ryoqun/solana/blob/df55bfb46af039cbc597cd60042d49b9d90b5961/core/src/consensus.rs#L776 new_root != Slot::default() - { - return Err(VoteError::SlotSmallerThanRoot); - } + { + return Err(VoteError::SlotSmallerThanRoot); } + } - if let Some(previous_vote) = previous_vote { - if previous_vote.slot >= vote.slot { - return Err(VoteError::SlotsNotOrdered); - } else if previous_vote.confirmation_count <= vote.confirmation_count { - return Err(VoteError::ConfirmationsNotOrdered); - } else if vote.slot > previous_vote.last_locked_out_slot() { - return Err(VoteError::NewVoteStateLockoutMismatch); - } + if let Some(previous_vote) = previous_vote { + if previous_vote.slot >= vote.slot { + return Err(VoteError::SlotsNotOrdered); + } else if previous_vote.confirmation_count <= vote.confirmation_count { + return Err(VoteError::ConfirmationsNotOrdered); + } else if vote.slot > previous_vote.last_locked_out_slot() { + return Err(VoteError::NewVoteStateLockoutMismatch); } - previous_vote = Some(vote); } + previous_vote = Some(vote); + } + // Find the first vote in the current vote state for a slot greater + // than the new proposed root + let mut current_vote_state_index = 0; + let mut new_vote_state_index = 0; + + // Count the number of slots at and before the new root within the current vote state lockouts. Start with 1 + // for the new root. The purpose of this is to know how many slots were rooted by this state update: + // - The new root was rooted + // - As were any slots that were in the current state but are not in the new state. 
The only slots which + // can be in this set are those oldest slots in the current vote state that are not present in the + // new vote state; these have been "popped off the back" of the tower and thus represent finalized slots + let mut finalized_slot_count = 1_u64; + + for current_vote in &vote_state.votes { // Find the first vote in the current vote state for a slot greater // than the new proposed root - let mut current_vote_state_index = 0; - let mut new_vote_state_index = 0; - - // Count the number of slots at and before the new root within the current vote state lockouts. Start with 1 - // for the new root. The purpose of this is to know how many slots were rooted by this state update: - // - The new root was rooted - // - As were any slots that were in the current state but are not in the new state. The only slots which - // can be in this set are those oldest slots in the current vote state that are not present in the - // new vote state; these have been "popped off the back" of the tower and thus represent finalized slots - let mut finalized_slot_count = 1_u64; - - for current_vote in &self.votes { - // Find the first vote in the current vote state for a slot greater - // than the new proposed root - if let Some(new_root) = new_root { - if current_vote.slot <= new_root { - current_vote_state_index += 1; - if current_vote.slot != new_root { - finalized_slot_count += 1; - } - continue; + if let Some(new_root) = new_root { + if current_vote.slot <= new_root { + current_vote_state_index += 1; + if current_vote.slot != new_root { + finalized_slot_count += 1; } + continue; } - - break; } - // All the votes in our current vote state that are missing from the new vote state - // must have been expired by later votes. Check that the lockouts match this assumption. - while current_vote_state_index < self.votes.len() && new_vote_state_index < new_state.len() - { - let current_vote = &self.votes[current_vote_state_index]; - let new_vote = &new_state[new_vote_state_index]; - - // If the current slot is less than the new proposed slot, then the - // new slot must have popped off the old slot, so check that the - // lockouts are corrects. - match current_vote.slot.cmp(&new_vote.slot) { - Ordering::Less => { - if current_vote.last_locked_out_slot() >= new_vote.slot { - return Err(VoteError::LockoutConflict); - } - current_vote_state_index += 1; - } - Ordering::Equal => { - // The new vote state should never have less lockout than - // the previous vote state for the same slot - if new_vote.confirmation_count < current_vote.confirmation_count { - return Err(VoteError::ConfirmationRollBack); - } + break; + } - current_vote_state_index += 1; - new_vote_state_index += 1; - } - Ordering::Greater => { - new_vote_state_index += 1; + // All the votes in our current vote state that are missing from the new vote state + // must have been expired by later votes. Check that the lockouts match this assumption. + while current_vote_state_index < vote_state.votes.len() + && new_vote_state_index < new_state.len() + { + let current_vote = &vote_state.votes[current_vote_state_index]; + let new_vote = &new_state[new_vote_state_index]; + + // If the current slot is less than the new proposed slot, then the + // new slot must have popped off the old slot, so check that the + // lockouts are corrects. 
+ match current_vote.slot.cmp(&new_vote.slot) { + Ordering::Less => { + if current_vote.last_locked_out_slot() >= new_vote.slot { + return Err(VoteError::LockoutConflict); } + current_vote_state_index += 1; } - } + Ordering::Equal => { + // The new vote state should never have less lockout than + // the previous vote state for the same slot + if new_vote.confirmation_count < current_vote.confirmation_count { + return Err(VoteError::ConfirmationRollBack); + } - // `new_vote_state` passed all the checks, finalize the change by rewriting - // our state. - if self.root_slot != new_root { - // Award vote credits based on the number of slots that were voted on and have reached finality - if feature_set - .map(|feature_set| { - feature_set.is_active(&feature_set::vote_state_update_credit_per_dequeue::id()) - }) - .unwrap_or(false) - { - // For each finalized slot, there was one voted-on slot in the new vote state that was responsible for - // finalizing it. Each of those votes is awarded 1 credit. - self.increment_credits(epoch, finalized_slot_count); - } else { - self.increment_credits(epoch, 1); + current_vote_state_index += 1; + new_vote_state_index += 1; } - } - if let Some(timestamp) = timestamp { - let last_slot = new_state.back().unwrap().slot; - self.process_timestamp(last_slot, timestamp)?; - } - self.root_slot = new_root; - self.votes = new_state; - Ok(()) - } - - pub fn process_vote( - &mut self, - vote: &Vote, - slot_hashes: &[SlotHash], - epoch: Epoch, - feature_set: Option<&FeatureSet>, - ) -> Result<(), VoteError> { - if vote.slots.is_empty() { - return Err(VoteError::EmptySlots); - } - let filtered_vote_slots = feature_set.and_then(|feature_set| { - if feature_set.is_active(&filter_votes_outside_slot_hashes::id()) { - let earliest_slot_in_history = - slot_hashes.last().map(|(slot, _hash)| *slot).unwrap_or(0); - Some( - vote.slots - .iter() - .filter(|slot| **slot >= earliest_slot_in_history) - .cloned() - .collect::>(), - ) - } else { - None + Ordering::Greater => { + new_vote_state_index += 1; } - }); - - let vote_slots = filtered_vote_slots.as_ref().unwrap_or(&vote.slots); - if vote_slots.is_empty() { - return Err(VoteError::VotesTooOldAllFiltered); } - - self.check_slots_are_valid(vote_slots, &vote.hash, slot_hashes)?; - - vote_slots - .iter() - .for_each(|s| self.process_next_vote_slot(*s, epoch)); - Ok(()) } - pub fn process_next_vote_slot(&mut self, next_vote_slot: Slot, epoch: Epoch) { - // Ignore votes for slots earlier than we already have votes for - if self - .last_voted_slot() - .map_or(false, |last_voted_slot| next_vote_slot <= last_voted_slot) + // `new_vote_state` passed all the checks, finalize the change by rewriting + // our state. 
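Reviewer note: the credit award that follows uses the `finalized_slot_count` computed in the dequeue loop above (the new root, plus every older vote the update popped off the back). A hedged sketch of that arithmetic with plain slot numbers; the helper name and the closure are illustrative, not this patch's API.

```rust
// Count the slots an update finalizes: the new root itself plus every
// current vote at or below it that is dropped by the update.
fn finalized_slot_count(current_votes: &[u64], new_root: u64) -> u64 {
    let mut count = 1; // the new root
    for &slot in current_votes {
        if slot > new_root {
            break; // the rest of the tower survives the update
        }
        if slot != new_root {
            count += 1; // an older vote popped off the back, now final
        }
    }
    count
}

fn main() {
    // Tower [10, 11, 12, 20] with new root 12: slots 10 and 11 are dequeued
    // together with the root, so three slots reach finality at once.
    let finalized = finalized_slot_count(&[10, 11, 12, 20], 12);
    assert_eq!(finalized, 3);

    // With `vote_state_update_credit_per_dequeue` active, the update earns
    // one credit per finalized slot; otherwise a root change earns a flat 1.
    let credits = |per_dequeue: bool| if per_dequeue { finalized } else { 1 };
    assert_eq!((credits(true), credits(false)), (3, 1));
}
```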
+ if vote_state.root_slot != new_root { + // Award vote credits based on the number of slots that were voted on and have reached finality + if feature_set + .map(|feature_set| { + feature_set.is_active(&feature_set::vote_state_update_credit_per_dequeue::id()) + }) + .unwrap_or(false) { - return; - } - - let vote = Lockout::new(next_vote_slot); - - self.pop_expired_votes(next_vote_slot); - - // Once the stack is full, pop the oldest lockout and distribute rewards - if self.votes.len() == MAX_LOCKOUT_HISTORY { - let vote = self.votes.pop_front().unwrap(); - self.root_slot = Some(vote.slot); - - self.increment_credits(epoch, 1); - } - self.votes.push_back(vote); - self.double_lockouts(); - } - - /// increment credits, record credits for last epoch if new epoch - pub fn increment_credits(&mut self, epoch: Epoch, credits: u64) { - // increment credits, record by epoch - - // never seen a credit - if self.epoch_credits.is_empty() { - self.epoch_credits.push((epoch, 0, 0)); - } else if epoch != self.epoch_credits.last().unwrap().0 { - let (_, credits, prev_credits) = *self.epoch_credits.last().unwrap(); - - if credits != prev_credits { - // if credits were earned previous epoch - // append entry at end of list for the new epoch - self.epoch_credits.push((epoch, credits, credits)); - } else { - // else just move the current epoch - self.epoch_credits.last_mut().unwrap().0 = epoch; - } - - // Remove too old epoch_credits - if self.epoch_credits.len() > MAX_EPOCH_CREDITS_HISTORY { - self.epoch_credits.remove(0); - } - } - - self.epoch_credits.last_mut().unwrap().1 += credits; - } - - /// "unchecked" functions used by tests and Tower - pub fn process_vote_unchecked(&mut self, vote: Vote) { - let slot_hashes: Vec<_> = vote.slots.iter().rev().map(|x| (*x, vote.hash)).collect(); - let _ignored = self.process_vote(&vote, &slot_hashes, self.current_epoch(), None); - } - - #[cfg(test)] - pub fn process_slot_votes_unchecked(&mut self, slots: &[Slot]) { - for slot in slots { - self.process_slot_vote_unchecked(*slot); - } - } - - pub fn process_slot_vote_unchecked(&mut self, slot: Slot) { - self.process_vote_unchecked(Vote::new(vec![slot], Hash::default())); - } - - pub fn nth_recent_vote(&self, position: usize) -> Option<&Lockout> { - if position < self.votes.len() { - let pos = self.votes.len() - 1 - position; - self.votes.get(pos) + // For each finalized slot, there was one voted-on slot in the new vote state that was responsible for + // finalizing it. Each of those votes is awarded 1 credit. + vote_state.increment_credits(epoch, finalized_slot_count); } else { - None + vote_state.increment_credits(epoch, 1); } } - - pub fn last_lockout(&self) -> Option<&Lockout> { - self.votes.back() - } - - pub fn last_voted_slot(&self) -> Option { - self.last_lockout().map(|v| v.slot) - } - - // Upto MAX_LOCKOUT_HISTORY many recent unexpired - // vote slots pushed onto the stack. - pub fn tower(&self) -> Vec { - self.votes.iter().map(|v| v.slot).collect() - } - - pub fn current_epoch(&self) -> Epoch { - if self.epoch_credits.is_empty() { - 0 - } else { - self.epoch_credits.last().unwrap().0 - } + if let Some(timestamp) = timestamp { + let last_slot = new_state.back().unwrap().slot; + vote_state.process_timestamp(last_slot, timestamp)?; } + vote_state.root_slot = new_root; + vote_state.votes = new_state; + Ok(()) +} - /// Number of "credits" owed to this account from the mining pool. Submit this - /// VoteState to the Rewards program to trade credits for lamports. 
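Reviewer note: the accessors removed below read the `(epoch, credits, prev_credits)` history that `increment_credits` (whose old method form is deleted above) maintains; credits earned within an epoch are `credits - prev_credits`. A standalone sketch of that bookkeeping, assuming the upstream cap of 64 entries for `MAX_EPOCH_CREDITS_HISTORY`:

```rust
// Each entry is (epoch, cumulative_credits, credits_at_epoch_start).
const MAX_EPOCH_CREDITS_HISTORY: usize = 64; // assumed upstream value

fn increment_credits(epoch_credits: &mut Vec<(u64, u64, u64)>, epoch: u64, credits: u64) {
    if epoch_credits.is_empty() {
        // Never seen a credit: open the first entry.
        epoch_credits.push((epoch, 0, 0));
    } else if epoch != epoch_credits.last().unwrap().0 {
        let (_, current, previous) = *epoch_credits.last().unwrap();
        if current != previous {
            // Credits were earned last epoch: start a new entry.
            epoch_credits.push((epoch, current, current));
        } else {
            // Nothing earned: just relabel the open entry with the new epoch.
            epoch_credits.last_mut().unwrap().0 = epoch;
        }
        // Drop history that is too old.
        if epoch_credits.len() > MAX_EPOCH_CREDITS_HISTORY {
            epoch_credits.remove(0);
        }
    }
    epoch_credits.last_mut().unwrap().1 += credits;
}

fn main() {
    let mut epoch_credits = Vec::new();
    increment_credits(&mut epoch_credits, 0, 2);
    increment_credits(&mut epoch_credits, 1, 3);
    // Epoch 0 earned 2 (2 - 0), epoch 1 earned 3 (5 - 2).
    assert_eq!(epoch_credits, vec![(0, 2, 0), (1, 5, 2)]);
}
```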
- pub fn credits(&self) -> u64 {
- if self.epoch_credits.is_empty() {
- 0
+pub fn process_vote(
+ vote_state: &mut VoteState,
+ vote: &Vote,
+ slot_hashes: &[SlotHash],
+ epoch: Epoch,
+ feature_set: Option<&FeatureSet>,
+) -> Result<(), VoteError> {
+ if vote.slots.is_empty() {
+ return Err(VoteError::EmptySlots);
+ }
+ let filtered_vote_slots = feature_set.and_then(|feature_set| {
+ if feature_set.is_active(&filter_votes_outside_slot_hashes::id()) {
+ let earliest_slot_in_history =
+ slot_hashes.last().map(|(slot, _hash)| *slot).unwrap_or(0);
+ Some(
+ vote.slots
+ .iter()
+ .filter(|slot| **slot >= earliest_slot_in_history)
+ .cloned()
+ .collect::<Vec<Slot>>(),
+ )
} else {
- self.epoch_credits.last().unwrap().1
- }
- }
-
- /// Number of "credits" owed to this account from the mining pool on a per-epoch basis,
- /// starting from credits observed.
- /// Each tuple of (Epoch, u64, u64) is read as (epoch, credits, prev_credits), where
- /// credits for each epoch is credits - prev_credits; while redundant this makes
- /// calculating rewards over partial epochs nice and simple
- pub fn epoch_credits(&self) -> &Vec<(Epoch, u64, u64)> {
- &self.epoch_credits
- }
-
- fn set_new_authorized_voter<F>(
- &mut self,
- authorized_pubkey: &Pubkey,
- current_epoch: Epoch,
- target_epoch: Epoch,
- verify: F,
- ) -> Result<(), InstructionError>
- where
- F: Fn(Pubkey) -> Result<(), InstructionError>,
- {
- let epoch_authorized_voter = self.get_and_update_authorized_voter(current_epoch)?;
- verify(epoch_authorized_voter)?;
-
- // The offset in slots `n` on which the target_epoch
- // (default value `DEFAULT_LEADER_SCHEDULE_SLOT_OFFSET`) is
- // calculated is the number of slots available from the
- // first slot `S` of an epoch in which to set a new voter for
- // the epoch at `S` + `n`
- if self.authorized_voters.contains(target_epoch) {
- return Err(VoteError::TooSoonToReauthorize.into());
- }
-
- // Get the latest authorized_voter
- let (latest_epoch, latest_authorized_pubkey) = self
- .authorized_voters
- .last()
- .ok_or(InstructionError::InvalidAccountData)?;
-
- // If we're not setting the same pubkey as authorized pubkey again,
- // then update the list of prior voters to mark the expiration
- // of the old authorized pubkey
- if latest_authorized_pubkey != authorized_pubkey {
- // Update the epoch ranges of authorized pubkeys that will be expired
- let epoch_of_last_authorized_switch =
- self.prior_voters.last().map(|range| range.2).unwrap_or(0);
-
- // target_epoch must:
- // 1) Be monotonically increasing due to the clock always
- // moving forward
- // 2) not be equal to latest epoch otherwise this
- // function would have returned TooSoonToReauthorize error
- // above
- assert!(target_epoch > *latest_epoch);
-
- // Commit the new state
- self.prior_voters.append((
- *latest_authorized_pubkey,
- epoch_of_last_authorized_switch,
- target_epoch,
- ));
+ None
}
+ });

- self.authorized_voters
- .insert(target_epoch, *authorized_pubkey);
-
- Ok(())
+ let vote_slots = filtered_vote_slots.as_ref().unwrap_or(&vote.slots);
+ if vote_slots.is_empty() {
+ return Err(VoteError::VotesTooOldAllFiltered);
}

- fn get_and_update_authorized_voter(
- &mut self,
- current_epoch: Epoch,
- ) -> Result<Pubkey, InstructionError> {
- let pubkey = self
- .authorized_voters
- .get_and_cache_authorized_voter_for_epoch(current_epoch)
- .ok_or(InstructionError::InvalidAccountData)?;
- self.authorized_voters
- .purge_authorized_voters(current_epoch);
- Ok(pubkey)
- }
-
- // Pop all recent votes that are not locked out at the next vote slot.
This - // allows validators to switch forks once their votes for another fork have - // expired. This also allows validators continue voting on recent blocks in - // the same fork without increasing lockouts. - fn pop_expired_votes(&mut self, next_vote_slot: Slot) { - while let Some(vote) = self.last_lockout() { - if !vote.is_locked_out_at_slot(next_vote_slot) { - self.votes.pop_back(); - } else { - break; - } - } - } + check_slots_are_valid(vote_state, vote_slots, &vote.hash, slot_hashes)?; - fn double_lockouts(&mut self) { - let stack_depth = self.votes.len(); - for (i, v) in self.votes.iter_mut().enumerate() { - // Don't increase the lockout for this vote until we get more confirmations - // than the max number of confirmations this vote has seen - if stack_depth > i + v.confirmation_count as usize { - v.confirmation_count += 1; - } - } - } + vote_slots + .iter() + .for_each(|s| vote_state.process_next_vote_slot(*s, epoch)); + Ok(()) +} - pub fn process_timestamp( - &mut self, - slot: Slot, - timestamp: UnixTimestamp, - ) -> Result<(), VoteError> { - if (slot < self.last_timestamp.slot || timestamp < self.last_timestamp.timestamp) - || (slot == self.last_timestamp.slot - && BlockTimestamp { slot, timestamp } != self.last_timestamp - && self.last_timestamp.slot != 0) - { - return Err(VoteError::TimestampTooOld); - } - self.last_timestamp = BlockTimestamp { slot, timestamp }; - Ok(()) - } +/// "unchecked" functions used by tests and Tower +pub fn process_vote_unchecked(vote_state: &mut VoteState, vote: Vote) { + let slot_hashes: Vec<_> = vote.slots.iter().rev().map(|x| (*x, vote.hash)).collect(); + let _ignored = process_vote( + vote_state, + &vote, + &slot_hashes, + vote_state.current_epoch(), + None, + ); +} - pub fn is_correct_size_and_initialized(data: &[u8]) -> bool { - const VERSION_OFFSET: usize = 4; - data.len() == VoteState::size_of() - && data[VERSION_OFFSET..VERSION_OFFSET + DEFAULT_PRIOR_VOTERS_OFFSET] - != [0; DEFAULT_PRIOR_VOTERS_OFFSET] +#[cfg(test)] +pub fn process_slot_votes_unchecked(vote_state: &mut VoteState, slots: &[Slot]) { + for slot in slots { + process_slot_vote_unchecked(vote_state, *slot); } } +pub fn process_slot_vote_unchecked(vote_state: &mut VoteState, slot: Slot) { + process_vote_unchecked(vote_state, Vote::new(vec![slot], Hash::default())); +} + /// Authorize the given pubkey to withdraw or sign votes. 
This may be called multiple times, /// but will implicitly withdraw authorization from the previously authorized /// key @@ -1642,7 +889,7 @@ fn verify_and_get_vote_state( Ok(vote_state) } -pub fn process_vote( +pub fn process_vote_with_account( vote_account: &mut BorrowedAccount, slot_hashes: &[SlotHash], clock: &Clock, @@ -1652,7 +899,13 @@ pub fn process_vote( ) -> Result<(), InstructionError> { let mut vote_state = verify_and_get_vote_state(vote_account, clock, signers)?; - vote_state.process_vote(vote, slot_hashes, clock.epoch, Some(feature_set))?; + process_vote( + &mut vote_state, + vote, + slot_hashes, + clock.epoch, + Some(feature_set), + )?; if let Some(timestamp) = vote.timestamp { vote.slots .iter() @@ -1672,8 +925,9 @@ pub fn process_vote_state_update( feature_set: &FeatureSet, ) -> Result<(), InstructionError> { let mut vote_state = verify_and_get_vote_state(vote_account, clock, signers)?; - vote_state.check_update_vote_state_slots_are_valid(&mut vote_state_update, slot_hashes)?; - vote_state.process_new_vote_state( + check_update_vote_state_slots_are_valid(&vote_state, &mut vote_state_update, slot_hashes)?; + process_new_vote_state( + &mut vote_state, vote_state_update.lockouts, vote_state_update.root, vote_state_update.timestamp, @@ -1703,7 +957,7 @@ pub fn create_account_with_authorized( ); let versioned = VoteStateVersions::new_current(vote_state); - VoteState::to(&versioned, &mut vote_account).unwrap(); + VoteState::serialize(&versioned, vote_account.data_as_mut_slice()).unwrap(); vote_account } @@ -1729,18 +983,16 @@ mod tests { const MAX_RECENT_VOTES: usize = 16; - impl VoteState { - pub fn new_for_test(auth_pubkey: &Pubkey) -> Self { - Self::new( - &VoteInit { - node_pubkey: solana_sdk::pubkey::new_rand(), - authorized_voter: *auth_pubkey, - authorized_withdrawer: *auth_pubkey, - commission: 0, - }, - &Clock::default(), - ) - } + fn vote_state_new_for_test(auth_pubkey: &Pubkey) -> VoteState { + VoteState::new( + &VoteInit { + node_pubkey: solana_sdk::pubkey::new_rand(), + authorized_voter: *auth_pubkey, + authorized_withdrawer: *auth_pubkey, + commission: 0, + }, + &Clock::default(), + ) } fn create_test_account() -> (Pubkey, RefCell) { @@ -1758,38 +1010,6 @@ mod tests { ) } - #[test] - fn test_vote_serialize() { - let mut buffer: Vec = vec![0; VoteState::size_of()]; - let mut vote_state = VoteState::default(); - vote_state - .votes - .resize(MAX_LOCKOUT_HISTORY, Lockout::default()); - vote_state.root_slot = Some(1); - let versioned = VoteStateVersions::new_current(vote_state); - assert!(VoteState::serialize(&versioned, &mut buffer[0..4]).is_err()); - VoteState::serialize(&versioned, &mut buffer).unwrap(); - assert_eq!( - VoteState::deserialize(&buffer).unwrap(), - versioned.convert_to_current() - ); - } - - #[test] - fn test_voter_registration() { - let (vote_pubkey, vote_account) = create_test_account(); - - let vote_state: VoteState = StateMut::::state(&*vote_account.borrow()) - .unwrap() - .convert_to_current(); - assert_eq!(vote_state.authorized_voters.len(), 1); - assert_eq!( - *vote_state.authorized_voters.first().unwrap().1, - vote_pubkey - ); - assert!(vote_state.votes.is_empty()); - } - #[test] fn test_vote_lockout() { let (_vote_pubkey, vote_account) = create_test_account(); @@ -1800,7 +1020,7 @@ mod tests { .convert_to_current(); for i in 0..(MAX_LOCKOUT_HISTORY + 1) { - vote_state.process_slot_vote_unchecked((INITIAL_LOCKOUT as usize * i) as u64); + process_slot_vote_unchecked(&mut vote_state, (INITIAL_LOCKOUT as usize * i) as u64); } // The last vote 
should have been popped b/c it reached a depth of MAX_LOCKOUT_HISTORY @@ -1812,13 +1032,13 @@ mod tests { // the root_slot should change to the // second vote let top_vote = vote_state.votes.front().unwrap().slot; - vote_state - .process_slot_vote_unchecked(vote_state.last_lockout().unwrap().last_locked_out_slot()); + let slot = vote_state.last_lockout().unwrap().last_locked_out_slot(); + process_slot_vote_unchecked(&mut vote_state, slot); assert_eq!(Some(top_vote), vote_state.root_slot); // Expire everything except the first vote - vote_state - .process_slot_vote_unchecked(vote_state.votes.front().unwrap().last_locked_out_slot()); + let slot = vote_state.votes.front().unwrap().last_locked_out_slot(); + process_slot_vote_unchecked(&mut vote_state, slot); // First vote and new vote are both stored for a total of 2 votes assert_eq!(vote_state.votes.len(), 2); } @@ -1826,10 +1046,10 @@ mod tests { #[test] fn test_vote_double_lockout_after_expiration() { let voter_pubkey = solana_sdk::pubkey::new_rand(); - let mut vote_state = VoteState::new_for_test(&voter_pubkey); + let mut vote_state = vote_state_new_for_test(&voter_pubkey); for i in 0..3 { - vote_state.process_slot_vote_unchecked(i as u64); + process_slot_vote_unchecked(&mut vote_state, i as u64); } check_lockouts(&vote_state); @@ -1837,34 +1057,34 @@ mod tests { // Expire the third vote (which was a vote for slot 2). The height of the // vote stack is unchanged, so none of the previous votes should have // doubled in lockout - vote_state.process_slot_vote_unchecked((2 + INITIAL_LOCKOUT + 1) as u64); + process_slot_vote_unchecked(&mut vote_state, (2 + INITIAL_LOCKOUT + 1) as u64); check_lockouts(&vote_state); // Vote again, this time the vote stack depth increases, so the votes should // double for everybody - vote_state.process_slot_vote_unchecked((2 + INITIAL_LOCKOUT + 2) as u64); + process_slot_vote_unchecked(&mut vote_state, (2 + INITIAL_LOCKOUT + 2) as u64); check_lockouts(&vote_state); // Vote again, this time the vote stack depth increases, so the votes should // double for everybody - vote_state.process_slot_vote_unchecked((2 + INITIAL_LOCKOUT + 3) as u64); + process_slot_vote_unchecked(&mut vote_state, (2 + INITIAL_LOCKOUT + 3) as u64); check_lockouts(&vote_state); } #[test] fn test_expire_multiple_votes() { let voter_pubkey = solana_sdk::pubkey::new_rand(); - let mut vote_state = VoteState::new_for_test(&voter_pubkey); + let mut vote_state = vote_state_new_for_test(&voter_pubkey); for i in 0..3 { - vote_state.process_slot_vote_unchecked(i as u64); + process_slot_vote_unchecked(&mut vote_state, i as u64); } assert_eq!(vote_state.votes[0].confirmation_count, 3); // Expire the second and third votes let expire_slot = vote_state.votes[1].slot + vote_state.votes[1].lockout() + 1; - vote_state.process_slot_vote_unchecked(expire_slot); + process_slot_vote_unchecked(&mut vote_state, expire_slot); assert_eq!(vote_state.votes.len(), 2); // Check that the old votes expired @@ -1872,7 +1092,7 @@ mod tests { assert_eq!(vote_state.votes[1].slot, expire_slot); // Process one more vote - vote_state.process_slot_vote_unchecked(expire_slot + 1); + process_slot_vote_unchecked(&mut vote_state, expire_slot + 1); // Confirmation count for the older first vote should remain unchanged assert_eq!(vote_state.votes[0].confirmation_count, 3); @@ -1885,29 +1105,29 @@ mod tests { #[test] fn test_vote_credits() { let voter_pubkey = solana_sdk::pubkey::new_rand(); - let mut vote_state = VoteState::new_for_test(&voter_pubkey); + let mut vote_state = 
vote_state_new_for_test(&voter_pubkey); for i in 0..MAX_LOCKOUT_HISTORY { - vote_state.process_slot_vote_unchecked(i as u64); + process_slot_vote_unchecked(&mut vote_state, i as u64); } assert_eq!(vote_state.credits(), 0); - vote_state.process_slot_vote_unchecked(MAX_LOCKOUT_HISTORY as u64 + 1); + process_slot_vote_unchecked(&mut vote_state, MAX_LOCKOUT_HISTORY as u64 + 1); assert_eq!(vote_state.credits(), 1); - vote_state.process_slot_vote_unchecked(MAX_LOCKOUT_HISTORY as u64 + 2); + process_slot_vote_unchecked(&mut vote_state, MAX_LOCKOUT_HISTORY as u64 + 2); assert_eq!(vote_state.credits(), 2); - vote_state.process_slot_vote_unchecked(MAX_LOCKOUT_HISTORY as u64 + 3); + process_slot_vote_unchecked(&mut vote_state, MAX_LOCKOUT_HISTORY as u64 + 3); assert_eq!(vote_state.credits(), 3); } #[test] fn test_duplicate_vote() { let voter_pubkey = solana_sdk::pubkey::new_rand(); - let mut vote_state = VoteState::new_for_test(&voter_pubkey); - vote_state.process_slot_vote_unchecked(0); - vote_state.process_slot_vote_unchecked(1); - vote_state.process_slot_vote_unchecked(0); + let mut vote_state = vote_state_new_for_test(&voter_pubkey); + process_slot_vote_unchecked(&mut vote_state, 0); + process_slot_vote_unchecked(&mut vote_state, 1); + process_slot_vote_unchecked(&mut vote_state, 0); assert_eq!(vote_state.nth_recent_vote(0).unwrap().slot, 1); assert_eq!(vote_state.nth_recent_vote(1).unwrap().slot, 0); assert!(vote_state.nth_recent_vote(2).is_none()); @@ -1916,9 +1136,9 @@ mod tests { #[test] fn test_nth_recent_vote() { let voter_pubkey = solana_sdk::pubkey::new_rand(); - let mut vote_state = VoteState::new_for_test(&voter_pubkey); + let mut vote_state = vote_state_new_for_test(&voter_pubkey); for i in 0..MAX_LOCKOUT_HISTORY { - vote_state.process_slot_vote_unchecked(i as u64); + process_slot_vote_unchecked(&mut vote_state, i as u64); } for i in 0..(MAX_LOCKOUT_HISTORY - 1) { assert_eq!( @@ -1947,12 +1167,12 @@ mod tests { #[test] fn test_process_missed_votes() { let account_a = solana_sdk::pubkey::new_rand(); - let mut vote_state_a = VoteState::new_for_test(&account_a); + let mut vote_state_a = vote_state_new_for_test(&account_a); let account_b = solana_sdk::pubkey::new_rand(); - let mut vote_state_b = VoteState::new_for_test(&account_b); + let mut vote_state_b = vote_state_new_for_test(&account_b); // process some votes on account a - (0..5).for_each(|i| vote_state_a.process_slot_vote_unchecked(i as u64)); + (0..5).for_each(|i| process_slot_vote_unchecked(&mut vote_state_a, i as u64)); assert_ne!(recent_votes(&vote_state_a), recent_votes(&vote_state_b)); // as long as b has missed less than "NUM_RECENT" votes both accounts should be in sync @@ -1961,11 +1181,23 @@ mod tests { let slot_hashes: Vec<_> = vote.slots.iter().rev().map(|x| (*x, vote.hash)).collect(); assert_eq!( - vote_state_a.process_vote(&vote, &slot_hashes, 0, Some(&FeatureSet::default())), + process_vote( + &mut vote_state_a, + &vote, + &slot_hashes, + 0, + Some(&FeatureSet::default()) + ), Ok(()) ); assert_eq!( - vote_state_b.process_vote(&vote, &slot_hashes, 0, Some(&FeatureSet::default())), + process_vote( + &mut vote_state_b, + &vote, + &slot_hashes, + 0, + Some(&FeatureSet::default()) + ), Ok(()) ); assert_eq!(recent_votes(&vote_state_a), recent_votes(&vote_state_b)); @@ -1978,12 +1210,24 @@ mod tests { let vote = Vote::new(vec![0], Hash::default()); let slot_hashes: Vec<_> = vec![(0, vote.hash)]; assert_eq!( - vote_state.process_vote(&vote, &slot_hashes, 0, Some(&FeatureSet::default())), + process_vote( + &mut vote_state, + 
&vote, + &slot_hashes, + 0, + Some(&FeatureSet::default()) + ), Ok(()) ); let recent = recent_votes(&vote_state); assert_eq!( - vote_state.process_vote(&vote, &slot_hashes, 0, Some(&FeatureSet::default())), + process_vote( + &mut vote_state, + &vote, + &slot_hashes, + 0, + Some(&FeatureSet::default()) + ), Err(VoteError::VoteTooOld) ); assert_eq!(recent, recent_votes(&vote_state)); @@ -1995,7 +1239,7 @@ mod tests { let vote = Vote::new(vec![0], Hash::default()); assert_eq!( - vote_state.check_slots_are_valid(&vote.slots, &vote.hash, &[]), + check_slots_are_valid(&vote_state, &vote.slots, &vote.hash, &[]), Err(VoteError::VoteTooOld) ); } @@ -2007,7 +1251,7 @@ mod tests { let vote = Vote::new(vec![0], Hash::default()); let slot_hashes: Vec<_> = vec![(*vote.slots.last().unwrap(), vote.hash)]; assert_eq!( - vote_state.check_slots_are_valid(&vote.slots, &vote.hash, &slot_hashes), + check_slots_are_valid(&vote_state, &vote.slots, &vote.hash, &slot_hashes), Ok(()) ); } @@ -2019,7 +1263,7 @@ mod tests { let vote = Vote::new(vec![0], Hash::default()); let slot_hashes: Vec<_> = vec![(*vote.slots.last().unwrap(), hash(vote.hash.as_ref()))]; assert_eq!( - vote_state.check_slots_are_valid(&vote.slots, &vote.hash, &slot_hashes), + check_slots_are_valid(&vote_state, &vote.slots, &vote.hash, &slot_hashes), Err(VoteError::SlotHashMismatch) ); } @@ -2031,7 +1275,7 @@ mod tests { let vote = Vote::new(vec![1], Hash::default()); let slot_hashes: Vec<_> = vec![(0, vote.hash)]; assert_eq!( - vote_state.check_slots_are_valid(&vote.slots, &vote.hash, &slot_hashes), + check_slots_are_valid(&vote_state, &vote.slots, &vote.hash, &slot_hashes), Err(VoteError::SlotsMismatch) ); } @@ -2043,550 +1287,160 @@ mod tests { let vote = Vote::new(vec![0], Hash::default()); let slot_hashes: Vec<_> = vec![(*vote.slots.last().unwrap(), vote.hash)]; assert_eq!( - vote_state.process_vote(&vote, &slot_hashes, 0, Some(&FeatureSet::default())), + process_vote( + &mut vote_state, + &vote, + &slot_hashes, + 0, + Some(&FeatureSet::default()) + ), Ok(()) ); assert_eq!( - vote_state.check_slots_are_valid(&vote.slots, &vote.hash, &slot_hashes), + check_slots_are_valid(&vote_state, &vote.slots, &vote.hash, &slot_hashes), Err(VoteError::VoteTooOld) ); } #[test] fn test_check_slots_are_valid_next_vote() { - let mut vote_state = VoteState::default(); - - let vote = Vote::new(vec![0], Hash::default()); - let slot_hashes: Vec<_> = vec![(*vote.slots.last().unwrap(), vote.hash)]; - assert_eq!( - vote_state.process_vote(&vote, &slot_hashes, 0, Some(&FeatureSet::default())), - Ok(()) - ); - - let vote = Vote::new(vec![0, 1], Hash::default()); - let slot_hashes: Vec<_> = vec![(1, vote.hash), (0, vote.hash)]; - assert_eq!( - vote_state.check_slots_are_valid(&vote.slots, &vote.hash, &slot_hashes), - Ok(()) - ); - } - - #[test] - fn test_check_slots_are_valid_next_vote_only() { - let mut vote_state = VoteState::default(); - - let vote = Vote::new(vec![0], Hash::default()); - let slot_hashes: Vec<_> = vec![(*vote.slots.last().unwrap(), vote.hash)]; - assert_eq!( - vote_state.process_vote(&vote, &slot_hashes, 0, Some(&FeatureSet::default())), - Ok(()) - ); - - let vote = Vote::new(vec![1], Hash::default()); - let slot_hashes: Vec<_> = vec![(1, vote.hash), (0, vote.hash)]; - assert_eq!( - vote_state.check_slots_are_valid(&vote.slots, &vote.hash, &slot_hashes), - Ok(()) - ); - } - #[test] - fn test_process_vote_empty_slots() { - let mut vote_state = VoteState::default(); - - let vote = Vote::new(vec![], Hash::default()); - assert_eq!( - 
vote_state.process_vote(&vote, &[], 0, Some(&FeatureSet::default())), - Err(VoteError::EmptySlots) - ); - } - - #[test] - fn test_vote_state_commission_split() { - let vote_state = VoteState::default(); - - assert_eq!(vote_state.commission_split(1), (0, 1, false)); - - let mut vote_state = VoteState { - commission: std::u8::MAX, - ..VoteState::default() - }; - assert_eq!(vote_state.commission_split(1), (1, 0, false)); - - vote_state.commission = 99; - assert_eq!(vote_state.commission_split(10), (9, 0, true)); - - vote_state.commission = 1; - assert_eq!(vote_state.commission_split(10), (0, 9, true)); - - vote_state.commission = 50; - let (voter_portion, staker_portion, was_split) = vote_state.commission_split(10); - - assert_eq!((voter_portion, staker_portion, was_split), (5, 5, true)); - } - - #[test] - fn test_vote_state_epoch_credits() { - let mut vote_state = VoteState::default(); - - assert_eq!(vote_state.credits(), 0); - assert_eq!(vote_state.epoch_credits().clone(), vec![]); - - let mut expected = vec![]; - let mut credits = 0; - let epochs = (MAX_EPOCH_CREDITS_HISTORY + 2) as u64; - for epoch in 0..epochs { - for _j in 0..epoch { - vote_state.increment_credits(epoch, 1); - credits += 1; - } - expected.push((epoch, credits, credits - epoch)); - } - - while expected.len() > MAX_EPOCH_CREDITS_HISTORY { - expected.remove(0); - } - - assert_eq!(vote_state.credits(), credits); - assert_eq!(vote_state.epoch_credits().clone(), expected); - } - - #[test] - fn test_vote_state_epoch0_no_credits() { - let mut vote_state = VoteState::default(); - - assert_eq!(vote_state.epoch_credits().len(), 0); - vote_state.increment_credits(1, 1); - assert_eq!(vote_state.epoch_credits().len(), 1); - - vote_state.increment_credits(2, 1); - assert_eq!(vote_state.epoch_credits().len(), 2); - } - - #[test] - fn test_vote_state_increment_credits() { - let mut vote_state = VoteState::default(); - - let credits = (MAX_EPOCH_CREDITS_HISTORY + 2) as u64; - for i in 0..credits { - vote_state.increment_credits(i as u64, 1); - } - assert_eq!(vote_state.credits(), credits); - assert!(vote_state.epoch_credits().len() <= MAX_EPOCH_CREDITS_HISTORY); - } - - // Test vote credit updates after "one credit per slot" feature is enabled - #[test] - fn test_vote_state_update_increment_credits() { - // Create a new Votestate - let mut vote_state = VoteState::new(&VoteInit::default(), &Clock::default()); - - // Test data: a sequence of groups of votes to simulate having been cast, after each group a vote - // state update is compared to "normal" vote processing to ensure that credits are earned equally - let test_vote_groups: Vec> = vec![ - // Initial set of votes that don't dequeue any slots, so no credits earned - vec![1, 2, 3, 4, 5, 6, 7, 8], - vec![ - 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, - 30, 31, - ], - // Now a single vote which should result in the first root and first credit earned - vec![32], - // Now another vote, should earn one credit - vec![33], - // Two votes in sequence - vec![34, 35], - // 3 votes in sequence - vec![36, 37, 38], - // 30 votes in sequence - vec![ - 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 65, 66, 67, 68, - ], - // 31 votes in sequence - vec![ - 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, - 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, - ], - // Votes with expiry - vec![100, 101, 106, 107, 112, 116, 120, 121, 122, 124], - // More votes with expiry of a large 
number of votes - vec![200, 201], - vec![ - 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, - 218, 219, 220, 221, 222, 223, 224, 225, 226, - ], - vec![227, 228, 229, 230, 231, 232, 233, 234, 235, 236], - ]; - - let mut feature_set = FeatureSet::default(); - feature_set.activate(&feature_set::vote_state_update_credit_per_dequeue::id(), 1); - - for vote_group in test_vote_groups { - // Duplicate vote_state so that the new vote can be applied - let mut vote_state_after_vote = vote_state.clone(); - - vote_state_after_vote.process_vote_unchecked(Vote { - slots: vote_group.clone(), - hash: Hash::new_unique(), - timestamp: None, - }); - - // Now use the resulting new vote state to perform a vote state update on vote_state - assert_eq!( - vote_state.process_new_vote_state( - vote_state_after_vote.votes, - vote_state_after_vote.root_slot, - None, - 0, - Some(&feature_set) - ), - Ok(()) - ); - - // And ensure that the credits earned were the same - assert_eq!( - vote_state.epoch_credits, - vote_state_after_vote.epoch_credits - ); - } - } - - #[test] - fn test_vote_process_timestamp() { - let (slot, timestamp) = (15, 1_575_412_285); - let mut vote_state = VoteState { - last_timestamp: BlockTimestamp { slot, timestamp }, - ..VoteState::default() - }; - - assert_eq!( - vote_state.process_timestamp(slot - 1, timestamp + 1), - Err(VoteError::TimestampTooOld) - ); - assert_eq!( - vote_state.last_timestamp, - BlockTimestamp { slot, timestamp } - ); - assert_eq!( - vote_state.process_timestamp(slot + 1, timestamp - 1), - Err(VoteError::TimestampTooOld) - ); - assert_eq!( - vote_state.process_timestamp(slot, timestamp + 1), - Err(VoteError::TimestampTooOld) - ); - assert_eq!(vote_state.process_timestamp(slot, timestamp), Ok(())); - assert_eq!( - vote_state.last_timestamp, - BlockTimestamp { slot, timestamp } - ); - assert_eq!(vote_state.process_timestamp(slot + 1, timestamp), Ok(())); - assert_eq!( - vote_state.last_timestamp, - BlockTimestamp { - slot: slot + 1, - timestamp - } - ); - assert_eq!( - vote_state.process_timestamp(slot + 2, timestamp + 1), - Ok(()) - ); - assert_eq!( - vote_state.last_timestamp, - BlockTimestamp { - slot: slot + 2, - timestamp: timestamp + 1 - } - ); - - // Test initial vote - vote_state.last_timestamp = BlockTimestamp::default(); - assert_eq!(vote_state.process_timestamp(0, timestamp), Ok(())); - } - - #[test] - fn test_get_and_update_authorized_voter() { - let original_voter = solana_sdk::pubkey::new_rand(); - let mut vote_state = VoteState::new( - &VoteInit { - node_pubkey: original_voter, - authorized_voter: original_voter, - authorized_withdrawer: original_voter, - commission: 0, - }, - &Clock::default(), - ); - - // If no new authorized voter was set, the same authorized voter - // is locked into the next epoch - assert_eq!( - vote_state.get_and_update_authorized_voter(1).unwrap(), - original_voter - ); - - // Try to get the authorized voter for epoch 5, implies - // the authorized voter for epochs 1-4 were unchanged - assert_eq!( - vote_state.get_and_update_authorized_voter(5).unwrap(), - original_voter - ); - - // Authorized voter for expired epoch 0..5 should have been - // purged and no longer queryable - assert_eq!(vote_state.authorized_voters.len(), 1); - for i in 0..5 { - assert!(vote_state - .authorized_voters - .get_authorized_voter(i) - .is_none()); - } - - // Set an authorized voter change at slot 7 - let new_authorized_voter = solana_sdk::pubkey::new_rand(); - vote_state - .set_new_authorized_voter(&new_authorized_voter, 5, 7, 
|_| Ok(())) - .unwrap(); - - // Try to get the authorized voter for epoch 6, unchanged - assert_eq!( - vote_state.get_and_update_authorized_voter(6).unwrap(), - original_voter - ); - - // Try to get the authorized voter for epoch 7 and onwards, should - // be the new authorized voter - for i in 7..10 { - assert_eq!( - vote_state.get_and_update_authorized_voter(i).unwrap(), - new_authorized_voter - ); - } - assert_eq!(vote_state.authorized_voters.len(), 1); - } - - #[test] - fn test_set_new_authorized_voter() { - let original_voter = solana_sdk::pubkey::new_rand(); - let epoch_offset = 15; - let mut vote_state = VoteState::new( - &VoteInit { - node_pubkey: original_voter, - authorized_voter: original_voter, - authorized_withdrawer: original_voter, - commission: 0, - }, - &Clock::default(), - ); - - assert!(vote_state.prior_voters.last().is_none()); - - let new_voter = solana_sdk::pubkey::new_rand(); - // Set a new authorized voter - vote_state - .set_new_authorized_voter(&new_voter, 0, epoch_offset, |_| Ok(())) - .unwrap(); - - assert_eq!(vote_state.prior_voters.idx, 0); - assert_eq!( - vote_state.prior_voters.last(), - Some(&(original_voter, 0, epoch_offset)) - ); - - // Trying to set authorized voter for same epoch again should fail - assert_eq!( - vote_state.set_new_authorized_voter(&new_voter, 0, epoch_offset, |_| Ok(())), - Err(VoteError::TooSoonToReauthorize.into()) - ); - - // Setting the same authorized voter again should succeed - vote_state - .set_new_authorized_voter(&new_voter, 2, 2 + epoch_offset, |_| Ok(())) - .unwrap(); + let mut vote_state = VoteState::default(); - // Set a third and fourth authorized voter - let new_voter2 = solana_sdk::pubkey::new_rand(); - vote_state - .set_new_authorized_voter(&new_voter2, 3, 3 + epoch_offset, |_| Ok(())) - .unwrap(); - assert_eq!(vote_state.prior_voters.idx, 1); + let vote = Vote::new(vec![0], Hash::default()); + let slot_hashes: Vec<_> = vec![(*vote.slots.last().unwrap(), vote.hash)]; assert_eq!( - vote_state.prior_voters.last(), - Some(&(new_voter, epoch_offset, 3 + epoch_offset)) + process_vote( + &mut vote_state, + &vote, + &slot_hashes, + 0, + Some(&FeatureSet::default()) + ), + Ok(()) ); - let new_voter3 = solana_sdk::pubkey::new_rand(); - vote_state - .set_new_authorized_voter(&new_voter3, 6, 6 + epoch_offset, |_| Ok(())) - .unwrap(); - assert_eq!(vote_state.prior_voters.idx, 2); + let vote = Vote::new(vec![0, 1], Hash::default()); + let slot_hashes: Vec<_> = vec![(1, vote.hash), (0, vote.hash)]; assert_eq!( - vote_state.prior_voters.last(), - Some(&(new_voter2, 3 + epoch_offset, 6 + epoch_offset)) + check_slots_are_valid(&vote_state, &vote.slots, &vote.hash, &slot_hashes), + Ok(()) ); - - // Check can set back to original voter - vote_state - .set_new_authorized_voter(&original_voter, 9, 9 + epoch_offset, |_| Ok(())) - .unwrap(); - - // Run with these voters for a while, check the ranges of authorized - // voters is correct - for i in 9..epoch_offset { - assert_eq!( - vote_state.get_and_update_authorized_voter(i).unwrap(), - original_voter - ); - } - for i in epoch_offset..3 + epoch_offset { - assert_eq!( - vote_state.get_and_update_authorized_voter(i).unwrap(), - new_voter - ); - } - for i in 3 + epoch_offset..6 + epoch_offset { - assert_eq!( - vote_state.get_and_update_authorized_voter(i).unwrap(), - new_voter2 - ); - } - for i in 6 + epoch_offset..9 + epoch_offset { - assert_eq!( - vote_state.get_and_update_authorized_voter(i).unwrap(), - new_voter3 - ); - } - for i in 9 + epoch_offset..=10 + epoch_offset { - assert_eq!( - 
vote_state.get_and_update_authorized_voter(i).unwrap(), - original_voter - ); - } } #[test] - fn test_authorized_voter_is_locked_within_epoch() { - let original_voter = solana_sdk::pubkey::new_rand(); - let mut vote_state = VoteState::new( - &VoteInit { - node_pubkey: original_voter, - authorized_voter: original_voter, - authorized_withdrawer: original_voter, - commission: 0, - }, - &Clock::default(), - ); + fn test_check_slots_are_valid_next_vote_only() { + let mut vote_state = VoteState::default(); - // Test that it's not possible to set a new authorized - // voter within the same epoch, even if none has been - // explicitly set before - let new_voter = solana_sdk::pubkey::new_rand(); + let vote = Vote::new(vec![0], Hash::default()); + let slot_hashes: Vec<_> = vec![(*vote.slots.last().unwrap(), vote.hash)]; assert_eq!( - vote_state.set_new_authorized_voter(&new_voter, 1, 1, |_| Ok(())), - Err(VoteError::TooSoonToReauthorize.into()) + process_vote( + &mut vote_state, + &vote, + &slot_hashes, + 0, + Some(&FeatureSet::default()) + ), + Ok(()) ); - assert_eq!(vote_state.get_authorized_voter(1), Some(original_voter)); - - // Set a new authorized voter for a future epoch + let vote = Vote::new(vec![1], Hash::default()); + let slot_hashes: Vec<_> = vec![(1, vote.hash), (0, vote.hash)]; assert_eq!( - vote_state.set_new_authorized_voter(&new_voter, 1, 2, |_| Ok(())), + check_slots_are_valid(&vote_state, &vote.slots, &vote.hash, &slot_hashes), Ok(()) ); + } + #[test] + fn test_process_vote_empty_slots() { + let mut vote_state = VoteState::default(); - // Test that it's not possible to set a new authorized - // voter within the same epoch, even if none has been - // explicitly set before + let vote = Vote::new(vec![], Hash::default()); assert_eq!( - vote_state.set_new_authorized_voter(&original_voter, 3, 3, |_| Ok(())), - Err(VoteError::TooSoonToReauthorize.into()) + process_vote(&mut vote_state, &vote, &[], 0, Some(&FeatureSet::default())), + Err(VoteError::EmptySlots) ); - - assert_eq!(vote_state.get_authorized_voter(3), Some(new_voter)); } + // Test vote credit updates after "one credit per slot" feature is enabled #[test] - fn test_vote_state_size_of() { - let vote_state = VoteState::get_max_sized_vote_state(); - let vote_state = VoteStateVersions::new_current(vote_state); - let size = bincode::serialized_size(&vote_state).unwrap(); - assert_eq!(VoteState::size_of() as u64, size); - } + fn test_vote_state_update_increment_credits() { + // Create a new Votestate + let mut vote_state = VoteState::new(&VoteInit::default(), &Clock::default()); - #[test] - fn test_vote_state_max_size() { - let mut max_sized_data = vec![0; VoteState::size_of()]; - let vote_state = VoteState::get_max_sized_vote_state(); - let (start_leader_schedule_epoch, _) = vote_state.authorized_voters.last().unwrap(); - let start_current_epoch = - start_leader_schedule_epoch - MAX_LEADER_SCHEDULE_EPOCH_OFFSET + 1; - - let mut vote_state = Some(vote_state); - for i in start_current_epoch..start_current_epoch + 2 * MAX_LEADER_SCHEDULE_EPOCH_OFFSET { - vote_state.as_mut().map(|vote_state| { - vote_state.set_new_authorized_voter( - &solana_sdk::pubkey::new_rand(), - i, - i + MAX_LEADER_SCHEDULE_EPOCH_OFFSET, - |_| Ok(()), - ) - }); - - let versioned = VoteStateVersions::new_current(vote_state.take().unwrap()); - VoteState::serialize(&versioned, &mut max_sized_data).unwrap(); - vote_state = Some(versioned.convert_to_current()); - } - } + // Test data: a sequence of groups of votes to simulate having been cast, after each group a vote 
+ // state update is compared to "normal" vote processing to ensure that credits are earned equally
+ let test_vote_groups: Vec<Vec<Slot>> = vec![
+ // Initial set of votes that don't dequeue any slots, so no credits earned
+ vec![1, 2, 3, 4, 5, 6, 7, 8],
+ vec![
+ 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31,
+ ],
+ // Now a single vote which should result in the first root and first credit earned
+ vec![32],
+ // Now another vote, should earn one credit
+ vec![33],
+ // Two votes in sequence
+ vec![34, 35],
+ // 3 votes in sequence
+ vec![36, 37, 38],
+ // 30 votes in sequence
+ vec![
+ 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63, 64, 65, 66, 67, 68,
+ ],
+ // 31 votes in sequence
+ vec![
+ 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+ ],
+ // Votes with expiry
+ vec![100, 101, 106, 107, 112, 116, 120, 121, 122, 124],
+ // More votes with expiry of a large number of votes
+ vec![200, 201],
+ vec![
+ 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217,
+ 218, 219, 220, 221, 222, 223, 224, 225, 226,
+ ],
+ vec![227, 228, 229, 230, 231, 232, 233, 234, 235, 236],
+ ];
+
+ let mut feature_set = FeatureSet::default();
+ feature_set.activate(&feature_set::vote_state_update_credit_per_dequeue::id(), 1);
+
+ for vote_group in test_vote_groups {
+ // Duplicate vote_state so that the new vote can be applied
+ let mut vote_state_after_vote = vote_state.clone();
+
+ process_vote_unchecked(
+ &mut vote_state_after_vote,
+ Vote {
+ slots: vote_group.clone(),
+ hash: Hash::new_unique(),
+ timestamp:
None, + }, + ); + + // Now use the resulting new vote state to perform a vote state update on vote_state + assert_eq!( + process_new_vote_state( + &mut vote_state, + vote_state_after_vote.votes, + vote_state_after_vote.root_slot, + None, + 0, + Some(&feature_set) + ), + Ok(()) + ); + + // And ensure that the credits earned were the same + assert_eq!( + vote_state.epoch_credits, + vote_state_after_vote.epoch_credits + ); + } } #[test] @@ -2599,14 +1453,9 @@ mod tests { }) .collect(); + let current_epoch = vote_state1.current_epoch(); assert_eq!( - vote_state1.process_new_vote_state( - bad_votes, - None, - None, - vote_state1.current_epoch(), - None, - ), + process_new_vote_state(&mut vote_state1, bad_votes, None, None, current_epoch, None,), Err(VoteError::TooManyVotes) ); } @@ -2615,24 +1464,26 @@ fn test_process_new_vote_state_root_rollback() { let mut vote_state1 = VoteState::default(); for i in 0..MAX_LOCKOUT_HISTORY + 2 { - vote_state1.process_slot_vote_unchecked(i as Slot); + process_slot_vote_unchecked(&mut vote_state1, i as Slot); } assert_eq!(vote_state1.root_slot.unwrap(), 1); // Update vote_state2 with a higher slot so that `process_new_vote_state` // doesn't panic. let mut vote_state2 = vote_state1.clone(); - vote_state2.process_slot_vote_unchecked(MAX_LOCKOUT_HISTORY as Slot + 3); + process_slot_vote_unchecked(&mut vote_state2, MAX_LOCKOUT_HISTORY as Slot + 3); // Trying to set a lesser root should error let lesser_root = Some(0); + let current_epoch = vote_state2.current_epoch(); assert_eq!( - vote_state1.process_new_vote_state( + process_new_vote_state( + &mut vote_state1, vote_state2.votes.clone(), lesser_root, None, - vote_state2.current_epoch(), + current_epoch, None, ), Err(VoteError::RootRollBack) @@ -2641,11 +1492,12 @@ // Trying to set root to None should error let none_root = None; assert_eq!( - vote_state1.process_new_vote_state( + process_new_vote_state( + &mut vote_state1, vote_state2.votes.clone(), none_root, None, - vote_state2.current_epoch(), + current_epoch, None, ), Err(VoteError::RootRollBack) ); } @@ -2655,6 +1507,7 @@ #[test] fn test_process_new_vote_state_zero_confirmations() { let mut vote_state1 = VoteState::default(); + let current_epoch = vote_state1.current_epoch(); let bad_votes: VecDeque<Lockout> = vec![ Lockout { @@ -2669,13 +1522,7 @@ .into_iter() .collect(); assert_eq!( - vote_state1.process_new_vote_state( - bad_votes, - None, - None, - vote_state1.current_epoch(), - None, - ), + process_new_vote_state(&mut vote_state1, bad_votes, None, None, current_epoch, None,), Err(VoteError::ZeroConfirmations) ); @@ -2692,13 +1539,7 @@ .into_iter() .collect(); assert_eq!( - vote_state1.process_new_vote_state( - bad_votes, - None, - None, - vote_state1.current_epoch(), - None, - ), + process_new_vote_state(&mut vote_state1, bad_votes, None, None, current_epoch, None,), Err(VoteError::ZeroConfirmations) ); } @@ -2706,6 +1547,7 @@ #[test] fn test_process_new_vote_state_confirmations_too_large() { let mut vote_state1 = VoteState::default(); + let current_epoch = vote_state1.current_epoch(); let good_votes: VecDeque<Lockout> = vec![Lockout { slot: 0, @@ -2714,9 +1556,15 @@ .into_iter() .collect(); - vote_state1 - .process_new_vote_state(good_votes, None, None, vote_state1.current_epoch(), None) - .unwrap(); + process_new_vote_state( + &mut vote_state1, + good_votes, + None, + None, + current_epoch, + None, + ) + .unwrap(); let mut vote_state1 = VoteState::default(); let bad_votes: VecDeque<Lockout> =
vec![Lockout { @@ -2726,13 +1574,7 @@ mod tests { .into_iter() .collect(); assert_eq!( - vote_state1.process_new_vote_state( - bad_votes, - None, - None, - vote_state1.current_epoch(), - None - ), + process_new_vote_state(&mut vote_state1, bad_votes, None, None, current_epoch, None), Err(VoteError::ConfirmationTooLarge) ); } @@ -2740,6 +1582,7 @@ #[test] fn test_process_new_vote_state_slot_smaller_than_root() { let mut vote_state1 = VoteState::default(); + let current_epoch = vote_state1.current_epoch(); let root_slot = 5; let bad_votes: VecDeque<Lockout> = vec![ @@ -2755,11 +1598,12 @@ .into_iter() .collect(); assert_eq!( - vote_state1.process_new_vote_state( + process_new_vote_state( + &mut vote_state1, bad_votes, Some(root_slot), None, - vote_state1.current_epoch(), + current_epoch, None, ), Err(VoteError::SlotSmallerThanRoot) @@ -2778,11 +1622,12 @@ .into_iter() .collect(); assert_eq!( - vote_state1.process_new_vote_state( + process_new_vote_state( + &mut vote_state1, bad_votes, Some(root_slot), None, - vote_state1.current_epoch(), + current_epoch, None, ), Err(VoteError::SlotSmallerThanRoot) @@ -2792,6 +1637,7 @@ #[test] fn test_process_new_vote_state_slots_not_ordered() { let mut vote_state1 = VoteState::default(); + let current_epoch = vote_state1.current_epoch(); let bad_votes: VecDeque<Lockout> = vec![ Lockout { @@ -2806,13 +1652,7 @@ .into_iter() .collect(); assert_eq!( - vote_state1.process_new_vote_state( - bad_votes, - None, - None, - vote_state1.current_epoch(), - None - ), + process_new_vote_state(&mut vote_state1, bad_votes, None, None, current_epoch, None), Err(VoteError::SlotsNotOrdered) ); @@ -2829,13 +1669,7 @@ .into_iter() .collect(); assert_eq!( - vote_state1.process_new_vote_state( - bad_votes, - None, - None, - vote_state1.current_epoch(), - None - ), + process_new_vote_state(&mut vote_state1, bad_votes, None, None, current_epoch, None), Err(VoteError::SlotsNotOrdered) ); } @@ -2843,6 +1677,7 @@ #[test] fn test_process_new_vote_state_confirmations_not_ordered() { let mut vote_state1 = VoteState::default(); + let current_epoch = vote_state1.current_epoch(); let bad_votes: VecDeque<Lockout> = vec![ Lockout { @@ -2857,13 +1692,7 @@ .into_iter() .collect(); assert_eq!( - vote_state1.process_new_vote_state( - bad_votes, - None, - None, - vote_state1.current_epoch(), - None - ), + process_new_vote_state(&mut vote_state1, bad_votes, None, None, current_epoch, None), Err(VoteError::ConfirmationsNotOrdered) ); @@ -2880,13 +1709,7 @@ .into_iter() .collect(); assert_eq!( - vote_state1.process_new_vote_state( - bad_votes, - None, - None, - vote_state1.current_epoch(), - None - ), + process_new_vote_state(&mut vote_state1, bad_votes, None, None, current_epoch, None), Err(VoteError::ConfirmationsNotOrdered) ); } @@ -2894,6 +1717,7 @@ #[test] fn test_process_new_vote_state_new_vote_state_lockout_mismatch() { let mut vote_state1 = VoteState::default(); + let current_epoch = vote_state1.current_epoch(); let bad_votes: VecDeque<Lockout> = vec![ Lockout { @@ -2910,13 +1734,7 @@ mod tests { // Slot 7 should have expired slot 0 assert_eq!( - vote_state1.process_new_vote_state( - bad_votes, - None, - None, - vote_state1.current_epoch(), - None, - ), + process_new_vote_state(&mut vote_state1, bad_votes, None, None, current_epoch, None,), Err(VoteError::NewVoteStateLockoutMismatch) ); } @@ -2924,6 +1742,7 @@ #[test] fn test_process_new_vote_state_confirmation_rollback() { let mut vote_state1 =
VoteState::default(); + let current_epoch = vote_state1.current_epoch(); let votes: VecDeque<Lockout> = vec![ Lockout { slot: 0, @@ -2936,9 +1755,7 @@ mod tests { ] .into_iter() .collect(); - vote_state1 - .process_new_vote_state(votes, None, None, vote_state1.current_epoch(), None) - .unwrap(); + process_new_vote_state(&mut vote_state1, votes, None, None, current_epoch, None).unwrap(); let votes: VecDeque<Lockout> = vec![ Lockout { @@ -2960,13 +1777,7 @@ mod tests { ] .into_iter() .collect(); // Should error because newer vote state should not have lower confirmation the same slot // 1 assert_eq!( - vote_state1.process_new_vote_state( - votes, - None, - None, - vote_state1.current_epoch(), - None - ), + process_new_vote_state(&mut vote_state1, votes, None, None, current_epoch, None), Err(VoteError::ConfirmationRollBack) ); } @@ -2975,7 +1786,7 @@ fn test_process_new_vote_state_root_progress() { let mut vote_state1 = VoteState::default(); for i in 0..MAX_LOCKOUT_HISTORY { - vote_state1.process_slot_vote_unchecked(i as u64); + process_slot_vote_unchecked(&mut vote_state1, i as u64); } assert!(vote_state1.root_slot.is_none()); @@ -2988,18 +1799,18 @@ // to `vote_state2`, which has a newer root, which // should succeed. for new_vote in MAX_LOCKOUT_HISTORY + 1..=MAX_LOCKOUT_HISTORY + 2 { - vote_state2.process_slot_vote_unchecked(new_vote as Slot); + process_slot_vote_unchecked(&mut vote_state2, new_vote as Slot); assert_ne!(vote_state1.root_slot, vote_state2.root_slot); - vote_state1 - .process_new_vote_state( - vote_state2.votes.clone(), - vote_state2.root_slot, - None, - vote_state2.current_epoch(), - None, - ) - .unwrap(); + process_new_vote_state( + &mut vote_state1, + vote_state2.votes.clone(), + vote_state2.root_slot, + None, + vote_state2.current_epoch(), + None, + ) + .unwrap(); assert_eq!(vote_state1, vote_state2); } @@ -3026,7 +1837,7 @@ // Construct on-chain vote state let mut vote_state1 = VoteState::default(); - vote_state1.process_slot_votes_unchecked(&[1, 2, 5]); + process_slot_votes_unchecked(&mut vote_state1, &[1, 2, 5]); assert_eq!( vote_state1 .votes @@ -3038,7 +1849,7 @@ // Construct local tower state let mut vote_state2 = VoteState::default(); - vote_state2.process_slot_votes_unchecked(&[1, 2, 3, 5, 7]); + process_slot_votes_unchecked(&mut vote_state2, &[1, 2, 3, 5, 7]); assert_eq!( vote_state2 .votes @@ -3049,15 +1860,15 @@ ); // See that on-chain vote state can update properly - vote_state1 - .process_new_vote_state( - vote_state2.votes.clone(), - vote_state2.root_slot, - None, - vote_state2.current_epoch(), - None, - ) - .unwrap(); + process_new_vote_state( + &mut vote_state1, + vote_state2.votes.clone(), + vote_state2.root_slot, + None, + vote_state2.current_epoch(), + None, + ) + .unwrap(); assert_eq!(vote_state1, vote_state2); } @@ -3066,7 +1877,7 @@ fn test_process_new_vote_state_lockout_violation() { // Construct on-chain vote state let mut vote_state1 = VoteState::default(); - vote_state1.process_slot_votes_unchecked(&[1, 2, 4, 5]); + process_slot_votes_unchecked(&mut vote_state1, &[1, 2, 4, 5]); assert_eq!( vote_state1 .votes @@ -3079,7 +1890,7 @@ // Construct conflicting tower state. Vote 4 is missing, // but 5 should not have popped off vote 4.
let mut vote_state2 = VoteState::default(); - vote_state2.process_slot_votes_unchecked(&[1, 2, 3, 5, 7]); + process_slot_votes_unchecked(&mut vote_state2, &[1, 2, 3, 5, 7]); assert_eq!( vote_state2 .votes @@ -3091,7 +1902,8 @@ mod tests { // See that on-chain vote state can update properly assert_eq!( - vote_state1.process_new_vote_state( + process_new_vote_state( + &mut vote_state1, vote_state2.votes.clone(), vote_state2.root_slot, None, @@ -3106,7 +1918,7 @@ mod tests { fn test_process_new_vote_state_lockout_violation2() { // Construct on-chain vote state let mut vote_state1 = VoteState::default(); - vote_state1.process_slot_votes_unchecked(&[1, 2, 5, 6, 7]); + process_slot_votes_unchecked(&mut vote_state1, &[1, 2, 5, 6, 7]); assert_eq!( vote_state1 .votes @@ -3119,7 +1931,7 @@ mod tests { // Construct a new vote state. Violates on-chain state because 8 // should not have popped off 7 let mut vote_state2 = VoteState::default(); - vote_state2.process_slot_votes_unchecked(&[1, 2, 3, 5, 6, 8]); + process_slot_votes_unchecked(&mut vote_state2, &[1, 2, 3, 5, 6, 8]); assert_eq!( vote_state2 .votes @@ -3132,7 +1944,8 @@ mod tests { // Both vote states contain `5`, but `5` is not part of the common prefix // of both vote states. However, the violation should still be detected. assert_eq!( - vote_state1.process_new_vote_state( + process_new_vote_state( + &mut vote_state1, vote_state2.votes.clone(), vote_state2.root_slot, None, @@ -3147,7 +1960,7 @@ mod tests { fn test_process_new_vote_state_expired_ancestor_not_removed() { // Construct on-chain vote state let mut vote_state1 = VoteState::default(); - vote_state1.process_slot_votes_unchecked(&[1, 2, 3, 9]); + process_slot_votes_unchecked(&mut vote_state1, &[1, 2, 3, 9]); assert_eq!( vote_state1 .votes @@ -3160,7 +1973,7 @@ mod tests { // Example: {1: lockout 8, 9: lockout 2}, vote on 10 will not pop off 1 // because 9 is not popped off yet let mut vote_state2 = vote_state1.clone(); - vote_state2.process_slot_vote_unchecked(10); + process_slot_vote_unchecked(&mut vote_state2, 10); // Slot 1 has been expired by 10, but is kept alive by its descendant // 9 which has not been expired yet. 
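[Editor's note: the following sketch is added for illustration and is not part of the patch. It restates the lockout rule the tests above exercise; all names are illustrative.]

// An entry with confirmation_count c is locked out through slot + 2^c. A newer vote
// expires it only when the vote slot is past that bound, and even then the entry
// survives if a not-yet-expired descendant still references it.
fn lockout(confirmation_count: u32) -> u64 {
    2u64.saturating_pow(confirmation_count)
}

fn is_expired(entry_slot: u64, confirmation_count: u32, vote_slot: u64) -> bool {
    vote_slot > entry_slot.saturating_add(lockout(confirmation_count))
}

// For the "{1: lockout 8, 9: lockout 2}" example in the test above: is_expired(1, 3, 10)
// is true in isolation, but is_expired(9, 1, 10) is false, so the live descendant 9
// keeps slot 1 alive and process_new_vote_state must not remove it.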
@@ -3176,22 +1989,22 @@ mod tests { ); // Should be able to update vote_state1 - vote_state1 - .process_new_vote_state( - vote_state2.votes.clone(), - vote_state2.root_slot, - None, - vote_state2.current_epoch(), - None, - ) - .unwrap(); + process_new_vote_state( + &mut vote_state1, + vote_state2.votes.clone(), + vote_state2.root_slot, + None, + vote_state2.current_epoch(), + None, + ) + .unwrap(); assert_eq!(vote_state1, vote_state2,); } #[test] fn test_process_new_vote_current_state_contains_bigger_slots() { let mut vote_state1 = VoteState::default(); - vote_state1.process_slot_votes_unchecked(&[6, 7, 8]); + process_slot_votes_unchecked(&mut vote_state1, &[6, 7, 8]); assert_eq!( vote_state1 .votes @@ -3217,14 +2030,9 @@ mod tests { .collect(); let root = Some(1); + let current_epoch = vote_state1.current_epoch(); assert_eq!( - vote_state1.process_new_vote_state( - bad_votes, - root, - None, - vote_state1.current_epoch(), - None - ), + process_new_vote_state(&mut vote_state1, bad_votes, root, None, current_epoch, None), Err(VoteError::LockoutConflict) ); @@ -3241,15 +2049,16 @@ mod tests { .into_iter() .collect(); - vote_state1 - .process_new_vote_state( - good_votes.clone(), - root, - None, - vote_state1.current_epoch(), - None, - ) - .unwrap(); + let current_epoch = vote_state1.current_epoch(); + process_new_vote_state( + &mut vote_state1, + good_votes.clone(), + root, + None, + current_epoch, + None, + ) + .unwrap(); assert_eq!(vote_state1.votes, good_votes); } @@ -3267,7 +2076,7 @@ mod tests { // error with `VotesTooOldAllFiltered` let slot_hashes = vec![(3, Hash::new_unique()), (2, Hash::new_unique())]; assert_eq!( - vote_state.process_vote(&vote, &slot_hashes, 0, Some(&feature_set),), + process_vote(&mut vote_state, &vote, &slot_hashes, 0, Some(&feature_set),), Err(VoteError::VotesTooOldAllFiltered) ); @@ -3281,9 +2090,7 @@ mod tests { .1; let vote = Vote::new(vec![old_vote_slot, vote_slot], vote_slot_hash); - vote_state - .process_vote(&vote, &slot_hashes, 0, Some(&feature_set)) - .unwrap(); + process_vote(&mut vote_state, &vote, &slot_hashes, 0, Some(&feature_set)).unwrap(); assert_eq!( vote_state.votes.into_iter().collect::>(), vec![Lockout { @@ -3310,9 +2117,14 @@ mod tests { .find(|(slot, _hash)| slot == vote_slots.last().unwrap()) .unwrap() .1; - vote_state - .process_vote(&Vote::new(vote_slots, vote_hash), slot_hashes, 0, None) - .unwrap(); + process_vote( + &mut vote_state, + &Vote::new(vote_slots, vote_hash), + slot_hashes, + 0, + None, + ) + .unwrap(); } vote_state @@ -3326,7 +2138,8 @@ mod tests { // Test with empty vote state update, should return EmptySlots error let mut vote_state_update = VoteStateUpdate::from(vec![]); assert_eq!( - empty_vote_state.check_update_vote_state_slots_are_valid( + check_update_vote_state_slots_are_valid( + &empty_vote_state, &mut vote_state_update, &empty_slot_hashes ), @@ -3336,7 +2149,8 @@ mod tests { // Test with non-empty vote state update, should return SlotsMismatch since nothing exists in SlotHashes let mut vote_state_update = VoteStateUpdate::from(vec![(0, 1)]); assert_eq!( - empty_vote_state.check_update_vote_state_slots_are_valid( + check_update_vote_state_slots_are_valid( + &empty_vote_state, &mut vote_state_update, &empty_slot_hashes ), @@ -3354,8 +2168,11 @@ mod tests { // should return error `VoteTooOld` let mut vote_state_update = VoteStateUpdate::from(vec![(latest_vote, 1)]); assert_eq!( - vote_state - .check_update_vote_state_slots_are_valid(&mut vote_state_update, &slot_hashes), + check_update_vote_state_slots_are_valid( + 
&vote_state, + &mut vote_state_update, + &slot_hashes + ), Err(VoteError::VoteTooOld), ); @@ -3366,8 +2183,11 @@ mod tests { let slot_hashes = build_slot_hashes(vec![earliest_slot_in_history]); let mut vote_state_update = VoteStateUpdate::from(vec![(earliest_slot_in_history - 1, 1)]); assert_eq!( - vote_state - .check_update_vote_state_slots_are_valid(&mut vote_state_update, &slot_hashes), + check_update_vote_state_slots_are_valid( + &vote_state, + &mut vote_state_update, + &slot_hashes + ), Err(VoteError::VoteTooOld), ); } @@ -3390,8 +2210,7 @@ mod tests { let mut vote_state_update = VoteStateUpdate::from(vec![(earliest_slot_in_history, 1)]); vote_state_update.hash = earliest_slot_in_history_hash; vote_state_update.root = Some(earliest_slot_in_history - 1); - vote_state - .check_update_vote_state_slots_are_valid(&mut vote_state_update, &slot_hashes) + check_update_vote_state_slots_are_valid(&vote_state, &mut vote_state_update, &slot_hashes) .unwrap(); assert!(vote_state.root_slot.is_none()); assert_eq!(vote_state_update.root, vote_state.root_slot); @@ -3403,8 +2222,7 @@ mod tests { let mut vote_state_update = VoteStateUpdate::from(vec![(earliest_slot_in_history, 1)]); vote_state_update.hash = earliest_slot_in_history_hash; vote_state_update.root = Some(earliest_slot_in_history - 1); - vote_state - .check_update_vote_state_slots_are_valid(&mut vote_state_update, &slot_hashes) + check_update_vote_state_slots_are_valid(&vote_state, &mut vote_state_update, &slot_hashes) .unwrap(); assert_eq!(vote_state.root_slot, Some(0)); assert_eq!(vote_state_update.root, vote_state.root_slot); @@ -3425,8 +2243,11 @@ mod tests { let mut vote_state_update = VoteStateUpdate::from(vec![(2, 2), (1, 3), (vote_slot, 1)]); vote_state_update.hash = vote_slot_hash; assert_eq!( - vote_state - .check_update_vote_state_slots_are_valid(&mut vote_state_update, &slot_hashes), + check_update_vote_state_slots_are_valid( + &vote_state, + &mut vote_state_update, + &slot_hashes + ), Err(VoteError::SlotsNotOrdered), ); @@ -3434,8 +2255,11 @@ mod tests { let mut vote_state_update = VoteStateUpdate::from(vec![(2, 2), (2, 2), (vote_slot, 1)]); vote_state_update.hash = vote_slot_hash; assert_eq!( - vote_state - .check_update_vote_state_slots_are_valid(&mut vote_state_update, &slot_hashes), + check_update_vote_state_slots_are_valid( + &vote_state, + &mut vote_state_update, + &slot_hashes + ), Err(VoteError::SlotsNotOrdered), ); } @@ -3461,8 +2285,7 @@ mod tests { let mut vote_state_update = VoteStateUpdate::from(vec![(missing_older_than_history_slot, 2), (vote_slot, 3)]); vote_state_update.hash = vote_slot_hash; - vote_state - .check_update_vote_state_slots_are_valid(&mut vote_state_update, &slot_hashes) + check_update_vote_state_slots_are_valid(&vote_state, &mut vote_state_update, &slot_hashes) .unwrap(); // Check the earlier slot was filtered out @@ -3478,14 +2301,6 @@ mod tests { ); } - #[test] - fn test_minimum_balance() { - let rent = solana_sdk::rent::Rent::default(); - let minimum_balance = rent.minimum_balance(VoteState::size_of()); - // golden, may need updating when vote_state grows - assert!(minimum_balance as f64 / 10f64.powf(9.0) < 0.04) - } - #[test] fn test_check_update_vote_state_older_than_history_slots_not_filtered() { let slot_hashes = build_slot_hashes(vec![1, 2, 3, 4]); @@ -3507,8 +2322,7 @@ mod tests { let mut vote_state_update = VoteStateUpdate::from(vec![(existing_older_than_history_slot, 2), (vote_slot, 3)]); vote_state_update.hash = vote_slot_hash; - vote_state - 
.check_update_vote_state_slots_are_valid(&mut vote_state_update, &slot_hashes) + check_update_vote_state_slots_are_valid(&vote_state, &mut vote_state_update, &slot_hashes) .unwrap(); // Check the earlier slot was *NOT* filtered out assert_eq!(vote_state_update.lockouts.len(), 2); @@ -3564,8 +2378,7 @@ mod tests { (vote_slot, 1), ]); vote_state_update.hash = vote_slot_hash; - vote_state - .check_update_vote_state_slots_are_valid(&mut vote_state_update, &slot_hashes) + check_update_vote_state_slots_are_valid(&vote_state, &mut vote_state_update, &slot_hashes) .unwrap(); assert_eq!(vote_state_update.lockouts.len(), 3); assert_eq!( @@ -3614,8 +2427,11 @@ mod tests { VoteStateUpdate::from(vec![(missing_vote_slot, 2), (vote_slot, 3)]); vote_state_update.hash = vote_slot_hash; assert_eq!( - vote_state - .check_update_vote_state_slots_are_valid(&mut vote_state_update, &slot_hashes), + check_update_vote_state_slots_are_valid( + &vote_state, + &mut vote_state_update, + &slot_hashes + ), Err(VoteError::SlotsMismatch), ); @@ -3630,8 +2446,11 @@ mod tests { ]); vote_state_update.hash = vote_slot_hash; assert_eq!( - vote_state - .check_update_vote_state_slots_are_valid(&mut vote_state_update, &slot_hashes), + check_update_vote_state_slots_are_valid( + &vote_state, + &mut vote_state_update, + &slot_hashes + ), Err(VoteError::SlotsMismatch), ); } @@ -3660,8 +2479,11 @@ mod tests { vote_state_update.hash = vote_slot_hash; vote_state_update.root = Some(new_root); assert_eq!( - vote_state - .check_update_vote_state_slots_are_valid(&mut vote_state_update, &slot_hashes), + check_update_vote_state_slots_are_valid( + &vote_state, + &mut vote_state_update, + &slot_hashes + ), Err(VoteError::RootOnDifferentFork), ); } @@ -3681,8 +2503,11 @@ mod tests { let mut vote_state_update = VoteStateUpdate::from(vec![(8, 2), (missing_vote_slot, 3)]); vote_state_update.hash = vote_slot_hash; assert_eq!( - vote_state - .check_update_vote_state_slots_are_valid(&mut vote_state_update, &slot_hashes), + check_update_vote_state_slots_are_valid( + &vote_state, + &mut vote_state_update, + &slot_hashes + ), Err(VoteError::SlotsMismatch), ); } @@ -3706,8 +2531,7 @@ mod tests { let mut vote_state_update = VoteStateUpdate::from(vec![(2, 4), (4, 3), (6, 2), (vote_slot, 1)]); vote_state_update.hash = vote_slot_hash; - vote_state - .check_update_vote_state_slots_are_valid(&mut vote_state_update, &slot_hashes) + check_update_vote_state_slots_are_valid(&vote_state, &mut vote_state_update, &slot_hashes) .unwrap(); // Nothing in the update should have been filtered out @@ -3755,8 +2579,7 @@ mod tests { .1; let mut vote_state_update = VoteStateUpdate::from(vec![(4, 2), (vote_slot, 1)]); vote_state_update.hash = vote_slot_hash; - vote_state - .check_update_vote_state_slots_are_valid(&mut vote_state_update, &slot_hashes) + check_update_vote_state_slots_are_valid(&vote_state, &mut vote_state_update, &slot_hashes) .unwrap(); // Nothing in the update should have been filtered out @@ -3793,8 +2616,11 @@ mod tests { VoteStateUpdate::from(vec![(2, 4), (4, 3), (6, 2), (vote_slot, 1)]); vote_state_update.hash = vote_slot_hash; assert_eq!( - vote_state - .check_update_vote_state_slots_are_valid(&mut vote_state_update, &slot_hashes), + check_update_vote_state_slots_are_valid( + &vote_state, + &mut vote_state_update, + &slot_hashes + ), Err(VoteError::SlotHashMismatch), ); } diff --git a/programs/vote/src/vote_transaction.rs b/programs/vote/src/vote_transaction.rs index cf84ebc01e..4ff5970879 100644 --- a/programs/vote/src/vote_transaction.rs +++ 
b/programs/vote/src/vote_transaction.rs @@ -1,5 +1,5 @@ use { - crate::{vote_instruction, vote_state::Vote}, + solana_program::vote::{self, state::Vote}, solana_sdk::{ clock::Slot, hash::Hash, @@ -19,14 +19,14 @@ pub fn new_vote_transaction( ) -> Transaction { let votes = Vote::new(slots, bank_hash); let vote_ix = if let Some(switch_proof_hash) = switch_proof_hash { - vote_instruction::vote_switch( + vote::instruction::vote_switch( &vote_keypair.pubkey(), &authorized_voter_keypair.pubkey(), votes, switch_proof_hash, ) } else { - vote_instruction::vote( + vote::instruction::vote( &vote_keypair.pubkey(), &authorized_voter_keypair.pubkey(), votes, diff --git a/rbpf-cli/Cargo.toml b/rbpf-cli/Cargo.toml index 1b426d89fb..16cf97e9b5 100644 --- a/rbpf-cli/Cargo.toml +++ b/rbpf-cli/Cargo.toml @@ -11,10 +11,10 @@ publish = false [dependencies] clap = { version = "3.1.5", features = ["cargo"] } -serde = "1.0.138" -serde_json = "1.0.81" +serde = "1.0.143" +serde_json = "1.0.83" solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.12.0" } solana-logger = { path = "../logger", version = "=1.12.0" } solana-program-runtime = { path = "../program-runtime", version = "=1.12.0" } solana-sdk = { path = "../sdk", version = "=1.12.0" } -solana_rbpf = "=0.2.31" +solana_rbpf = "=0.2.32" diff --git a/remote-wallet/Cargo.toml b/remote-wallet/Cargo.toml index 15e1e4c992..32d1206f8e 100644 --- a/remote-wallet/Cargo.toml +++ b/remote-wallet/Cargo.toml @@ -11,7 +11,7 @@ documentation = "https://docs.rs/solana-remote-wallet" [dependencies] console = "0.15.0" -dialoguer = "0.10.1" +dialoguer = "0.10.2" hidapi = { version = "1.4.1", default-features = false, optional = true } log = "0.4.17" num-derive = { version = "0.3" } diff --git a/rpc-test/Cargo.toml b/rpc-test/Cargo.toml index 18111cbc22..8cd38a9077 100644 --- a/rpc-test/Cargo.toml +++ b/rpc-test/Cargo.toml @@ -17,8 +17,8 @@ crossbeam-channel = "0.5" futures-util = "0.3.21" log = "0.4.17" reqwest = { version = "0.11.11", default-features = false, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] } -serde = "1.0.138" -serde_json = "1.0.81" +serde = "1.0.143" +serde_json = "1.0.83" solana-account-decoder = { path = "../account-decoder", version = "=1.12.0" } solana-client = { path = "../client", version = "=1.12.0" } solana-rpc = { path = "../rpc", version = "=1.12.0" } diff --git a/rpc-test/tests/rpc.rs b/rpc-test/tests/rpc.rs index 89df239f67..d828e2dad8 100644 --- a/rpc-test/tests/rpc.rs +++ b/rpc-test/tests/rpc.rs @@ -234,6 +234,7 @@ fn test_rpc_slot_updates() { } #[test] +#[ignore] // TODO (LB): this test is flaky in master fn test_rpc_subscriptions() { solana_logger::setup(); @@ -374,7 +375,13 @@ fn test_rpc_subscriptions() { } // Wait for all signature subscriptions - let deadline = Instant::now() + Duration::from_secs(15); + /* Set a large 30-sec timeout here because the timing of the above tokio process is + * highly non-deterministic. The test was too flaky at 15-second timeout. Debugging + * show occasional multi-second delay which could come from multiple sources -- other + * tokio tasks, tokio scheduler, OS scheduler. The async nature makes it hard to + * track down the origin of the delay. 
+ */ + let deadline = Instant::now() + Duration::from_secs(30); while !signature_set.is_empty() { let timeout = deadline.saturating_duration_since(Instant::now()); match status_receiver.recv_timeout(timeout) { @@ -396,7 +403,7 @@ fn test_rpc_subscriptions() { } } - let deadline = Instant::now() + Duration::from_secs(5); + let deadline = Instant::now() + Duration::from_secs(60); let mut account_notifications = transactions.len(); while account_notifications > 0 { let timeout = deadline.saturating_duration_since(Instant::now()); diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 00cf7f362c..3c55e8b25a 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -21,13 +21,13 @@ jsonrpc-core-client = { version = "18.0.0" } jsonrpc-derive = "18.0.0" jsonrpc-http-server = "18.0.0" jsonrpc-pubsub = "18.0.0" -libc = "0.2.126" +libc = "0.2.131" log = "0.4.17" rayon = "1.5.3" regex = "1.5.6" -serde = "1.0.138" +serde = "1.0.143" serde_derive = "1.0.103" -serde_json = "1.0.81" +serde_json = "1.0.83" soketto = "0.7" solana-account-decoder = { path = "../account-decoder", version = "=1.12.0" } solana-client = { path = "../client", version = "=1.12.0" } @@ -50,14 +50,14 @@ solana-transaction-status = { path = "../transaction-status", version = "=1.12.0 solana-version = { path = "../version", version = "=1.12.0" } solana-vote-program = { path = "../programs/vote", version = "=1.12.0" } spl-token = { version = "=3.5.0", features = ["no-entrypoint"] } -spl-token-2022 = { version = "=0.4.2", features = ["no-entrypoint"] } +spl-token-2022 = { version = "=0.4.3", features = ["no-entrypoint"] } stream-cancel = "0.8.1" thiserror = "1.0" tokio = { version = "~1.14.1", features = ["full"] } tokio-util = { version = "0.6", features = ["codec", "compat"] } [dev-dependencies] -serial_test = "0.8.0" +serial_test = "0.9.0" solana-address-lookup-table-program = { path = "../programs/address-lookup-table", version = "=1.12.0" } solana-net-utils = { path = "../net-utils", version = "=1.12.0" } solana-stake-program = { path = "../programs/stake", version = "=1.12.0" } diff --git a/rpc/src/optimistically_confirmed_bank_tracker.rs b/rpc/src/optimistically_confirmed_bank_tracker.rs index 3dc5645e60..3dbc25473c 100644 --- a/rpc/src/optimistically_confirmed_bank_tracker.rs +++ b/rpc/src/optimistically_confirmed_bank_tracker.rs @@ -71,7 +71,7 @@ impl OptimisticallyConfirmedBankTracker { let mut last_notified_confirmed_slot: Slot = 0; let mut highest_confirmed_slot: Slot = 0; let thread_hdl = Builder::new() - .name("solana-optimistic-bank-tracker".to_string()) + .name("solOpConfBnkTrk".to_string()) .spawn(move || loop { if exit_.load(Ordering::Relaxed) { break; diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 22ea75ae05..3205d1634c 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -129,7 +129,7 @@ fn new_response(bank: &Bank, value: T) -> RpcResponse { /// Wrapper for rpc return types of methods that provide responses both with and without context. /// Main purpose of this is to fix methods that lack context information in their return type, /// without breaking backwards compatibility. 
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(untagged)] pub enum OptionalContext<T> { Context(RpcResponse<T>), @@ -3815,9 +3815,7 @@ pub mod rpc_full { } if !skip_preflight { - if let Err(e) = verify_transaction(&transaction, &preflight_bank.feature_set) { - return Err(e); - } + verify_transaction(&transaction, &preflight_bank.feature_set)?; match meta.health.check() { RpcHealthStatus::Ok => (), @@ -4888,7 +4886,7 @@ pub mod tests { }, solana_vote_program::{ vote_instruction, - vote_state::{Vote, VoteInit, VoteStateVersions, MAX_LOCKOUT_HISTORY}, + vote_state::{self, Vote, VoteInit, VoteStateVersions, MAX_LOCKOUT_HISTORY}, }, spl_token_2022::{ extension::{ @@ -5208,7 +5206,7 @@ pub mod tests { let balance = bank.get_minimum_balance_for_rent_exemption(space); let mut vote_account = AccountSharedData::new(balance, space, &solana_vote_program::id()); - VoteState::to(&versioned, &mut vote_account).unwrap(); + vote_state::to(&versioned, &mut vote_account).unwrap(); bank.store_account(vote_pubkey, &vote_account); } diff --git a/rpc/src/rpc_completed_slots_service.rs b/rpc/src/rpc_completed_slots_service.rs index 919f66a98d..fb1c20f319 100644 --- a/rpc/src/rpc_completed_slots_service.rs +++ b/rpc/src/rpc_completed_slots_service.rs @@ -24,7 +24,7 @@ impl RpcCompletedSlotsService { exit: Arc<AtomicBool>, ) -> JoinHandle<()> { Builder::new() - .name("solana-rpc-completed-slots-service".to_string()) + .name("solRpcComplSlot".to_string()) .spawn(move || loop { // received exit signal, shutdown the service if exit.load(Ordering::Relaxed) { diff --git a/rpc/src/rpc_pubsub_service.rs b/rpc/src/rpc_pubsub_service.rs index 4efc2b0aa1..9b5139a9ef 100644 --- a/rpc/src/rpc_pubsub_service.rs +++ b/rpc/src/rpc_pubsub_service.rs @@ -85,7 +85,7 @@ impl PubSubService { let (trigger, tripwire) = Tripwire::new(); let thread_hdl = Builder::new() - .name("solana-pubsub".to_string()) + .name("solRpcPubSub".to_string()) .spawn(move || { let runtime = tokio::runtime::Builder::new_multi_thread() .worker_threads(pubsub_config.worker_threads) @@ -416,6 +416,6 @@ mod tests { let (_trigger, pubsub_service) = PubSubService::new(PubSubConfig::default(), &subscriptions, pubsub_addr); let thread = pubsub_service.thread_hdl.thread(); - assert_eq!(thread.name().unwrap(), "solana-pubsub"); + assert_eq!(thread.name().unwrap(), "solRpcPubSub"); } } diff --git a/rpc/src/rpc_service.rs b/rpc/src/rpc_service.rs index defbe917b4..8c801f63e8 100644 --- a/rpc/src/rpc_service.rs +++ b/rpc/src/rpc_service.rs @@ -355,7 +355,7 @@ impl JsonRpcService { leader_schedule_cache: Arc<LeaderScheduleCache>, connection_cache: Arc<ConnectionCache>, current_transaction_status_slot: Arc<AtomicU64>, - ) -> Self { + ) -> Result<Self, String> { info!("rpc bound to {:?}", rpc_addr); info!("rpc configuration: {:?}", config); let rpc_threads = 1.max(config.rpc_threads); @@ -383,7 +383,7 @@ tokio::runtime::Builder::new_multi_thread() .worker_threads(rpc_threads) .on_thread_start(move || renice_this_thread(rpc_niceness_adj).unwrap()) - .thread_name("sol-rpc-el") + .thread_name("solRpcEl") .enable_all() .build() .expect("Runtime"), @@ -481,7 +481,7 @@ let (close_handle_sender, close_handle_receiver) = unbounded(); let thread_hdl = Builder::new() - .name("solana-jsonrpc".to_string()) + .name("solJsonRpcSvc".to_string()) .spawn(move || { renice_this_thread(rpc_niceness_adj).unwrap(); @@ -526,28 +526,29 @@ e, rpc_addr.port() ); + close_handle_sender.send(Err(e.to_string())).unwrap(); return; } let
server = server.unwrap(); - close_handle_sender.send(server.close_handle()).unwrap(); + close_handle_sender.send(Ok(server.close_handle())).unwrap(); server.wait(); exit_bigtable_ledger_upload_service.store(true, Ordering::Relaxed); }) .unwrap(); - let close_handle = close_handle_receiver.recv().unwrap(); + let close_handle = close_handle_receiver.recv().unwrap()?; let close_handle_ = close_handle.clone(); validator_exit .write() .unwrap() .register_exit(Box::new(move || close_handle_.close())); - Self { + Ok(Self { thread_hdl, #[cfg(test)] request_processor: test_request_processor, close_handle: Some(close_handle), - } + }) } pub fn exit(&mut self) { @@ -642,9 +643,10 @@ mod tests { Arc::new(LeaderScheduleCache::default()), connection_cache, Arc::new(AtomicU64::default()), - ); + ) + .expect("assume successful JsonRpcService start"); let thread = rpc_service.thread_hdl.thread(); - assert_eq!(thread.name().unwrap(), "solana-jsonrpc"); + assert_eq!(thread.name().unwrap(), "solJsonRpcSvc"); assert_eq!( 10_000, diff --git a/rpc/src/rpc_subscriptions.rs b/rpc/src/rpc_subscriptions.rs index bd9fe33746..1b5023fe5f 100644 --- a/rpc/src/rpc_subscriptions.rs +++ b/rpc/src/rpc_subscriptions.rs @@ -632,11 +632,11 @@ impl RpcSubscriptions { } else { Some( Builder::new() - .name("solana-rpc-notifications".to_string()) + .name("solRpcNotifier".to_string()) .spawn(move || { let pool = rayon::ThreadPoolBuilder::new() .num_threads(notification_threads) - .thread_name(|i| format!("sol-sub-notif-{}", i)) + .thread_name(|i| format!("solRpcNotify{:02}", i)) .build() .unwrap(); pool.install(|| { @@ -1001,10 +1001,7 @@ impl RpcSubscriptions { let mut slots_to_notify: Vec<_> = (*w_last_unnotified_slot..slot).collect(); let ancestors = bank.proper_ancestors_set(); - slots_to_notify = slots_to_notify - .into_iter() - .filter(|slot| ancestors.contains(slot)) - .collect(); + slots_to_notify.retain(|slot| ancestors.contains(slot)); slots_to_notify.push(slot); for s in slots_to_notify { // To avoid skipping a slot that fails this condition, diff --git a/rpc/src/transaction_status_service.rs b/rpc/src/transaction_status_service.rs index b9fdfb9bf9..4d393a0126 100644 --- a/rpc/src/transaction_status_service.rs +++ b/rpc/src/transaction_status_service.rs @@ -37,7 +37,7 @@ impl TransactionStatusService { ) -> Self { let exit = exit.clone(); let thread_hdl = Builder::new() - .name("solana-transaction-status-writer".to_string()) + .name("solTxStatusWrtr".to_string()) .spawn(move || loop { if exit.load(Ordering::Relaxed) { break; diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index c9c1e64992..7fba440380 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -32,12 +32,12 @@ memmap2 = "0.5.3" num-derive = { version = "0.3" } num-traits = { version = "0.2" } num_cpus = "1.13.1" -once_cell = "1.12.0" +once_cell = "1.13.0" ouroboros = "0.15.0" rand = "0.7.0" rayon = "1.5.3" regex = "1.5.6" -serde = { version = "1.0.138", features = ["rc"] } +serde = { version = "1.0.143", features = ["rc"] } serde_derive = "1.0.103" solana-address-lookup-table-program = { path = "../programs/address-lookup-table", version = "=1.12.0" } solana-bucket-map = { path = "../bucket_map", version = "=1.12.0" } diff --git a/runtime/benches/accounts.rs b/runtime/benches/accounts.rs index ec4eea2fef..7160c2efae 100644 --- a/runtime/benches/accounts.rs +++ b/runtime/benches/accounts.rs @@ -178,7 +178,7 @@ fn bench_delete_dependencies(bencher: &mut Bencher) { accounts.add_root(i); } bencher.iter(|| { - accounts.accounts_db.clean_accounts(None, 
false, None); + accounts.accounts_db.clean_accounts_for_tests(); }); } diff --git a/runtime/benches/status_cache.rs b/runtime/benches/status_cache.rs index c207a71246..a722020d6b 100644 --- a/runtime/benches/status_cache.rs +++ b/runtime/benches/status_cache.rs @@ -28,26 +28,12 @@ fn bench_status_cache_serialize(bencher: &mut Bencher) { status_cache.insert(&blockhash, &sig, 0, Ok(())); } } + assert!(status_cache.roots().contains(&0)); bencher.iter(|| { - let _ = serialize(&status_cache.slot_deltas(&[0])).unwrap(); + let _ = serialize(&status_cache.root_slot_deltas()).unwrap(); }); } -#[bench] -fn bench_status_cache_slot_deltas(bencher: &mut Bencher) { - let mut status_cache = BankStatusCache::default(); - - // fill the status cache - let slots: Vec<_> = (42..).take(MAX_CACHE_ENTRIES).collect(); - for slot in &slots { - for _ in 0..5 { - status_cache.insert(&Hash::new_unique(), Hash::new_unique(), *slot, Ok(())); - } - } - - bencher.iter(|| test::black_box(status_cache.slot_deltas(&slots))); -} - #[bench] fn bench_status_cache_root_slot_deltas(bencher: &mut Bencher) { let mut status_cache = BankStatusCache::default(); diff --git a/runtime/src/account_rent_state.rs b/runtime/src/account_rent_state.rs index 629502caf4..74cbc5b81a 100644 --- a/runtime/src/account_rent_state.rs +++ b/runtime/src/account_rent_state.rs @@ -104,7 +104,7 @@ pub(crate) fn check_rent_state( .get_account_at_index(index) .expect(expect_msg) .borrow(), - include_account_index_in_err.then(|| index), + include_account_index_in_err.then_some(index), prevent_crediting_accounts_that_end_rent_paying, )?; } diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index 0ec1939e45..3d52dde019 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -34,9 +34,7 @@ use { account_utils::StateMut, bpf_loader_upgradeable::{self, UpgradeableLoaderState}, clock::{BankId, Slot, INITIAL_RENT_EPOCH}, - feature_set::{ - self, add_set_compute_unit_price_ix, use_default_units_in_fee_calculation, FeatureSet, - }, + feature_set::{self, use_default_units_in_fee_calculation, FeatureSet}, fee::FeeStructure, genesis_config::ClusterType, hash::Hash, @@ -442,7 +440,7 @@ impl Accounts { payer_account, feature_set .is_active(&feature_set::include_account_index_in_rent_error::ID) - .then(|| payer_index), + .then_some(payer_index), feature_set .is_active(&feature_set::prevent_crediting_accounts_that_end_rent_paying::id()), ) @@ -554,7 +552,6 @@ impl Accounts { tx.message(), lamports_per_signature, fee_structure, - feature_set.is_active(&add_set_compute_unit_price_ix::id()), feature_set.is_active(&use_default_units_in_fee_calculation::id()), ) } else { @@ -797,6 +794,9 @@ impl Accounts { ) -> u64 { let use_index = false; let is_startup = true; + self.accounts_db + .verify_accounts_hash_in_bg + .wait_for_complete(); self.accounts_db .update_accounts_hash_with_index_option( use_index, @@ -1125,9 +1125,11 @@ impl Accounts { pub fn lock_accounts<'a>( &self, txs: impl Iterator<Item = &'a SanitizedTransaction>, + tx_account_lock_limit: usize, ) -> Vec<Result<()>> { - let tx_account_locks_results: Vec<Result<TransactionAccountLocks>> = - txs.map(|tx| tx.get_account_locks()).collect(); + let tx_account_locks_results: Vec<Result<TransactionAccountLocks>> = txs + .map(|tx| tx.get_account_locks(tx_account_lock_limit)) + .collect(); self.lock_accounts_inner( tx_account_locks_results, &HashSet::default(), @@ -1138,10 +1140,12 @@ impl Accounts { pub fn lock_accounts_sequential_with_results<'a>( &self, txs: impl Iterator<Item = &'a SanitizedTransaction>, + tx_account_lock_limit: usize, account_locks_override: Option>, ) -> Vec<Result<()>> { - let tx_account_locks_results: Vec<Result<TransactionAccountLocks>> = - txs.map(|tx|
tx.get_account_locks()).collect(); + let tx_account_locks_results: Vec<Result<TransactionAccountLocks>> = txs + .map(|tx| tx.get_account_locks(tx_account_lock_limit)) + .collect(); self.lock_accounts_sequential_inner(tx_account_locks_results, account_locks_override) } @@ -1151,13 +1155,14 @@ &self, txs: impl Iterator<Item = &'a SanitizedTransaction>, results: impl Iterator<Item = &'a Result<()>>, + tx_account_lock_limit: usize, additional_read_locks: &HashSet<Pubkey>, additional_write_locks: &HashSet<Pubkey>, ) -> Vec<Result<()>> { let tx_account_locks_results: Vec<Result<TransactionAccountLocks>> = txs .zip(results) .map(|(tx, result)| match result { - Ok(()) => tx.get_account_locks(), + Ok(()) => tx.get_account_locks(tx_account_lock_limit), Err(err) => Err(err.clone()), }) .collect(); @@ -1739,7 +1744,6 @@ mod tests { lamports_per_signature, &FeeStructure::default(), true, - true, ); assert_eq!(fee, lamports_per_signature); @@ -2574,7 +2578,7 @@ }; let tx = new_sanitized_tx(&[&keypair], message, Hash::default()); - let results = accounts.lock_accounts([tx].iter()); + let results = accounts.lock_accounts([tx].iter(), MAX_TX_ACCOUNT_LOCKS); assert_eq!(results[0], Err(TransactionError::AccountLoadedTwice)); } @@ -2607,7 +2611,7 @@ }; let txs = vec![new_sanitized_tx(&[&keypair], message, Hash::default())]; - let results = accounts.lock_accounts(txs.iter()); + let results = accounts.lock_accounts(txs.iter(), MAX_TX_ACCOUNT_LOCKS); assert_eq!(results[0], Ok(())); accounts.unlock_accounts(txs.iter(), &results); } @@ -2629,7 +2633,7 @@ }; let txs = vec![new_sanitized_tx(&[&keypair], message, Hash::default())]; - let results = accounts.lock_accounts(txs.iter()); + let results = accounts.lock_accounts(txs.iter(), MAX_TX_ACCOUNT_LOCKS); assert_eq!(results[0], Err(TransactionError::TooManyAccountLocks)); } } @@ -2668,7 +2672,7 @@ instructions, ); let tx = new_sanitized_tx(&[&keypair0], message, Hash::default()); - let results0 = accounts.lock_accounts([tx.clone()].iter()); + let results0 = accounts.lock_accounts([tx.clone()].iter(), MAX_TX_ACCOUNT_LOCKS); assert!(results0[0].is_ok()); assert_eq!( @@ -2703,7 +2707,7 @@ ); let tx1 = new_sanitized_tx(&[&keypair1], message, Hash::default()); let txs = vec![tx0, tx1]; - let results1 = accounts.lock_accounts(txs.iter()); + let results1 = accounts.lock_accounts(txs.iter(), MAX_TX_ACCOUNT_LOCKS); assert!(results1[0].is_ok()); // Read-only account (keypair1) can be referenced multiple times assert!(results1[1].is_err()); // Read-only account (keypair1) cannot also be locked as writable @@ -2730,7 +2734,7 @@ instructions, ); let tx = new_sanitized_tx(&[&keypair1], message, Hash::default()); - let results2 = accounts.lock_accounts([tx].iter()); + let results2 = accounts.lock_accounts([tx].iter(), MAX_TX_ACCOUNT_LOCKS); assert!(results2[0].is_ok()); // Now keypair1 account can be locked as writable // Check that read-only lock with zero references is deleted @@ -2799,7 +2803,9 @@ let exit_clone = exit_clone.clone(); loop { let txs = vec![writable_tx.clone()]; - let results = accounts_clone.clone().lock_accounts(txs.iter()); + let results = accounts_clone + .clone() + .lock_accounts(txs.iter(), MAX_TX_ACCOUNT_LOCKS); for result in results.iter() { if result.is_ok() { counter_clone.clone().fetch_add(1, Ordering::SeqCst); } } @@ -2814,7 +2820,9 @@ let counter_clone = counter; for _ in 0..5 { let txs = vec![readonly_tx.clone()]; - let results = accounts_arc.clone().lock_accounts(txs.iter()); + let results = accounts_arc + .clone() + .lock_accounts(txs.iter(), MAX_TX_ACCOUNT_LOCKS); if
results[0].is_ok() { let counter_value = counter_clone.clone().load(Ordering::SeqCst); thread::sleep(time::Duration::from_millis(50)); @@ -2860,7 +2868,7 @@ mod tests { instructions, ); let tx = new_sanitized_tx(&[&keypair0], message, Hash::default()); - let results0 = accounts.lock_accounts([tx].iter()); + let results0 = accounts.lock_accounts([tx].iter(), MAX_TX_ACCOUNT_LOCKS); assert!(results0[0].is_ok()); // Instruction program-id account demoted to readonly @@ -2954,6 +2962,7 @@ mod tests { let results = accounts.lock_accounts_with_results( txs.iter(), qos_results.iter(), + MAX_TX_ACCOUNT_LOCKS, &HashSet::default(), &HashSet::default(), ); @@ -3144,7 +3153,7 @@ mod tests { } } info!("done..cleaning.."); - accounts.accounts_db.clean_accounts(None, false, None); + accounts.accounts_db.clean_accounts_for_tests(); } fn load_accounts_no_store( diff --git a/runtime/src/accounts_background_service.rs b/runtime/src/accounts_background_service.rs index a0695e3373..41ed6eb8ce 100644 --- a/runtime/src/accounts_background_service.rs +++ b/runtime/src/accounts_background_service.rs @@ -254,11 +254,7 @@ impl SnapshotRequestHandler { }; let mut clean_time = Measure::start("clean_time"); - // Don't clean the slot we're snapshotting because it may have zero-lamport - // accounts that were included in the bank delta hash when the bank was frozen, - // and if we clean them here, the newly created snapshot's hash may not match - // the frozen hash. - snapshot_root_bank.clean_accounts(true, false, *last_full_snapshot_slot); + snapshot_root_bank.clean_accounts(*last_full_snapshot_slot); clean_time.stop(); if accounts_db_caching_enabled { @@ -370,6 +366,7 @@ impl SnapshotRequestHandler { SnapshotError::MismatchedBaseSlot(..) => true, SnapshotError::NoSnapshotArchives => true, SnapshotError::MismatchedSlotHash(..) => true, + SnapshotError::VerifySlotDeltas(..) => true, } } } @@ -464,7 +461,7 @@ impl AccountsBackgroundService { let mut total_remove_slots_time = 0; let mut last_expiration_check_time = Instant::now(); let t_background = Builder::new() - .name("solana-bg-accounts".to_string()) + .name("solBgAccounts".to_string()) .spawn(move || { let mut stats = StatsManager::new(); let mut last_snapshot_end_time = None; @@ -563,7 +560,7 @@ impl AccountsBackgroundService { // slots >= bank.slot() bank.force_flush_accounts_cache(); } - bank.clean_accounts(true, false, last_full_snapshot_slot); + bank.clean_accounts(last_full_snapshot_slot); last_cleaned_block_height = bank.block_height(); } } diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 23ab3090ac..99af8d6bb0 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -1709,7 +1709,7 @@ pub fn make_min_priority_thread_pool() -> ThreadPool { // Use lower thread count to reduce priority. let num_threads = quarter_thread_count(); rayon::ThreadPoolBuilder::new() - .thread_name(|i| format!("solana-cleanup-accounts-{}", i)) + .thread_name(|i| format!("solAccountsLo{:02}", i)) .num_threads(num_threads) .build() .unwrap() @@ -1822,14 +1822,7 @@ impl<'a, T: Fn(Slot) -> Option + Sync + Send + Clone> AppendVecScan for Sc // when we are scanning with bin ranges, we don't need to use exact bin numbers. Subtract to make first bin we care about at index 0. 
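// [Editor's note, illustration only -- not part of the patch] For example, with
// bin_range.start == 5, a pubkey whose absolute bin is 7 is tracked at index 2
// after the subtraction below; indices are relative to the start of the scanned
// bin range, not absolute bin numbers.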
self.pubkey_to_bin_index -= self.bin_range.start; - let raw_lamports = loaded_account.lamports(); - let zero_raw_lamports = raw_lamports == 0; - let balance = if zero_raw_lamports { - crate::accounts_hash::ZERO_RAW_LAMPORTS_SENTINEL - } else { - raw_lamports - }; - + let balance = loaded_account.lamports(); let loaded_hash = loaded_account.loaded_hash(); let new_hash = ExpectedRentCollection::maybe_rehash_skipped_rewrite( loaded_account, @@ -1904,7 +1897,7 @@ impl AccountsDb { num_hash_scan_passes: Option<usize>, ) -> Self { let num_threads = get_thread_count(); - const MAX_READ_ONLY_CACHE_DATA_SIZE: usize = 200_000_000; + const MAX_READ_ONLY_CACHE_DATA_SIZE: usize = 400_000_000; // 400M bytes let mut temp_accounts_hash_cache_path = None; let accounts_hash_cache_path = accounts_hash_cache_path.unwrap_or_else(|| { @@ -1959,7 +1952,7 @@ file_size: DEFAULT_FILE_SIZE, thread_pool: rayon::ThreadPoolBuilder::new() .num_threads(num_threads) - .thread_name(|i| format!("solana-db-accounts-{}", i)) + .thread_name(|i| format!("solAccounts{:02}", i)) .stack_size(ACCOUNTS_STACK_SIZE) .build() .unwrap(), @@ -2152,11 +2145,11 @@ .expect("Cluster type must be set at initialization") } - /// Reclaim older states of accounts older than max_clean_root for AccountsDb bloat mitigation + /// Reclaim older states of accounts older than max_clean_root_inclusive for AccountsDb bloat mitigation fn clean_accounts_older_than_root( &self, purges: Vec<Pubkey>, - max_clean_root: Option<Slot>, + max_clean_root_inclusive: Option<Slot>, ancient_account_cleans: &AtomicU64, ) -> ReclaimResult { if purges.is_empty() { @@ -2174,14 +2167,17 @@ .filter_map(|pubkeys: &[Pubkey]| { let mut reclaims = Vec::new(); for pubkey in pubkeys { - self.accounts_index - .clean_rooted_entries(pubkey, &mut reclaims, max_clean_root); + self.accounts_index.clean_rooted_entries( + pubkey, + &mut reclaims, + max_clean_root_inclusive, + ); } (!reclaims.is_empty()).then(|| { // figure out how many ancient accounts have been reclaimed let old_reclaims = reclaims .iter() - .filter_map(|(slot, _)| (slot < &one_epoch_old).then(|| 1)) + .filter_map(|(slot, _)| (slot < &one_epoch_old).then_some(1)) .sum(); ancient_account_cleans.fetch_add(old_reclaims, Ordering::Relaxed); reclaims @@ -2225,16 +2221,47 @@ .fetch_add(measure.as_us(), Ordering::Relaxed); } + /// increment store_counts to non-zero for all stores that can not be deleted. + /// a store cannot be deleted if: + /// 1. one of the pubkeys in the store has account info to a store whose store count is not going to zero + /// 2. a pubkey we were planning to remove is not removing all stores that contain the account fn calc_delete_dependencies( purges: &HashMap<Pubkey, (SlotList<AccountInfo>, RefCount)>, store_counts: &mut HashMap<AppendVecId, (usize, HashSet<Pubkey>)>, + min_store_id: Option<AppendVecId>, ) { // Another pass to check if there are some filtered accounts which // do not match the criteria of deleting all appendvecs which contain them // then increment their storage count.
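// [Editor's note, illustration only -- not part of the patch] A concrete instance of
// the rules above: suppose stores A and B both hold purge-candidate entries for
// pubkey P, but B also holds an entry that is not being purged, so B's count stays
// non-zero. Rule 1 then applies to P, and the worklist below bumps the count of
// every store containing P (including A) to non-zero, so no store is deleted out
// from under P.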
let mut already_counted = HashSet::new(); + let mut failed_store_id = None; for (pubkey, (account_infos, ref_count_from_storage)) in purges.iter() { - let no_delete = if account_infos.len() as RefCount != *ref_count_from_storage { + let all_stores_being_deleted = + account_infos.len() as RefCount == *ref_count_from_storage; + if all_stores_being_deleted { + let mut delete = true; + for (_slot, account_info) in account_infos { + let store_id = account_info.store_id(); + let count = store_counts.get(&store_id).unwrap().0; + debug!( + "calc_delete_dependencies() + storage id: {}, + count len: {}", + store_id, count, + ); + if count != 0 { + // one of the pubkeys in the store has account info to a store whose store count is not going to zero + failed_store_id = Some(store_id); + delete = false; + break; + } + } + if delete { + // this pubkey can be deleted from all stores it is in + continue; + } + } else { + // a pubkey we were planning to remove is not removing all stores that contain the account debug!( "calc_delete_dependencies(), pubkey: {}, @@ -2246,45 +2273,37 @@ impl AccountsDb { account_infos.len(), ref_count_from_storage, ); - true - } else { - let mut no_delete = false; - for (_slot, account_info) in account_infos { - debug!( - "calc_delete_dependencies() - storage id: {}, - count len: {}", - account_info.store_id(), - store_counts.get(&account_info.store_id()).unwrap().0, - ); - if store_counts.get(&account_info.store_id()).unwrap().0 != 0 { - no_delete = true; - break; - } + } + + // increment store_counts to non-zero for all stores that can not be deleted. + let mut pending_store_ids = HashSet::new(); + for (_slot, account_info) in account_infos { + if !already_counted.contains(&account_info.store_id()) { + pending_store_ids.insert(account_info.store_id()); } - no_delete - }; - if no_delete { - let mut pending_store_ids = HashSet::new(); - for (_bank_id, account_info) in account_infos { - if !already_counted.contains(&account_info.store_id()) { - pending_store_ids.insert(account_info.store_id()); + } + while !pending_store_ids.is_empty() { + let id = pending_store_ids.iter().next().cloned().unwrap(); + if Some(id) == min_store_id { + if let Some(failed_store_id) = failed_store_id.take() { + info!("calc_delete_dependencies, oldest store is not able to be deleted because of {pubkey} in store {failed_store_id}"); + } else { + info!("calc_delete_dependencies, oldest store is not able to be deleted because of {pubkey}, account infos len: {}, ref count: {ref_count_from_storage}", account_infos.len()); } } - while !pending_store_ids.is_empty() { - let id = pending_store_ids.iter().next().cloned().unwrap(); - pending_store_ids.remove(&id); - if !already_counted.insert(id) { - continue; - } - store_counts.get_mut(&id).unwrap().0 += 1; - let affected_pubkeys = &store_counts.get(&id).unwrap().1; - for key in affected_pubkeys { - for (_slot, account_info) in &purges.get(key).unwrap().0 { - if !already_counted.contains(&account_info.store_id()) { - pending_store_ids.insert(account_info.store_id()); - } + pending_store_ids.remove(&id); + if !already_counted.insert(id) { + continue; + } + // the point of all this code: increment the store count to non-zero + store_counts.get_mut(&id).unwrap().0 += 1; + + let affected_pubkeys = &store_counts.get(&id).unwrap().1; + for key in affected_pubkeys { + for (_slot, account_info) in &purges.get(key).unwrap().0 { + if !already_counted.contains(&account_info.store_id()) { + pending_store_ids.insert(account_info.store_id()); } } } @@ -2313,7 +2332,7 @@ impl 
AccountsDb { fn start_background_hasher(&mut self) { let (sender, receiver) = unbounded(); Builder::new() - .name("solana-db-store-hasher-accounts".to_string()) + .name("solDbStoreHashr".to_string()) .spawn(move || { Self::background_hasher(receiver); }) @@ -2394,12 +2413,12 @@ /// Collect all the uncleaned slots, up to a max slot /// /// Search through the uncleaned Pubkeys and return all the slots, up to a maximum slot. - fn collect_uncleaned_slots_up_to_slot(&self, max_slot: Slot) -> Vec<Slot> { + fn collect_uncleaned_slots_up_to_slot(&self, max_slot_inclusive: Slot) -> Vec<Slot> { self.uncleaned_pubkeys .iter() .filter_map(|entry| { let slot = *entry.key(); - (slot <= max_slot).then(|| slot) + (slot <= max_slot_inclusive).then_some(slot) }) .collect() } @@ -2426,9 +2445,9 @@ /// fn remove_uncleaned_slots_and_collect_pubkeys_up_to_slot( &self, - max_slot: Slot, + max_slot_inclusive: Slot, ) -> Vec<Vec<Pubkey>> { - let uncleaned_slots = self.collect_uncleaned_slots_up_to_slot(max_slot); + let uncleaned_slots = self.collect_uncleaned_slots_up_to_slot(max_slot_inclusive); self.remove_uncleaned_slots_and_collect_pubkeys(uncleaned_slots) } @@ -2437,28 +2456,60 @@ // dirty_stores - set of stores which had accounts removed or recently rooted fn construct_candidate_clean_keys( &self, - max_clean_root: Option<Slot>, + max_clean_root_inclusive: Option<Slot>, + is_startup: bool, last_full_snapshot_slot: Option<Slot>, timings: &mut CleanKeyTimings, - ) -> Vec<Pubkey> { + ) -> (Vec<Pubkey>, Option<AppendVecId>) { let mut dirty_store_processing_time = Measure::start("dirty_store_processing"); - let max_slot = max_clean_root.unwrap_or_else(|| self.accounts_index.max_root_inclusive()); + let max_slot_inclusive = + max_clean_root_inclusive.unwrap_or_else(|| self.accounts_index.max_root_inclusive()); let mut dirty_stores = Vec::with_capacity(self.dirty_stores.len()); - self.dirty_stores.retain(|(slot, _store_id), store| { - if *slot > max_slot { + // find the oldest append vec older than one epoch old + // we'll add logging if that append vec cannot be marked dead + let mut min_dirty_slot = self.get_accounts_hash_complete_one_epoch_old(); + let mut min_dirty_store_id = None; + self.dirty_stores.retain(|(slot, store_id), store| { + if *slot > max_slot_inclusive { true } else { + if *slot < min_dirty_slot { + min_dirty_slot = *slot; + min_dirty_store_id = Some(*store_id); + } dirty_stores.push((*slot, store.clone())); false } }); let dirty_stores_len = dirty_stores.len(); let pubkeys = DashSet::new(); - timings.oldest_dirty_slot = max_slot.saturating_add(1); - for (slot, store) in dirty_stores { - timings.oldest_dirty_slot = std::cmp::min(timings.oldest_dirty_slot, slot); - store.accounts.account_iter().for_each(|account| { - pubkeys.insert(account.meta.pubkey); + let mut dirty_store_routine = || { + let chunk_size = 1.max(dirty_stores_len.saturating_div(rayon::current_num_threads())); + let oldest_dirty_slots: Vec<Slot> = dirty_stores + .par_chunks(chunk_size) + .map(|dirty_store_chunk| { + let mut oldest_dirty_slot = max_slot_inclusive.saturating_add(1); + dirty_store_chunk.iter().for_each(|(slot, store)| { + oldest_dirty_slot = oldest_dirty_slot.min(*slot); + store.accounts.account_iter().for_each(|account| { + pubkeys.insert(account.meta.pubkey); + }); + }); + oldest_dirty_slot + }) + .collect(); + timings.oldest_dirty_slot = *oldest_dirty_slots + .iter() + .min() + .unwrap_or(&max_slot_inclusive.saturating_add(1)); + }; + + if is_startup { + // Free to consume all the cores during startup + dirty_store_routine(); + } else {
+ self.thread_pool_clean.install(|| { + dirty_store_routine(); }); } trace!( @@ -2471,7 +2522,8 @@ impl AccountsDb { timings.dirty_store_processing_us += dirty_store_processing_time.as_us(); let mut collect_delta_keys = Measure::start("key_create"); - let delta_keys = self.remove_uncleaned_slots_and_collect_pubkeys_up_to_slot(max_slot); + let delta_keys = + self.remove_uncleaned_slots_and_collect_pubkeys_up_to_slot(max_slot_inclusive); collect_delta_keys.stop(); timings.collect_delta_keys_us += collect_delta_keys.as_us(); @@ -2503,7 +2555,7 @@ impl AccountsDb { self.zero_lamport_accounts_to_purge_after_full_snapshot .retain(|(slot, pubkey)| { let is_candidate_for_clean = - max_slot >= *slot && last_full_snapshot_slot >= *slot; + max_slot_inclusive >= *slot && last_full_snapshot_slot >= *slot; if is_candidate_for_clean { pubkeys.push(*pubkey); } @@ -2511,7 +2563,12 @@ impl AccountsDb { }); } - pubkeys + (pubkeys, min_dirty_store_id) + } + + /// Call clean_accounts() with the common parameters that tests/benches use. + pub fn clean_accounts_for_tests(&self) { + self.clean_accounts(None, false, None) } // Purge zero lamport accounts and older rooted account states as garbage @@ -2538,8 +2595,9 @@ impl AccountsDb { self.report_store_stats(); let mut key_timings = CleanKeyTimings::default(); - let mut pubkeys = self.construct_candidate_clean_keys( + let (mut pubkeys, min_dirty_store_id) = self.construct_candidate_clean_keys( max_clean_root, + is_startup, last_full_snapshot_slot, &mut key_timings, ); @@ -2732,7 +2790,11 @@ impl AccountsDb { store_counts_time.stop(); let mut calc_deps_time = Measure::start("calc_deps"); - Self::calc_delete_dependencies(&purges_zero_lamports, &mut store_counts); + Self::calc_delete_dependencies( + &purges_zero_lamports, + &mut store_counts, + min_dirty_store_id, + ); calc_deps_time.stop(); let mut purge_filter = Measure::start("purge_filter"); @@ -2968,35 +3030,35 @@ impl AccountsDb { } /// During clean, some zero-lamport accounts that are marked for purge should *not* actually - /// get purged. Filter out those accounts here. + /// get purged. Filter out those accounts here by removing them from 'purges_zero_lamports' /// /// When using incremental snapshots, do not purge zero-lamport accounts if the slot is higher /// than the last full snapshot slot. This is to protect against the following scenario: /// /// ```text - /// A full snapshot is taken, and it contains an account with a non-zero balance. Later, - /// that account's goes to zero. Evntually cleaning runs, and before, this account would be - /// cleaned up. Finally, an incremental snapshot is taken. + /// A full snapshot is taken, including account 'alpha' with a non-zero balance. In a later slot, + /// alpha's lamports go to zero. Eventually, cleaning runs. Without this change, + /// alpha would be cleaned up and removed completely. Finally, an incremental snapshot is taken. /// - /// Later, the incremental (and full) snapshot is used to rebuild the bank and accounts - /// database (e.x. if the node restarts). The full snapshot _does_ contain the account (from - /// above) and its balance is non-zero, however, since the account was cleaned up in a later - /// slot, the incremental snapshot does not contain any info about this account, thus, the - /// accounts database will contain the old info from this account, which has its old non-zero + /// Later, the incremental and full snapshots are used to rebuild the bank and accounts + /// database (e.x. if the node restarts). 
The full snapshot _does_ contain alpha + /// and its balance is non-zero. However, since alpha was cleaned up in a slot after the full + /// snapshot slot (due to having zero lamports), the incremental snapshot would not contain alpha. + /// Thus, the accounts database will contain the old, incorrect info for alpha with a non-zero /// balance. Very bad! /// ``` /// /// This filtering step can be skipped if there is no `last_full_snapshot_slot`, or if the - /// `max_clean_root` is less-than-or-equal-to the `last_full_snapshot_slot`. + /// `max_clean_root_inclusive` is less-than-or-equal-to the `last_full_snapshot_slot`. fn filter_zero_lamport_clean_for_incremental_snapshots( &self, - max_clean_root: Option, + max_clean_root_inclusive: Option, last_full_snapshot_slot: Option, store_counts: &HashMap)>, purges_zero_lamports: &mut HashMap, RefCount)>, ) { - let should_filter_for_incremental_snapshots = - max_clean_root.unwrap_or(Slot::MAX) > last_full_snapshot_slot.unwrap_or(Slot::MAX); + let should_filter_for_incremental_snapshots = max_clean_root_inclusive.unwrap_or(Slot::MAX) + > last_full_snapshot_slot.unwrap_or(Slot::MAX); assert!( last_full_snapshot_slot.is_some() || !should_filter_for_incremental_snapshots, "if filtering for incremental snapshots, then snapshots should be enabled", @@ -3651,6 +3713,25 @@ impl AccountsDb { }) } + /// unref each account in 'accounts' that already exists in 'ancient_store' + fn unref_accounts_already_in_storage( + &self, + accounts: &[(&Pubkey, &StoredAccountMeta<'_>, u64)], + ancient_store: &Arc, + ) { + // make a hashset of all keys we're about to add to this storage + let mut accounts_to_add = accounts.iter().map(|entry| entry.0).collect::>(); + // for each key that we're about to add that already exists in this storage, we need to unref. The account was in a different storage. + // Now it is being put into an ancient storage, but it is already there, so maintain max of 1 ref per storage in the accounts index. 
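+        // For example, if 'accounts' holds keys {A, B} and 'ancient_store'
+        // already contains entries for B (twice) and C, then only B is unref'd,
+        // and only once: the first remove(&B) returns true, the second returns
+        // false, and C was never in 'accounts_to_add'.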
+ ancient_store.accounts.account_iter().for_each(|account| { + // remove here is so we don't unref the same key more than once in this loop if it is in the existing storage 2 times already + let key = &account.meta.pubkey; + if accounts_to_add.remove(key) { + self.accounts_index.unref_from_storage(key); + } + }) + } + /// helper function to cleanup call to 'store_accounts_frozen' fn store_ancient_accounts( &self, @@ -3658,8 +3739,14 @@ impl AccountsDb { ancient_store: &Arc, accounts: &AccountsToStore, storage_selector: StorageSelector, + unref_if_already_exists: bool, ) -> StoreAccountsTiming { let (accounts, hashes) = accounts.get(storage_selector); + + if unref_if_already_exists { + self.unref_accounts_already_in_storage(accounts, ancient_store); + } + self.store_accounts_frozen( (ancient_slot, accounts), Some(hashes), @@ -3678,7 +3765,7 @@ impl AccountsDb { ) -> Option { self.get_storages_for_slot(slot).and_then(|all_storages| { self.should_move_to_ancient_append_vec(&all_storages, current_ancient, slot) - .then(|| all_storages) + .then_some(all_storages) }) } @@ -3713,10 +3800,6 @@ impl AccountsDb { } if is_ancient(accounts) { - if current_ancient.is_some() { - info!("ancient_append_vec: shrinking full ancient: {}", slot); - } - // this slot is ancient and can become the 'current' ancient for other slots to be squashed into *current_ancient = Some((slot, Arc::clone(storage))); return false; // we're done with this slot - this slot IS the ancient append vec @@ -3738,14 +3821,7 @@ impl AccountsDb { let mut current_ancient = None; let mut dropped_roots = vec![]; - if let Some(first_slot) = sorted_slots.first() { - info!( - "ancient_append_vec: combine_ancient_slots first slot: {}, num_roots: {}", - first_slot, - sorted_slots.len() - ); - } - + let len = sorted_slots.len(); for slot in sorted_slots { let old_storages = match self.get_storages_to_move_to_ancient_append_vec(slot, &mut current_ancient) { @@ -3758,7 +3834,11 @@ impl AccountsDb { if guard.is_none() { // we are now doing interesting work in squashing ancient - guard = Some(self.active_stats.activate(ActiveStatItem::SquashAncient)) + guard = Some(self.active_stats.activate(ActiveStatItem::SquashAncient)); + info!( + "ancient_append_vec: combine_ancient_slots first slot: {}, num_roots: {}", + slot, len + ); } // this code is copied from shrink. I would like to combine it into a helper function, but the borrow checker has defeated my efforts so far. @@ -3840,6 +3920,8 @@ impl AccountsDb { ancient_store, &to_store, StorageSelector::Primary, + // we are adding accounts to an existing append vec from a different slot. We need to unref each account that exists already in 'ancient_store'. + slot != ancient_slot, ); // handle accounts from 'slot' which did not fit into the current ancient append vec @@ -3866,6 +3948,7 @@ impl AccountsDb { ancient_store, &to_store, StorageSelector::Overflow, + false, // we do not want to unref any accounts. these remaining accounts are going into a new append vec, so we need to keep the refs they already have ); store_accounts_timing.store_accounts_elapsed = timing.store_accounts_elapsed; store_accounts_timing.update_index_elapsed = timing.update_index_elapsed; @@ -5314,7 +5397,7 @@ impl AccountsDb { // with the same slot. 
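                // A false return from insert() means the slot was already in the
                // contended set (a flush is in progress); a true return reserves
                // the slot for this purge.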
let is_being_flushed = !currently_contended_slots.insert(*remove_slot); // If the cache is currently flushing this slot, add it to the list - is_being_flushed.then(|| remove_slot) + is_being_flushed.then_some(remove_slot) }) .cloned() .collect(); @@ -5546,7 +5629,8 @@ impl AccountsDb { .fetch_add(recycle_stores_write_elapsed.as_us(), Ordering::Relaxed); } - pub fn flush_accounts_cache_slot(&self, slot: Slot) { + #[cfg(test)] + pub(crate) fn flush_accounts_cache_slot_for_tests(&self, slot: Slot) { self.flush_slot_cache(slot); } @@ -6145,7 +6229,7 @@ impl AccountsDb { AccountsHash::checked_cast_for_capitalization(balances.map(|b| b as u128).sum::()) } - fn calculate_accounts_hash( + pub fn calculate_accounts_hash( &self, max_slot: Slot, config: &CalcAccountsHashConfig<'_>, @@ -6653,20 +6737,23 @@ impl AccountsDb { } /// storages are sorted by slot and have range info. - /// if we know slots_per_epoch, then add all stores older than slots_per_epoch to dirty_stores so clean visits these slots - fn mark_old_slots_as_dirty(&self, storages: &SortedStorages, slots_per_epoch: Option) { - if let Some(slots_per_epoch) = slots_per_epoch { - let max = storages.max_slot_inclusive(); - let acceptable_straggler_slot_count = 100; // do nothing special for these old stores which will likely get cleaned up shortly - let sub = slots_per_epoch + acceptable_straggler_slot_count; - let in_epoch_range_start = max.saturating_sub(sub); - for (slot, storages) in storages.iter_range(..in_epoch_range_start) { - if let Some(storages) = storages { - storages.iter().for_each(|store| { + /// add all stores older than slots_per_epoch to dirty_stores so clean visits these slots + fn mark_old_slots_as_dirty(&self, storages: &SortedStorages, slots_per_epoch: Slot) { + let max = storages.max_slot_inclusive(); + let acceptable_straggler_slot_count = 100; // do nothing special for these old stores which will likely get cleaned up shortly + let sub = slots_per_epoch + acceptable_straggler_slot_count; + let in_epoch_range_start = max.saturating_sub(sub); + for (slot, storages) in storages.iter_range(..in_epoch_range_start) { + if let Some(storages) = storages { + storages.iter().for_each(|store| { + if !is_ancient(&store.accounts) { + // ancient stores are managed separately - we expect them to be old and keeping accounts + // We can expect the normal processes will keep them cleaned. + // If we included them here then ALL accounts in ALL ancient append vecs will be visited by clean each time. 
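+                            // Net effect: clean revisits only the non-ancient
+                            // stores older than the max storage slot minus
+                            // (slots_per_epoch + 100).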
self.dirty_stores .insert((slot, store.append_vec_id()), store.clone()); - }); - } + } + }); } } } @@ -6871,13 +6958,18 @@ impl AccountsDb { /// normal code path returns the common cache path /// when called after a failure has been detected, redirect the cache storage to a separate folder for debugging later - fn get_cache_hash_data(&self, config: &CalcAccountsHashConfig<'_>) -> CacheHashData { + fn get_cache_hash_data( + &self, + config: &CalcAccountsHashConfig<'_>, + slot: Slot, + ) -> CacheHashData { if !config.store_detailed_debug_info_on_failure { CacheHashData::new(&self.accounts_hash_cache_path) } else { // this path executes when we are failing with a hash mismatch let mut new = self.accounts_hash_cache_path.clone(); new.push("failed_calculate_accounts_hash_cache"); + new.push(slot.to_string()); let _ = std::fs::remove_dir_all(&new); CacheHashData::new(&new) } @@ -6895,7 +6987,12 @@ impl AccountsDb { stats.oldest_root = storages.range().start; - self.mark_old_slots_as_dirty(storages, Some(config.epoch_schedule.slots_per_epoch)); + assert!( + !(config.store_detailed_debug_info_on_failure && config.use_write_cache), + "cannot accurately capture all data for debugging if accounts cache is being used" + ); + + self.mark_old_slots_as_dirty(storages, config.epoch_schedule.slots_per_epoch); let (num_hash_scan_passes, bins_per_pass) = Self::bins_per_pass(self.num_hash_scan_passes); let use_bg_thread_pool = config.use_bg_thread_pool; @@ -6903,7 +7000,7 @@ impl AccountsDb { let mut previous_pass = PreviousPass::default(); let mut final_result = (Hash::default(), 0); - let cache_hash_data = self.get_cache_hash_data(config); + let cache_hash_data = self.get_cache_hash_data(config, storages.max_slot_inclusive()); for pass in 0..num_hash_scan_passes { let bounds = Range { @@ -7562,17 +7659,15 @@ impl AccountsDb { stores .into_par_iter() .map(|store| { - let accounts = store.all_accounts(); let slot = store.slot(); - accounts - .into_iter() + store + .accounts + .account_iter() .map(|account| (slot, account.meta.pubkey)) - .collect::>() - }) - .reduce(HashSet::new, |mut reduced, store_pubkeys| { - reduced.extend(store_pubkeys); - reduced + .collect::>() }) + .flatten() + .collect::>() }) }; self.remove_dead_slots_metadata( @@ -7647,7 +7742,7 @@ impl AccountsDb { fn report_store_timings(&self) { if self.stats.last_store_report.should_update(1000) { - let (read_only_cache_hits, read_only_cache_misses) = + let (read_only_cache_hits, read_only_cache_misses, read_only_cache_evicts) = self.read_only_accounts_cache.get_and_reset_stats(); datapoint_info!( "accounts_db_store_timings", @@ -7714,6 +7809,11 @@ impl AccountsDb { read_only_cache_misses, i64 ), + ( + "read_only_accounts_cache_evicts", + read_only_cache_evicts, + i64 + ), ( "calc_stored_meta_us", self.stats.calc_stored_meta.swap(0, Ordering::Relaxed), @@ -10000,7 +10100,7 @@ pub mod tests { // overwrite old rooted account version; only the r_slot_0_stores.count() should be // decremented db.store_uncached(2, &[(&pubkeys[0], &account)]); - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); { let slot_0_stores = &db.storage.get_slot_stores(0).unwrap(); let slot_1_stores = &db.storage.get_slot_stores(1).unwrap(); @@ -10439,7 +10539,7 @@ pub mod tests { //slot is gone accounts.print_accounts_stats("pre-clean"); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert!(accounts.storage.map.get(&0).is_none()); //new value is there @@ -10522,7 +10622,7 @@ pub mod tests { // Slot 1 should be 
removed, slot 0 cannot be removed because it still has // the latest update for pubkey 2 - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert!(accounts.storage.get_slot_stores(0).is_some()); assert!(accounts.storage.get_slot_stores(1).is_none()); @@ -10557,7 +10657,7 @@ pub mod tests { assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 3); assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey2), 1); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); // Slots 0 and 1 should each have been cleaned because all of their // accounts are zero lamports assert!(accounts.storage.get_slot_stores(0).is_none()); @@ -10571,7 +10671,7 @@ pub mod tests { assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 1); assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey2), 0); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); // Slot 2 will now be cleaned, which will leave account 1 with a ref count of 0 assert!(accounts.storage.get_slot_stores(2).is_none()); assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 0); @@ -10598,7 +10698,7 @@ pub mod tests { // Slot 0 should be removed, and // zero-lamport account should be cleaned - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert!(accounts.storage.get_slot_stores(0).is_none()); assert!(accounts.storage.get_slot_stores(1).is_none()); @@ -10641,7 +10741,7 @@ pub mod tests { assert_eq!(accounts.alive_account_count_in_slot(0), 1); assert_eq!(accounts.alive_account_count_in_slot(1), 1); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); //now old state is cleaned up assert_eq!(accounts.alive_account_count_in_slot(0), 0); @@ -10675,7 +10775,7 @@ pub mod tests { accounts.print_accounts_stats(""); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); //Old state behind zero-lamport account is cleaned up assert_eq!(accounts.alive_account_count_in_slot(0), 0); @@ -10792,7 +10892,7 @@ pub mod tests { accounts.account_indexes.keys = None; } - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); //both zero lamport and normal accounts are cleaned up assert_eq!(accounts.alive_account_count_in_slot(0), 0); @@ -10883,7 +10983,7 @@ pub mod tests { assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 1); //now uncleaned roots are cleaned up - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0); } @@ -10900,7 +11000,7 @@ pub mod tests { assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 1); //now uncleaned roots are cleaned up - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0); } @@ -10912,7 +11012,7 @@ pub mod tests { // Create 100 accounts in slot 0 create_account(&accounts, &mut pubkeys, 0, 100, 0, 0); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); check_accounts(&accounts, &pubkeys, 0, 100, 1); // do some updates to those accounts and re-check @@ -10948,7 +11048,7 @@ pub mod tests { // Modify first 20 of the accounts from slot 0 in slot 2 modify_accounts(&accounts, &pubkeys, latest_slot, 20, 4); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); // Overwrite account 31 from slot 0 with lamports=0 
into slot 2. // Slot 2 should now have 20 + 1 = 21 accounts let account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); @@ -10962,7 +11062,7 @@ pub mod tests { accounts.add_root(latest_slot); assert!(check_storage(&accounts, 2, 31)); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); // The first 20 accounts of slot 0 have been updated in slot 2, as well as // accounts 30 and 31 (overwritten with zero-lamport accounts in slot 1 and // slot 2 respectively), so only 78 accounts are left in slot 0's storage entries. @@ -11102,7 +11202,7 @@ pub mod tests { accounts.print_accounts_stats("pre_purge"); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); accounts.print_accounts_stats("post_purge"); @@ -11167,7 +11267,7 @@ pub mod tests { info!("ancestors: {:?}", ancestors); let hash = accounts.update_accounts_hash_test(current_slot, &ancestors); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert_eq!( accounts.update_accounts_hash_test(current_slot, &ancestors), @@ -11234,7 +11334,7 @@ pub mod tests { accounts.print_accounts_stats("accounts"); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); accounts.print_accounts_stats("accounts_post_purge"); let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot); @@ -11320,7 +11420,7 @@ pub mod tests { fn test_accounts_purge_chained_purge_before_snapshot_restore() { solana_logger::setup(); with_chained_zero_lamport_accounts(|accounts, current_slot| { - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); reconstruct_accounts_db_via_serialization(&accounts, current_slot) }); } @@ -11331,7 +11431,7 @@ pub mod tests { with_chained_zero_lamport_accounts(|accounts, current_slot| { let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot); accounts.print_accounts_stats("after_reconstruct"); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); reconstruct_accounts_db_via_serialization(&accounts, current_slot) }); } @@ -12095,7 +12195,7 @@ pub mod tests { accounts.print_count_and_status("before reconstruct"); let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot); accounts.print_count_and_status("before purge zero"); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); accounts.print_count_and_status("after purge zero"); assert_load_account(&accounts, current_slot, pubkey, old_lamport); @@ -12156,7 +12256,7 @@ pub mod tests { accounts.print_accounts_stats("Post-B pre-clean"); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); info!("post B"); accounts.print_accounts_stats("Post-B"); @@ -12196,7 +12296,7 @@ pub mod tests { accounts.get_accounts_delta_hash(current_slot); accounts.add_root(current_slot); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); accounts.print_accounts_stats("Post-D clean"); @@ -12286,7 +12386,7 @@ pub mod tests { current_slot += 1; assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1)); accounts.store_uncached(current_slot, &[(&pubkey1, &zero_lamport_account)]); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert_eq!( // Removed one reference from the dead slot (reference only counted once @@ -12311,9 +12411,9 @@ pub mod tests { // If step C and step D should be purged, snapshot restore would cause // pubkey1 
to be revived as the state of step A. // So, prevent that from happening by introducing refcount - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); info!("pubkey: {}", pubkey1); accounts.print_accounts_stats("pre_clean"); @@ -12328,10 +12428,10 @@ pub mod tests { accounts.add_root(current_slot); // Do clean - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); // 2nd clean needed to clean-up pubkey1 - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); // Ensure pubkey2 is cleaned from the index finally assert_not_load_account(&accounts, current_slot, pubkey1); @@ -12472,7 +12572,7 @@ pub mod tests { accounts.get_accounts_delta_hash(current_slot); accounts.add_root(current_slot); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert_eq!( pubkey_count, @@ -12561,7 +12661,7 @@ pub mod tests { } accounts.get_accounts_delta_hash(current_slot); accounts.add_root(current_slot); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert_eq!( pubkey_count, @@ -12846,7 +12946,7 @@ pub mod tests { accounts.get_accounts_delta_hash(current_slot); accounts.add_root(current_slot); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert_eq!( pubkey_count, @@ -12966,7 +13066,7 @@ pub mod tests { store_counts.insert(1, (0, HashSet::from_iter(vec![key0, key1]))); store_counts.insert(2, (0, HashSet::from_iter(vec![key1, key2]))); store_counts.insert(3, (1, HashSet::from_iter(vec![key2]))); - AccountsDb::calc_delete_dependencies(&purges, &mut store_counts); + AccountsDb::calc_delete_dependencies(&purges, &mut store_counts, None); let mut stores: Vec<_> = store_counts.keys().cloned().collect(); stores.sort_unstable(); for store in &stores { @@ -13056,7 +13156,7 @@ pub mod tests { accounts.flush_accounts_cache(true, None); // clear out the dirty keys - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); // flush 1 accounts.get_accounts_delta_hash(1); @@ -13068,11 +13168,11 @@ pub mod tests { // clean to remove pubkey1 from 0, // shrink to shrink pubkey1 from 0 // then another clean to remove pubkey1 from slot 1 - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); accounts.shrink_candidate_slots(); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); accounts.print_accounts_stats("post-clean"); assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 0); @@ -13100,12 +13200,12 @@ pub mod tests { accounts.store_uncached(1, &[(key, &account)]); } accounts.add_root(1); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); accounts.shrink_all_slots(false, None); // Clean again to flush the dirty stores // and allow them to be recycled in the next step - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); accounts.print_accounts_stats("post-shrink"); let num_stores = accounts.recycle_stores.read().unwrap().entry_count(); assert!(num_stores > 0); @@ -13425,9 +13525,9 @@ pub mod tests { db.add_root(0); db.add_root(1); - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); db.flush_accounts_cache(true, None); - db.clean_accounts(None, false, None); + 
db.clean_accounts_for_tests(); db.add_root(2); assert_eq!(db.read_only_accounts_cache.cache_len(), 0); @@ -13475,7 +13575,7 @@ pub mod tests { db.add_root(1); // Clean should not remove anything yet as nothing has been flushed - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); let account = db .do_load( &Ancestors::default(), @@ -13491,7 +13591,7 @@ pub mod tests { // Flush, then clean again. Should not need another root to initiate the cleaning // because `accounts_index.uncleaned_roots` should be correct db.flush_accounts_cache(true, None); - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); assert!(db .do_load( &Ancestors::default(), @@ -13556,7 +13656,7 @@ pub mod tests { // Flush, then clean. Should not need another root to initiate the cleaning // because `accounts_index.uncleaned_roots` should be correct db.flush_accounts_cache(true, None); - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); // The `zero_lamport_account_key` is still alive in slot 1, so refcount for the // pubkey should be 2 @@ -13716,7 +13816,7 @@ pub mod tests { // Run clean, unrooted slot 1 should not be purged, and still readable from the cache, // because we're still doing a scan on it. - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); let account = db .do_load( &scan_ancestors, @@ -13730,7 +13830,7 @@ pub mod tests { // When the scan is over, clean should not panic and should not purge something // still in the cache. scan_tracker.exit().unwrap(); - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); let account = db .do_load( &scan_ancestors, @@ -14332,7 +14432,7 @@ pub mod tests { // Checking that the uncleaned_pubkeys are not pre-maturely removed // such that when the slots are rooted, and can actually be cleaned, then the // delta keys are still there. - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); db.print_accounts_stats("post-clean1"); // Check stores > 0 @@ -14347,12 +14447,12 @@ pub mod tests { db.store_uncached(2, &[(&account_key1, &account3)]); db.get_accounts_delta_hash(2); - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); db.print_accounts_stats("post-clean2"); // root slots 1 db.add_root(1); - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); db.print_accounts_stats("post-clean3"); @@ -14361,7 +14461,7 @@ pub mod tests { db.add_root(3); // Check that we can clean where max_root=3 and slot=2 is not rooted - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); assert!(db.uncleaned_pubkeys.is_empty()); @@ -15176,7 +15276,7 @@ pub mod tests { // The later rooted zero-lamport update to `shared_key` cannot be cleaned // because it is kept alive by the unrooted slot. 
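        // clean_accounts_for_tests() here and below is the wrapper introduced
        // above, i.e. a call to clean_accounts(None, false, None), so these
        // call sites behave exactly as before.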
- accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert!(accounts .accounts_index .get_account_read_entry(&shared_key) @@ -15186,7 +15286,7 @@ pub mod tests { accounts.purge_slot(slot0, 0, true); // Now clean should clean up the remaining key - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert!(accounts .accounts_index .get_account_read_entry(&shared_key) diff --git a/runtime/src/accounts_hash.rs b/runtime/src/accounts_hash.rs index c36a95d3e0..251050816a 100644 --- a/runtime/src/accounts_hash.rs +++ b/runtime/src/accounts_hash.rs @@ -18,7 +18,6 @@ use { }, }, }; -pub const ZERO_RAW_LAMPORTS_SENTINEL: u64 = std::u64::MAX; pub const MERKLE_FANOUT: usize = 16; #[derive(Default, Debug)] @@ -844,7 +843,7 @@ impl AccountsHash { ); // add lamports, get hash as long as the lamports are > 0 - if item.lamports != ZERO_RAW_LAMPORTS_SENTINEL + if item.lamports != 0 && (!filler_accounts_enabled || !self.is_filler_account(&item.pubkey)) { overall_sum = Self::checked_cast_for_capitalization( @@ -1042,7 +1041,7 @@ pub mod tests { // 2nd key - zero lamports, so will be removed let key = Pubkey::new(&[12u8; 32]); let hash = Hash::new(&[2u8; 32]); - let val = CalculateHashIntermediate::new(hash, ZERO_RAW_LAMPORTS_SENTINEL, key); + let val = CalculateHashIntermediate::new(hash, 0, key); account_maps.push(val); let accounts_hash = AccountsHash::default(); @@ -1116,7 +1115,7 @@ pub mod tests { // 2nd key - zero lamports, so will be removed let key = Pubkey::new(&[12u8; 32]); let hash = Hash::new(&[2u8; 32]); - let val = CalculateHashIntermediate::new(hash, ZERO_RAW_LAMPORTS_SENTINEL, key); + let val = CalculateHashIntermediate::new(hash, 0, key); account_maps.push(val); let mut previous_pass = PreviousPass::default(); @@ -1395,10 +1394,13 @@ pub mod tests { #[test] fn test_accountsdb_de_dup_accounts_zero_chunks() { - let vec = [vec![vec![CalculateHashIntermediate::default()]]]; + let vec = [vec![vec![CalculateHashIntermediate { + lamports: 1, + ..CalculateHashIntermediate::default() + }]]]; let (hashes, lamports, _) = AccountsHash::default().de_dup_accounts_in_parallel(&vec, 0); assert_eq!(vec![&Hash::default()], hashes); - assert_eq!(lamports, 0); + assert_eq!(lamports, 1); } #[test] @@ -1653,7 +1655,7 @@ pub mod tests { assert_eq!(result, (vec![&val.hash], val.lamports as u64, 1)); // zero original lamports, higher version - let val = CalculateHashIntermediate::new(hash, ZERO_RAW_LAMPORTS_SENTINEL, key); + let val = CalculateHashIntermediate::new(hash, 0, key); account_maps.push(val); // has to be after previous entry since account_maps are in slot order let vecs = vec![vec![account_maps.to_vec()]]; diff --git a/runtime/src/accounts_index.rs b/runtime/src/accounts_index.rs index a2bd30d35d..159d90f9db 100644 --- a/runtime/src/accounts_index.rs +++ b/runtime/src/accounts_index.rs @@ -412,7 +412,8 @@ impl PreAllocatedAccountMapEntry { account_info: T, storage: &Arc>, ) -> AccountMapEntry { - let ref_count = if account_info.is_cached() { 0 } else { 1 }; + let is_cached = account_info.is_cached(); + let ref_count = if is_cached { 0 } else { 1 }; let meta = AccountMapEntryMeta::new_dirty(storage); Arc::new(AccountMapEntryInner::new( vec![(slot, account_info)], @@ -1285,7 +1286,7 @@ impl AccountsIndex { &self, ancestors: Option<&Ancestors>, slice: SlotSlice, - max_root: Option, + max_root_inclusive: Option, ) -> Option { let mut current_max = 0; let mut rv = None; @@ -1300,11 +1301,11 @@ impl AccountsIndex { } } - let max_root = 
max_root.unwrap_or(Slot::MAX); + let max_root_inclusive = max_root_inclusive.unwrap_or(Slot::MAX); let mut tracker = None; for (i, (slot, _t)) in slice.iter().rev().enumerate() { - if (rv.is_none() || *slot > current_max) && *slot <= max_root { + if (rv.is_none() || *slot > current_max) && *slot <= max_root_inclusive { let lock = match tracker { Some(inner) => inner, None => self.roots_tracker.read().unwrap(), @@ -1690,14 +1691,16 @@ impl AccountsIndex { reclaims: &mut SlotList, max_clean_root: Option, ) { - let roots_tracker = &self.roots_tracker.read().unwrap(); - let newest_root_in_slot_list = Self::get_newest_root_in_slot_list( - &roots_tracker.alive_roots, - slot_list, - max_clean_root, - ); - let max_clean_root = - max_clean_root.unwrap_or_else(|| roots_tracker.alive_roots.max_inclusive()); + let newest_root_in_slot_list; + let max_clean_root = { + let roots_tracker = &self.roots_tracker.read().unwrap(); + newest_root_in_slot_list = Self::get_newest_root_in_slot_list( + &roots_tracker.alive_roots, + slot_list, + max_clean_root, + ); + max_clean_root.unwrap_or_else(|| roots_tracker.alive_roots.max_inclusive()) + }; slot_list.retain(|(slot, value)| { let should_purge = @@ -1714,11 +1717,11 @@ impl AccountsIndex { &self, pubkey: &Pubkey, reclaims: &mut SlotList, - max_clean_root: Option, + max_clean_root_inclusive: Option, ) { let mut is_slot_list_empty = false; self.slot_list_mut(pubkey, |slot_list| { - self.purge_older_root_entries(slot_list, reclaims, max_clean_root); + self.purge_older_root_entries(slot_list, reclaims, max_clean_root_inclusive); is_slot_list_empty = slot_list.is_empty(); }); diff --git a/runtime/src/accounts_index_storage.rs b/runtime/src/accounts_index_storage.rs index 16fe3b5c0c..155adade44 100644 --- a/runtime/src/accounts_index_storage.rs +++ b/runtime/src/accounts_index_storage.rs @@ -72,7 +72,7 @@ impl BgThreads { // note that using rayon here causes us to exhaust # rayon threads and many tests running in parallel deadlock Builder::new() - .name("solana-idx-flusher".to_string()) + .name(format!("solIdxFlusher{:02}", idx)) .spawn(move || { storage_.background(exit_, in_mem_, can_advance_age); }) diff --git a/runtime/src/ancestors.rs b/runtime/src/ancestors.rs index 42730efd98..9712f1fdbb 100644 --- a/runtime/src/ancestors.rs +++ b/runtime/src/ancestors.rs @@ -65,10 +65,6 @@ impl Ancestors { self.ancestors.get_all() } - pub fn get(&self, slot: &Slot) -> bool { - self.ancestors.contains(slot) - } - pub fn remove(&mut self, slot: &Slot) { self.ancestors.remove(slot); } @@ -182,10 +178,10 @@ pub mod tests { let key = item.0; min = std::cmp::min(min, *key); max = std::cmp::max(max, *key); - assert!(ancestors.get(key)); + assert!(ancestors.contains_key(key)); } for slot in min - 1..max + 2 { - assert_eq!(ancestors.get(&slot), hashset.contains(&slot)); + assert_eq!(ancestors.contains_key(&slot), hashset.contains(&slot)); } } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 9e02c1ab5b..2d7040c14f 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -111,8 +111,7 @@ use { epoch_schedule::EpochSchedule, feature, feature_set::{ - self, add_set_compute_unit_price_ix, default_units_per_instruction, - disable_fee_calculator, enable_early_verification_of_account_modifications, + self, disable_fee_calculator, enable_early_verification_of_account_modifications, use_default_units_in_fee_calculation, FeatureSet, }, fee::FeeStructure, @@ -122,7 +121,7 @@ use { hash::{extend_and_hash, hashv, Hash}, incinerator, inflation::Inflation, - 
instruction::CompiledInstruction, + instruction::{CompiledInstruction, TRANSACTION_LEVEL_STACK_HEIGHT}, lamports::LamportsError, message::{AccountKeys, SanitizedMessage}, native_loader, @@ -141,13 +140,11 @@ use { sysvar::{self, Sysvar, SysvarId}, timing::years_as_slots, transaction::{ - MessageHash, Result, SanitizedTransaction, Transaction, - TransactionError::{self}, - TransactionVerificationMode, VersionedTransaction, + MessageHash, Result, SanitizedTransaction, Transaction, TransactionError, + TransactionVerificationMode, VersionedTransaction, MAX_TX_ACCOUNT_LOCKS, }, transaction_context::{ - ExecutionRecord, InstructionTrace, TransactionAccount, TransactionContext, - TransactionReturnData, + ExecutionRecord, TransactionAccount, TransactionContext, TransactionReturnData, }, }, solana_stake_program::stake_state::{ @@ -242,6 +239,25 @@ impl RentDebit { } } +/// Incremental snapshots only calculate their accounts hash based on the account changes WITHIN the incremental slot range. +/// So, we need to keep track of the full snapshot expected accounts hash results. +/// We also need to keep track of the hash and capitalization specific to the incremental snapshot slot range. +/// The capitalization we calculate for the incremental slot will NOT be consistent with the bank's capitalization. +/// It is not feasible to calculate a capitalization delta that is correct given just incremental slots account data and the full snapshot's capitalization. +#[derive(Serialize, Deserialize, AbiExample, Clone, Debug, Default, PartialEq, Eq)] +pub struct BankIncrementalSnapshotPersistence { + /// slot of full snapshot + pub full_slot: Slot, + /// accounts hash from the full snapshot + pub full_hash: Hash, + /// capitalization from the full snapshot + pub full_capitalization: u64, + /// hash of the accounts in the incremental snapshot slot range, including zero-lamport accounts + pub incremental_hash: Hash, + /// capitalization of the accounts in the incremental snapshot slot range + pub incremental_capitalization: u64, +} + #[derive(Clone, Debug, Default, PartialEq, Eq)] pub struct RentDebits(HashMap); impl RentDebits { @@ -817,40 +833,50 @@ pub type InnerInstructions = Vec; /// a transaction pub type InnerInstructionsList = Vec; -/// Convert from an InstructionTrace to InnerInstructionsList +/// Extract the InnerInstructionsList from a TransactionContext pub fn inner_instructions_list_from_instruction_trace( - instruction_trace: &InstructionTrace, + transaction_context: &TransactionContext, ) -> InnerInstructionsList { - instruction_trace - .iter() - .map(|inner_instructions_trace| { - inner_instructions_trace - .iter() - .skip(1) - .map(|instruction_context| { - CompiledInstruction::new_from_raw_parts( - instruction_context - .get_index_of_program_account_in_transaction( - instruction_context - .get_number_of_program_accounts() - .saturating_sub(1), - ) - .unwrap_or_default() as u8, - instruction_context.get_instruction_data().to_vec(), - (0..instruction_context.get_number_of_instruction_accounts()) - .map(|instruction_account_index| { - instruction_context - .get_index_of_instruction_account_in_transaction( - instruction_account_index, - ) - .unwrap_or_default() as u8 - }) - .collect(), - ) - }) - .collect() - }) - .collect() + debug_assert!(transaction_context + .get_instruction_context_at_index_in_trace(0) + .map(|instruction_context| instruction_context.get_stack_height() + == TRANSACTION_LEVEL_STACK_HEIGHT) + .unwrap_or(true)); + let mut outer_instructions = Vec::new(); + for index_in_trace in 
0..transaction_context.get_instruction_trace_length() { + if let Ok(instruction_context) = + transaction_context.get_instruction_context_at_index_in_trace(index_in_trace) + { + if instruction_context.get_stack_height() == TRANSACTION_LEVEL_STACK_HEIGHT { + outer_instructions.push(Vec::new()); + } else if let Some(inner_instructions) = outer_instructions.last_mut() { + inner_instructions.push(CompiledInstruction::new_from_raw_parts( + instruction_context + .get_index_of_program_account_in_transaction( + instruction_context + .get_number_of_program_accounts() + .saturating_sub(1), + ) + .unwrap_or_default() as u8, + instruction_context.get_instruction_data().to_vec(), + (0..instruction_context.get_number_of_instruction_accounts()) + .map(|instruction_account_index| { + instruction_context + .get_index_of_instruction_account_in_transaction( + instruction_account_index, + ) + .unwrap_or_default() as u8 + }) + .collect(), + )); + } else { + debug_assert!(false); + } + } else { + debug_assert!(false); + } + } + outer_instructions } /// A list of log messages emitted during a transaction @@ -1050,6 +1076,7 @@ pub struct BankFieldsToDeserialize { pub(crate) epoch_stakes: HashMap, pub(crate) is_delta: bool, pub(crate) accounts_data_len: u64, + pub(crate) incremental_snapshot_persistence: Option, } // Bank's common fields shared by all supported snapshot versions for serialization. @@ -1157,6 +1184,7 @@ impl PartialEq for Bank { accounts_data_size_delta_on_chain: _, accounts_data_size_delta_off_chain: _, fee_structure: _, + incremental_snapshot_persistence: _, // Ignore new fields explicitly if they do not impact PartialEq. // Adding ".." will remove compile-time checks that if a new field // is added to the struct, this ParitalEq is accordingly updated. @@ -1410,6 +1438,8 @@ pub struct Bank { /// Transaction fee structure pub fee_structure: FeeStructure, + + pub incremental_snapshot_persistence: Option, } struct VoteWithStakeDelegations { @@ -1540,6 +1570,7 @@ impl Bank { fn default_with_accounts(accounts: Accounts) -> Self { let mut bank = Self { + incremental_snapshot_persistence: None, rewrites_skipped_this_slot: Rewrites::default(), rc: BankRc::new(accounts, Slot::default()), status_cache: Arc::>::default(), @@ -1839,6 +1870,7 @@ impl Bank { let accounts_data_size_initial = parent.load_accounts_data_size(); let mut new = Bank { + incremental_snapshot_persistence: None, rewrites_skipped_this_slot: Rewrites::default(), rc, status_cache, @@ -2200,6 +2232,7 @@ impl Bank { } let feature_set = new(); let mut bank = Self { + incremental_snapshot_persistence: fields.incremental_snapshot_persistence, rewrites_skipped_this_slot: Rewrites::default(), rc: bank_rc, status_cache: new(), @@ -2333,7 +2366,7 @@ impl Bank { hash: *self.hash.read().unwrap(), parent_hash: self.parent_hash, parent_slot: self.parent_slot, - hard_forks: &*self.hard_forks, + hard_forks: &self.hard_forks, transaction_count: self.transaction_count.load(Relaxed), tick_height: self.tick_height.load(Relaxed), signature_count: self.signature_count.load(Relaxed), @@ -2775,13 +2808,11 @@ impl Bank { "distributed inflation: {} (rounded from: {})", validator_rewards_paid, validator_rewards ); - // TODO: staked_nodes forces an eager stakes calculation. remove it! 
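        // The removal below resolves that TODO: the eager staked_nodes()
        // computation and the num_staked_nodes datapoint field are dropped.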
- let (num_stake_accounts, num_vote_accounts, num_staked_nodes) = { + let (num_stake_accounts, num_vote_accounts) = { let stakes = self.stakes_cache.stakes(); ( stakes.stake_delegations().len(), stakes.vote_accounts().len(), - stakes.staked_nodes().len(), ) }; self.capitalization @@ -2808,7 +2839,6 @@ impl Bank { ("post_capitalization", self.capitalization(), i64), ("num_stake_accounts", num_stake_accounts as i64, i64), ("num_vote_accounts", num_vote_accounts as i64, i64), - ("num_staked_nodes", num_staked_nodes as i64, i64) ); } @@ -2846,9 +2876,26 @@ impl Bank { None => { invalid_stake_keys .insert(*stake_pubkey, InvalidCacheEntryReason::Missing); + invalid_cached_stake_accounts.fetch_add(1, Relaxed); return; } }; + if cached_stake_account.account() != &stake_account { + invalid_cached_stake_accounts.fetch_add(1, Relaxed); + let cached_stake_account = cached_stake_account.account(); + if cached_stake_account.lamports() == stake_account.lamports() + && cached_stake_account.data() == stake_account.data() + && cached_stake_account.owner() == stake_account.owner() + && cached_stake_account.executable() == stake_account.executable() + { + invalid_cached_stake_accounts_rent_epoch.fetch_add(1, Relaxed); + } else { + debug!( + "cached stake account mismatch: {}: {:?}, {:?}", + stake_pubkey, stake_account, cached_stake_account + ); + } + } let stake_account = match StakeAccount::<()>::try_from(stake_account) { Ok(stake_account) => stake_account, Err(stake_account::Error::InvalidOwner { .. }) => { @@ -2871,33 +2918,6 @@ impl Bank { return; } }; - if cached_stake_account != &stake_account { - invalid_cached_stake_accounts.fetch_add(1, Relaxed); - let mut cached_account = cached_stake_account.account().clone(); - // We could have collected rent on the loaded account already in this new epoch (we could be at partition_index 12, for example). - // So, we may need to adjust the rent_epoch of the cached account. So, update rent_epoch and compare just the accounts. 
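        // This rent-epoch fixup path is removed; the comparison added above
        // instead checks lamports, data, owner and executable directly, so a
        // mismatch where all four agree is counted as rent_epoch-only via
        // invalid_cached_stake_accounts_rent_epoch.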
- ExpectedRentCollection::maybe_update_rent_epoch_on_load( - &mut cached_account, - &SlotInfoInEpoch::new_small(self.slot()), - &SlotInfoInEpoch::new_small(self.slot()), - self.epoch_schedule(), - self.rent_collector(), - stake_pubkey, - &self.rewrites_skipped_this_slot, - ); - if &cached_account != stake_account.account() { - info!( - "cached stake account mismatch: {}: {:?}, {:?}", - stake_pubkey, - cached_account, - stake_account.account() - ); - } else { - // track how many of 'invalid_cached_stake_accounts' were due to rent_epoch changes - // subtract these to find real invalid cached accounts - invalid_cached_stake_accounts_rent_epoch.fetch_add(1, Relaxed); - } - } let stake_delegation = (*stake_pubkey, stake_account); let mut vote_delegations = if let Some(vote_delegations) = vote_with_stake_delegations_map.get_mut(vote_pubkey) @@ -2907,16 +2927,12 @@ impl Bank { let cached_vote_account = cached_vote_accounts.get(vote_pubkey); let vote_account = match self.get_account_with_fixed_root(vote_pubkey) { Some(vote_account) => { - match cached_vote_account { - Some(cached_vote_account) - if cached_vote_account == &vote_account => {} - _ => { - invalid_cached_vote_accounts.fetch_add(1, Relaxed); - } - }; if vote_account.owner() != &solana_vote_program::id() { invalid_vote_keys .insert(*vote_pubkey, InvalidCacheEntryReason::WrongOwner); + if cached_vote_account.is_some() { + invalid_cached_vote_accounts.fetch_add(1, Relaxed); + } return; } vote_account @@ -2938,9 +2954,18 @@ impl Bank { } else { invalid_vote_keys .insert(*vote_pubkey, InvalidCacheEntryReason::BadState); + if cached_vote_account.is_some() { + invalid_cached_vote_accounts.fetch_add(1, Relaxed); + } return; }; - + match cached_vote_account { + Some(cached_vote_account) + if cached_vote_account.account() == &vote_account => {} + _ => { + invalid_cached_vote_accounts.fetch_add(1, Relaxed); + } + }; vote_with_stake_delegations_map .entry(*vote_pubkey) .or_insert_with(|| VoteWithStakeDelegations { @@ -3106,7 +3131,7 @@ impl Bank { thread_pool: &ThreadPool, metrics: &mut RewardsMetrics, update_rewards_from_cached_accounts: bool, - ) -> f64 { + ) { let stake_history = self.stakes_cache.stakes().history().clone(); let vote_with_stake_delegations_map = { let mut m = Measure::start("load_vote_and_stake_accounts_us"); @@ -3172,7 +3197,7 @@ impl Bank { metrics.calculate_points_us.fetch_add(m.as_us(), Relaxed); if points == 0 { - return 0.0; + return; } // pay according to point value @@ -3256,17 +3281,29 @@ impl Bank { m.stop(); metrics.redeem_rewards_us += m.as_us(); + self.store_stake_accounts(&stake_rewards, metrics); + let vote_rewards = self.store_vote_accounts(vote_account_rewards, metrics); + self.update_reward_history(stake_rewards, vote_rewards); + } + + fn store_stake_accounts(&self, stake_rewards: &[StakeReward], metrics: &mut RewardsMetrics) { // store stake account even if stakers_reward is 0 // because credits observed has changed let mut m = Measure::start("store_stake_account"); - self.store_accounts((self.slot(), &stake_rewards[..])); + self.store_accounts((self.slot(), stake_rewards)); m.stop(); metrics .store_stake_accounts_us .fetch_add(m.as_us(), Relaxed); + } + fn store_vote_accounts( + &self, + vote_account_rewards: DashMap, + metrics: &mut RewardsMetrics, + ) -> Vec<(Pubkey, RewardInfo)> { let mut m = Measure::start("store_vote_accounts"); - let mut vote_rewards = vote_account_rewards + let vote_rewards = vote_account_rewards .into_iter() .filter_map( |(vote_pubkey, (mut vote_account, commission, vote_rewards, 
vote_needs_store))| { @@ -3298,19 +3335,22 @@ impl Bank { m.stop(); metrics.store_vote_accounts_us.fetch_add(m.as_us(), Relaxed); + vote_rewards + } + fn update_reward_history( + &self, + stake_rewards: Vec, + mut vote_rewards: Vec<(Pubkey, RewardInfo)>, + ) { let additional_reserve = stake_rewards.len() + vote_rewards.len(); - { - let mut rewards = self.rewards.write().unwrap(); - rewards.reserve(additional_reserve); - rewards.append(&mut vote_rewards); - stake_rewards - .into_iter() - .filter(|x| x.get_stake_reward() > 0) - .for_each(|x| rewards.push((x.stake_pubkey, x.stake_reward_info))); - } - - point_value.rewards as f64 / point_value.points as f64 + let mut rewards = self.rewards.write().unwrap(); + rewards.reserve(additional_reserve); + rewards.append(&mut vote_rewards); + stake_rewards + .into_iter() + .filter(|x| x.get_stake_reward() > 0) + .for_each(|x| rewards.push((x.stake_pubkey, x.stake_reward_info))); } fn update_recent_blockhashes_locked(&self, locked_blockhash_queue: &BlockhashQueue) { @@ -3341,7 +3381,7 @@ impl Bank { let vote_state = account.vote_state(); let vote_state = vote_state.as_ref().ok()?; let slot_delta = self.slot().checked_sub(vote_state.last_timestamp.slot)?; - (slot_delta <= slots_per_epoch).then(|| { + (slot_delta <= slots_per_epoch).then_some({ ( *pubkey, ( @@ -3756,8 +3796,6 @@ impl Bank { message, lamports_per_signature, &self.fee_structure, - self.feature_set - .is_active(&add_set_compute_unit_price_ix::id()), self.feature_set .is_active(&use_default_units_in_fee_calculation::id()), )) @@ -3799,8 +3837,6 @@ impl Bank { message, lamports_per_signature, &self.fee_structure, - self.feature_set - .is_active(&add_set_compute_unit_price_ix::id()), self.feature_set .is_active(&use_default_units_in_fee_calculation::id()), ) @@ -3931,13 +3967,28 @@ impl Bank { } } + /// Get the max number of accounts that a transaction may lock in this block + pub fn get_transaction_account_lock_limit(&self) -> usize { + if let Some(transaction_account_lock_limit) = + self.runtime_config.transaction_account_lock_limit + { + transaction_account_lock_limit + } else { + MAX_TX_ACCOUNT_LOCKS + } + } + /// Prepare a transaction batch from a list of legacy transactions. Used for tests only. 
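    /// Locks are taken with the limit from get_transaction_account_lock_limit(),
    /// so a runtime_config override of the account lock limit applies here too.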
pub fn prepare_batch_for_tests(&self, txs: Vec) -> TransactionBatch { + let transaction_account_lock_limit = self.get_transaction_account_lock_limit(); let sanitized_txs = txs .into_iter() .map(SanitizedTransaction::from_transaction_for_tests) .collect::>(); - let lock_results = self.rc.accounts.lock_accounts(sanitized_txs.iter()); + let lock_results = self + .rc + .accounts + .lock_accounts(sanitized_txs.iter(), transaction_account_lock_limit); TransactionBatch::new(lock_results, self, Cow::Owned(sanitized_txs)) } @@ -3957,7 +4008,11 @@ impl Bank { ) }) .collect::>>()?; - let lock_results = self.rc.accounts.lock_accounts(sanitized_txs.iter()); + let tx_account_lock_limit = self.get_transaction_account_lock_limit(); + let lock_results = self + .rc + .accounts + .lock_accounts(sanitized_txs.iter(), tx_account_lock_limit); Ok(TransactionBatch::new( lock_results, self, @@ -3970,7 +4025,11 @@ impl Bank { &'a self, txs: &'b [SanitizedTransaction], ) -> TransactionBatch<'a, 'b> { - let lock_results = self.rc.accounts.lock_accounts(txs.iter()); + let tx_account_lock_limit = self.get_transaction_account_lock_limit(); + let lock_results = self + .rc + .accounts + .lock_accounts(txs.iter(), tx_account_lock_limit); TransactionBatch::new(lock_results, self, Cow::Borrowed(txs)) } @@ -3983,10 +4042,11 @@ impl Bank { additional_read_locks: &HashSet, additional_write_locks: &HashSet, ) -> TransactionBatch<'a, 'b> { - // this lock_results could be: Ok, AccountInUse, AccountLoadedTwice or TooManyAccountLocks + let tx_account_lock_limit = self.get_transaction_account_lock_limit(); let lock_results = self.rc.accounts.lock_accounts_with_results( transactions.iter(), transaction_results, + tx_account_lock_limit, additional_read_locks, additional_write_locks, ); @@ -4002,19 +4062,24 @@ impl Bank { account_locks_override: Option>, ) -> TransactionBatch<'a, 'b> { // this lock_results could be: Ok, AccountInUse, BundleNotContinuous, AccountLoadedTwice, or TooManyAccountLocks - let lock_results = self - .rc - .accounts - .lock_accounts_sequential_with_results(transactions.iter(), account_locks_override); + let tx_account_lock_limit = self.get_transaction_account_lock_limit(); + let lock_results = self.rc.accounts.lock_accounts_sequential_with_results( + transactions.iter(), + tx_account_lock_limit, + account_locks_override, + ); TransactionBatch::new(lock_results, self, Cow::Borrowed(transactions)) } /// Prepare a transaction batch without locking accounts for transaction simulation. 
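    /// The account locks are still validated against
    /// get_transaction_account_lock_limit(), so simulation rejects the same
    /// over-locked transactions, but no locks are actually taken and the batch
    /// is marked with set_needs_unlock(false).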
- pub(crate) fn prepare_simulation_batch<'a>( - &'a self, + pub(crate) fn prepare_simulation_batch( + &self, transaction: SanitizedTransaction, - ) -> TransactionBatch<'a, '_> { - let lock_result = transaction.get_account_locks().map(|_| ()); + ) -> TransactionBatch<'_, '_> { + let tx_account_lock_limit = self.get_transaction_account_lock_limit(); + let lock_result = transaction + .get_account_locks(tx_account_lock_limit) + .map(|_| ()); let mut batch = TransactionBatch::new(vec![lock_result], self, Cow::Owned(vec![transaction])); batch.set_needs_unlock(false); @@ -4524,6 +4589,29 @@ impl Bank { balances } + pub fn collect_balances_with_cache( + &self, + batch: &TransactionBatch, + account_overrides: Option<&AccountOverrides>, + ) -> TransactionBalances { + let mut balances: TransactionBalances = vec![]; + for transaction in batch.sanitized_transactions() { + let mut transaction_balances: Vec = vec![]; + for account_key in transaction.message().account_keys().iter() { + let balance = match account_overrides { + None => self.get_balance(account_key), + Some(overrides) => match overrides.get(account_key) { + None => self.get_balance(account_key), + Some(account_data) => account_data.lamports(), + }, + }; + transaction_balances.push(balance); + } + balances.push(transaction_balances); + } + balances + } + /// Get any cached executors needed by the transaction fn get_executors(&self, accounts: &[TransactionAccount]) -> Rc> { let executable_keys: Vec<_> = accounts @@ -4662,7 +4750,7 @@ impl Bank { self.feature_set.clone(), compute_budget, timings, - &*self.sysvar_cache.read().unwrap(), + &self.sysvar_cache.read().unwrap(), blockhash, lamports_per_signature, prev_accounts_data_len, @@ -4721,13 +4809,18 @@ impl Bank { .ok() }); + let inner_instructions = if enable_cpi_recording { + Some(inner_instructions_list_from_instruction_trace( + &transaction_context, + )) + } else { + None + }; + let ExecutionRecord { accounts, - instruction_trace, mut return_data, - changed_account_count, - total_size_of_all_accounts, - total_size_of_touched_accounts, + touched_account_count, accounts_resize_delta, } = transaction_context.into(); loaded_transaction.accounts = accounts; @@ -4739,26 +4832,10 @@ impl Bank { timings.details.total_account_count, loaded_transaction.accounts.len() as u64 ); - saturating_add_assign!(timings.details.changed_account_count, changed_account_count); - saturating_add_assign!( - timings.details.total_data_size, - total_size_of_all_accounts as usize - ); - saturating_add_assign!( - timings.details.data_size_changed, - total_size_of_touched_accounts as usize - ); + saturating_add_assign!(timings.details.changed_account_count, touched_account_count); accounts_data_len_delta = status.as_ref().map_or(0, |_| accounts_resize_delta); } - let inner_instructions = if enable_cpi_recording { - Some(inner_instructions_list_from_instruction_trace( - &instruction_trace, - )) - } else { - None - }; - let return_data = if enable_return_data_recording { if let Some(end_index) = return_data.data.iter().rposition(|&x| x != 0) { let end_index = end_index.saturating_add(1); @@ -4852,40 +4929,30 @@ impl Bank { .map(|(accs, tx)| match accs { (Err(e), _nonce) => TransactionExecutionResult::NotExecuted(e.clone()), (Ok(loaded_transaction), nonce) => { - let mut feature_set_clone_time = Measure::start("feature_set_clone"); - let feature_set = self.feature_set.clone(); - feature_set_clone_time.stop(); - saturating_add_assign!( - timings.execute_accessories.feature_set_clone_us, - feature_set_clone_time.as_us() - ); 
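        // The per-transaction feature_set.clone() and its timing metric are
        // removed; the feature gates it fed appear to be treated as always
        // active now (note the hard-coded `true` passed to
        // process_instructions below).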
- - let compute_budget = - if let Some(compute_budget) = self.runtime_config.compute_budget { - compute_budget - } else { - let mut compute_budget = - ComputeBudget::new(compute_budget::MAX_COMPUTE_UNIT_LIMIT as u64); - - let mut compute_budget_process_transaction_time = - Measure::start("compute_budget_process_transaction_time"); - let process_transaction_result = compute_budget.process_instructions( - tx.message().program_instructions_iter(), - feature_set.is_active(&default_units_per_instruction::id()), - feature_set.is_active(&add_set_compute_unit_price_ix::id()), - ); - compute_budget_process_transaction_time.stop(); - saturating_add_assign!( - timings - .execute_accessories - .compute_budget_process_transaction_us, - compute_budget_process_transaction_time.as_us() - ); - if let Err(err) = process_transaction_result { - return TransactionExecutionResult::NotExecuted(err); - } - compute_budget - }; + let compute_budget = if let Some(compute_budget) = + self.runtime_config.compute_budget + { + compute_budget + } else { + let mut compute_budget = + ComputeBudget::new(compute_budget::MAX_COMPUTE_UNIT_LIMIT as u64); + + let mut compute_budget_process_transaction_time = + Measure::start("compute_budget_process_transaction_time"); + let process_transaction_result = compute_budget + .process_instructions(tx.message().program_instructions_iter(), true); + compute_budget_process_transaction_time.stop(); + saturating_add_assign!( + timings + .execute_accessories + .compute_budget_process_transaction_us, + compute_budget_process_transaction_time.as_us() + ); + if let Err(err) = process_transaction_result { + return TransactionExecutionResult::NotExecuted(err); + } + compute_budget + }; self.execute_loaded_transaction( tx, @@ -5157,7 +5224,6 @@ impl Bank { message: &SanitizedMessage, lamports_per_signature: u64, fee_structure: &FeeStructure, - support_set_compute_unit_price_ix: bool, use_default_units_per_instruction: bool, ) -> u64 { // Fee based on compute units and signatures @@ -5174,7 +5240,6 @@ impl Bank { .process_instructions( message.program_instructions_iter(), use_default_units_per_instruction, - support_set_compute_unit_price_ix, ) .unwrap_or_default(); let prioritization_fee = prioritization_fee_details.get_fee(); @@ -5238,8 +5303,6 @@ impl Bank { tx.message(), lamports_per_signature, &self.fee_structure, - self.feature_set - .is_active(&add_set_compute_unit_price_ix::id()), self.feature_set .is_active(&use_default_units_in_fee_calculation::id()), ); @@ -6749,11 +6812,11 @@ impl Bank { } #[cfg(test)] - pub fn flush_accounts_cache_slot(&self) { + pub fn flush_accounts_cache_slot_for_tests(&self) { self.rc .accounts .accounts_db - .flush_accounts_cache_slot(self.slot()) + .flush_accounts_cache_slot_for_tests(self.slot()) } pub fn expire_old_recycle_stores(&self) { @@ -7478,31 +7541,25 @@ impl Bank { &self, test_hash_calculation: bool, accounts_db_skip_shrink: bool, - last_full_snapshot_slot: Option, + last_full_snapshot_slot: Slot, ) -> bool { let mut clean_time = Measure::start("clean"); - if !accounts_db_skip_shrink { - if self.slot() > 0 { - info!("cleaning.."); - self.clean_accounts(true, true, last_full_snapshot_slot); - } - } else { - // if we are skipping shrink, there should be no uncleaned_roots deferred to later - assert_eq!( - self.rc - .accounts - .accounts_db - .accounts_index - .uncleaned_roots_len(), - 0 - ); + if !accounts_db_skip_shrink && self.slot() > 0 { + info!("cleaning.."); + self.rc + .accounts + .accounts_db + .clean_accounts(None, true, 
Some(last_full_snapshot_slot)); } clean_time.stop(); let mut shrink_all_slots_time = Measure::start("shrink_all_slots"); if !accounts_db_skip_shrink && self.slot() > 0 { info!("shrinking.."); - self.shrink_all_slots(true, last_full_snapshot_slot); + self.rc + .accounts + .accounts_db + .shrink_all_slots(true, Some(last_full_snapshot_slot)); } shrink_all_slots_time.stop(); @@ -7776,35 +7833,26 @@ impl Bank { debug!("Added precompiled program {:?}", program_id); } - pub fn clean_accounts( - &self, - skip_last: bool, - is_startup: bool, - last_full_snapshot_slot: Option, - ) { + // Call AccountsDb::clean_accounts() + // + // This fn is meant to be called by the snapshot handler in Accounts Background Service. If + // calling from elsewhere, ensure the same invariants hold/expectations are met. + pub(crate) fn clean_accounts(&self, last_full_snapshot_slot: Option) { // Don't clean the slot we're snapshotting because it may have zero-lamport // accounts that were included in the bank delta hash when the bank was frozen, // and if we clean them here, any newly created snapshot's hash for this bank // may not match the frozen hash. // - // So when we're snapshotting, set `skip_last` to true so the highest slot to clean is - // lowered by one. - let highest_slot_to_clean = skip_last.then(|| self.slot().saturating_sub(1)); + // So when we're snapshotting, the highest slot to clean is lowered by one. + let highest_slot_to_clean = self.slot().saturating_sub(1); self.rc.accounts.accounts_db.clean_accounts( - highest_slot_to_clean, - is_startup, + Some(highest_slot_to_clean), + false, last_full_snapshot_slot, ); } - pub fn shrink_all_slots(&self, is_startup: bool, last_full_snapshot_slot: Option) { - self.rc - .accounts - .accounts_db - .shrink_all_slots(is_startup, last_full_snapshot_slot); - } - pub fn print_accounts_stats(&self) { self.rc.accounts.accounts_db.print_accounts_stats(""); } @@ -7966,7 +8014,6 @@ impl Bank { ); self.reconfigure_token2_native_mint(); } - self.ensure_no_storage_rewards_pool(); if new_feature_activations.contains(&feature_set::cap_accounts_data_len::id()) { const ACCOUNTS_DATA_LEN: u64 = 50_000_000_000; @@ -8143,36 +8190,6 @@ impl Bank { } } - fn ensure_no_storage_rewards_pool(&mut self) { - let purge_window_epoch = match self.cluster_type() { - ClusterType::Development => false, - // never do this for devnet; we're pristine here. :) - ClusterType::Devnet => false, - // schedule to remove at testnet/tds - ClusterType::Testnet => self.epoch() == 93, - // never do this for stable; we're pristine here. :) - ClusterType::MainnetBeta => false, - }; - - if purge_window_epoch { - for reward_pubkey in self.rewards_pool_pubkeys.iter() { - if let Some(mut reward_account) = self.get_account_with_fixed_root(reward_pubkey) { - if reward_account.lamports() == u64::MAX { - reward_account.set_lamports(0); - self.store_account(reward_pubkey, &reward_account); - // Adjust capitalization.... 
it has been wrapping, reducing the real capitalization by 1-lamport - self.capitalization.fetch_add(1, Relaxed); - info!( - "purged rewards pool account: {}, new capitalization: {}", - reward_pubkey, - self.capitalization() - ); - } - }; - } - } - } - /// Get all the accounts for this bank and calculate stats pub fn get_total_accounts_stats(&self) -> ScanResult { let accounts = self.get_all_accounts_with_modified_slots()?; @@ -8421,7 +8438,6 @@ pub(crate) mod tests { system_program, timing::duration_as_s, transaction::MAX_TX_ACCOUNT_LOCKS, - transaction_context::InstructionContext, }, solana_vote_program::{ vote_instruction, @@ -8582,6 +8598,12 @@ pub(crate) mod tests { assert_eq!(actual_next_start, expected_next_start); } + impl Bank { + fn clean_accounts_for_tests(&self) { + self.rc.accounts.accounts_db.clean_accounts_for_tests() + } + } + #[test] fn test_nonce_info() { let lamports_per_signature = 42; @@ -11274,13 +11296,13 @@ pub(crate) mod tests { bank0.store_account_and_update_capitalization(&stake_id, &stake_account); // generate some rewards - let mut vote_state = Some(VoteState::from(&vote_account).unwrap()); + let mut vote_state = Some(vote_state::from(&vote_account).unwrap()); for i in 0..MAX_LOCKOUT_HISTORY + 42 { if let Some(v) = vote_state.as_mut() { - v.process_slot_vote_unchecked(i as u64) + vote_state::process_slot_vote_unchecked(v, i as u64) } let versioned = VoteStateVersions::Current(Box::new(vote_state.take().unwrap())); - VoteState::to(&versioned, &mut vote_account).unwrap(); + vote_state::to(&versioned, &mut vote_account).unwrap(); bank0.store_account_and_update_capitalization(&vote_id, &vote_account); match versioned { VoteStateVersions::Current(v) => { @@ -11395,13 +11417,13 @@ pub(crate) mod tests { bank.store_account_and_update_capitalization(&stake_id2, &stake_account2); // generate some rewards - let mut vote_state = Some(VoteState::from(&vote_account).unwrap()); + let mut vote_state = Some(vote_state::from(&vote_account).unwrap()); for i in 0..MAX_LOCKOUT_HISTORY + 42 { if let Some(v) = vote_state.as_mut() { - v.process_slot_vote_unchecked(i as u64) + vote_state::process_slot_vote_unchecked(v, i as u64) } let versioned = VoteStateVersions::Current(Box::new(vote_state.take().unwrap())); - VoteState::to(&versioned, &mut vote_account).unwrap(); + vote_state::to(&versioned, &mut vote_account).unwrap(); bank.store_account_and_update_capitalization(&vote_id, &vote_account); match versioned { VoteStateVersions::Current(v) => { @@ -11487,7 +11509,7 @@ pub(crate) mod tests { bank.squash(); bank.force_flush_accounts_cache(); let hash = bank.update_accounts_hash(); - bank.clean_accounts(false, false, None); + bank.clean_accounts_for_tests(); assert_eq!(bank.update_accounts_hash(), hash); let bank0 = Arc::new(new_from_parent(&bank)); @@ -11510,7 +11532,7 @@ pub(crate) mod tests { info!("bank0 purge"); let hash = bank0.update_accounts_hash(); - bank0.clean_accounts(false, false, None); + bank0.clean_accounts_for_tests(); assert_eq!(bank0.update_accounts_hash(), hash); assert_eq!( @@ -11520,7 +11542,7 @@ pub(crate) mod tests { assert_eq!(bank1.get_account(&keypair.pubkey()), None); info!("bank1 purge"); - bank1.clean_accounts(false, false, None); + bank1.clean_accounts_for_tests(); assert_eq!( bank0.get_account(&keypair.pubkey()).unwrap().lamports(), @@ -11544,7 +11566,7 @@ pub(crate) mod tests { assert_eq!(bank0.get_account(&keypair.pubkey()), None); assert_eq!(bank1.get_account(&keypair.pubkey()), None); bank1.force_flush_accounts_cache(); - bank1.clean_accounts(false, 
false, None); + bank1.clean_accounts_for_tests(); assert!(bank1.verify_bank_hash(VerifyBankHash::default_for_test())); } @@ -11953,7 +11975,6 @@ pub(crate) mod tests { .lamports_per_signature, &FeeStructure::default(), true, - true, ); let (expected_fee_collected, expected_fee_burned) = @@ -12135,7 +12156,6 @@ pub(crate) mod tests { cheap_lamports_per_signature, &FeeStructure::default(), true, - true, ); assert_eq!( bank.get_balance(&mint_keypair.pubkey()), @@ -12153,7 +12173,6 @@ pub(crate) mod tests { expensive_lamports_per_signature, &FeeStructure::default(), true, - true, ); assert_eq!( bank.get_balance(&mint_keypair.pubkey()), @@ -12269,7 +12288,6 @@ pub(crate) mod tests { .lamports_per_signature, &FeeStructure::default(), true, - true, ) * 2 ) .0 @@ -12714,11 +12732,11 @@ pub(crate) mod tests { .unwrap(); bank.freeze(); bank.update_accounts_hash(); - assert!(bank.verify_snapshot_bank(true, false, None)); + assert!(bank.verify_snapshot_bank(true, false, bank.slot())); // tamper the bank after freeze! bank.increment_signature_count(1); - assert!(!bank.verify_snapshot_bank(true, false, None)); + assert!(!bank.verify_snapshot_bank(true, false, bank.slot())); } // Test that two bank forks with the same accounts should not hash to the same value. @@ -14034,7 +14052,7 @@ pub(crate) mod tests { #[cfg(not(target_os = "linux"))] { error!("{} banks, sleeping for 5 sec", num_banks); - std::thread::sleep(Duration::new(5, 0)); + std::thread::sleep(Duration::from_secs(5)); } } } @@ -15631,25 +15649,25 @@ pub(crate) mod tests { if bank.slot == 0 { assert_eq!( bank.hash().to_string(), - "9tLrxkBoNE7zEUZ2g72ZwE4fTfhUQnhC8A4Xt4EmYhP1" + "5gY6TCgB9NymbbxgFgAjvYLpXjyXiVyyruS1aEwbWKLK" ); } if bank.slot == 32 { assert_eq!( bank.hash().to_string(), - "AxphC8xDj9gmFosor5gyiovNvPVMydJCFRUTxn2wFiQf" + "6uJ5C4QDXWCN39EjJ5Frcz73nnS2jMJ55KgkQff12Fqp" ); } if bank.slot == 64 { assert_eq!( bank.hash().to_string(), - "4vZCSbBuL8xjE43rCy9Cm3dCh1BMj45heMiMb6n6qgzA" + "4u8bxZRLYdQBkWRBwmpcwcQVMCJoEpzY7hCuAzxr3kCe" ); } if bank.slot == 128 { assert_eq!( bank.hash().to_string(), - "46LUpeBdJuisnfwgYisvh4x7jnxzBaLfHF614GtcTs59" + "4c5F8UbcDD8FM7qXcfv6BPPo6nHNYJQmN5gHiCMTdEzX" ); break; } @@ -15761,7 +15779,7 @@ pub(crate) mod tests { bank1.deposit(&pubkey0, some_lamports).unwrap(); goto_end_of_slot(Arc::::get_mut(&mut bank1).unwrap()); bank1.freeze(); - bank1.flush_accounts_cache_slot(); + bank1.flush_accounts_cache_slot_for_tests(); bank1.print_accounts_stats(); @@ -15780,7 +15798,7 @@ pub(crate) mod tests { // Clean accounts, which should add earlier slots to the shrink // candidate set - bank2.clean_accounts(false, false, None); + bank2.clean_accounts_for_tests(); let mut bank3 = Arc::new(Bank::new_from_parent(&bank2, &Pubkey::default(), 3)); bank3.deposit(&pubkey1, some_lamports + 1).unwrap(); @@ -15789,7 +15807,7 @@ pub(crate) mod tests { bank3.squash(); bank3.force_flush_accounts_cache(); - bank3.clean_accounts(false, false, None); + bank3.clean_accounts_for_tests(); assert_eq!( bank3.rc.accounts.accounts_db.ref_count_for_pubkey(&pubkey0), 2 @@ -15858,7 +15876,7 @@ pub(crate) mod tests { // Clean accounts, which should add earlier slots to the shrink // candidate set - bank2.clean_accounts(false, false, None); + bank2.clean_accounts_for_tests(); // Slots 0 and 1 should be candidates for shrinking, but slot 2 // shouldn't because none of its accounts are outdated by a later @@ -15877,7 +15895,7 @@ pub(crate) mod tests { // No more slots should be shrunk assert_eq!(bank2.shrink_candidate_slots(), 0); // alive_counts 
represents the count of alive accounts in the three slots 0,1,2 - assert_eq!(alive_counts, vec![9, 1, 7]); + assert_eq!(alive_counts, vec![11, 1, 7]); } #[test] @@ -15912,7 +15930,7 @@ pub(crate) mod tests { goto_end_of_slot(Arc::::get_mut(&mut bank).unwrap()); bank.squash(); - bank.clean_accounts(false, false, None); + bank.clean_accounts_for_tests(); let force_to_return_alive_account = 0; assert_eq!( bank.process_stale_slot_with_budget(22, force_to_return_alive_account), @@ -15923,7 +15941,7 @@ pub(crate) mod tests { .map(|_| bank.process_stale_slot_with_budget(0, force_to_return_alive_account)) .sum(); // consumed_budgets represents the count of alive accounts in the three slots 0,1,2 - assert_eq!(consumed_budgets, 10); + assert_eq!(consumed_budgets, 12); } #[test] @@ -16282,57 +16300,6 @@ pub(crate) mod tests { assert_eq!(native_mint_account.owner(), &inline_spl_token::id()); } - #[test] - fn test_ensure_no_storage_rewards_pool() { - solana_logger::setup(); - - let mut genesis_config = - create_genesis_config_with_leader(5, &solana_sdk::pubkey::new_rand(), 0).genesis_config; - - // Testnet - Storage rewards pool is purged at epoch 93 - // Also this is with bad capitalization - genesis_config.cluster_type = ClusterType::Testnet; - genesis_config.inflation = Inflation::default(); - let reward_pubkey = solana_sdk::pubkey::new_rand(); - genesis_config.rewards_pools.insert( - reward_pubkey, - Account::new(u64::MAX, 0, &solana_sdk::pubkey::new_rand()), - ); - let bank0 = Bank::new_for_tests(&genesis_config); - // because capitalization has been reset with bogus capitalization calculation allowing overflows, - // deliberately substract 1 lamport to simulate it - bank0.capitalization.fetch_sub(1, Relaxed); - let bank0 = Arc::new(bank0); - assert_eq!(bank0.get_balance(&reward_pubkey), u64::MAX,); - - let bank1 = Bank::new_from_parent( - &bank0, - &Pubkey::default(), - genesis_config.epoch_schedule.get_first_slot_in_epoch(93), - ); - - // assert that everything gets in order.... - assert!(bank1.get_account(&reward_pubkey).is_none()); - let sysvar_and_builtin_program_delta = 1; - assert_eq!( - bank0.capitalization() + 1 + 1_000_000_000 + sysvar_and_builtin_program_delta, - bank1.capitalization() - ); - assert_eq!(bank1.capitalization(), bank1.calculate_capitalization(true)); - - // Depending on RUSTFLAGS, this test exposes rust's checked math behavior or not... 
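// The deleted assertions below hinge on whether arithmetic overflow
// panics (overflow-checks enabled) or wraps silently (typical release
// builds), which is why the test routes the call through `catch_unwind`.
// A standalone sketch of that probe pattern (names are illustrative):
fn probe_overflow(x: u64, y: u64) -> Option<u64> {
    // Panics, and therefore yields None, when overflow checks are on;
    // otherwise returns Some of the wrapped sum.
    std::panic::catch_unwind(|| x + y).ok()
}
// e.g. probe_overflow(u64::MAX, 1) is None under default `cargo test`
// profiles, but Some(0) in a release build without overflow checks.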
- // So do some convolted setup; anyway this test itself will just be temporary - let bank0 = std::panic::AssertUnwindSafe(bank0); - let overflowing_capitalization = - std::panic::catch_unwind(|| bank0.calculate_capitalization(true)); - if let Ok(overflowing_capitalization) = overflowing_capitalization { - info!("asserting overflowing capitalization for bank0"); - assert_eq!(overflowing_capitalization, bank0.capitalization()); - } else { - info!("NOT-asserting overflowing capitalization for bank0"); - } - } - #[derive(Debug)] struct TestExecutor {} impl Executor for TestExecutor { @@ -16818,10 +16785,10 @@ pub(crate) mod tests { vote_pubkey: &Pubkey, ) { let mut vote_account = bank.get_account(vote_pubkey).unwrap_or_default(); - let mut vote_state = VoteState::from(&vote_account).unwrap_or_default(); + let mut vote_state = vote_state::from(&vote_account).unwrap_or_default(); vote_state.last_timestamp = timestamp; let versioned = VoteStateVersions::new_current(vote_state); - VoteState::to(&versioned, &mut vote_account).unwrap(); + vote_state::to(&versioned, &mut vote_account).unwrap(); bank.store_account(vote_pubkey, &vote_account); } @@ -17349,7 +17316,7 @@ pub(crate) mod tests { current_major_fork_bank.squash(); // Try to get cache flush/clean to overlap with the scan current_major_fork_bank.force_flush_accounts_cache(); - current_major_fork_bank.clean_accounts(false, false, None); + current_major_fork_bank.clean_accounts_for_tests(); // Move purge here so that Bank::drop()->purge_slots() doesn't race // with clean. Simulates the call from AccountsBackgroundService abs_request_handler.handle_pruned_banks(¤t_major_fork_bank, true); @@ -17397,7 +17364,7 @@ pub(crate) mod tests { current_bank.squash(); if current_bank.slot() % 2 == 0 { current_bank.force_flush_accounts_cache(); - current_bank.clean_accounts(true, false, None); + current_bank.clean_accounts(None); } prev_bank = current_bank.clone(); current_bank = Arc::new(Bank::new_from_parent( @@ -18478,7 +18445,7 @@ pub(crate) mod tests { bank2.squash(); drop(bank1); - bank2.clean_accounts(false, false, None); + bank2.clean_accounts_for_tests(); let expected_ref_count_for_cleaned_up_keys = 0; let expected_ref_count_for_keys_in_both_slot1_and_slot2 = 1; @@ -18873,7 +18840,6 @@ pub(crate) mod tests { ..FeeStructure::default() }, true, - true, ), 0 ); @@ -18888,7 +18854,6 @@ pub(crate) mod tests { ..FeeStructure::default() }, true, - true, ), 1 ); @@ -18908,7 +18873,6 @@ pub(crate) mod tests { ..FeeStructure::default() }, true, - true, ), 4 ); @@ -18928,7 +18892,7 @@ pub(crate) mod tests { let message = SanitizedMessage::try_from(Message::new(&[], Some(&Pubkey::new_unique()))).unwrap(); assert_eq!( - Bank::calculate_fee(&message, 1, &fee_structure, true, true,), + Bank::calculate_fee(&message, 1, &fee_structure, true), max_fee + lamports_per_signature ); @@ -18940,7 +18904,7 @@ pub(crate) mod tests { SanitizedMessage::try_from(Message::new(&[ix0, ix1], Some(&Pubkey::new_unique()))) .unwrap(); assert_eq!( - Bank::calculate_fee(&message, 1, &fee_structure, true, true,), + Bank::calculate_fee(&message, 1, &fee_structure, true), max_fee + 3 * lamports_per_signature ); @@ -18973,7 +18937,7 @@ pub(crate) mod tests { Some(&Pubkey::new_unique()), )) .unwrap(); - let fee = Bank::calculate_fee(&message, 1, &fee_structure, true, true); + let fee = Bank::calculate_fee(&message, 1, &fee_structure, true); assert_eq!( fee, lamports_per_signature + prioritization_fee_details.get_fee() @@ -19011,10 +18975,7 @@ pub(crate) mod tests { Some(&key0), )) .unwrap(); 
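// With `support_set_compute_unit_price_ix` gone, `Bank::calculate_fee()`
// keeps a single feature flag, `use_default_units_per_instruction`, as
// the updated call sites in this test show. A sketch of the new call
// shape (the wrapper name is illustrative):
fn fee_for(message: &SanitizedMessage, lamports_per_signature: u64) -> u64 {
    Bank::calculate_fee(
        message,
        lamports_per_signature,
        &FeeStructure::default(),
        true, // use_default_units_per_instruction
    )
}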
- assert_eq!( - Bank::calculate_fee(&message, 1, &fee_structure, true, true,), - 2 - ); + assert_eq!(Bank::calculate_fee(&message, 1, &fee_structure, true), 2); secp_instruction1.data = vec![0]; secp_instruction2.data = vec![10]; @@ -19023,10 +18984,7 @@ pub(crate) mod tests { Some(&key0), )) .unwrap(); - assert_eq!( - Bank::calculate_fee(&message, 1, &fee_structure, true, true,), - 11 - ); + assert_eq!(Bank::calculate_fee(&message, 1, &fee_structure, true), 11); } #[test] @@ -19983,26 +19941,24 @@ pub(crate) mod tests { #[test] fn test_inner_instructions_list_from_instruction_trace() { - let instruction_trace = vec![ - vec![ - InstructionContext::new(0, 0, &[], &[], &[1]), - InstructionContext::new(1, 0, &[], &[], &[2]), - ], - vec![], - vec![ - InstructionContext::new(0, 0, &[], &[], &[3]), - InstructionContext::new(1, 0, &[], &[], &[4]), - InstructionContext::new(2, 0, &[], &[], &[5]), - InstructionContext::new(1, 0, &[], &[], &[6]), - ], - ]; - - let inner_instructions = inner_instructions_list_from_instruction_trace(&instruction_trace); + let mut transaction_context = TransactionContext::new(vec![], None, 3, 3); + for (index_in_trace, stack_height) in [1, 2, 1, 1, 2, 3, 2].into_iter().enumerate() { + while stack_height <= transaction_context.get_instruction_context_stack_height() { + transaction_context.pop().unwrap(); + } + if stack_height > transaction_context.get_instruction_context_stack_height() { + transaction_context + .push(&[], &[], &[index_in_trace as u8]) + .unwrap(); + } + } + let inner_instructions = + inner_instructions_list_from_instruction_trace(&transaction_context); assert_eq!( inner_instructions, vec![ - vec![CompiledInstruction::new_from_raw_parts(0, vec![2], vec![])], + vec![CompiledInstruction::new_from_raw_parts(0, vec![1], vec![])], vec![], vec![ CompiledInstruction::new_from_raw_parts(0, vec![4], vec![]), @@ -20397,7 +20353,10 @@ pub(crate) mod tests { { let account_pubkey = Pubkey::new_unique(); let account_balance = LAMPORTS_PER_SOL; - let account_size = rng.gen_range(1, MAX_PERMITTED_DATA_LENGTH) as usize; + let account_size = rng.gen_range( + 1, + MAX_PERMITTED_DATA_LENGTH as usize - MAX_PERMITTED_DATA_INCREASE, + ); let account_data = AccountSharedData::new(account_balance, account_size, &mock_program_id); bank.store_account(&account_pubkey, &account_data); diff --git a/runtime/src/bank_client.rs b/runtime/src/bank_client.rs index d9eb457126..c1cedf6206 100644 --- a/runtime/src/bank_client.rs +++ b/runtime/src/bank_client.rs @@ -312,7 +312,7 @@ impl BankClient { let thread_bank = bank.clone(); let bank = bank.clone(); Builder::new() - .name("solana-bank-client".to_string()) + .name("solBankClient".to_string()) .spawn(move || Self::run(&thread_bank, transaction_receiver)) .unwrap(); Self { diff --git a/runtime/src/block_cost_limits.rs b/runtime/src/block_cost_limits.rs index a1f1db85dc..cc279b4dd7 100644 --- a/runtime/src/block_cost_limits.rs +++ b/runtime/src/block_cost_limits.rs @@ -3,7 +3,8 @@ use { lazy_static::lazy_static, solana_sdk::{ - feature, incinerator, native_loader, pubkey::Pubkey, secp256k1_program, system_program, + ed25519_program, feature, incinerator, native_loader, pubkey::Pubkey, secp256k1_program, + system_program, }, std::collections::HashMap, }; @@ -38,8 +39,8 @@ lazy_static! 
{ (solana_sdk::stake::program::id(), COMPUTE_UNIT_TO_US_RATIO * 25), (solana_config_program::id(), COMPUTE_UNIT_TO_US_RATIO * 15), (solana_vote_program::id(), COMPUTE_UNIT_TO_US_RATIO * 70), - // secp256k1 is executed in banking stage, it should cost similar to sigverify (secp256k1_program::id(), COMPUTE_UNIT_TO_US_RATIO * 24), + (ed25519_program::id(), COMPUTE_UNIT_TO_US_RATIO * 24), (system_program::id(), COMPUTE_UNIT_TO_US_RATIO * 5), ] .iter() @@ -63,5 +64,6 @@ pub const MAX_WRITABLE_ACCOUNT_UNITS: u64 = MAX_BLOCK_REPLAY_TIME_US * COMPUTE_U /// sets at ~75% of MAX_BLOCK_UNITS to leave room for non-vote transactions pub const MAX_VOTE_UNITS: u64 = (MAX_BLOCK_UNITS as f64 * 0.75_f64) as u64; -/// max length of account data in a block (bytes) -pub const MAX_ACCOUNT_DATA_BLOCK_LEN: u64 = 100_000_000; +/// The maximum allowed size, in bytes, that accounts data can grow, per block. +/// This can also be thought of as the maximum size of new allocations per block. +pub const MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA: u64 = 100_000_000; diff --git a/runtime/src/bucket_map_holder.rs b/runtime/src/bucket_map_holder.rs index b0237c5910..aea586dca6 100644 --- a/runtime/src/bucket_map_holder.rs +++ b/runtime/src/bucket_map_holder.rs @@ -31,6 +31,7 @@ pub struct BucketMapHolder { pub disk: Option>, pub count_buckets_flushed: AtomicUsize, + /// rolling 'current' age pub age: AtomicU8, pub stats: BucketMapHolderStats, diff --git a/runtime/src/builtins.rs b/runtime/src/builtins.rs index d0653441f4..f06b6c3e8c 100644 --- a/runtime/src/builtins.rs +++ b/runtime/src/builtins.rs @@ -2,10 +2,8 @@ use solana_frozen_abi::abi_example::AbiExample; use { crate::system_instruction_processor, - solana_program_runtime::invoke_context::{InvokeContext, ProcessInstructionWithContext}, - solana_sdk::{ - feature_set, instruction::InstructionError, pubkey::Pubkey, stake, system_program, - }, + solana_program_runtime::invoke_context::ProcessInstructionWithContext, + solana_sdk::{feature_set, pubkey::Pubkey, stake, system_program}, std::fmt, }; @@ -141,14 +139,6 @@ fn genesis_builtins() -> Vec { ] } -/// place holder for precompile programs, remove when the precompile program is deactivated via feature activation -fn dummy_process_instruction( - _first_instruction_account: usize, - _invoke_context: &mut InvokeContext, -) -> Result<(), InstructionError> { - Ok(()) -} - /// Dynamic feature transitions for builtin programs fn builtin_feature_transitions() -> Vec { vec![ @@ -160,24 +150,6 @@ fn builtin_feature_transitions() -> Vec { ), feature_id: feature_set::add_compute_budget_program::id(), }, - BuiltinFeatureTransition::RemoveOrRetain { - previously_added_builtin: Builtin::new( - "secp256k1_program", - solana_sdk::secp256k1_program::id(), - dummy_process_instruction, - ), - addition_feature_id: feature_set::secp256k1_program_enabled::id(), - removal_feature_id: feature_set::prevent_calling_precompiles_as_programs::id(), - }, - BuiltinFeatureTransition::RemoveOrRetain { - previously_added_builtin: Builtin::new( - "ed25519_program", - solana_sdk::ed25519_program::id(), - dummy_process_instruction, - ), - addition_feature_id: feature_set::ed25519_program_enabled::id(), - removal_feature_id: feature_set::prevent_calling_precompiles_as_programs::id(), - }, BuiltinFeatureTransition::Add { builtin: Builtin::new( "address_lookup_table_program", diff --git a/runtime/src/cost_tracker.rs b/runtime/src/cost_tracker.rs index a1d779a8a5..6e848b63d2 100644 --- a/runtime/src/cost_tracker.rs +++ b/runtime/src/cost_tracker.rs @@ -218,7 +218,7 @@ 
impl CostTracker { } } - if account_data_size > MAX_ACCOUNT_DATA_BLOCK_LEN { + if account_data_size > MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA { return Err(CostTrackerError::WouldExceedAccountDataBlockLimit); } @@ -618,8 +618,8 @@ mod tests { let second_account = Keypair::new(); let (_tx1, mut tx_cost1) = build_simple_transaction(&mint_keypair, &start_hash); let (_tx2, mut tx_cost2) = build_simple_transaction(&second_account, &start_hash); - tx_cost1.account_data_size = MAX_ACCOUNT_DATA_BLOCK_LEN; - tx_cost2.account_data_size = MAX_ACCOUNT_DATA_BLOCK_LEN + 1; + tx_cost1.account_data_size = MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA; + tx_cost2.account_data_size = MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA + 1; let cost1 = tx_cost1.sum(); let cost2 = tx_cost2.sum(); diff --git a/runtime/src/execute_cost_table.rs b/runtime/src/execute_cost_table.rs index 922d31c1c0..158b544f9d 100644 --- a/runtime/src/execute_cost_table.rs +++ b/runtime/src/execute_cost_table.rs @@ -44,7 +44,6 @@ impl ExecuteCostTable { self.table.len() } - /// default program cost, set to ComputeBudget::DEFAULT_COMPUTE_UNIT_LIMIT pub fn get_default_compute_unit_limit(&self) -> u64 { DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64 } diff --git a/runtime/src/expected_rent_collection.rs b/runtime/src/expected_rent_collection.rs index d049430933..bd6a6bb484 100644 --- a/runtime/src/expected_rent_collection.rs +++ b/runtime/src/expected_rent_collection.rs @@ -684,7 +684,7 @@ pub mod tests { ); assert_eq!( result, - (!leave_alone).then(|| ExpectedRentCollection { + (!leave_alone).then_some(ExpectedRentCollection { partition_from_pubkey, epoch_of_max_storage_slot: rent_collector.epoch, partition_index_from_max_slot: partition_index_max_inclusive, @@ -712,7 +712,7 @@ pub mod tests { ); assert_eq!( result, - (!greater).then(|| ExpectedRentCollection { + (!greater).then_some(ExpectedRentCollection { partition_from_pubkey, epoch_of_max_storage_slot: rent_collector.epoch, partition_index_from_max_slot: partition_index_max_inclusive, @@ -909,7 +909,7 @@ pub mod tests { ); assert_eq!( result, - (account_rent_epoch != 0).then(|| ExpectedRentCollection { + (account_rent_epoch != 0).then_some(ExpectedRentCollection { partition_from_pubkey, epoch_of_max_storage_slot: rent_collector.epoch + 1, partition_index_from_max_slot: partition_index_max_inclusive, @@ -1084,7 +1084,7 @@ pub mod tests { }; assert_eq!( result, - some_expected.then(|| ExpectedRentCollection { + some_expected.then_some(ExpectedRentCollection { partition_from_pubkey, epoch_of_max_storage_slot: rent_collector.epoch, partition_index_from_max_slot, diff --git a/runtime/src/genesis_utils.rs b/runtime/src/genesis_utils.rs index 73ab5c105b..d5330df003 100644 --- a/runtime/src/genesis_utils.rs +++ b/runtime/src/genesis_utils.rs @@ -27,6 +27,7 @@ pub fn bootstrap_validator_stake_lamports() -> u64 { // Number of lamports automatically used for genesis accounts pub const fn genesis_sysvar_and_builtin_program_lamports() -> u64 { const NUM_BUILTIN_PROGRAMS: u64 = 4; + const NUM_PRECOMPILES: u64 = 2; const FEES_SYSVAR_MIN_BALANCE: u64 = 946_560; const STAKE_HISTORY_MIN_BALANCE: u64 = 114_979_200; const CLOCK_SYSVAR_MIN_BALANCE: u64 = 1_169_280; @@ -41,6 +42,7 @@ pub const fn genesis_sysvar_and_builtin_program_lamports() -> u64 { + EPOCH_SCHEDULE_SYSVAR_MIN_BALANCE + RECENT_BLOCKHASHES_SYSVAR_MIN_BALANCE + NUM_BUILTIN_PROGRAMS + + NUM_PRECOMPILES } pub struct ValidatorVoteKeypairs { diff --git a/runtime/src/hardened_unpack.rs b/runtime/src/hardened_unpack.rs index e3af855216..ac1c231673 100644 --- 
a/runtime/src/hardened_unpack.rs +++ b/runtime/src/hardened_unpack.rs @@ -384,7 +384,7 @@ where .map(|path_buf| path_buf.as_path()) { Some(path) => { - accounts_path_processor(*file, path); + accounts_path_processor(file, path); UnpackPath::Valid(path) } None => UnpackPath::Invalid, diff --git a/runtime/src/in_mem_accounts_index.rs b/runtime/src/in_mem_accounts_index.rs index b252499267..731bfc8279 100644 --- a/runtime/src/in_mem_accounts_index.rs +++ b/runtime/src/in_mem_accounts_index.rs @@ -279,7 +279,7 @@ impl InMemAccountsIndex { m.stop(); callback(if let Some(entry) = result { - entry.set_age(self.storage.future_age_to_flush()); + self.set_age_to_future(entry); Some(entry) } else { drop(map); @@ -305,6 +305,10 @@ impl InMemAccountsIndex { self.get_internal(pubkey, |entry| (true, entry.map(Arc::clone))) } + fn set_age_to_future(&self, entry: &AccountMapEntry) { + entry.set_age(self.storage.future_age_to_flush()); + } + /// lookup 'pubkey' in index (in_mem or disk). /// call 'callback' whether found or not pub(crate) fn get_internal( @@ -315,11 +319,10 @@ impl InMemAccountsIndex { ) -> RT { self.get_only_in_mem(pubkey, |entry| { if let Some(entry) = entry { - entry.set_age(self.storage.future_age_to_flush()); callback(Some(entry)).1 } else { // not in cache, look on disk - let stats = &self.stats(); + let stats = self.stats(); let disk_entry = self.load_account_entry_from_disk(pubkey); if disk_entry.is_none() { return callback(None).1; @@ -474,7 +477,7 @@ impl InMemAccountsIndex { reclaims, reclaim, ); - current.set_age(self.storage.future_age_to_flush()); + self.set_age_to_future(current); } Entry::Vacant(vacant) => { // not in cache, look on disk @@ -527,7 +530,7 @@ impl InMemAccountsIndex { } fn update_entry_stats(&self, stopped_measure: Measure, found: bool) { - let stats = &self.stats(); + let stats = self.stats(); let (count, time) = if found { (&stats.entries_from_mem, &stats.entry_mem_us) } else { @@ -1418,6 +1421,8 @@ impl<'a> FlushGuard<'a> { #[must_use = "if unused, the `flushing` flag will immediately clear"] fn lock(flushing: &'a AtomicBool) -> Option { let already_flushing = flushing.swap(true, Ordering::AcqRel); + // Eager evaluation here would result in dropping Self and clearing flushing flag + #[allow(clippy::unnecessary_lazy_evaluations)] (!already_flushing).then(|| Self { flushing }) } } diff --git a/runtime/src/message_processor.rs b/runtime/src/message_processor.rs index c1b06c141d..c95bfc0b72 100644 --- a/runtime/src/message_processor.rs +++ b/runtime/src/message_processor.rs @@ -10,7 +10,7 @@ use { }, solana_sdk::{ account::WritableAccount, - feature_set::{prevent_calling_precompiles_as_programs, FeatureSet}, + feature_set::FeatureSet, hash::Hash, message::SanitizedMessage, precompiles::is_precompile, @@ -86,10 +86,8 @@ impl MessageProcessor { .zip(program_indices.iter()) .enumerate() { - let is_precompile = invoke_context - .feature_set - .is_active(&prevent_calling_precompiles_as_programs::id()) - && is_precompile(program_id, |id| invoke_context.feature_set.is_active(id)); + let is_precompile = + is_precompile(program_id, |id| invoke_context.feature_set.is_active(id)); // Fixup the special instructions key if present // before the account pre-values are taken care of @@ -682,6 +680,6 @@ mod tests { InstructionError::Custom(0xbabb1e) )) ); - assert_eq!(transaction_context.get_instruction_trace().len(), 2); + assert_eq!(transaction_context.get_instruction_trace_length(), 2); } } diff --git a/runtime/src/read_only_accounts_cache.rs 
b/runtime/src/read_only_accounts_cache.rs index f58eb70a9e..ed35f24fbf 100644 --- a/runtime/src/read_only_accounts_cache.rs +++ b/runtime/src/read_only_accounts_cache.rs @@ -38,6 +38,7 @@ pub(crate) struct ReadOnlyAccountsCache { data_size: AtomicUsize, hits: AtomicU64, misses: AtomicU64, + evicts: AtomicU64, } impl ReadOnlyAccountsCache { @@ -49,6 +50,7 @@ impl ReadOnlyAccountsCache { data_size: AtomicUsize::default(), hits: AtomicU64::default(), misses: AtomicU64::default(), + evicts: AtomicU64::default(), } } @@ -107,13 +109,16 @@ impl ReadOnlyAccountsCache { } }; // Evict entries from the front of the queue. + let mut num_evicts = 0; while self.data_size.load(Ordering::Relaxed) > self.max_data_size { let (pubkey, slot) = match self.queue.lock().unwrap().get_first() { None => break, Some(key) => *key, }; + num_evicts += 1; self.remove(pubkey, slot); } + self.evicts.fetch_add(num_evicts, Ordering::Relaxed); } pub(crate) fn remove(&self, pubkey: Pubkey, slot: Slot) -> Option { @@ -135,10 +140,12 @@ impl ReadOnlyAccountsCache { self.data_size.load(Ordering::Relaxed) } - pub(crate) fn get_and_reset_stats(&self) -> (u64, u64) { + pub(crate) fn get_and_reset_stats(&self) -> (u64, u64, u64) { let hits = self.hits.swap(0, Ordering::Relaxed); let misses = self.misses.swap(0, Ordering::Relaxed); - (hits, misses) + let evicts = self.evicts.swap(0, Ordering::Relaxed); + + (hits, misses, evicts) } } diff --git a/runtime/src/runtime_config.rs b/runtime/src/runtime_config.rs index 48430487b0..65c51552fc 100644 --- a/runtime/src/runtime_config.rs +++ b/runtime/src/runtime_config.rs @@ -6,4 +6,5 @@ pub struct RuntimeConfig { pub bpf_jit: bool, pub compute_budget: Option, pub log_messages_bytes_limit: Option, + pub transaction_account_lock_limit: Option, } diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 27f554f68b..6b71e668b8 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -8,7 +8,7 @@ use { accounts_index::AccountSecondaryIndexes, accounts_update_notifier_interface::AccountsUpdateNotifier, append_vec::{AppendVec, StoredMetaWriteVersion}, - bank::{Bank, BankFieldsToDeserialize, BankRc}, + bank::{Bank, BankFieldsToDeserialize, BankIncrementalSnapshotPersistence, BankRc}, blockhash_queue::BlockhashQueue, builtins::Builtins, epoch_stakes::EpochStakes, @@ -65,7 +65,7 @@ pub(crate) enum SerdeStyle { const MAX_STREAM_SIZE: u64 = 32 * 1024 * 1024 * 1024; -#[derive(Clone, Debug, Default, Deserialize, Serialize, AbiExample, PartialEq)] +#[derive(Clone, Debug, Default, Deserialize, Serialize, AbiExample, PartialEq, Eq)] pub struct AccountsDbFields( HashMap>, StoredMetaWriteVersion, @@ -77,6 +77,7 @@ pub struct AccountsDbFields( /// slots that were roots within the last epoch for which we care about the hash value #[serde(deserialize_with = "default_on_eof")] Vec<(Slot, Hash)>, + // here? 
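// Fields appended at the tail of this tuple struct are deserialized with
// `default_on_eof`, so snapshot archives written before a field existed
// still deserialize cleanly; the same append-only convention is what lets
// `incremental_snapshot_persistence` be read later in this patch with
// `ignore_eof_error(...)`. A hypothetical future field would follow the
// same pattern:
//
//     #[serde(deserialize_with = "default_on_eof")]
//     u64,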
); /// Helper type to wrap BufReader streams when deserializing and reconstructing from either just a @@ -119,7 +120,7 @@ impl SnapshotAccountsDbFields { // There must not be any overlap in the slots of storages between the full snapshot and the incremental snapshot incremental_snapshot_storages .iter() - .all(|storage_entry| !full_snapshot_storages.contains_key(storage_entry.0)).then(|| ()).ok_or_else(|| { + .all(|storage_entry| !full_snapshot_storages.contains_key(storage_entry.0)).then_some(()).ok_or_else(|| { io::Error::new(io::ErrorKind::InvalidData, "Snapshots are incompatible: There are storages for the same slot in both the full snapshot and the incremental snapshot!") })?; @@ -193,6 +194,7 @@ trait TypeContext<'a>: PartialEq { stream_reader: &mut BufReader, stream_writer: &mut BufWriter, accounts_hash: &Hash, + incremental_snapshot_persistence: Option<&BankIncrementalSnapshotPersistence>, ) -> std::result::Result<(), Box> where R: Read, @@ -370,12 +372,18 @@ fn reserialize_bank_fields_with_new_hash( stream_reader: &mut BufReader, stream_writer: &mut BufWriter, accounts_hash: &Hash, + incremental_snapshot_persistence: Option<&BankIncrementalSnapshotPersistence>, ) -> Result<(), Error> where W: Write, R: Read, { - newer::Context::reserialize_bank_fields_with_hash(stream_reader, stream_writer, accounts_hash) + newer::Context::reserialize_bank_fields_with_hash( + stream_reader, + stream_writer, + accounts_hash, + incremental_snapshot_persistence, + ) } /// effectively updates the accounts hash in the serialized bank file on disk @@ -387,6 +395,7 @@ pub fn reserialize_bank_with_new_accounts_hash( bank_snapshots_dir: impl AsRef, slot: Slot, accounts_hash: &Hash, + incremental_snapshot_persistence: Option<&BankIncrementalSnapshotPersistence>, ) -> bool { let bank_post = snapshot_utils::get_bank_snapshots_dir(bank_snapshots_dir, slot); let bank_post = bank_post.join(snapshot_utils::get_snapshot_file_name(slot)); @@ -404,6 +413,7 @@ pub fn reserialize_bank_with_new_accounts_hash( &mut BufReader::new(file), &mut BufWriter::new(file_out), accounts_hash, + incremental_snapshot_persistence, ) .unwrap(); } @@ -781,7 +791,7 @@ where let accounts_db = Arc::new(accounts_db); let accounts_db_clone = accounts_db.clone(); let handle = Builder::new() - .name("notify_account_restore_from_snapshot".to_string()) + .name("solNfyAccRestor".to_string()) .spawn(move || { accounts_db_clone.notify_account_restore_from_snapshot(); }) diff --git a/runtime/src/serde_snapshot/newer.rs b/runtime/src/serde_snapshot/newer.rs index 3dd73803cf..ab27961bf2 100644 --- a/runtime/src/serde_snapshot/newer.rs +++ b/runtime/src/serde_snapshot/newer.rs @@ -96,6 +96,7 @@ impl From for BankFieldsToDeserialize { stakes: dvb.stakes, epoch_stakes: dvb.epoch_stakes, is_delta: dvb.is_delta, + incremental_snapshot_persistence: None, } } } @@ -200,7 +201,7 @@ impl<'a> TypeContext<'a> for Context { ( SerializableVersionedBank::from(fields), SerializableAccountsDb::<'a, Self> { - accounts_db: &*serializable_bank.bank.rc.accounts.accounts_db, + accounts_db: &serializable_bank.bank.rc.accounts.accounts_db, slot: serializable_bank.bank.rc.slot, account_storage_entries: serializable_bank.snapshot_storages, phantom: std::marker::PhantomData::default(), @@ -209,6 +210,7 @@ impl<'a> TypeContext<'a> for Context { // we can grab it on restart. // TODO: if we do a snapshot version bump, consider moving this out. 
lamports_per_signature, + None::, ) .serialize(serializer) } @@ -226,7 +228,7 @@ impl<'a> TypeContext<'a> for Context { ( SerializableVersionedBank::from(fields), SerializableAccountsDb::<'a, Self> { - accounts_db: &*serializable_bank.bank.rc.accounts.accounts_db, + accounts_db: &serializable_bank.bank.rc.accounts.accounts_db, slot: serializable_bank.bank.rc.slot, account_storage_entries: serializable_bank.snapshot_storages, phantom: std::marker::PhantomData::default(), @@ -314,6 +316,10 @@ impl<'a> TypeContext<'a> for Context { bank_fields.fee_rate_governor = bank_fields .fee_rate_governor .clone_with_lamports_per_signature(lamports_per_signature); + + let incremental_snapshot_persistence = ignore_eof_error(deserialize_from(stream))?; + bank_fields.incremental_snapshot_persistence = incremental_snapshot_persistence; + Ok((bank_fields, accounts_db_fields)) } @@ -327,12 +333,13 @@ impl<'a> TypeContext<'a> for Context { } /// deserialize the bank from 'stream_reader' - /// modify the accounts_hash + /// modify the accounts_hash and incremental_snapshot_persistence /// reserialize the bank to 'stream_writer' fn reserialize_bank_fields_with_hash( stream_reader: &mut BufReader, stream_writer: &mut BufWriter, accounts_hash: &Hash, + incremental_snapshot_persistence: Option<&BankIncrementalSnapshotPersistence>, ) -> std::result::Result<(), Box> where R: Read, @@ -345,6 +352,7 @@ impl<'a> TypeContext<'a> for Context { let blockhash_queue = RwLock::new(rhs.blockhash_queue.clone()); let hard_forks = RwLock::new(rhs.hard_forks.clone()); let lamports_per_signature = rhs.fee_rate_governor.lamports_per_signature; + let bank = SerializableVersionedBank { blockhash_queue: &blockhash_queue, ancestors: &rhs.ancestors, @@ -382,7 +390,12 @@ impl<'a> TypeContext<'a> for Context { bincode::serialize_into( stream_writer, - &(bank, accounts_db_fields, lamports_per_signature), + &( + bank, + accounts_db_fields, + lamports_per_signature, + incremental_snapshot_persistence, + ), ) } } diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index 75aa8ce51a..7491369f6d 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -155,7 +155,7 @@ fn test_accounts_serialize_style(serde_style: SerdeStyle) { accountsdb_to_stream( serde_style, &mut writer, - &*accounts.accounts_db, + &accounts.accounts_db, 0, &accounts.accounts_db.get_snapshot_storages(0, None, None).0, ) @@ -190,6 +190,7 @@ fn test_bank_serialize_style( serde_style: SerdeStyle, reserialize_accounts_hash: bool, update_accounts_hash: bool, + incremental_snapshot_persistence: bool, ) { solana_logger::setup(); let (genesis_config, _) = create_genesis_config(500); @@ -236,8 +237,18 @@ fn test_bank_serialize_style( } else { bank2.get_accounts_hash() }; - if reserialize_accounts_hash { - let slot = bank2.slot(); + + let slot = bank2.slot(); + let incremental = + incremental_snapshot_persistence.then(|| BankIncrementalSnapshotPersistence { + full_slot: slot + 1, + full_hash: Hash::new(&[1; 32]), + full_capitalization: 31, + incremental_hash: Hash::new(&[2; 32]), + incremental_capitalization: 32, + }); + + if reserialize_accounts_hash || incremental_snapshot_persistence { let temp_dir = TempDir::new().unwrap(); let slot_dir = temp_dir.path().join(slot.to_string()); let post_path = slot_dir.join(slot.to_string()); @@ -248,21 +259,32 @@ fn test_bank_serialize_style( let mut f = std::fs::File::create(&pre_path).unwrap(); f.write_all(&buf).unwrap(); } + assert!(reserialize_bank_with_new_accounts_hash( 
temp_dir.path(), slot, - &accounts_hash + &accounts_hash, + incremental.as_ref(), )); let previous_len = buf.len(); // larger buffer than expected to make sure the file isn't larger than expected - let mut buf_reserialized = vec![0; previous_len + 1]; + let sizeof_none = std::mem::size_of::(); + let sizeof_incremental_snapshot_persistence = + std::mem::size_of::>(); + let mut buf_reserialized = + vec![0; previous_len + sizeof_incremental_snapshot_persistence + 1]; { let mut f = std::fs::File::open(post_path).unwrap(); let size = f.read(&mut buf_reserialized).unwrap(); - assert_eq!(size, previous_len); + let expected = if !incremental_snapshot_persistence { + previous_len + } else { + previous_len + sizeof_incremental_snapshot_persistence - sizeof_none + }; + assert_eq!(size, expected); buf_reserialized.truncate(size); } - if update_accounts_hash { + if update_accounts_hash || incremental_snapshot_persistence { // We cannot guarantee buffer contents are exactly the same if hash is the same. // Things like hashsets/maps have randomness in their in-mem representations. // This make serialized bytes not deterministic. @@ -311,6 +333,7 @@ fn test_bank_serialize_style( assert_eq!(dbank.get_balance(&key3.pubkey()), 0); assert_eq!(dbank.get_accounts_hash(), accounts_hash); assert!(bank2 == dbank); + assert_eq!(dbank.incremental_snapshot_persistence, incremental); } pub(crate) fn reconstruct_accounts_db_via_serialization( @@ -359,11 +382,18 @@ fn test_bank_serialize_newer() { for (reserialize_accounts_hash, update_accounts_hash) in [(false, false), (true, false), (true, true)] { - test_bank_serialize_style( - SerdeStyle::Newer, - reserialize_accounts_hash, - update_accounts_hash, - ) + for incremental_snapshot_persistence in if reserialize_accounts_hash { + [false, true].to_vec() + } else { + [false].to_vec() + } { + test_bank_serialize_style( + SerdeStyle::Newer, + reserialize_accounts_hash, + update_accounts_hash, + incremental_snapshot_persistence, + ) + } } } @@ -555,7 +585,7 @@ mod test_bank_serialize { // This some what long test harness is required to freeze the ABI of // Bank's serialization due to versioned nature - #[frozen_abi(digest = "9vGBt7YfymKUTPWLHVVpQbDtPD7dFDwXRMFkCzwujNqJ")] + #[frozen_abi(digest = "5py4Wkuj5fV2sLyA1MrPg4pGNwMEaygQLnpLyY8MMLGC")] #[derive(Serialize, AbiExample)] pub struct BankAbiTestWrapperNewer { #[serde(serialize_with = "wrapper_newer")] diff --git a/runtime/src/shared_buffer_reader.rs b/runtime/src/shared_buffer_reader.rs index 819b4bcb3f..41c57b0d48 100644 --- a/runtime/src/shared_buffer_reader.rs +++ b/runtime/src/shared_buffer_reader.rs @@ -75,7 +75,7 @@ impl SharedBuffer { let bg_reader_data = instance.bg_reader_data.clone(); let handle = Builder::new() - .name("solana-compressed_file_reader".to_string()) + .name("solCompFileRead".to_string()) .spawn(move || { // importantly, this thread does NOT hold a refcount on the arc of 'instance' bg_reader_data.read_entire_file_in_bg(reader, total_buffer_budget, chunk_size); diff --git a/runtime/src/snapshot_minimizer.rs b/runtime/src/snapshot_minimizer.rs index 69e7a99e8e..94a82e1d48 100644 --- a/runtime/src/snapshot_minimizer.rs +++ b/runtime/src/snapshot_minimizer.rs @@ -543,7 +543,7 @@ mod tests { .accounts .iter() .filter_map(|(pubkey, account)| { - stake::program::check_id(account.owner()).then(|| *pubkey) + stake::program::check_id(account.owner()).then_some(*pubkey) }) .collect(); expected_stake_accounts.push(bootstrap_validator_pubkey); diff --git a/runtime/src/snapshot_utils.rs 
b/runtime/src/snapshot_utils.rs index d9903e358f..3e60060547 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -19,6 +19,7 @@ use { snapshot_package::{ AccountsPackage, PendingAccountsPackage, SnapshotPackage, SnapshotType, }, + status_cache, }, bincode::{config::Options, serialize_into}, bzip2::bufread::BzDecoder, @@ -28,7 +29,13 @@ use { rayon::prelude::*, regex::Regex, solana_measure::measure::Measure, - solana_sdk::{clock::Slot, genesis_config::GenesisConfig, hash::Hash, pubkey::Pubkey}, + solana_sdk::{ + clock::Slot, + genesis_config::GenesisConfig, + hash::Hash, + pubkey::Pubkey, + slot_history::{Check, SlotHistory}, + }, std::{ cmp::Ordering, collections::{HashMap, HashSet}, @@ -223,9 +230,37 @@ pub enum SnapshotError { #[error("snapshot has mismatch: deserialized bank: {:?}, snapshot archive info: {:?}", .0, .1)] MismatchedSlotHash((Slot, Hash), (Slot, Hash)), + + #[error("snapshot slot deltas are invalid: {0}")] + VerifySlotDeltas(#[from] VerifySlotDeltasError), } pub type Result = std::result::Result; +/// Errors that can happen in `verify_slot_deltas()` +#[derive(Error, Debug, PartialEq, Eq)] +pub enum VerifySlotDeltasError { + #[error("too many entries: {0} (max: {1})")] + TooManyEntries(usize, usize), + + #[error("slot {0} is not a root")] + SlotIsNotRoot(Slot), + + #[error("slot {0} is greater than bank slot {1}")] + SlotGreaterThanMaxRoot(Slot, Slot), + + #[error("slot {0} has multiple entries")] + SlotHasMultipleEntries(Slot), + + #[error("slot {0} was not found in slot history")] + SlotNotFoundInHistory(Slot), + + #[error("slot {0} was in history but missing from slot deltas")] + SlotNotFoundInDeltas(Slot), + + #[error("slot history is bad and cannot be used to verify slot deltas")] + BadSlotHistory, +} + /// If the validator halts in the middle of `archive_snapshot_package()`, the temporary staging /// directory won't be cleaned up. Call this function to clean them up. pub fn remove_tmp_snapshot_archives(snapshot_archives_dir: impl AsRef) { @@ -345,10 +380,12 @@ pub fn archive_snapshot_package( let do_archive_files = |encoder: &mut dyn Write| -> Result<()> { let mut archive = tar::Builder::new(encoder); + // Serialize the version and snapshots files before accounts so we can quickly determine the version + // and other bank fields. 
This is necessary if we want to interleave unpacking with reconstruction + archive.append_path_with_name(staging_dir.as_ref().join("version"), "version")?; for dir in ["snapshots", "accounts"] { archive.append_dir_all(dir, staging_dir.as_ref().join(dir))?; } - archive.append_path_with_name(staging_dir.as_ref().join("version"), "version")?; archive.into_inner()?; Ok(()) }; @@ -948,8 +985,8 @@ pub fn bank_from_snapshot_archives( let mut measure_verify = Measure::start("verify"); if !bank.verify_snapshot_bank( test_hash_calculation, - accounts_db_skip_shrink, - Some(full_snapshot_archive_info.slot()), + accounts_db_skip_shrink || !full_snapshot_archive_info.is_remote(), + full_snapshot_archive_info.slot(), ) && limit_load_slot_count_from_snapshot.is_none() { panic!("Snapshot bank for slot {} failed to verify", bank.slot()); @@ -1179,7 +1216,7 @@ fn check_are_snapshots_compatible( let incremental_snapshot_archive_info = incremental_snapshot_archive_info.unwrap(); (full_snapshot_archive_info.slot() == incremental_snapshot_archive_info.base_slot()) - .then(|| ()) + .then_some(()) .ok_or_else(|| { SnapshotError::MismatchedBaseSlot( full_snapshot_archive_info.slot(), @@ -1736,6 +1773,8 @@ fn rebuild_bank_from_snapshots( Ok(slot_deltas) })?; + verify_slot_deltas(slot_deltas.as_slice(), &bank)?; + bank.status_cache.write().unwrap().append(&slot_deltas); bank.prepare_rewrites_for_hash(); @@ -1744,6 +1783,106 @@ fn rebuild_bank_from_snapshots( Ok(bank) } +/// Verify that the snapshot's slot deltas are not corrupt/invalid +fn verify_slot_deltas( + slot_deltas: &[BankSlotDelta], + bank: &Bank, +) -> std::result::Result<(), VerifySlotDeltasError> { + let info = verify_slot_deltas_structural(slot_deltas, bank.slot())?; + verify_slot_deltas_with_history(&info.slots, &bank.get_slot_history(), bank.slot()) +} + +/// Verify that the snapshot's slot deltas are not corrupt/invalid +/// These checks are simple/structural +fn verify_slot_deltas_structural( + slot_deltas: &[BankSlotDelta], + bank_slot: Slot, +) -> std::result::Result { + // there should not be more entries than that status cache's max + let num_entries = slot_deltas.len(); + if num_entries > status_cache::MAX_CACHE_ENTRIES { + return Err(VerifySlotDeltasError::TooManyEntries( + num_entries, + status_cache::MAX_CACHE_ENTRIES, + )); + } + + let mut slots_seen_so_far = HashSet::new(); + for &(slot, is_root, ..) in slot_deltas { + // all entries should be roots + if !is_root { + return Err(VerifySlotDeltasError::SlotIsNotRoot(slot)); + } + + // all entries should be for slots less than or equal to the bank's slot + if slot > bank_slot { + return Err(VerifySlotDeltasError::SlotGreaterThanMaxRoot( + slot, bank_slot, + )); + } + + // there should only be one entry per slot + let is_duplicate = !slots_seen_so_far.insert(slot); + if is_duplicate { + return Err(VerifySlotDeltasError::SlotHasMultipleEntries(slot)); + } + } + + // detect serious logic error for future careless changes. :) + assert_eq!(slots_seen_so_far.len(), slot_deltas.len()); + + Ok(VerifySlotDeltasStructuralInfo { + slots: slots_seen_so_far, + }) +} + +/// Computed information from `verify_slot_deltas_structural()`, that may be reused/useful later. 
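// `verify_slot_deltas()` above composes the two passes: the cheap
// structural checks run first and yield the slot set that the
// slot-history cross-check then consumes, so malformed deltas fail
// before the more expensive pass. Sketched by hand (mirroring the
// composition above):
//
//     let info = verify_slot_deltas_structural(slot_deltas, bank.slot())?;
//     verify_slot_deltas_with_history(&info.slots, &bank.get_slot_history(), bank.slot())?;
//
// Any failure surfaces as a `VerifySlotDeltasError`, which
// `SnapshotError::VerifySlotDeltas` wraps via `#[from]`.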
+#[derive(Debug, PartialEq, Eq)] +struct VerifySlotDeltasStructuralInfo { + /// All the slots in the slot deltas + slots: HashSet, +} + +/// Verify that the snapshot's slot deltas are not corrupt/invalid +/// These checks use the slot history for verification +fn verify_slot_deltas_with_history( + slots_from_slot_deltas: &HashSet, + slot_history: &SlotHistory, + bank_slot: Slot, +) -> std::result::Result<(), VerifySlotDeltasError> { + // ensure the slot history is valid (as much as possible), since we're using it to verify the + // slot deltas + if slot_history.newest() != bank_slot { + return Err(VerifySlotDeltasError::BadSlotHistory); + } + + // all slots in the slot deltas should be in the bank's slot history + let slot_missing_from_history = slots_from_slot_deltas + .iter() + .find(|slot| slot_history.check(**slot) != Check::Found); + if let Some(slot) = slot_missing_from_history { + return Err(VerifySlotDeltasError::SlotNotFoundInHistory(*slot)); + } + + // all slots in the history should be in the slot deltas (up to MAX_CACHE_ENTRIES) + // this ensures nothing was removed from the status cache + // + // go through the slot history and make sure there's an entry for each slot + // note: it's important to go highest-to-lowest since the status cache removes + // older entries first + // note: we already checked above that `bank_slot == slot_history.newest()` + let slot_missing_from_deltas = (slot_history.oldest()..=slot_history.newest()) + .rev() + .filter(|slot| slot_history.check(*slot) == Check::Found) + .take(status_cache::MAX_CACHE_ENTRIES) + .find(|slot| !slots_from_slot_deltas.contains(slot)); + if let Some(slot) = slot_missing_from_deltas { + return Err(VerifySlotDeltasError::SlotNotFoundInDeltas(slot)); + } + + Ok(()) +} + pub(crate) fn get_snapshot_file_name(slot: Slot) -> String { slot.to_string() } @@ -1939,7 +2078,7 @@ pub fn bank_to_full_snapshot_archive( assert!(bank.is_complete()); bank.squash(); // Bank may not be a root bank.force_flush_accounts_cache(); - bank.clean_accounts(true, false, Some(bank.slot())); + bank.clean_accounts(Some(bank.slot())); bank.update_accounts_hash(); bank.rehash(); // Bank accounts may have been manually modified by the caller @@ -1986,7 +2125,7 @@ pub fn bank_to_incremental_snapshot_archive( assert!(bank.slot() > full_snapshot_slot); bank.squash(); // Bank may not be a root bank.force_flush_accounts_cache(); - bank.clean_accounts(true, false, Some(full_snapshot_slot)); + bank.clean_accounts(Some(full_snapshot_slot)); bank.update_accounts_hash(); bank.rehash(); // Bank accounts may have been manually modified by the caller @@ -2043,6 +2182,7 @@ pub fn package_and_archive_full_snapshot( accounts_package.snapshot_links.path(), accounts_package.slot, &bank.get_accounts_hash(), + None, ); let snapshot_package = SnapshotPackage::new(accounts_package, bank.get_accounts_hash()); @@ -2095,6 +2235,7 @@ pub fn package_and_archive_incremental_snapshot( accounts_package.snapshot_links.path(), accounts_package.slot, &bank.get_accounts_hash(), + None, ); let snapshot_package = SnapshotPackage::new(accounts_package, bank.get_accounts_hash()); @@ -2163,13 +2304,14 @@ fn can_submit_accounts_package( mod tests { use { super::*, - crate::accounts_db::ACCOUNTS_DB_CONFIG_FOR_TESTING, + crate::{accounts_db::ACCOUNTS_DB_CONFIG_FOR_TESTING, status_cache::Status}, assert_matches::assert_matches, bincode::{deserialize_from, serialize_into}, solana_sdk::{ genesis_config::create_genesis_config, native_token::sol_to_lamports, signature::{Keypair, Signer}, + 
slot_history::SlotHistory, system_transaction, transaction::SanitizedTransaction, }, @@ -3629,7 +3771,7 @@ mod tests { // Ensure account1 has been cleaned/purged from everywhere bank4.squash(); - bank4.clean_accounts(true, false, Some(full_snapshot_slot)); + bank4.clean_accounts(Some(full_snapshot_slot)); assert!( bank4.get_account_modified_slot(&key1.pubkey()).is_none(), "Ensure Account1 has been cleaned and purged from AccountsDb" @@ -3827,4 +3969,155 @@ mod tests { assert_eq!(expected_result, actual_result); } } + + #[test] + fn test_verify_slot_deltas_structural_good() { + // NOTE: slot deltas do not need to be sorted + let slot_deltas = vec![ + (222, true, Status::default()), + (333, true, Status::default()), + (111, true, Status::default()), + ]; + + let bank_slot = 333; + let result = verify_slot_deltas_structural(slot_deltas.as_slice(), bank_slot); + assert_eq!( + result, + Ok(VerifySlotDeltasStructuralInfo { + slots: HashSet::from([111, 222, 333]) + }) + ); + } + + #[test] + fn test_verify_slot_deltas_structural_bad_too_many_entries() { + let bank_slot = status_cache::MAX_CACHE_ENTRIES as Slot + 1; + let slot_deltas: Vec<_> = (0..bank_slot) + .map(|slot| (slot, true, Status::default())) + .collect(); + + let result = verify_slot_deltas_structural(slot_deltas.as_slice(), bank_slot); + assert_eq!( + result, + Err(VerifySlotDeltasError::TooManyEntries( + status_cache::MAX_CACHE_ENTRIES + 1, + status_cache::MAX_CACHE_ENTRIES + )), + ); + } + + #[test] + fn test_verify_slot_deltas_structural_bad_slot_not_root() { + let slot_deltas = vec![ + (111, true, Status::default()), + (222, false, Status::default()), // <-- slot is not a root + (333, true, Status::default()), + ]; + + let bank_slot = 333; + let result = verify_slot_deltas_structural(slot_deltas.as_slice(), bank_slot); + assert_eq!(result, Err(VerifySlotDeltasError::SlotIsNotRoot(222))); + } + + #[test] + fn test_verify_slot_deltas_structural_bad_slot_greater_than_bank() { + let slot_deltas = vec![ + (222, true, Status::default()), + (111, true, Status::default()), + (555, true, Status::default()), // <-- slot is greater than the bank slot + ]; + + let bank_slot = 444; + let result = verify_slot_deltas_structural(slot_deltas.as_slice(), bank_slot); + assert_eq!( + result, + Err(VerifySlotDeltasError::SlotGreaterThanMaxRoot( + 555, bank_slot + )), + ); + } + + #[test] + fn test_verify_slot_deltas_structural_bad_slot_has_multiple_entries() { + let slot_deltas = vec![ + (111, true, Status::default()), + (222, true, Status::default()), + (111, true, Status::default()), // <-- slot is a duplicate + ]; + + let bank_slot = 222; + let result = verify_slot_deltas_structural(slot_deltas.as_slice(), bank_slot); + assert_eq!( + result, + Err(VerifySlotDeltasError::SlotHasMultipleEntries(111)), + ); + } + + #[test] + fn test_verify_slot_deltas_with_history_good() { + let mut slots_from_slot_deltas = HashSet::default(); + let mut slot_history = SlotHistory::default(); + // note: slot history expects slots to be added in numeric order + for slot in [0, 111, 222, 333, 444] { + slots_from_slot_deltas.insert(slot); + slot_history.add(slot); + } + + let bank_slot = 444; + let result = + verify_slot_deltas_with_history(&slots_from_slot_deltas, &slot_history, bank_slot); + assert_eq!(result, Ok(())); + } + + #[test] + fn test_verify_slot_deltas_with_history_bad_slot_history() { + let bank_slot = 444; + let result = verify_slot_deltas_with_history( + &HashSet::default(), + &SlotHistory::default(), // <-- will only have an entry for slot 0 + bank_slot, 
+ ); + assert_eq!(result, Err(VerifySlotDeltasError::BadSlotHistory)); + } + + #[test] + fn test_verify_slot_deltas_with_history_bad_slot_not_in_history() { + let slots_from_slot_deltas = HashSet::from([ + 0, // slot history has slot 0 added by default + 444, 222, + ]); + let mut slot_history = SlotHistory::default(); + slot_history.add(444); // <-- slot history is missing slot 222 + + let bank_slot = 444; + let result = + verify_slot_deltas_with_history(&slots_from_slot_deltas, &slot_history, bank_slot); + + assert_eq!( + result, + Err(VerifySlotDeltasError::SlotNotFoundInHistory(222)), + ); + } + + #[test] + fn test_verify_slot_deltas_with_history_bad_slot_not_in_deltas() { + let slots_from_slot_deltas = HashSet::from([ + 0, // slot history has slot 0 added by default + 444, 222, + // <-- slot deltas is missing slot 333 + ]); + let mut slot_history = SlotHistory::default(); + slot_history.add(222); + slot_history.add(333); + slot_history.add(444); + + let bank_slot = 444; + let result = + verify_slot_deltas_with_history(&slots_from_slot_deltas, &slot_history, bank_slot); + + assert_eq!( + result, + Err(VerifySlotDeltasError::SlotNotFoundInDeltas(333)), + ); + } } diff --git a/runtime/src/stakes.rs b/runtime/src/stakes.rs index 32ddd737b4..3f003b3a93 100644 --- a/runtime/src/stakes.rs +++ b/runtime/src/stakes.rs @@ -8,6 +8,7 @@ use { }, dashmap::DashMap, im::HashMap as ImHashMap, + log::error, num_derive::ToPrimitive, num_traits::ToPrimitive, rayon::{prelude::*, ThreadPool}, @@ -19,7 +20,7 @@ use { }, solana_vote_program::vote_state::VoteState, std::{ - collections::HashMap, + collections::{HashMap, HashSet}, ops::Add, sync::{Arc, RwLock, RwLockReadGuard}, }, @@ -34,6 +35,12 @@ pub enum Error { InvalidStakeAccount(#[from] stake_account::Error), #[error("Stake account not found: {0}")] StakeAccountNotFound(Pubkey), + #[error("Vote account mismatch: {0}")] + VoteAccountMismatch(Pubkey), + #[error("Vote account not cached: {0}")] + VoteAccountNotCached(Pubkey), + #[error("Vote account not found: {0}")] + VoteAccountNotFound(Pubkey), } #[derive(Debug, Clone, PartialEq, Eq, ToPrimitive)] @@ -222,6 +229,47 @@ impl Stakes { Err(Error::InvalidDelegation(*pubkey)) } }); + // Assert that cached vote accounts are consistent with accounts-db. + for (pubkey, vote_account) in stakes.vote_accounts.iter() { + let account = match get_account(pubkey) { + None => return Err(Error::VoteAccountNotFound(*pubkey)), + Some(account) => account, + }; + // Ignoring rent_epoch until the feature for + // preserve_rent_epoch_for_rent_exempt_accounts is activated. + let vote_account = vote_account.account(); + if vote_account.lamports() != account.lamports() + || vote_account.owner() != account.owner() + || vote_account.executable() != account.executable() + || vote_account.data() != account.data() + { + error!( + "vote account mismatch: {}, {:?}, {:?}", + pubkey, vote_account, account + ); + return Err(Error::VoteAccountMismatch(*pubkey)); + } + } + // Assert that all valid vote-accounts referenced in + // stake delegations are already cached. 
+        let voter_pubkeys: HashSet<Pubkey> = stakes
+            .stake_delegations
+            .values()
+            .map(|delegation| delegation.voter_pubkey)
+            .filter(|voter_pubkey| stakes.vote_accounts.get(voter_pubkey).is_none())
+            .collect();
+        for pubkey in voter_pubkeys {
+            let account = match get_account(&pubkey) {
+                None => continue,
+                Some(account) => account,
+            };
+            if VoteState::is_correct_size_and_initialized(account.data())
+                && VoteAccount::try_from(account.clone()).is_ok()
+            {
+                error!("vote account not cached: {}, {:?}", pubkey, account);
+                return Err(Error::VoteAccountNotCached(pubkey));
+            }
+        }
         Ok(Self {
             vote_accounts: stakes.vote_accounts.clone(),
             stake_delegations: stake_delegations.collect::<Result<_, _>>()?,
@@ -618,7 +666,7 @@ pub(crate) mod tests {
         stakes_cache.check_and_store(&vote11_pubkey, &vote11_account);
         stakes_cache.check_and_store(&stake11_pubkey, &stake11_account);
-        let vote11_node_pubkey = VoteState::from(&vote11_account).unwrap().node_pubkey;
+        let vote11_node_pubkey = vote_state::from(&vote11_account).unwrap().node_pubkey;
         let highest_staked_node = stakes_cache.stakes().highest_staked_node();
         assert_eq!(highest_staked_node, Some(vote11_node_pubkey));
@@ -681,7 +729,7 @@ pub(crate) mod tests {
         // Vote account uninitialized
         let default_vote_state = VoteState::default();
         let versioned = VoteStateVersions::new_current(default_vote_state);
-        VoteState::to(&versioned, &mut vote_account).unwrap();
+        vote_state::to(&versioned, &mut vote_account).unwrap();
         stakes_cache.check_and_store(&vote_pubkey, &vote_account);
         {
diff --git a/runtime/src/status_cache.rs b/runtime/src/status_cache.rs
index f9d48941e8..c5d8379ce0 100644
--- a/runtime/src/status_cache.rs
+++ b/runtime/src/status_cache.rs
@@ -137,7 +137,7 @@ impl<T: Serialize + Clone> StatusCache<T> {
         if let Some(stored_forks) = keymap.get(key_slice) {
             let res = stored_forks
                 .iter()
-                .find(|(f, _)| ancestors.get(f) || self.roots.get(f).is_some())
+                .find(|(f, _)| ancestors.contains_key(f) || self.roots.get(f).is_some())
                 .cloned();
             if res.is_some() {
                 return res;
@@ -221,30 +221,15 @@ impl<T: Serialize + Clone> StatusCache<T> {
             .for_each(|(_, status)| status.lock().unwrap().clear());
     }
-    // returns the statuses for each slot in the slots provided
-    pub fn slot_deltas(&self, slots: &[Slot]) -> Vec<SlotDelta<T>> {
-        let empty = Arc::new(Mutex::new(HashMap::new()));
-        slots
-            .iter()
-            .map(|slot| {
-                (
-                    *slot,
-                    self.roots.contains(slot),
-                    Arc::clone(self.slot_deltas.get(slot).unwrap_or(&empty)),
-                )
-            })
-            .collect()
-    }
-
     /// Get the statuses for all the root slots
     pub fn root_slot_deltas(&self) -> Vec<SlotDelta<T>> {
-        self.roots
+        self.roots()
             .iter()
-            .map(|slot| {
+            .map(|root| {
                 (
-                    *slot,
-                    true,
-                    self.slot_deltas.get(slot).cloned().unwrap_or_default(),
+                    *root,
+                    true, // <-- is_root
+                    self.slot_deltas.get(root).cloned().unwrap_or_default(),
                 )
             })
             .collect()
@@ -444,10 +429,11 @@ mod tests {
         let blockhash = hash(Hash::default().as_ref());
         status_cache.clear();
         status_cache.insert(&blockhash, &sig, 0, ());
-        let slot_deltas = status_cache.slot_deltas(&[0]);
+        assert!(status_cache.roots().contains(&0));
+        let slot_deltas = status_cache.root_slot_deltas();
         let cache = StatusCache::from_slot_deltas(&slot_deltas);
         assert_eq!(cache, status_cache);
-        let slot_deltas = cache.slot_deltas(&[0]);
+        let slot_deltas = cache.root_slot_deltas();
         let cache = StatusCache::from_slot_deltas(&slot_deltas);
         assert_eq!(cache, status_cache);
     }
@@ -464,10 +450,9 @@ mod tests {
         for i in 0..(MAX_CACHE_ENTRIES + 1) {
             status_cache.add_root(i as u64);
         }
-        let slots: Vec<_> = (0..MAX_CACHE_ENTRIES as u64 + 1).collect();
         assert_eq!(status_cache.slot_deltas.len(),
1); assert!(status_cache.slot_deltas.get(&1).is_some()); - let slot_deltas = status_cache.slot_deltas(&slots); + let slot_deltas = status_cache.root_slot_deltas(); let cache = StatusCache::from_slot_deltas(&slot_deltas); assert_eq!(cache, status_cache); } diff --git a/runtime/src/storable_accounts.rs b/runtime/src/storable_accounts.rs index 8d79c0f78c..bfa35cf71c 100644 --- a/runtime/src/storable_accounts.rs +++ b/runtime/src/storable_accounts.rs @@ -143,7 +143,7 @@ pub mod tests { slot, &vec![(&pk, &account, slot), (&pk, &account, slot)][..], ); - assert!(!(&test3).contains_multiple_slots()); + assert!(!test3.contains_multiple_slots()); let test3 = ( slot, &vec![(&pk, &account, slot), (&pk, &account, slot + 1)][..], diff --git a/runtime/src/system_instruction_processor.rs b/runtime/src/system_instruction_processor.rs index 67f1f93114..82589ea65f 100644 --- a/runtime/src/system_instruction_processor.rs +++ b/runtime/src/system_instruction_processor.rs @@ -1626,7 +1626,8 @@ mod tests { .unwrap(); // super fun time; callback chooses to .clean_accounts(None) or not - callback(&*bank); + let bank = Arc::new(Bank::new_from_parent(&bank, &collector, bank.slot() + 1)); + callback(&bank); // create a normal account at the same pubkey as the zero-lamports account let lamports = genesis_config.rent.minimum_balance(len2); @@ -1651,9 +1652,9 @@ mod tests { bank.squash(); bank.force_flush_accounts_cache(); // do clean and assert that it actually did its job + assert_eq!(4, bank.get_snapshot_storages(None).len()); + bank.clean_accounts(None); assert_eq!(3, bank.get_snapshot_storages(None).len()); - bank.clean_accounts(false, false, None); - assert_eq!(2, bank.get_snapshot_storages(None).len()); }); } diff --git a/runtime/src/verify_accounts_hash_in_background.rs b/runtime/src/verify_accounts_hash_in_background.rs index 90266e36a6..63a7eeed97 100644 --- a/runtime/src/verify_accounts_hash_in_background.rs +++ b/runtime/src/verify_accounts_hash_in_background.rs @@ -117,7 +117,7 @@ pub(crate) mod tests { let verify_ = Arc::clone(verify); verify.start(|| { Builder::new() - .name("solana-bg-hash-verifier".to_string()) + .name("solBgHashVerfy".to_string()) .spawn(move || { // should have been marked not complete before thread started assert!(!verify_.check_complete()); diff --git a/runtime/src/vote_account.rs b/runtime/src/vote_account.rs index 1feefb6ffa..c37dd7b248 100644 --- a/runtime/src/vote_account.rs +++ b/runtime/src/vote_account.rs @@ -3,7 +3,7 @@ use { once_cell::sync::OnceCell, serde::ser::{Serialize, Serializer}, solana_sdk::{ - account::{accounts_equal, AccountSharedData, ReadableAccount}, + account::{AccountSharedData, ReadableAccount}, instruction::InstructionError, pubkey::Pubkey, }, @@ -53,6 +53,10 @@ pub struct VoteAccounts { } impl VoteAccount { + pub(crate) fn account(&self) -> &AccountSharedData { + &self.0.account + } + pub(crate) fn lamports(&self) -> u64 { self.0.account.lamports() } @@ -255,12 +259,6 @@ impl PartialEq for VoteAccountInner { } } -impl PartialEq for VoteAccount { - fn eq(&self, other: &AccountSharedData) -> bool { - accounts_equal(&self.0.account, other) - } -} - impl Default for VoteAccounts { fn default() -> Self { Self { diff --git a/runtime/store-tool/src/main.rs b/runtime/store-tool/src/main.rs index edfc00d8ee..8f7d2f2a40 100644 --- a/runtime/store-tool/src/main.rs +++ b/runtime/store-tool/src/main.rs @@ -40,8 +40,12 @@ fn main() { break; } info!( - " account: {:?} version: {} data: {} hash: {:?}", - account.meta.pubkey, account.meta.write_version, 
account.meta.data_len, account.hash + " account: {:?} version: {} lamports: {} data: {} hash: {:?}", + account.meta.pubkey, + account.meta.write_version, + account.account_meta.lamports, + account.meta.data_len, + account.hash ); num_accounts = num_accounts.saturating_add(1); stored_accounts_len = stored_accounts_len.saturating_add(account.stored_size); diff --git a/runtime/tests/accounts.rs b/runtime/tests/accounts.rs index a055d62da1..d272e738a1 100644 --- a/runtime/tests/accounts.rs +++ b/runtime/tests/accounts.rs @@ -65,7 +65,7 @@ fn test_shrink_and_clean() { // let's dance. for _ in 0..10 { - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); std::thread::sleep(std::time::Duration::from_millis(100)); } diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index fa4fdd8331..81afad23fa 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -67,10 +67,10 @@ qstring = "0.7.2" rand = { version = "0.7.0", optional = true } rand_chacha = { version = "0.2.2", optional = true } rustversion = "1.0.9" -serde = "1.0.138" +serde = "1.0.143" serde_bytes = "0.11" serde_derive = "1.0.103" -serde_json = { version = "1.0.81", optional = true } +serde_json = { version = "1.0.83", optional = true } sha2 = "0.10.2" sha3 = { version = "0.10.2", optional = true } solana-frozen-abi = { path = "../frozen-abi", version = "=1.12.0" } @@ -84,7 +84,7 @@ uuid = { version = "1.0.0", features = ["v4", "fast-rng"], optional = true } wasm-bindgen = "0.2" [target.'cfg(target_arch = "wasm32")'.dependencies] -js-sys = "0.3.58" +js-sys = "0.3.59" [dev-dependencies] anyhow = "1.0.58" diff --git a/sdk/cargo-build-bpf/src/main.rs b/sdk/cargo-build-bpf/src/main.rs index 5de2742c2a..0ea34f68c6 100644 --- a/sdk/cargo-build-bpf/src/main.rs +++ b/sdk/cargo-build-bpf/src/main.rs @@ -26,8 +26,9 @@ fn main() { args.remove(0); } } - args.push("--arch".to_string()); - args.push("bpf".to_string()); + let index = args.iter().position(|x| x == "--").unwrap_or(args.len()); + args.insert(index, "bpf".to_string()); + args.insert(index, "--arch".to_string()); print!("cargo-build-bpf child: {}", program.display()); for a in &args { print!(" {}", a); diff --git a/sdk/cargo-build-sbf/Cargo.toml b/sdk/cargo-build-sbf/Cargo.toml index 2c8359c4de..9cbdfc508b 100644 --- a/sdk/cargo-build-sbf/Cargo.toml +++ b/sdk/cargo-build-sbf/Cargo.toml @@ -22,6 +22,7 @@ tar = "0.4.38" [dev-dependencies] assert_cmd = "*" +predicates = "2.1" serial_test = "*" [features] diff --git a/sdk/cargo-build-sbf/src/main.rs b/sdk/cargo-build-sbf/src/main.rs index d6437b0434..0a49b93766 100644 --- a/sdk/cargo-build-sbf/src/main.rs +++ b/sdk/cargo-build-sbf/src/main.rs @@ -564,25 +564,6 @@ fn build_sbf_package(config: &Config, target_directory: &Path, package: &cargo_m env::set_var("OBJDUMP", llvm_bin.join("llvm-objdump")); env::set_var("OBJCOPY", llvm_bin.join("llvm-objcopy")); - let rustflags = env::var("RUSTFLAGS").ok(); - let mut rustflags = Cow::Borrowed(rustflags.as_deref().unwrap_or_default()); - if config.remap_cwd { - rustflags = Cow::Owned(format!("{} -Zremap-cwd-prefix=", &rustflags)); - } - if config.debug { - // Replace with -Zsplit-debuginfo=packed when stabilized. 
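The `cargo-build-bpf` and `cargo-test-bpf` changes above splice `--arch bpf` in front of any `--` separator, evidently so the flag is consumed by the wrapped tool itself rather than forwarded along with the pass-through arguments. A standalone sketch of that argument surgery (the `args` value here is hypothetical, not from the patch):

```rust
fn main() {
    let mut args: Vec<String> = ["--verbose", "--", "--nocapture"]
        .iter()
        .map(|s| s.to_string())
        .collect();
    // Insert before the `--` separator if present, otherwise append at the end.
    let index = args.iter().position(|x| x == "--").unwrap_or(args.len());
    args.insert(index, "bpf".to_string());
    args.insert(index, "--arch".to_string());
    // Inserting "bpf" first and "--arch" second at the same index yields the
    // pair in the right order.
    assert_eq!(args, ["--verbose", "--arch", "bpf", "--", "--nocapture"]);
}
```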
- rustflags = Cow::Owned(format!("{} -g", &rustflags)); - } - if let Cow::Owned(flags) = rustflags { - env::set_var("RUSTFLAGS", &flags); - } - if config.verbose { - debug!( - "RUSTFLAGS=\"{}\"", - env::var("RUSTFLAGS").ok().unwrap_or_default() - ); - } - // RUSTC variable overrides cargo + mechanism of // selecting the rust compiler and makes cargo run a rust compiler // other than the one linked in BPF toolchain. We have to prevent @@ -593,15 +574,40 @@ fn build_sbf_package(config: &Config, target_directory: &Path, package: &cargo_m ); env::remove_var("RUSTC") } - - let mut target_rustflags = env::var("CARGO_TARGET_SBF_SOLANA_SOLANA_RUSTFLAGS") - .ok() - .unwrap_or_default(); + let cargo_target = if config.arch == "bpf" { + "CARGO_TARGET_BPFEL_UNKNOWN_UNKNOWN_RUSTFLAGS" + } else { + "CARGO_TARGET_SBF_SOLANA_SOLANA_RUSTFLAGS" + }; + let rustflags = env::var("RUSTFLAGS").ok().unwrap_or_default(); + if env::var("RUSTFLAGS").is_ok() { + warn!( + "Removed RUSTFLAGS from cargo environment, because it overrides {}.", + cargo_target, + ); + env::remove_var("RUSTFLAGS") + } + let target_rustflags = env::var(cargo_target).ok(); + let mut target_rustflags = Cow::Borrowed(target_rustflags.as_deref().unwrap_or_default()); + target_rustflags = Cow::Owned(format!("{} {}", &rustflags, &target_rustflags)); + if config.remap_cwd { + target_rustflags = Cow::Owned(format!("{} -Zremap-cwd-prefix=", &target_rustflags)); + } + if config.debug { + // Replace with -Zsplit-debuginfo=packed when stabilized. + target_rustflags = Cow::Owned(format!("{} -g", &target_rustflags)); + } if config.arch == "sbfv2" { - target_rustflags = format!("{} {}", "-C target_cpu=sbfv2", target_rustflags); - env::set_var( - "CARGO_TARGET_SBF_SOLANA_SOLANA_RUSTFLAGS", - &target_rustflags, + target_rustflags = Cow::Owned(format!("{} -C target_cpu=sbfv2", &target_rustflags)); + } + if let Cow::Owned(flags) = target_rustflags { + env::set_var(cargo_target, &flags); + } + if config.verbose { + debug!( + "{}=\"{}\"", + cargo_target, + env::var(cargo_target).ok().unwrap_or_default(), ); } diff --git a/sdk/cargo-build-sbf/tests/crates.rs b/sdk/cargo-build-sbf/tests/crates.rs index 9b775a19a7..227e0d86a6 100644 --- a/sdk/cargo-build-sbf/tests/crates.rs +++ b/sdk/cargo-build-sbf/tests/crates.rs @@ -1,4 +1,7 @@ -use std::{env, fs, process}; +use { + predicates::prelude::*, + std::{env, fs}, +}; #[macro_use] extern crate serial_test; @@ -47,11 +50,10 @@ fn test_build() { #[serial] fn test_dump() { // This test requires rustfilt. - assert!(process::Command::new("cargo") + assert_cmd::Command::new("cargo") .args(&["install", "-f", "rustfilt"]) - .status() - .expect("Unable to install rustfilt required for --dump option") - .success()); + .assert() + .success(); run_cargo_build("noop", &["--dump"], false); let cwd = env::current_dir().expect("Unable to get current working directory"); let dump = cwd @@ -90,3 +92,39 @@ fn test_generate_child_script_on_failre() { fs::remove_file(scr).expect("Failed to remove script"); clean_target("fail"); } + +#[test] +#[serial] +fn test_sbfv2() { + run_cargo_build("noop", &["--arch", "sbfv2"], false); + let cwd = env::current_dir().expect("Unable to get current working directory"); + let bin = cwd + .join("tests") + .join("crates") + .join("noop") + .join("target") + .join("deploy") + .join("noop.so"); + let bin = bin.to_str().unwrap(); + let root = cwd + .parent() + .expect("Unable to get parent directory of current working dir") + .parent() + .expect("Unable to get ../.. 
of current working dir"); + let readelf = root + .join("sdk") + .join("bpf") + .join("dependencies") + .join("sbf-tools") + .join("llvm") + .join("bin") + .join("llvm-readelf"); + assert_cmd::Command::new(readelf) + .args(&["-h", bin]) + .assert() + .stdout(predicate::str::contains( + "Flags: 0x20", + )) + .success(); + clean_target("noop"); +} diff --git a/sdk/cargo-test-bpf/src/main.rs b/sdk/cargo-test-bpf/src/main.rs index fee4dc7381..af5a382fdd 100644 --- a/sdk/cargo-test-bpf/src/main.rs +++ b/sdk/cargo-test-bpf/src/main.rs @@ -32,8 +32,9 @@ fn main() { args.remove(0); } } - args.push("--arch".to_string()); - args.push("bpf".to_string()); + let index = args.iter().position(|x| x == "--").unwrap_or(args.len()); + args.insert(index, "bpf".to_string()); + args.insert(index, "--arch".to_string()); print!("cargo-test-bpf child: {}", program.display()); for a in &args { print!(" {}", a); diff --git a/sdk/macro/src/lib.rs b/sdk/macro/src/lib.rs index 723a5729c1..bc9fb1c2e9 100644 --- a/sdk/macro/src/lib.rs +++ b/sdk/macro/src/lib.rs @@ -44,15 +44,15 @@ fn id_to_tokens( tokens: &mut proc_macro2::TokenStream, ) { tokens.extend(quote! { - /// The static program ID + /// The static program ID. pub static ID: #pubkey_type = #id; - /// Confirms that a given pubkey is equivalent to the program ID + /// Returns `true` if given pubkey is the program ID. pub fn check_id(id: &#pubkey_type) -> bool { id == &ID } - /// Returns the program ID + /// Returns the program ID. pub fn id() -> #pubkey_type { ID } @@ -71,16 +71,16 @@ fn deprecated_id_to_tokens( tokens: &mut proc_macro2::TokenStream, ) { tokens.extend(quote! { - /// The static program ID + /// The static program ID. pub static ID: #pubkey_type = #id; - /// Confirms that a given pubkey is equivalent to the program ID + /// Returns `true` if given pubkey is the program ID. #[deprecated()] pub fn check_id(id: &#pubkey_type) -> bool { id == &ID } - /// Returns the program ID + /// Returns the program ID. #[deprecated()] pub fn id() -> #pubkey_type { ID diff --git a/sdk/program/src/clock.rs b/sdk/program/src/clock.rs index 05e01e8a69..408fdebe2e 100644 --- a/sdk/program/src/clock.rs +++ b/sdk/program/src/clock.rs @@ -1,20 +1,45 @@ //! Information about the network's clock, ticks, slots, etc. +//! +//! Time in Solana is marked primarily by _slots_, which occur approximately every +//! 400 milliseconds, and are numbered sequentially. For every slot, a leader is +//! chosen from the validator set, and that leader is expected to produce a new +//! block, though sometimes leaders may fail to do so. Blocks can be identified +//! by their slot number, and some slots do not contain a block. +//! +//! An approximation of the passage of real-world time can be calculated by +//! multiplying a number of slots by [`SLOT_MS`], which is a constant target +//! time for the network to produce slots. Note though that this method suffers +//! a variable amount of drift, as the network does not produce slots at exactly +//! the target rate, and the greater number of slots being calculated for, the +//! greater the drift. Epochs cannot be used this way as they contain variable +//! numbers of slots. +//! +//! The network's current view of the real-world time can always be accessed via +//! [`Clock::unix_timestamp`], which is produced by an [oracle derived from the +//! validator set][oracle]. +//! +//! 
[oracle]: https://docs.solana.com/implemented-proposals/validator-timestamp-oracle

 use {
     crate::{clone_zeroed, copy_field},
     std::mem::MaybeUninit,
 };

-// The default tick rate that the cluster attempts to achieve. Note that the actual tick
-// rate at any given time should be expected to drift
+/// The default tick rate that the cluster attempts to achieve (160 per second).
+///
+/// Note that the actual tick rate at any given time should be expected to drift.
 pub const DEFAULT_TICKS_PER_SECOND: u64 = 160;

 #[cfg(test)]
 static_assertions::const_assert_eq!(MS_PER_TICK, 6);
+
+/// The number of milliseconds per tick (6).
 pub const MS_PER_TICK: u64 = 1000 / DEFAULT_TICKS_PER_SECOND;

 #[cfg(test)]
 static_assertions::const_assert_eq!(SLOT_MS, 400);
+
+/// The expected duration of a slot (400 milliseconds).
 pub const SLOT_MS: u64 = (DEFAULT_TICKS_PER_SLOT * 1000) / DEFAULT_TICKS_PER_SECOND;

 // At 160 ticks/s, 64 ticks per slot implies that leader rotation and voting will happen
@@ -41,7 +66,10 @@ pub const TICKS_PER_DAY: u64 = DEFAULT_TICKS_PER_SECOND * SECONDS_PER_DAY;

 #[cfg(test)]
 static_assertions::const_assert_eq!(DEFAULT_SLOTS_PER_EPOCH, 432_000);

-// 1 Epoch ~= 2 days
+
+/// The number of slots per epoch after initial network warmup.
+///
+/// 1 Epoch ~= 2 days.
 pub const DEFAULT_SLOTS_PER_EPOCH: u64 = 2 * TICKS_PER_DAY / DEFAULT_TICKS_PER_SLOT;

 // leader schedule is governed by this
@@ -52,12 +80,14 @@ static_assertions::const_assert_eq!(DEFAULT_MS_PER_SLOT, 400);
 pub const DEFAULT_MS_PER_SLOT: u64 = 1_000 * DEFAULT_TICKS_PER_SLOT / DEFAULT_TICKS_PER_SECOND;
 pub const DEFAULT_S_PER_SLOT: f64 = DEFAULT_TICKS_PER_SLOT as f64 / DEFAULT_TICKS_PER_SECOND as f64;

-/// The time window of recent block hash values that the bank will track the signatures
-/// of over. Once the bank discards a block hash, it will reject any transactions that use
-/// that `recent_blockhash` in a transaction. Lowering this value reduces memory consumption,
-/// but requires clients to update its `recent_blockhash` more frequently. Raising the value
-/// lengthens the time a client must wait to be certain a missing transaction will
-/// not be processed by the network.
+/// The time window of recent block hash values over which the bank will track
+/// signatures.
+///
+/// Once the bank discards a block hash, it will reject any transactions that
+/// use that `recent_blockhash` in a transaction. Lowering this value reduces
+/// memory consumption, but requires a client to update its `recent_blockhash`
+/// more frequently. Raising the value lengthens the time a client must wait to
+/// be certain a missing transaction will not be processed by the network.
 pub const MAX_HASH_AGE_IN_SECONDS: usize = 120;

 #[cfg(test)]
@@ -78,52 +108,61 @@ pub const MAX_TRANSACTION_FORWARDING_DELAY_GPU: usize = 2;

 /// More delay is expected if CUDA is not enabled (as signature verification takes longer)
 pub const MAX_TRANSACTION_FORWARDING_DELAY: usize = 6;

-/// Slot is a unit of time given to a leader for encoding,
-/// is some some number of Ticks long.
+/// The unit of time given to a leader for encoding a block.
+///
+/// It is some number of _ticks_ long.
 pub type Slot = u64;

-/// Uniquely distinguishes every version of a slot, even if the
-/// slot number is the same, i.e. duplicate slots
+/// Uniquely distinguishes every version of a slot.
+///
+/// The `BankId` is unique even if the slot number of two different slots is the
+/// same. This can happen in the case of e.g. duplicate slots.
pub type BankId = u64;

-/// Epoch is a unit of time a given leader schedule is honored,
-/// some number of Slots.
+/// The unit of time a given leader schedule is honored.
+///
+/// It lasts for some number of [`Slot`]s.
 pub type Epoch = u64;

 pub const GENESIS_EPOCH: Epoch = 0;
 // must be sync with Account::rent_epoch::default()
 pub const INITIAL_RENT_EPOCH: Epoch = 0;

-/// SlotIndex is an index to the slots of a epoch
+/// An index to the slots of an epoch.
 pub type SlotIndex = u64;

-/// SlotCount is the number of slots in a epoch
+/// The number of slots in an epoch.
 pub type SlotCount = u64;

-/// UnixTimestamp is an approximate measure of real-world time,
-/// expressed as Unix time (ie. seconds since the Unix epoch)
+/// An approximate measure of real-world time.
+///
+/// Expressed as Unix time (i.e. seconds since the Unix epoch).
 pub type UnixTimestamp = i64;

-/// Clock represents network time. Members of Clock start from 0 upon
-/// network boot. The best way to map Clock to wallclock time is to use
-/// current Slot, as Epochs vary in duration (they start short and grow
-/// as the network progresses).
+/// A representation of network time.
 ///
+/// All members of `Clock` start from 0 upon network boot.
 #[repr(C)]
 #[derive(Serialize, Deserialize, Debug, Default, PartialEq, Eq)]
 pub struct Clock {
-    /// the current network/bank Slot
+    /// The current `Slot`.
     pub slot: Slot,
-    /// the timestamp of the first Slot in this Epoch
+    /// The timestamp of the first `Slot` in this `Epoch`.
     pub epoch_start_timestamp: UnixTimestamp,
-    /// the bank Epoch
+    /// The current `Epoch`.
     pub epoch: Epoch,
-    /// the future Epoch for which the leader schedule has
-    /// most recently been calculated
+    /// The future `Epoch` for which the leader schedule has
+    /// most recently been calculated.
     pub leader_schedule_epoch: Epoch,
-    /// originally computed from genesis creation time and network time
-    /// in slots (drifty); corrected using validator timestamp oracle as of
-    /// timestamp_correction and timestamp_bounding features
+    /// The approximate real world time of the current slot.
+    ///
+    /// This value was originally computed from genesis creation time and
+    /// network time in slots, incurring a lot of drift. Following activation of
+    /// the [`timestamp_correction` and `timestamp_bounding`][tsc] features it
+    /// is calculated using a [validator timestamp oracle][oracle].
+    ///
+    /// [tsc]: https://docs.solana.com/implemented-proposals/bank-timestamp-correction
+    /// [oracle]: https://docs.solana.com/implemented-proposals/validator-timestamp-oracle
     pub unix_timestamp: UnixTimestamp,
 }
diff --git a/sdk/program/src/epoch_schedule.rs b/sdk/program/src/epoch_schedule.rs
index 01f6f06a9f..984efb1e0f 100644
--- a/sdk/program/src/epoch_schedule.rs
+++ b/sdk/program/src/epoch_schedule.rs
@@ -1,23 +1,34 @@
 //! Configuration for epochs and slots.
+//!
+//! Epochs mark a period of time composed of _slots_, for which a particular
+//! [leader schedule][ls] is in effect. The epoch schedule determines the length
+//! of epochs, and the timing of the next leader-schedule selection.
+//!
+//! [ls]: https://docs.solana.com/cluster/leader-rotation#leader-schedule-rotation
+//!
+//! The epoch schedule does not change during the life of a blockchain,
+//! though the length of an epoch does — during the initial launch of
+//! the chain there is a "warmup" period, where epochs are short, with subsequent
+//! epochs increasing in slots until they last for [`DEFAULT_SLOTS_PER_EPOCH`].
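To make the warmup arithmetic concrete, here is a minimal sketch (an editorial illustration, using only the `MINIMUM_SLOTS_PER_EPOCH` and `DEFAULT_SLOTS_PER_EPOCH` values shown in this diff) of how epoch lengths grow until the schedule reaches steady state:

```rust
// Warmup epochs start at MINIMUM_SLOTS_PER_EPOCH slots and double in
// length each epoch until they reach DEFAULT_SLOTS_PER_EPOCH.
const MINIMUM_SLOTS_PER_EPOCH: u64 = 32;
const DEFAULT_SLOTS_PER_EPOCH: u64 = 432_000;

fn main() {
    let mut slots_in_epoch = MINIMUM_SLOTS_PER_EPOCH;
    let mut epoch = 0u64;
    let mut first_slot = 0u64;
    while slots_in_epoch < DEFAULT_SLOTS_PER_EPOCH {
        println!("warmup epoch {epoch}: {slots_in_epoch} slots, starting at slot {first_slot}");
        first_slot += slots_in_epoch;
        slots_in_epoch *= 2;
        epoch += 1;
    }
    // 32 * 2^14 = 524_288 is the first doubling >= 432_000, so epochs 0..=13
    // are warmup and epoch 14 is the first normal epoch, beginning at slot
    // 32 * (2^14 - 1) = 524_256, which matches the `first_normal_epoch` and
    // `first_normal_slot` formulas documented below.
    println!("first normal epoch: {epoch}, first normal slot: {first_slot}");
}
```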
-/// 1 Epoch = 400 * 8192 ms ~= 55 minutes pub use crate::clock::{Epoch, Slot, DEFAULT_SLOTS_PER_EPOCH}; use { crate::{clone_zeroed, copy_field}, std::mem::MaybeUninit, }; -/// The number of slots before an epoch starts to calculate the leader schedule. -/// Default is an entire epoch, i.e. leader schedule for epoch X is calculated at -/// the beginning of epoch X - 1. +/// The default number of slots before an epoch starts to calculate the leader schedule. pub const DEFAULT_LEADER_SCHEDULE_SLOT_OFFSET: u64 = DEFAULT_SLOTS_PER_EPOCH; /// The maximum number of slots before an epoch starts to calculate the leader schedule. -/// Default is an entire epoch, i.e. leader schedule for epoch X is calculated at -/// the beginning of epoch X - 1. +/// +/// Default is an entire epoch, i.e. leader schedule for epoch X is calculated at +/// the beginning of epoch X - 1. pub const MAX_LEADER_SCHEDULE_EPOCH_OFFSET: u64 = 3; -/// based on MAX_LOCKOUT_HISTORY from vote_program +/// The minimum number of slots per epoch during the warmup period. +/// +/// Based on `MAX_LOCKOUT_HISTORY` from `vote_program`. pub const MINIMUM_SLOTS_PER_EPOCH: u64 = 32; #[repr(C)] @@ -28,16 +39,20 @@ pub struct EpochSchedule { pub slots_per_epoch: u64, /// A number of slots before beginning of an epoch to calculate - /// a leader schedule for that epoch + /// a leader schedule for that epoch. pub leader_schedule_slot_offset: u64, - /// whether epochs start short and grow + /// Whether epochs start short and grow. pub warmup: bool, - /// basically: log2(slots_per_epoch) - log2(MINIMUM_SLOTS_PER_EPOCH) + /// The first epoch after the warmup period. + /// + /// Basically: `log2(slots_per_epoch) - log2(MINIMUM_SLOTS_PER_EPOCH)`. pub first_normal_epoch: Epoch, - /// basically: MINIMUM_SLOTS_PER_EPOCH * (2.pow(first_normal_epoch) - 1) + /// The first slot after the warmup period. + /// + /// Basically: `MINIMUM_SLOTS_PER_EPOCH * (2.pow(first_normal_epoch) - 1)`. pub first_normal_slot: Slot, } diff --git a/sdk/program/src/example_mocks.rs b/sdk/program/src/example_mocks.rs index 97b5f8b5c6..a21470d383 100644 --- a/sdk/program/src/example_mocks.rs +++ b/sdk/program/src/example_mocks.rs @@ -116,6 +116,10 @@ pub mod solana_sdk { address_lookup_table_account, hash, instruction, keccak, message, nonce, pubkey::{self, Pubkey}, system_instruction, system_program, + sysvar::{ + self, + clock::{self, Clock}, + }, }; pub mod account { diff --git a/sdk/program/src/lib.rs b/sdk/program/src/lib.rs index 65b951014c..e97b7c87e1 100644 --- a/sdk/program/src/lib.rs +++ b/sdk/program/src/lib.rs @@ -28,7 +28,7 @@ //! [serialization]: #serialization //! [np]: #native-programs //! [cpi]: #cross-program-instruction-execution -//! [sysvar]: #sysvars +//! [sysvar]: crate::sysvar //! //! Idiomatic examples of `solana-program` usage can be found in //! [the Solana Program Library][spl]. @@ -466,89 +466,6 @@ //! - Invokable by programs? yes //! //! [lut]: https://docs.solana.com/proposals/transactions-v2 -//! -//! # Sysvars -//! -//! Sysvars are special accounts that contain dynamically-updated data about -//! the network cluster, the blockchain history, and the executing transaction. -//! -//! The program IDs for sysvars are defined in the [`sysvar`] module, and simple -//! sysvars implement the [`Sysvar::get`] method, which loads a sysvar directly -//! from the runtime, as in this example that logs the `clock` sysvar: -//! -//! [`Sysvar::get`]: sysvar::Sysvar::get -//! -//! ``` -//! use solana_program::{ -//! account_info::AccountInfo, -//! clock, -//! 
entrypoint::ProgramResult, -//! msg, -//! pubkey::Pubkey, -//! sysvar::Sysvar, -//! }; -//! -//! fn process_instruction( -//! program_id: &Pubkey, -//! accounts: &[AccountInfo], -//! instruction_data: &[u8], -//! ) -> ProgramResult { -//! let clock = clock::Clock::get()?; -//! msg!("clock: {:#?}", clock); -//! Ok(()) -//! } -//! ``` -//! -//! Since Solana sysvars are accounts, if the `AccountInfo` is provided to the -//! program, then the program can deserialize the sysvar with -//! [`Sysvar::from_account_info`] to access its data, as in this example that -//! again logs the [`clock`][clk] sysvar. -//! -//! [`Sysvar::from_account_info`]: sysvar::Sysvar::from_account_info -//! [clk]: sysvar::clock -//! -//! ``` -//! use solana_program::{ -//! account_info::{next_account_info, AccountInfo}, -//! clock, -//! entrypoint::ProgramResult, -//! msg, -//! pubkey::Pubkey, -//! sysvar::Sysvar, -//! }; -//! -//! fn process_instruction( -//! program_id: &Pubkey, -//! accounts: &[AccountInfo], -//! instruction_data: &[u8], -//! ) -> ProgramResult { -//! let account_info_iter = &mut accounts.iter(); -//! let clock_account = next_account_info(account_info_iter)?; -//! let clock = clock::Clock::from_account_info(&clock_account)?; -//! msg!("clock: {:#?}", clock); -//! Ok(()) -//! } -//! ``` -//! -//! When possible, programs should prefer to call `Sysvar::get` instead of -//! deserializing with `Sysvar::from_account_info`, as the latter imposes extra -//! overhead of deserialization while also requiring the sysvar account address -//! be passed to the program, wasting the limited space available to -//! transactions. Deserializing sysvars that can instead be retrieved with -//! `Sysvar::get` should be only be considered for compatibility with older -//! programs that pass around sysvar accounts. -//! -//! Some sysvars are too large to deserialize within a program, and -//! `Sysvar::from_account_info` returns an error. Some sysvars are too large -//! to deserialize within a program, and attempting to will exhaust the -//! program's compute budget. Some sysvars do not implement `Sysvar::get` and -//! return an error. Some sysvars have custom deserializers that do not -//! implement the `Sysvar` trait. These cases are documented in the modules for -//! individual sysvars. -//! -//! For more details see the Solana [documentation on sysvars][sysvardoc]. -//! -//! [sysvardoc]: https://docs.solana.com/developing/runtime-facilities/sysvars #![allow(incomplete_features)] #![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(specialization))] @@ -607,6 +524,7 @@ pub mod syscalls; pub mod system_instruction; pub mod system_program; pub mod sysvar; +pub mod vote; pub mod wasm; #[cfg(target_os = "solana")] @@ -626,15 +544,6 @@ pub mod config { } } -/// The [vote native program][np]. 
-///
-/// [np]: https://docs.solana.com/developing/runtime-facilities/programs#vote-program
-pub mod vote {
-    pub mod program {
-        crate::declare_id!("Vote111111111111111111111111111111111111111");
-    }
-}
-
 /// A vector of Solana SDK IDs
 pub mod sdk_ids {
     use {
diff --git a/sdk/program/src/message/compiled_keys.rs b/sdk/program/src/message/compiled_keys.rs
index d56c7aca2c..c689d08f39 100644
--- a/sdk/program/src/message/compiled_keys.rs
+++ b/sdk/program/src/message/compiled_keys.rs
@@ -80,20 +80,20 @@ impl CompiledKeys {
             .chain(
                 key_meta_map
                     .iter()
-                    .filter_map(|(key, meta)| (meta.is_signer && meta.is_writable).then(|| *key)),
+                    .filter_map(|(key, meta)| (meta.is_signer && meta.is_writable).then_some(*key)),
             )
             .collect();
         let readonly_signer_keys: Vec<Pubkey> = key_meta_map
             .iter()
-            .filter_map(|(key, meta)| (meta.is_signer && !meta.is_writable).then(|| *key))
+            .filter_map(|(key, meta)| (meta.is_signer && !meta.is_writable).then_some(*key))
             .collect();
         let writable_non_signer_keys: Vec<Pubkey> = key_meta_map
             .iter()
-            .filter_map(|(key, meta)| (!meta.is_signer && meta.is_writable).then(|| *key))
+            .filter_map(|(key, meta)| (!meta.is_signer && meta.is_writable).then_some(*key))
             .collect();
         let readonly_non_signer_keys: Vec<Pubkey> = key_meta_map
             .iter()
-            .filter_map(|(key, meta)| (!meta.is_signer && !meta.is_writable).then(|| *key))
+            .filter_map(|(key, meta)| (!meta.is_signer && !meta.is_writable).then_some(*key))
             .collect();
         let signers_len = writable_signer_keys
@@ -160,7 +160,7 @@ impl CompiledKeys {
         for search_key in self
             .key_meta_map
             .iter()
-            .filter_map(|(key, meta)| key_meta_filter(meta).then(|| key))
+            .filter_map(|(key, meta)| key_meta_filter(meta).then_some(key))
         {
             for (key_index, key) in lookup_table_addresses.iter().enumerate() {
                 if key == search_key {
diff --git a/sdk/program/src/nonce/state/mod.rs b/sdk/program/src/nonce/state/mod.rs
index a4a850b93c..d55bc9063a 100644
--- a/sdk/program/src/nonce/state/mod.rs
+++ b/sdk/program/src/nonce/state/mod.rs
@@ -46,7 +46,7 @@ impl Versions {
             Self::Current(state) => match **state {
                 State::Uninitialized => None,
                 State::Initialized(ref data) => {
-                    (recent_blockhash == &data.blockhash()).then(|| data)
+                    (recent_blockhash == &data.blockhash()).then_some(data)
                 }
             },
         }
diff --git a/sdk/program/src/rent.rs b/sdk/program/src/rent.rs
index 3a9ef33fd9..f5d95ec8d6 100644
--- a/sdk/program/src/rent.rs
+++ b/sdk/program/src/rent.rs
@@ -3,40 +3,53 @@
 //! [rent]: https://docs.solana.com/implemented-proposals/rent

 #![allow(clippy::integer_arithmetic)]
-//! configuration for network rent

 use {
     crate::{clone_zeroed, copy_field},
     std::mem::MaybeUninit,
 };

+/// Configuration of network rent.
 #[repr(C)]
 #[derive(Serialize, Deserialize, PartialEq, Copy, Debug, AbiExample)]
 pub struct Rent {
-    /// Rental rate
+    /// Rental rate in lamports/byte-year.
     pub lamports_per_byte_year: u64,

-    /// exemption threshold, in years
+    /// Amount of time (in years) a balance must include rent for the account to
+    /// be rent exempt.
     pub exemption_threshold: f64,

-    // What portion of collected rent are to be destroyed, percentage-wise
+    /// The percentage of collected rent that is burned.
+    ///
+    /// Valid values are in the range [0, 100]. The remaining percentage is
+    /// distributed to validators.
     pub burn_percent: u8,
 }

-/// default rental rate in lamports/byte-year, based on:
-/// 10^9 lamports per SOL
-/// $1 per SOL
-/// $0.01 per megabyte day
-/// $3.65 per megabyte year
+/// Default rental rate in lamports/byte-year.
+/// +/// This calculation is based on: +/// - 10^9 lamports per SOL +/// - $1 per SOL +/// - $0.01 per megabyte day +/// - $3.65 per megabyte year pub const DEFAULT_LAMPORTS_PER_BYTE_YEAR: u64 = 1_000_000_000 / 100 * 365 / (1024 * 1024); -/// default amount of time (in years) the balance has to include rent for +/// Default amount of time (in years) the balance has to include rent for the +/// account to be rent exempt. pub const DEFAULT_EXEMPTION_THRESHOLD: f64 = 2.0; -/// default percentage of rent to burn (Valid values are 0 to 100) +/// Default percentage of collected rent that is burned. +/// +/// Valid values are in the range [0, 100]. The remaining percentage is +/// distributed to validators. pub const DEFAULT_BURN_PERCENT: u8 = 50; -/// account storage overhead for calculation of base rent +/// Account storage overhead for calculation of base rent. +/// +/// This is the number of bytes required to store an account with no data. It is +/// added to an accounts data length when calculating [`Rent::minimum_balance`]. pub const ACCOUNT_STORAGE_OVERHEAD: u64 = 128; impl Default for Rent { @@ -63,28 +76,33 @@ impl Clone for Rent { } impl Rent { - /// calculate how much rent to burn from the collected rent + /// Calculate how much rent to burn from the collected rent. + /// + /// The first value returned is the amount burned. The second is the amount + /// to distribute to validators. pub fn calculate_burn(&self, rent_collected: u64) -> (u64, u64) { let burned_portion = (rent_collected * u64::from(self.burn_percent)) / 100; (burned_portion, rent_collected - burned_portion) } - /// minimum balance due for rent-exemption of a given size Account::data.len() + + /// Minimum balance due for rent-exemption of a given account data size. /// /// Note: a stripped-down version of this calculation is used in - /// calculate_split_rent_exempt_reserve in the stake program. When this function is updated, -- - /// eg. when making rent variable -- the stake program will need to be refactored + /// `calculate_split_rent_exempt_reserve` in the stake program. When this + /// function is updated, eg. when making rent variable, the stake program + /// will need to be refactored. pub fn minimum_balance(&self, data_len: usize) -> u64 { let bytes = data_len as u64; (((ACCOUNT_STORAGE_OVERHEAD + bytes) * self.lamports_per_byte_year) as f64 * self.exemption_threshold) as u64 } - /// whether a given balance and data_len would be exempt + /// Whether a given balance and data length would be exempt. pub fn is_exempt(&self, balance: u64, data_len: usize) -> bool { balance >= self.minimum_balance(data_len) } - /// rent due on account's data_len with balance + /// Rent due on account's data length with balance. pub fn due(&self, balance: u64, data_len: usize, years_elapsed: f64) -> RentDue { if self.is_exempt(balance, data_len) { RentDue::Exempt @@ -93,13 +111,16 @@ impl Rent { } } - /// rent due for account that is known to be not exempt + /// Rent due for account that is known to be not exempt. pub fn due_amount(&self, data_len: usize, years_elapsed: f64) -> u64 { let actual_data_len = data_len as u64 + ACCOUNT_STORAGE_OVERHEAD; let lamports_per_year = self.lamports_per_byte_year * actual_data_len; (lamports_per_year as f64 * years_elapsed) as u64 } + /// Creates a `Rent` that charges no lamports. + /// + /// This is used for testing. pub fn free() -> Self { Self { lamports_per_byte_year: 0, @@ -107,6 +128,9 @@ impl Rent { } } + /// Creates a `Rent` that is scaled based on the number of slots in an epoch. 
+ /// + /// This is used for testing. pub fn with_slots_per_epoch(slots_per_epoch: u64) -> Self { let ratio = slots_per_epoch as f64 / DEFAULT_SLOTS_PER_EPOCH as f64; let exemption_threshold = DEFAULT_EXEMPTION_THRESHOLD as f64 * ratio; @@ -119,17 +143,17 @@ impl Rent { } } -/// Enumerate return values from `Rent::due()` +/// The return value of [`Rent::due`]. #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum RentDue { - /// Used to indicate the account is rent exempt + /// Used to indicate the account is rent exempt. Exempt, - /// The account owes rent, and the amount is the field + /// The account owes this much rent. Paying(u64), } impl RentDue { - /// Return the lamports due for rent + /// Return the lamports due for rent. pub fn lamports(&self) -> u64 { match self { RentDue::Exempt => 0, @@ -137,7 +161,7 @@ impl RentDue { } } - /// Return 'true' if rent exempt + /// Return 'true' if rent exempt. pub fn is_exempt(&self) -> bool { match self { RentDue::Exempt => true, diff --git a/sdk/program/src/slot_hashes.rs b/sdk/program/src/slot_hashes.rs index b4d68ebc8d..9de8638195 100644 --- a/sdk/program/src/slot_hashes.rs +++ b/sdk/program/src/slot_hashes.rs @@ -4,7 +4,7 @@ //! //! The sysvar ID is declared in [`sysvar::slot_hashes`]. //! -//! [`sysvar::slot_hashes`]: crate::slot_hashes +//! [`sysvar::slot_hashes`]: crate::sysvar::slot_hashes pub use crate::clock::Slot; use { diff --git a/sdk/program/src/slot_history.rs b/sdk/program/src/slot_history.rs index e4bc43efe9..1eea9e176e 100644 --- a/sdk/program/src/slot_history.rs +++ b/sdk/program/src/slot_history.rs @@ -4,12 +4,13 @@ //! //! The sysvar ID is declared in [`sysvar::slot_history`]. //! -//! [`sysvar::slot_history`]: crate::slot_history +//! [`sysvar::slot_history`]: crate::sysvar::slot_history #![allow(clippy::integer_arithmetic)] pub use crate::clock::Slot; use bv::{BitVec, BitsMut}; +/// A bitvector indicating which slots are present in the past epoch. #[repr(C)] #[derive(Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct SlotHistory { diff --git a/sdk/program/src/stake/tools.rs b/sdk/program/src/stake/tools.rs index 842a822b0e..e0447f49fc 100644 --- a/sdk/program/src/stake/tools.rs +++ b/sdk/program/src/stake/tools.rs @@ -28,7 +28,7 @@ fn get_minimum_delegation_return_data() -> Result { .ok_or(ProgramError::InvalidInstructionData) .and_then(|(program_id, return_data)| { (program_id == super::program::id()) - .then(|| return_data) + .then_some(return_data) .ok_or(ProgramError::IncorrectProgramId) }) .and_then(|return_data| { diff --git a/sdk/program/src/sysvar/clock.rs b/sdk/program/src/sysvar/clock.rs index d6a8c73d81..bf8b518f5a 100644 --- a/sdk/program/src/sysvar/clock.rs +++ b/sdk/program/src/sysvar/clock.rs @@ -1,5 +1,131 @@ -//! This account contains the clock slot, epoch, and leader_schedule_epoch +//! Information about the network’s clock, ticks, slots, etc. //! +//! The _clock sysvar_ provides access to the [`Clock`] type, which includes the +//! current slot, the current epoch, and the approximate real-world time of the +//! slot. +//! +//! [`Clock`] implements [`Sysvar::get`] and can be loaded efficiently without +//! passing the sysvar account ID to the program. +//! +//! See also the Solana [documentation on the clock sysvar][sdoc]. +//! +//! [sdoc]: https://docs.solana.com/developing/runtime-facilities/sysvars#clock +//! +//! # Examples +//! +//! Accessing via on-chain program directly: +//! +//! ```no_run +//! # use solana_program::{ +//! # account_info::{AccountInfo, next_account_info}, +//! 
# entrypoint::ProgramResult, +//! # msg, +//! # pubkey::Pubkey, +//! # sysvar::clock::{self, Clock}, +//! # sysvar::Sysvar, +//! # }; +//! # use solana_program::program_error::ProgramError; +//! # +//! fn process_instruction( +//! program_id: &Pubkey, +//! accounts: &[AccountInfo], +//! instruction_data: &[u8], +//! ) -> ProgramResult { +//! +//! let clock = Clock::get()?; +//! msg!("clock: {:#?}", clock); +//! +//! Ok(()) +//! } +//! # +//! # use solana_program::sysvar::SysvarId; +//! # let p = Clock::id(); +//! # let l = &mut 1169280; +//! # let d = &mut vec![240, 153, 233, 7, 0, 0, 0, 0, 11, 115, 118, 98, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 0, 0, 52, 1, 0, 0, 0, 0, 0, 0, 121, 50, 119, 98, 0, 0, 0, 0]; +//! # let a = AccountInfo::new(&p, false, false, l, d, &p, false, 0); +//! # let accounts = &[a.clone(), a]; +//! # process_instruction( +//! # &Pubkey::new_unique(), +//! # accounts, +//! # &[], +//! # )?; +//! # Ok::<(), ProgramError>(()) +//! ``` +//! +//! Accessing via on-chain program's account parameters: +//! +//! ``` +//! # use solana_program::{ +//! # account_info::{AccountInfo, next_account_info}, +//! # entrypoint::ProgramResult, +//! # msg, +//! # pubkey::Pubkey, +//! # sysvar::clock::{self, Clock}, +//! # sysvar::Sysvar, +//! # }; +//! # use solana_program::program_error::ProgramError; +//! # +//! fn process_instruction( +//! program_id: &Pubkey, +//! accounts: &[AccountInfo], +//! instruction_data: &[u8], +//! ) -> ProgramResult { +//! let account_info_iter = &mut accounts.iter(); +//! let clock_account_info = next_account_info(account_info_iter)?; +//! +//! assert!(clock::check_id(clock_account_info.key)); +//! +//! let clock = Clock::from_account_info(clock_account_info)?; +//! msg!("clock: {:#?}", clock); +//! +//! Ok(()) +//! } +//! # +//! # use solana_program::sysvar::SysvarId; +//! # let p = Clock::id(); +//! # let l = &mut 1169280; +//! # let d = &mut vec![240, 153, 233, 7, 0, 0, 0, 0, 11, 115, 118, 98, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 0, 0, 52, 1, 0, 0, 0, 0, 0, 0, 121, 50, 119, 98, 0, 0, 0, 0]; +//! # let a = AccountInfo::new(&p, false, false, l, d, &p, false, 0); +//! # let accounts = &[a.clone(), a]; +//! # process_instruction( +//! # &Pubkey::new_unique(), +//! # accounts, +//! # &[], +//! # )?; +//! # Ok::<(), ProgramError>(()) +//! ``` +//! +//! Accessing via the RPC client: +//! +//! ``` +//! # use solana_program::example_mocks::solana_sdk; +//! # use solana_program::example_mocks::solana_client; +//! # use solana_sdk::account::Account; +//! # use solana_client::rpc_client::RpcClient; +//! # use solana_sdk::sysvar::clock::{self, Clock}; +//! # use anyhow::Result; +//! # +//! fn print_sysvar_clock(client: &RpcClient) -> Result<()> { +//! # client.set_get_account_response(clock::ID, Account { +//! # lamports: 1169280, +//! # data: vec![240, 153, 233, 7, 0, 0, 0, 0, 11, 115, 118, 98, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 0, 0, 52, 1, 0, 0, 0, 0, 0, 0, 121, 50, 119, 98, 0, 0, 0, 0], +//! # owner: solana_sdk::system_program::ID, +//! # executable: false, +//! # rent_epoch: 307, +//! # }); +//! # +//! let clock = client.get_account(&clock::ID)?; +//! let data: Clock = bincode::deserialize(&clock.data)?; +//! +//! Ok(()) +//! } +//! # +//! # let client = RpcClient::new(String::new()); +//! # print_sysvar_clock(&client)?; +//! # +//! # Ok::<(), anyhow::Error>(()) +//! 
``` + pub use crate::clock::Clock; use crate::{impl_sysvar_get, program_error::ProgramError, sysvar::Sysvar}; diff --git a/sdk/program/src/sysvar/epoch_schedule.rs b/sdk/program/src/sysvar/epoch_schedule.rs index d5b1a81a10..81edba476c 100644 --- a/sdk/program/src/sysvar/epoch_schedule.rs +++ b/sdk/program/src/sysvar/epoch_schedule.rs @@ -1,5 +1,130 @@ -//! This account contains the current cluster rent +//! Information about epoch duration. //! +//! The _epoch schedule_ sysvar provides access to the [`EpochSchedule`] type, +//! which includes the number of slots per epoch, timing of leader schedule +//! selection, and information about epoch warm-up time. +//! +//! [`EpochSchedule`] implements [`Sysvar::get`] and can be loaded efficiently without +//! passing the sysvar account ID to the program. +//! +//! See also the Solana [documentation on the epoch schedule sysvar][sdoc]. +//! +//! [sdoc]: https://docs.solana.com/developing/runtime-facilities/sysvars#epochschedule +//! +//! # Examples +//! +//! Accessing via on-chain program directly: +//! +//! ```no_run +//! # use solana_program::{ +//! # account_info::{AccountInfo, next_account_info}, +//! # entrypoint::ProgramResult, +//! # msg, +//! # pubkey::Pubkey, +//! # sysvar::epoch_schedule::{self, EpochSchedule}, +//! # sysvar::Sysvar, +//! # }; +//! # use solana_program::program_error::ProgramError; +//! # +//! fn process_instruction( +//! program_id: &Pubkey, +//! accounts: &[AccountInfo], +//! instruction_data: &[u8], +//! ) -> ProgramResult { +//! +//! let epoch_schedule = EpochSchedule::get()?; +//! msg!("epoch_schedule: {:#?}", epoch_schedule); +//! +//! Ok(()) +//! } +//! # +//! # use solana_program::sysvar::SysvarId; +//! # let p = EpochSchedule::id(); +//! # let l = &mut 1120560; +//! # let d = &mut vec![0, 32, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; +//! # let a = AccountInfo::new(&p, false, false, l, d, &p, false, 0); +//! # let accounts = &[a.clone(), a]; +//! # process_instruction( +//! # &Pubkey::new_unique(), +//! # accounts, +//! # &[], +//! # )?; +//! # Ok::<(), ProgramError>(()) +//! ``` +//! +//! Accessing via on-chain program's account parameters: +//! +//! ``` +//! # use solana_program::{ +//! # account_info::{AccountInfo, next_account_info}, +//! # entrypoint::ProgramResult, +//! # msg, +//! # pubkey::Pubkey, +//! # sysvar::epoch_schedule::{self, EpochSchedule}, +//! # sysvar::Sysvar, +//! # }; +//! # use solana_program::program_error::ProgramError; +//! # +//! fn process_instruction( +//! program_id: &Pubkey, +//! accounts: &[AccountInfo], +//! instruction_data: &[u8], +//! ) -> ProgramResult { +//! let account_info_iter = &mut accounts.iter(); +//! let epoch_schedule_account_info = next_account_info(account_info_iter)?; +//! +//! assert!(epoch_schedule::check_id(epoch_schedule_account_info.key)); +//! +//! let epoch_schedule = EpochSchedule::from_account_info(epoch_schedule_account_info)?; +//! msg!("epoch_schedule: {:#?}", epoch_schedule); +//! +//! Ok(()) +//! } +//! # +//! # use solana_program::sysvar::SysvarId; +//! # let p = EpochSchedule::id(); +//! # let l = &mut 1120560; +//! # let d = &mut vec![0, 32, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; +//! # let a = AccountInfo::new(&p, false, false, l, d, &p, false, 0); +//! # let accounts = &[a.clone(), a]; +//! # process_instruction( +//! # &Pubkey::new_unique(), +//! # accounts, +//! # &[], +//! # )?; +//! # Ok::<(), ProgramError>(()) +//! ``` +//! +//! 
Accessing via the RPC client: +//! +//! ``` +//! # use solana_program::example_mocks::solana_sdk; +//! # use solana_program::example_mocks::solana_client; +//! # use solana_sdk::account::Account; +//! # use solana_client::rpc_client::RpcClient; +//! # use solana_sdk::sysvar::epoch_schedule::{self, EpochSchedule}; +//! # use anyhow::Result; +//! # +//! fn print_sysvar_epoch_schedule(client: &RpcClient) -> Result<()> { +//! # client.set_get_account_response(epoch_schedule::ID, Account { +//! # lamports: 1120560, +//! # data: vec![0, 32, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], +//! # owner: solana_sdk::system_program::ID, +//! # executable: false, +//! # rent_epoch: 307, +//! # }); +//! # +//! let epoch_schedule = client.get_account(&epoch_schedule::ID)?; +//! let data: EpochSchedule = bincode::deserialize(&epoch_schedule.data)?; +//! +//! Ok(()) +//! } +//! # +//! # let client = RpcClient::new(String::new()); +//! # print_sysvar_epoch_schedule(&client)?; +//! # +//! # Ok::<(), anyhow::Error>(()) +//! ``` pub use crate::epoch_schedule::EpochSchedule; use crate::{impl_sysvar_get, program_error::ProgramError, sysvar::Sysvar}; diff --git a/sdk/program/src/sysvar/fees.rs b/sdk/program/src/sysvar/fees.rs index a957317f21..f80d8c6918 100644 --- a/sdk/program/src/sysvar/fees.rs +++ b/sdk/program/src/sysvar/fees.rs @@ -1,5 +1,23 @@ -//! This account contains the current cluster fees +//! Current cluster fees. //! +//! The _fees sysvar_ provides access to the [`Fees`] type, which contains the +//! current [`FeeCalculator`]. +//! +//! [`Fees`] implements [`Sysvar::get`] and can be loaded efficiently without +//! passing the sysvar account ID to the program. +//! +//! This sysvar is deprecated and will not be available in the future. +//! Transaction fees should be determined with the [`getFeeForMessage`] RPC +//! method. For additional context see the [Comprehensive Compute Fees +//! proposal][ccf]. +//! +//! [`getFeeForMessage`]: https://docs.solana.com/developing/clients/jsonrpc-api#getfeeformessage +//! [ccf]: https://docs.solana.com/proposals/comprehensive-compute-fees +//! +//! See also the Solana [documentation on the fees sysvar][sdoc]. +//! +//! [sdoc]: https://docs.solana.com/developing/runtime-facilities/sysvars#fees + #![allow(deprecated)] use { @@ -12,6 +30,7 @@ use { crate::declare_deprecated_sysvar_id!("SysvarFees111111111111111111111111111111111", Fees); +/// Transaction fees. #[deprecated( since = "1.9.0", note = "Please do not use, will no longer be available in the future" diff --git a/sdk/program/src/sysvar/instructions.rs b/sdk/program/src/sysvar/instructions.rs index 217753baa1..9d1ca40471 100644 --- a/sdk/program/src/sysvar/instructions.rs +++ b/sdk/program/src/sysvar/instructions.rs @@ -1,5 +1,34 @@ +//! The serialized instructions of the current transaction. +//! +//! The _instructions sysvar_ provides access to the serialized instruction data +//! for the currently-running transaction. This allows for [instruction +//! introspection][in], which is required for correctly interoperating with +//! native programs like the [secp256k1] and [ed25519] programs. +//! +//! [in]: https://docs.solana.com/implemented-proposals/instruction_introspection +//! [secp256k1]: crate::secp256k1_program +//! [ed25519]: crate::ed25519_program +//! +//! Unlike other sysvars, the data in the instructions sysvar is not accessed +//! through a type that implements the [`Sysvar`] trait. Instead, the +//! 
instruction sysvar is accessed through several free functions within this
+//! module.
+//!
+//! [`Sysvar`]: crate::sysvar::Sysvar
+//!
+//! See also the Solana [documentation on the instructions sysvar][sdoc].
+//!
+//! [sdoc]: https://docs.solana.com/developing/runtime-facilities/sysvars#instructions
+//!
+//! # Examples
+//!
+//! For a complete example of how the instructions sysvar is used see the
+//! documentation for [`secp256k1_instruction`] in the `solana-sdk` crate.
+//!
+//! [`secp256k1_instruction`]: https://docs.rs/solana-sdk/latest/solana_sdk/secp256k1_instruction/index.html
+
 #![allow(clippy::integer_arithmetic)]
-//! This account contains the serialized transaction instructions
+
 use crate::{
     account_info::AccountInfo,
     instruction::{AccountMeta, Instruction},
@@ -14,12 +43,23 @@ use {
     bitflags::bitflags,
 };

-// Instructions Sysvar, dummy type, use the associated helpers instead of the Sysvar trait
+/// Instructions sysvar, dummy type.
+///
+/// This type exists for consistency with other sysvar modules, but is a dummy
+/// type that does not contain sysvar data. It implements the [`SysvarId`] trait
+/// but does not implement the [`Sysvar`] trait.
+///
+/// [`SysvarId`]: crate::sysvar::SysvarId
+/// [`Sysvar`]: crate::sysvar::Sysvar
+///
+/// Use the free functions in this module to access the instructions sysvar.
 pub struct Instructions();

 crate::declare_sysvar_id!("Sysvar1nstructions1111111111111111111111111", Instructions);

-// Construct the account data for the Instructions Sysvar
+/// Construct the account data for the instructions sysvar.
+///
+/// This function is used by the runtime and not available to Solana programs.
 #[cfg(not(target_os = "solana"))]
 pub fn construct_instructions_data(instructions: &[BorrowedInstruction]) -> Vec<u8> {
     let mut data = serialize_instructions(instructions);
@@ -29,14 +69,20 @@ pub fn construct_instructions_data(instructions: &[BorrowedInstruction]) -> Vec<u8> {
     data
 }

-/// Borrowed version of AccountMeta
+/// Borrowed version of `AccountMeta`.
+///
+/// This struct is used by the runtime when constructing the sysvar. It is not
+/// useful to Solana programs.
 pub struct BorrowedAccountMeta<'a> {
     pub pubkey: &'a Pubkey,
     pub is_signer: bool,
     pub is_writable: bool,
 }

-/// Borrowed version of Instruction
+/// Borrowed version of `Instruction`.
+///
+/// This struct is used by the runtime when constructing the sysvar. It is not
+/// useful to Solana programs.
 pub struct BorrowedInstruction<'a> {
     pub program_id: &'a Pubkey,
     pub accounts: Vec<BorrowedAccountMeta<'a>>,
@@ -99,7 +145,9 @@ fn serialize_instructions(instructions: &[BorrowedInstruction]) -> Vec<u8> {
 }

 /// Load the current `Instruction`'s index in the currently executing
-/// `Transaction` from the Instructions Sysvar data
+/// `Transaction`.
+///
+/// `data` is the instructions sysvar account data.
 #[deprecated(
     since = "1.8.0",
     note = "Unsafe because the sysvar accounts address is not checked, please use `load_current_index_checked` instead"
 )]
@@ -112,7 +160,7 @@ pub fn load_current_index(data: &[u8]) -> u16 {
 }

 /// Load the current `Instruction`'s index in the currently executing
-/// `Transaction`
+/// `Transaction`.
+///
+/// # Errors
+///
+/// Returns [`ProgramError::UnsupportedSysvar`] if the given account's ID is not equal to [`ID`].
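+// Editorial sketch: these checked helpers are typically combined for
+// instruction introspection. Assuming `ix_sysvar` is the instructions-sysvar
+// `AccountInfo` handed to the program (hypothetical name):
+//
+//     let index = load_current_index_checked(ix_sysvar)? as usize;
+//     let current_ix = load_instruction_at_checked(index, ix_sysvar)?;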
pub fn load_current_index_checked( instruction_sysvar_account_info: &AccountInfo, ) -> Result { @@ -127,7 +179,7 @@ pub fn load_current_index_checked( Ok(u16::from_le_bytes(instr_fixed_data)) } -/// Store the current `Instruction`'s index in the Instructions Sysvar data +/// Store the current `Instruction`'s index in the instructions sysvar data. pub fn store_current_index(data: &mut [u8], instruction_index: u16) { let last_index = data.len() - 2; data[last_index..last_index + 2].copy_from_slice(&instruction_index.to_le_bytes()); @@ -178,7 +230,9 @@ fn deserialize_instruction(index: usize, data: &[u8]) -> Result Result ProgramResult { +//! let clock = clock::Clock::get()?; +//! msg!("clock: {:#?}", clock); +//! Ok(()) +//! } +//! ``` +//! +//! Since Solana sysvars are accounts, if the `AccountInfo` is provided to the +//! program, then the program can deserialize the sysvar with +//! [`Sysvar::from_account_info`] to access its data, as in this example that +//! again logs the [`clock`] sysvar. +//! +//! ``` +//! use solana_program::{ +//! account_info::{next_account_info, AccountInfo}, +//! clock, +//! entrypoint::ProgramResult, +//! msg, +//! pubkey::Pubkey, +//! sysvar::Sysvar, +//! }; +//! +//! fn process_instruction( +//! program_id: &Pubkey, +//! accounts: &[AccountInfo], +//! instruction_data: &[u8], +//! ) -> ProgramResult { +//! let account_info_iter = &mut accounts.iter(); +//! let clock_account = next_account_info(account_info_iter)?; +//! let clock = clock::Clock::from_account_info(&clock_account)?; +//! msg!("clock: {:#?}", clock); +//! Ok(()) +//! } +//! ``` +//! +//! When possible, programs should prefer to call `Sysvar::get` instead of +//! deserializing with `Sysvar::from_account_info`, as the latter imposes extra +//! overhead of deserialization while also requiring the sysvar account address +//! be passed to the program, wasting the limited space available to +//! transactions. Deserializing sysvars that can instead be retrieved with +//! `Sysvar::get` should be only be considered for compatibility with older +//! programs that pass around sysvar accounts. +//! +//! Some sysvars are too large to deserialize within a program, and +//! `Sysvar::from_account_info` returns an error, or the serialization attempt +//! will exhaust the program's compute budget. Some sysvars do not implement +//! `Sysvar::get` and return an error. Some sysvars have custom deserializers +//! that do not implement the `Sysvar` trait. These cases are documented in the +//! modules for individual sysvars. +//! +//! All sysvar accounts are owned by the account identified by [`sysvar::ID`]. +//! +//! [`sysvar::ID`]: crate::sysvar::ID +//! //! For more details see the Solana [documentation on sysvars][sysvardoc]. //! //! [sysvardoc]: https://docs.solana.com/developing/runtime-facilities/sysvars @@ -37,6 +114,7 @@ lazy_static! { ]; } +/// Returns `true` of the given `Pubkey` is a sysvar account. pub fn is_sysvar_id(id: &Pubkey) -> bool { ALL_IDS.iter().any(|key| key == id) } @@ -94,35 +172,54 @@ macro_rules! declare_deprecated_sysvar_id( // Owner pubkey for sysvar accounts crate::declare_id!("Sysvar1111111111111111111111111111111111111"); +/// A type that holds sysvar data and has an associated sysvar `Pubkey`. pub trait SysvarId { + /// The `Pubkey` of the sysvar. fn id() -> Pubkey; + /// Returns `true` if the given pubkey is the program ID. fn check_id(pubkey: &Pubkey) -> bool; } -// Sysvar utilities +/// A type that holds sysvar data. 
@@ -94,35 +172,54 @@ macro_rules! declare_deprecated_sysvar_id(
 // Owner pubkey for sysvar accounts
 crate::declare_id!("Sysvar1111111111111111111111111111111111111");
 
+/// A type that holds sysvar data and has an associated sysvar `Pubkey`.
 pub trait SysvarId {
+    /// The `Pubkey` of the sysvar.
     fn id() -> Pubkey;
 
+    /// Returns `true` if the given pubkey is the sysvar's ID.
     fn check_id(pubkey: &Pubkey) -> bool;
 }
 
-// Sysvar utilities
+/// A type that holds sysvar data.
 pub trait Sysvar: SysvarId + Default + Sized + serde::Serialize + serde::de::DeserializeOwned {
+    /// The size in bytes of the sysvar as serialized account data.
     fn size_of() -> usize {
         bincode::serialized_size(&Self::default()).unwrap() as usize
     }
 
-    /// Deserializes a sysvar from its `AccountInfo`.
+    /// Deserializes the sysvar from its `AccountInfo`.
     ///
     /// # Errors
     ///
-    /// If `account_info` does not have the same ID as the sysvar
-    /// this function returns [`ProgramError::InvalidArgument`].
+    /// If `account_info` does not have the same ID as the sysvar this function
+    /// returns [`ProgramError::InvalidArgument`].
     fn from_account_info(account_info: &AccountInfo) -> Result<Self, ProgramError> {
         if !Self::check_id(account_info.unsigned_key()) {
             return Err(ProgramError::InvalidArgument);
         }
         bincode::deserialize(&account_info.data.borrow()).map_err(|_| ProgramError::InvalidArgument)
     }
+
+    /// Serializes the sysvar to `AccountInfo`.
+    ///
+    /// # Errors
+    ///
+    /// Returns `None` if serialization failed.
     fn to_account_info(&self, account_info: &mut AccountInfo) -> Option<()> {
         bincode::serialize_into(&mut account_info.data.borrow_mut()[..], self).ok()
     }
+
+    /// Load the sysvar directly from the runtime.
+    ///
+    /// This is the preferred way to load a sysvar. Calling this method does not
+    /// incur any deserialization overhead, and does not require the sysvar
+    /// account to be passed to the program.
+    ///
+    /// Not all sysvars support this method. If not, it returns
+    /// [`ProgramError::UnsupportedSysvar`].
     fn get() -> Result<Self, ProgramError> {
         Err(ProgramError::UnsupportedSysvar)
     }
diff --git a/sdk/program/src/sysvar/recent_blockhashes.rs b/sdk/program/src/sysvar/recent_blockhashes.rs
index fc4da8980e..a9b02946a5 100644
--- a/sdk/program/src/sysvar/recent_blockhashes.rs
+++ b/sdk/program/src/sysvar/recent_blockhashes.rs
@@ -1,3 +1,21 @@
+//! Information about recent blocks and their fee calculators.
+//!
+//! The _recent blockhashes sysvar_ provides access to the [`RecentBlockhashes`],
+//! which contains recent blockhashes and their [`FeeCalculator`]s.
+//!
+//! [`RecentBlockhashes`] does not implement [`Sysvar::get`].
+//!
+//! This sysvar is deprecated and should not be used. Transaction fees should be
+//! determined with the [`getFeeForMessage`] RPC method. For additional context
+//! see the [Comprehensive Compute Fees proposal][ccf].
+//!
+//! [`getFeeForMessage`]: https://docs.solana.com/developing/clients/jsonrpc-api#getfeeformessage
+//! [ccf]: https://docs.solana.com/proposals/comprehensive-compute-fees
+//!
+//! See also the Solana [documentation on the recent blockhashes sysvar][sdoc].
+//!
+//! [sdoc]: https://docs.solana.com/developing/runtime-facilities/sysvars#recentblockhashes
+
 #![allow(deprecated)]
 #![allow(clippy::integer_arithmetic)]
 use {
diff --git a/sdk/program/src/sysvar/rent.rs b/sdk/program/src/sysvar/rent.rs
index 83f2dfdecb..8b604b9f4f 100644
--- a/sdk/program/src/sysvar/rent.rs
+++ b/sdk/program/src/sysvar/rent.rs
@@ -1,5 +1,131 @@
-//! This account contains the current cluster rent
+//! Configuration for network [rent].
 //!
+//! [rent]: https://docs.solana.com/implemented-proposals/rent
+//!
+//! The _rent sysvar_ provides access to the [`Rent`] type, which defines
+//! storage rent fees.
+//!
+//! [`Rent`] implements [`Sysvar::get`] and can be loaded efficiently without
+//! passing the sysvar account ID to the program.
+//!
+//! See also the Solana [documentation on the rent sysvar][sdoc].
+//!
+//! [sdoc]: https://docs.solana.com/developing/runtime-facilities/sysvars#rent
+//!
+//!
# Examples +//! +//! Accessing via on-chain program directly: +//! +//! ```no_run +//! # use solana_program::{ +//! # account_info::{AccountInfo, next_account_info}, +//! # entrypoint::ProgramResult, +//! # msg, +//! # pubkey::Pubkey, +//! # sysvar::rent::{self, Rent}, +//! # sysvar::Sysvar, +//! # }; +//! # use solana_program::program_error::ProgramError; +//! # +//! fn process_instruction( +//! program_id: &Pubkey, +//! accounts: &[AccountInfo], +//! instruction_data: &[u8], +//! ) -> ProgramResult { +//! +//! let rent = Rent::get()?; +//! msg!("rent: {:#?}", rent); +//! +//! Ok(()) +//! } +//! # +//! # use solana_program::sysvar::SysvarId; +//! # let p = Rent::id(); +//! # let l = &mut 1009200; +//! # let d = &mut vec![152, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64, 100]; +//! # let a = AccountInfo::new(&p, false, false, l, d, &p, false, 0); +//! # let accounts = &[a.clone(), a]; +//! # process_instruction( +//! # &Pubkey::new_unique(), +//! # accounts, +//! # &[], +//! # )?; +//! # Ok::<(), ProgramError>(()) +//! ``` +//! +//! Accessing via on-chain program's parameters: +//! +//! ``` +//! # use solana_program::{ +//! # account_info::{AccountInfo, next_account_info}, +//! # entrypoint::ProgramResult, +//! # msg, +//! # pubkey::Pubkey, +//! # sysvar::rent::{self, Rent}, +//! # sysvar::Sysvar, +//! # }; +//! # use solana_program::program_error::ProgramError; +//! # +//! fn process_instruction( +//! program_id: &Pubkey, +//! accounts: &[AccountInfo], +//! instruction_data: &[u8], +//! ) -> ProgramResult { +//! let account_info_iter = &mut accounts.iter(); +//! let rent_account_info = next_account_info(account_info_iter)?; +//! +//! assert!(rent::check_id(rent_account_info.key)); +//! +//! let rent = Rent::from_account_info(rent_account_info)?; +//! msg!("rent: {:#?}", rent); +//! +//! Ok(()) +//! } +//! # +//! # use solana_program::sysvar::SysvarId; +//! # let p = Rent::id(); +//! # let l = &mut 1009200; +//! # let d = &mut vec![152, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64, 100]; +//! # let a = AccountInfo::new(&p, false, false, l, d, &p, false, 0); +//! # let accounts = &[a.clone(), a]; +//! # process_instruction( +//! # &Pubkey::new_unique(), +//! # accounts, +//! # &[], +//! # )?; +//! # Ok::<(), ProgramError>(()) +//! ``` +//! +//! Accessing via the RPC client: +//! +//! ``` +//! # use solana_program::example_mocks::solana_sdk; +//! # use solana_program::example_mocks::solana_client; +//! # use solana_sdk::account::Account; +//! # use solana_client::rpc_client::RpcClient; +//! # use solana_sdk::sysvar::rent::{self, Rent}; +//! # use anyhow::Result; +//! # +//! fn print_sysvar_rent(client: &RpcClient) -> Result<()> { +//! # client.set_get_account_response(rent::ID, Account { +//! # lamports: 1009200, +//! # data: vec![152, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64, 100], +//! # owner: solana_sdk::system_program::ID, +//! # executable: false, +//! # rent_epoch: 307, +//! # }); +//! # +//! let rent = client.get_account(&rent::ID)?; +//! let data: Rent = bincode::deserialize(&rent.data)?; +//! +//! Ok(()) +//! } +//! # +//! # let client = RpcClient::new(String::new()); +//! # print_sysvar_rent(&client)?; +//! # +//! # Ok::<(), anyhow::Error>(()) +//! ``` pub use crate::rent::Rent; use crate::{impl_sysvar_get, program_error::ProgramError, sysvar::Sysvar}; diff --git a/sdk/program/src/sysvar/rewards.rs b/sdk/program/src/sysvar/rewards.rs index 5aea49cefe..a9d8fb171d 100644 --- a/sdk/program/src/sysvar/rewards.rs +++ b/sdk/program/src/sysvar/rewards.rs @@ -1,5 +1,5 @@ -//! 
DEPRECATED: This sysvar can be removed once the pico-inflation feature is enabled -//! +//! This sysvar is deprecated and unused. + use crate::sysvar::Sysvar; crate::declare_sysvar_id!("SysvarRewards111111111111111111111111111111", Rewards); diff --git a/sdk/program/src/sysvar/slot_hashes.rs b/sdk/program/src/sysvar/slot_hashes.rs index e1f9d10a87..1aee84056c 100644 --- a/sdk/program/src/sysvar/slot_hashes.rs +++ b/sdk/program/src/sysvar/slot_hashes.rs @@ -1,7 +1,50 @@ -//! named accounts for synthesized data accounts for bank state, etc. +//! The most recent hashes of a slot's parent banks. //! -//! this account carries the Bank's most recent bank hashes for some N parents +//! The _slot hashes sysvar_ provides access to the [`SlotHashes`] type. //! +//! The [`Sysvar::from_account_info`] and [`Sysvar::get`] methods always return +//! [`ProgramError::UnsupportedSysvar`] because this sysvar account is too large +//! to process on-chain. Thus this sysvar cannot be accessed on chain, though +//! one can still use the [`SysvarId::id`], [`SysvarId::check_id`] and +//! [`Sysvar::size_of`] methods in an on-chain program, and it can be accessed +//! off-chain through RPC. +//! +//! [`SysvarId::id`]: crate::sysvar::SysvarId::id +//! [`SysvarId::check_id`]: crate::sysvar::SysvarId::check_id +//! +//! # Examples +//! +//! Calling via the RPC client: +//! +//! ``` +//! # use solana_program::example_mocks::solana_sdk; +//! # use solana_program::example_mocks::solana_client; +//! # use solana_sdk::account::Account; +//! # use solana_client::rpc_client::RpcClient; +//! # use solana_sdk::sysvar::slot_hashes::{self, SlotHashes}; +//! # use anyhow::Result; +//! # +//! fn print_sysvar_slot_hashes(client: &RpcClient) -> Result<()> { +//! # client.set_get_account_response(slot_hashes::ID, Account { +//! # lamports: 1009200, +//! # data: vec![1, 0, 0, 0, 0, 0, 0, 0, 86, 190, 235, 7, 0, 0, 0, 0, 133, 242, 94, 158, 223, 253, 207, 184, 227, 194, 235, 27, 176, 98, 73, 3, 175, 201, 224, 111, 21, 65, 73, 27, 137, 73, 229, 19, 255, 192, 193, 126], +//! # owner: solana_sdk::system_program::ID, +//! # executable: false, +//! # rent_epoch: 307, +//! # }); +//! # +//! let slot_hashes = client.get_account(&slot_hashes::ID)?; +//! let data: SlotHashes = bincode::deserialize(&slot_hashes.data)?; +//! +//! Ok(()) +//! } +//! # +//! # let client = RpcClient::new(String::new()); +//! # print_sysvar_slot_hashes(&client)?; +//! # +//! # Ok::<(), anyhow::Error>(()) +//! ``` + pub use crate::slot_hashes::SlotHashes; use crate::{account_info::AccountInfo, program_error::ProgramError, sysvar::Sysvar}; diff --git a/sdk/program/src/sysvar/slot_history.rs b/sdk/program/src/sysvar/slot_history.rs index a180a2398f..a742bf7b98 100644 --- a/sdk/program/src/sysvar/slot_history.rs +++ b/sdk/program/src/sysvar/slot_history.rs @@ -1,8 +1,52 @@ -//! named accounts for synthesized data accounts for bank state, etc. +//! A bitvector of slots present over the last epoch. //! -//! this account carries a bitvector of slots present over the past -//! epoch +//! The _slot history sysvar_ provides access to the [`SlotHistory`] type. //! +//! The [`Sysvar::from_account_info`] and [`Sysvar::get`] methods always return +//! [`ProgramError::UnsupportedSysvar`] because this sysvar account is too large +//! to process on-chain. Thus this sysvar cannot be accessed on chain, though +//! one can still use the [`SysvarId::id`], [`SysvarId::check_id`] and +//! [`Sysvar::size_of`] methods in an on-chain program, and it can be accessed +//! off-chain through RPC. 
+//!
+//! [`SysvarId::id`]: crate::sysvar::SysvarId::id
+//! [`SysvarId::check_id`]: crate::sysvar::SysvarId::check_id
+//!
+//! # Examples
+//!
+//! Calling via the RPC client:
+//!
+//! ```
+//! # use solana_program::example_mocks::solana_sdk;
+//! # use solana_program::example_mocks::solana_client;
+//! # use solana_sdk::account::Account;
+//! # use solana_client::rpc_client::RpcClient;
+//! # use solana_sdk::sysvar::slot_history::{self, SlotHistory};
+//! # use anyhow::Result;
+//! #
+//! fn print_sysvar_slot_history(client: &RpcClient) -> Result<()> {
+//! #     let slot_history = SlotHistory::default();
+//! #     let data: Vec<u8> = bincode::serialize(&slot_history)?;
+//! #     client.set_get_account_response(slot_history::ID, Account {
+//! #         lamports: 913326000,
+//! #         data,
+//! #         owner: solana_sdk::system_program::ID,
+//! #         executable: false,
+//! #         rent_epoch: 307,
+//! #     });
+//! #
+//!     let slot_history = client.get_account(&slot_history::ID)?;
+//!     let data: SlotHistory = bincode::deserialize(&slot_history.data)?;
+//!
+//!     Ok(())
+//! }
+//! #
+//! # let client = RpcClient::new(String::new());
+//! # print_sysvar_slot_history(&client)?;
+//! #
+//! # Ok::<(), anyhow::Error>(())
+//! ```
+
 use crate::sysvar::Sysvar;
 pub use crate::{
     account_info::AccountInfo, program_error::ProgramError, slot_history::SlotHistory,
diff --git a/sdk/program/src/sysvar/stake_history.rs b/sdk/program/src/sysvar/stake_history.rs
index ef52730489..9586ae16e0 100644
--- a/sdk/program/src/sysvar/stake_history.rs
+++ b/sdk/program/src/sysvar/stake_history.rs
@@ -1,7 +1,50 @@
-//! named accounts for synthesized data accounts for bank state, etc.
+//! History of stake activations and de-activations.
 //!
-//! this account carries history about stake activations and de-activations
+//! The _stake history sysvar_ provides access to the [`StakeHistory`] type.
 //!
+//! The [`Sysvar::get`] method always returns
+//! [`ProgramError::UnsupportedSysvar`], and in practice the data size of this
+//! sysvar is too large to process on chain. One can still use the
+//! [`SysvarId::id`], [`SysvarId::check_id`] and [`Sysvar::size_of`] methods in
+//! an on-chain program, and it can be accessed off-chain through RPC.
+//!
+//! [`ProgramError::UnsupportedSysvar`]: crate::program_error::ProgramError::UnsupportedSysvar
+//! [`SysvarId::id`]: crate::sysvar::SysvarId::id
+//! [`SysvarId::check_id`]: crate::sysvar::SysvarId::check_id
+//!
+//! # Examples
+//!
+//! Calling via the RPC client:
+//!
+//! ```
+//! # use solana_program::example_mocks::solana_sdk;
+//! # use solana_program::example_mocks::solana_client;
+//! # use solana_sdk::account::Account;
+//! # use solana_client::rpc_client::RpcClient;
+//! # use solana_sdk::sysvar::stake_history::{self, StakeHistory};
+//! # use anyhow::Result;
+//! #
+//! fn print_sysvar_stake_history(client: &RpcClient) -> Result<()> {
+//! #     client.set_get_account_response(stake_history::ID, Account {
+//! #         lamports: 114979200,
+//! #         data: vec![0, 0, 0, 0, 0, 0, 0, 0],
+//! #         owner: solana_sdk::system_program::ID,
+//! #         executable: false,
+//! #         rent_epoch: 307,
+//! #     });
+//! #
+//!     let stake_history = client.get_account(&stake_history::ID)?;
+//!     let data: StakeHistory = bincode::deserialize(&stake_history.data)?;
+//!
+//!     Ok(())
+//! }
+//! #
+//! # let client = RpcClient::new(String::new());
+//! # print_sysvar_stake_history(&client)?;
+//! #
+//! # Ok::<(), anyhow::Error>(())
+//!
``` + pub use crate::stake_history::StakeHistory; use crate::sysvar::Sysvar; diff --git a/programs/vote/src/authorized_voters.rs b/sdk/program/src/vote/authorized_voters.rs similarity index 97% rename from programs/vote/src/authorized_voters.rs rename to sdk/program/src/vote/authorized_voters.rs index 1951e35450..f361be237d 100644 --- a/programs/vote/src/authorized_voters.rs +++ b/sdk/program/src/vote/authorized_voters.rs @@ -1,7 +1,6 @@ use { - log::*, + crate::{clock::Epoch, pubkey::Pubkey}, serde_derive::{Deserialize, Serialize}, - solana_sdk::{clock::Epoch, pubkey::Pubkey}, std::collections::BTreeMap, }; @@ -93,12 +92,14 @@ impl AuthorizedVoters { // from the latest epoch before this one let res = self.authorized_voters.range(0..epoch).next_back(); + /* if res.is_none() { warn!( "Tried to query for the authorized voter of an epoch earlier than the current epoch. Earlier epochs have been purged" ); } + */ res.map(|(_, pubkey)| (*pubkey, false)) } else { diff --git a/programs/vote/src/vote_error.rs b/sdk/program/src/vote/error.rs similarity index 96% rename from programs/vote/src/vote_error.rs rename to sdk/program/src/vote/error.rs index b057b91db5..568cfc9678 100644 --- a/programs/vote/src/vote_error.rs +++ b/sdk/program/src/vote/error.rs @@ -1,9 +1,8 @@ //! Vote program errors use { - log::*, + crate::decode_error::DecodeError, num_derive::{FromPrimitive, ToPrimitive}, - solana_sdk::decode_error::DecodeError, thiserror::Error, }; @@ -77,7 +76,7 @@ impl DecodeError for VoteError { #[cfg(test)] mod tests { - use {super::*, solana_sdk::instruction::InstructionError}; + use {super::*, crate::instruction::InstructionError}; #[test] fn test_custom_error_decode() { diff --git a/programs/vote/src/vote_instruction.rs b/sdk/program/src/vote/instruction.rs similarity index 98% rename from programs/vote/src/vote_instruction.rs rename to sdk/program/src/vote/instruction.rs index 18b568376f..1ea13738f6 100644 --- a/programs/vote/src/vote_instruction.rs +++ b/sdk/program/src/vote/instruction.rs @@ -2,19 +2,19 @@ use { crate::{ - id, - vote_state::{ - CompactVoteStateUpdate, Vote, VoteAuthorize, VoteAuthorizeCheckedWithSeedArgs, - VoteAuthorizeWithSeedArgs, VoteInit, VoteState, VoteStateUpdate, - }, - }, - serde_derive::{Deserialize, Serialize}, - solana_sdk::{ hash::Hash, instruction::{AccountMeta, Instruction}, pubkey::Pubkey, system_instruction, sysvar, + vote::{ + program::id, + state::{ + CompactVoteStateUpdate, Vote, VoteAuthorize, VoteAuthorizeCheckedWithSeedArgs, + VoteAuthorizeWithSeedArgs, VoteInit, VoteState, VoteStateUpdate, + }, + }, }, + serde_derive::{Deserialize, Serialize}, }; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] @@ -103,20 +103,6 @@ pub enum VoteInstruction { /// 1. `[SIGNER]` Vote authority UpdateVoteStateSwitch(VoteStateUpdate, Hash), - /// Update the onchain vote state for the signer. - /// - /// # Account references - /// 0. `[Write]` Vote account to vote with - /// 1. `[SIGNER]` Vote authority - CompactUpdateVoteState(CompactVoteStateUpdate), - - /// Update the onchain vote state for the signer along with a switching proof. - /// - /// # Account references - /// 0. `[Write]` Vote account to vote with - /// 1. `[SIGNER]` Vote authority - CompactUpdateVoteStateSwitch(CompactVoteStateUpdate, Hash), - /// Given that the current Voter or Withdrawer authority is a derived key, /// this instruction allows someone who can sign for that derived key's /// base key to authorize a new Voter or Withdrawer for a vote account. 
@@ -140,6 +126,20 @@ pub enum VoteInstruction {
     ///   2. `[SIGNER]` Base key of current Voter or Withdrawer authority's derived key
     ///   3. `[SIGNER]` New vote or withdraw authority
     AuthorizeCheckedWithSeed(VoteAuthorizeCheckedWithSeedArgs),
+
+    /// Update the onchain vote state for the signer.
+    ///
+    /// # Account references
+    ///   0. `[Write]` Vote account to vote with
+    ///   1. `[SIGNER]` Vote authority
+    CompactUpdateVoteState(CompactVoteStateUpdate),
+
+    /// Update the onchain vote state for the signer along with a switching proof.
+    ///
+    /// # Account references
+    ///   0. `[Write]` Vote account to vote with
+    ///   1. `[SIGNER]` Vote authority
+    CompactUpdateVoteStateSwitch(CompactVoteStateUpdate, Hash),
 }
 
 fn initialize_account(vote_pubkey: &Pubkey, vote_init: &VoteInit) -> Instruction {
diff --git a/sdk/program/src/vote/mod.rs b/sdk/program/src/vote/mod.rs
new file mode 100644
index 0000000000..9b926a051e
--- /dev/null
+++ b/sdk/program/src/vote/mod.rs
@@ -0,0 +1,11 @@
+//! The [vote native program][np].
+//!
+//! [np]: https://docs.solana.com/developing/runtime-facilities/programs#vote-program
+pub mod authorized_voters;
+pub mod error;
+pub mod instruction;
+pub mod state;
+
+pub mod program {
+    crate::declare_id!("Vote111111111111111111111111111111111111111");
+}
diff --git a/sdk/program/src/vote/state/mod.rs b/sdk/program/src/vote/state/mod.rs
new file mode 100644
index 0000000000..5bd703a0eb
--- /dev/null
+++ b/sdk/program/src/vote/state/mod.rs
@@ -0,0 +1,1219 @@
+#![allow(clippy::integer_arithmetic)]
+//! Vote state
+
+#[cfg(test)]
+use crate::epoch_schedule::MAX_LEADER_SCHEDULE_EPOCH_OFFSET;
+use {
+    crate::{
+        clock::{Epoch, Slot, UnixTimestamp},
+        hash::Hash,
+        instruction::InstructionError,
+        pubkey::Pubkey,
+        rent::Rent,
+        short_vec,
+        sysvar::clock::Clock,
+        vote::{authorized_voters::AuthorizedVoters, error::VoteError},
+    },
+    bincode::{deserialize, serialize_into, ErrorKind},
+    serde_derive::{Deserialize, Serialize},
+    std::{collections::VecDeque, fmt::Debug},
+};
+
+mod vote_state_0_23_5;
+pub mod vote_state_versions;
+pub use vote_state_versions::*;
+
+// Maximum number of votes to keep around, tightly coupled with epoch_schedule::MINIMUM_SLOTS_PER_EPOCH
+pub const MAX_LOCKOUT_HISTORY: usize = 31;
+pub const INITIAL_LOCKOUT: usize = 2;
+
+// Maximum number of credits history to keep around
+pub const MAX_EPOCH_CREDITS_HISTORY: usize = 64;
+
+// Offset of VoteState::prior_voters, for determining initialization status without deserialization
+const DEFAULT_PRIOR_VOTERS_OFFSET: usize = 82;
+
+#[frozen_abi(digest = "Ch2vVEwos2EjAVqSHCyJjnN2MNX1yrpapZTGhMSCjWUH")]
+#[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Clone, AbiExample)]
+pub struct Vote {
+    /// A stack of votes starting with the oldest vote
+    pub slots: Vec<Slot>,
+    /// signature of the bank's state at the last slot
+    pub hash: Hash,
+    /// processing timestamp of last slot
+    pub timestamp: Option<UnixTimestamp>,
+}
+
+impl Vote {
+    pub fn new(slots: Vec<Slot>, hash: Hash) -> Self {
+        Self {
+            slots,
+            hash,
+            timestamp: None,
+        }
+    }
+}
+
+#[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Copy, Clone, AbiExample)]
+pub struct Lockout {
+    pub slot: Slot,
+    pub confirmation_count: u32,
+}
+
+impl Lockout {
+    pub fn new(slot: Slot) -> Self {
+        Self {
+            slot,
+            confirmation_count: 1,
+        }
+    }
+
+    // The number of slots for which this vote is locked
+    pub fn lockout(&self) -> u64 {
+        (INITIAL_LOCKOUT as u64).pow(self.confirmation_count)
+    }
+
+    // The last slot at which a vote is still locked out. Validators should not
+    // vote on a slot in another fork which is less than or equal to this slot
+    // to avoid having their stake slashed.
+    pub fn last_locked_out_slot(&self) -> Slot {
+        self.slot + self.lockout()
+    }
+
+    pub fn is_locked_out_at_slot(&self, slot: Slot) -> bool {
+        self.last_locked_out_slot() >= slot
+    }
+}
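To make the lockout arithmetic above concrete, here is a rough doc-test-style sketch using only the `Lockout` fields and methods just defined (it would live inside this module, where `Lockout` and `INITIAL_LOCKOUT` are in scope):

```rust
#[test]
fn lockout_doubling_example() {
    // A fresh vote starts with confirmation_count 1, so it is locked out
    // for INITIAL_LOCKOUT^1 = 2 slots past the voted slot.
    let mut lockout = Lockout::new(100);
    assert_eq!(lockout.lockout(), 2);
    assert_eq!(lockout.last_locked_out_slot(), 102);

    // Each additional confirmation doubles the lockout: 2^3 = 8 slots,
    // so the vote stays locked out through slot 108 but not 109.
    lockout.confirmation_count = 3;
    assert_eq!(lockout.lockout(), 8);
    assert!(lockout.is_locked_out_at_slot(108));
    assert!(!lockout.is_locked_out_at_slot(109));
}
```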
+#[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Copy, Clone, AbiExample)]
+pub struct CompactLockout<T: Sized> {
+    // Offset to the next vote, 0 if this is the last vote in the tower
+    pub offset: T,
+    // Confirmation count, guaranteed to be < 32
+    pub confirmation_count: u8,
+}
+
+impl<T: Sized> CompactLockout<T> {
+    pub fn new(offset: T) -> Self {
+        Self {
+            offset,
+            confirmation_count: 1,
+        }
+    }
+
+    // The number of slots for which this vote is locked
+    pub fn lockout(&self) -> u64 {
+        (INITIAL_LOCKOUT as u64).pow(self.confirmation_count.into())
+    }
+}
+
+#[frozen_abi(digest = "GwJfVFsATSj7nvKwtUkHYzqPRaPY6SLxPGXApuCya3x5")]
+#[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Clone, AbiExample)]
+pub struct VoteStateUpdate {
+    /// The proposed tower
+    pub lockouts: VecDeque<Lockout>,
+    /// The proposed root
+    pub root: Option<Slot>,
+    /// signature of the bank's state at the last slot
+    pub hash: Hash,
+    /// processing timestamp of last slot
+    pub timestamp: Option<UnixTimestamp>,
+}
+
+impl From<Vec<(Slot, u32)>> for VoteStateUpdate {
+    fn from(recent_slots: Vec<(Slot, u32)>) -> Self {
+        let lockouts: VecDeque<Lockout> = recent_slots
+            .into_iter()
+            .map(|(slot, confirmation_count)| Lockout {
+                slot,
+                confirmation_count,
+            })
+            .collect();
+        Self {
+            lockouts,
+            root: None,
+            hash: Hash::default(),
+            timestamp: None,
+        }
+    }
+}
+
+impl VoteStateUpdate {
+    pub fn new(lockouts: VecDeque<Lockout>, root: Option<Slot>, hash: Hash) -> Self {
+        Self {
+            lockouts,
+            root,
+            hash,
+            timestamp: None,
+        }
+    }
+
+    pub fn slots(&self) -> Vec<Slot> {
+        self.lockouts.iter().map(|lockout| lockout.slot).collect()
+    }
+}
+
+/// Ignoring overhead, in a full `VoteStateUpdate` the lockouts take up
+/// 31 * (64 + 32) = 2976 bits.
+///
+/// In this schema we separate the votes into 3 separate lockout structures
+/// and store offsets rather than slot numbers, allowing us to use smaller fields.
+///
+/// In a full `CompactVoteStateUpdate` the lockouts take up
+/// 64 + (32 + 8) * 16 + (16 + 8) * 8 + (8 + 8) * 6 = 992 bits,
+/// allowing us to greatly reduce block size.
+#[frozen_abi(digest = "EeMnyxPUyd3hK7UQ8BcWDW8qrsdXA9F6ZUoAWAh1nDxX")]
+#[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Clone, AbiExample)]
+pub struct CompactVoteStateUpdate {
+    /// The proposed root, u64::MAX if there is no root
+    pub root: Slot,
+    /// The offset from the root (or 0 if no root) to the first vote
+    pub root_to_first_vote_offset: u64,
+    /// Part of the proposed tower, votes with confirmation_count > 15
+    #[serde(with = "short_vec")]
+    pub lockouts_32: Vec<CompactLockout<u32>>,
+    /// Part of the proposed tower, votes with 15 >= confirmation_count > 7
+    #[serde(with = "short_vec")]
+    pub lockouts_16: Vec<CompactLockout<u16>>,
+    /// Part of the proposed tower, votes with 7 >= confirmation_count
+    #[serde(with = "short_vec")]
+    pub lockouts_8: Vec<CompactLockout<u8>>,
+
+    /// Signature of the bank's state at the last slot
+    pub hash: Hash,
+    /// Processing timestamp of last slot
+    pub timestamp: Option<UnixTimestamp>,
+}
+
+impl From<Vec<(Slot, u32)>> for CompactVoteStateUpdate {
+    fn from(recent_slots: Vec<(Slot, u32)>) -> Self {
+        let lockouts: VecDeque<Lockout> = recent_slots
+            .into_iter()
+            .map(|(slot, confirmation_count)| Lockout {
+                slot,
+                confirmation_count,
+            })
+            .collect();
+        Self::new(lockouts, None, Hash::default())
+    }
+}
+
+impl CompactVoteStateUpdate {
+    pub fn new(mut lockouts: VecDeque<Lockout>, root: Option<Slot>, hash: Hash) -> Self {
+        if lockouts.is_empty() {
+            return Self::default();
+        }
+        let mut cur_slot = root.unwrap_or(0u64);
+        let mut cur_confirmation_count = 0;
+        let offset = lockouts
+            .pop_front()
+            .map(
+                |Lockout {
+                     slot,
+                     confirmation_count,
+                 }| {
+                    assert!(confirmation_count < 32);
+
+                    let offset = slot - cur_slot;
+                    cur_slot = slot;
+                    cur_confirmation_count = confirmation_count;
+                    offset
+                },
+            )
+            .expect("Tower should not be empty");
+        let mut lockouts_32 = Vec::new();
+        let mut lockouts_16 = Vec::new();
+        let mut lockouts_8 = Vec::new();
+
+        for Lockout {
+            slot,
+            confirmation_count,
+        } in lockouts
+        {
+            assert!(confirmation_count < 32);
+            let offset = slot - cur_slot;
+            if cur_confirmation_count > 15 {
+                lockouts_32.push(CompactLockout {
+                    offset: offset.try_into().unwrap(),
+                    confirmation_count: cur_confirmation_count.try_into().unwrap(),
+                });
+            } else if cur_confirmation_count > 7 {
+                lockouts_16.push(CompactLockout {
+                    offset: offset.try_into().unwrap(),
+                    confirmation_count: cur_confirmation_count.try_into().unwrap(),
+                });
+            } else {
+                lockouts_8.push(CompactLockout {
+                    offset: offset.try_into().unwrap(),
+                    confirmation_count: cur_confirmation_count.try_into().unwrap(),
+                })
+            }
+
+            cur_slot = slot;
+            cur_confirmation_count = confirmation_count;
+        }
+        // Last vote should be at the top of tower, so we don't have to explicitly store it
+        assert!(cur_confirmation_count == 1);
+        Self {
+            root: root.unwrap_or(u64::MAX),
+            root_to_first_vote_offset: offset,
+            lockouts_32,
+            lockouts_16,
+            lockouts_8,
+            hash,
+            timestamp: None,
+        }
+    }
+
+    pub fn root(&self) -> Option<Slot> {
+        if self.root == u64::MAX {
+            None
+        } else {
+            Some(self.root)
+        }
+    }
+
+    pub fn slots(&self) -> Vec<Slot> {
+        std::iter::once(self.root_to_first_vote_offset)
+            .chain(self.lockouts_32.iter().map(|lockout| lockout.offset.into()))
+            .chain(self.lockouts_16.iter().map(|lockout| lockout.offset.into()))
+            .chain(self.lockouts_8.iter().map(|lockout| lockout.offset.into()))
+            .scan(self.root().unwrap_or(0), |prev_slot, offset| {
+                let slot = *prev_slot + offset;
+                *prev_slot = slot;
+                Some(slot)
+            })
+            .collect()
+    }
+}
+impl From<CompactVoteStateUpdate> for VoteStateUpdate {
+    fn from(vote_state_update: CompactVoteStateUpdate) -> Self {
+        let lockouts = vote_state_update
+            .lockouts_32
+            .iter()
+            .map(|lockout| (lockout.offset.into(), lockout.confirmation_count))
+            .chain(
+                vote_state_update
+                    .lockouts_16
+                    .iter()
+                    .map(|lockout| (lockout.offset.into(), lockout.confirmation_count)),
+            )
+            .chain(
+                vote_state_update
+                    .lockouts_8
+                    .iter()
+                    .map(|lockout| (lockout.offset.into(), lockout.confirmation_count)),
+            )
+            .chain(
+                // To pick up the last element
+                std::iter::once((0, 1)),
+            )
+            .scan(
+                vote_state_update.root().unwrap_or(0) + vote_state_update.root_to_first_vote_offset,
+                |slot, (offset, confirmation_count): (u64, u8)| {
+                    let cur_slot = *slot;
+                    *slot += offset;
+                    Some(Lockout {
+                        slot: cur_slot,
+                        confirmation_count: confirmation_count.into(),
+                    })
+                },
+            )
+            .collect();
+        Self {
+            lockouts,
+            root: vote_state_update.root(),
+            hash: vote_state_update.hash,
+            timestamp: vote_state_update.timestamp,
+        }
+    }
+}
+
+impl From<VoteStateUpdate> for CompactVoteStateUpdate {
+    fn from(vote_state_update: VoteStateUpdate) -> Self {
+        CompactVoteStateUpdate::new(
+            vote_state_update.lockouts,
+            vote_state_update.root,
+            vote_state_update.hash,
+        )
+    }
+}
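A rough sketch of the two `From` conversions above round-tripping a small tower, written as an in-module test (the concrete sizes depend on bincode's encoding, but the compact form's offset-based layout serializes smaller than the full form for the same tower):

```rust
#[test]
fn compact_round_trip_example() {
    // A tower whose newest vote has confirmation_count 1, as the compact
    // encoding requires (see the assert in `CompactVoteStateUpdate::new`).
    let update = VoteStateUpdate::from(vec![(2, 4), (4, 3), (6, 2), (7, 1)]);

    // The compact form stores offsets instead of absolute slots, so it
    // serializes to fewer bytes than the full form.
    let full_size = bincode::serialized_size(&update).unwrap();
    let compact = CompactVoteStateUpdate::from(update.clone());
    let compact_size = bincode::serialized_size(&compact).unwrap();
    assert!(compact_size < full_size);

    // Converting back recovers the original tower exactly.
    assert_eq!(VoteStateUpdate::from(compact), update);
}
```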
+#[derive(Default, Serialize, Deserialize, Debug, PartialEq, Eq, Clone, Copy)]
+pub struct VoteInit {
+    pub node_pubkey: Pubkey,
+    pub authorized_voter: Pubkey,
+    pub authorized_withdrawer: Pubkey,
+    pub commission: u8,
+}
+
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone, Copy)]
+pub enum VoteAuthorize {
+    Voter,
+    Withdrawer,
+}
+
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
+pub struct VoteAuthorizeWithSeedArgs {
+    pub authorization_type: VoteAuthorize,
+    pub current_authority_derived_key_owner: Pubkey,
+    pub current_authority_derived_key_seed: String,
+    pub new_authority: Pubkey,
+}
+
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
+pub struct VoteAuthorizeCheckedWithSeedArgs {
+    pub authorization_type: VoteAuthorize,
+    pub current_authority_derived_key_owner: Pubkey,
+    pub current_authority_derived_key_seed: String,
+}
+
+#[derive(Debug, Default, Serialize, Deserialize, PartialEq, Eq, Clone, AbiExample)]
+pub struct BlockTimestamp {
+    pub slot: Slot,
+    pub timestamp: UnixTimestamp,
+}
+
+// this is how many epochs a voter can be remembered for slashing
+const MAX_ITEMS: usize = 32;
+
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, AbiExample)]
+pub struct CircBuf<I> {
+    buf: [I; MAX_ITEMS],
+    /// next pointer
+    idx: usize,
+    is_empty: bool,
+}
+
+impl<I: Default + Copy> Default for CircBuf<I> {
+    fn default() -> Self {
+        Self {
+            buf: [I::default(); MAX_ITEMS],
+            idx: MAX_ITEMS - 1,
+            is_empty: true,
+        }
+    }
+}
+
+impl<I> CircBuf<I> {
+    pub fn append(&mut self, item: I) {
+        // remember prior delegate and when we switched, to support later slashing
+        self.idx += 1;
+        self.idx %= MAX_ITEMS;
+
+        self.buf[self.idx] = item;
+        self.is_empty = false;
+    }
+
+    pub fn buf(&self) -> &[I; MAX_ITEMS] {
+        &self.buf
+    }
+
+    pub fn last(&self) -> Option<&I> {
+        if !self.is_empty {
+            Some(&self.buf[self.idx])
+        } else {
+            None
+        }
+    }
+}
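A minimal sketch of `CircBuf`'s wrap-around behavior, as an in-module test (it must live in this module since `MAX_ITEMS` and the buffer's fields are private):

```rust
#[test]
fn circbuf_keeps_latest_item() {
    let mut buf = CircBuf::<u64>::default();
    assert!(buf.last().is_none());

    // Fill past capacity; the buffer wraps, overwriting the oldest
    // entries, and `last` always tracks the most recent append.
    for i in 0..(MAX_ITEMS as u64) + 3 {
        buf.append(i);
    }
    assert_eq!(buf.last(), Some(&(MAX_ITEMS as u64 + 2)));
}
```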
+#[frozen_abi(digest = "4oxo6mBc8zrZFA89RgKsNyMqqM52iVrCphsWfaHjaAAY")]
+#[derive(Debug, Default, Serialize, Deserialize, PartialEq, Eq, Clone, AbiExample)]
+pub struct VoteState {
+    /// the node that votes in this account
+    pub node_pubkey: Pubkey,
+
+    /// the signer for withdrawals
+    pub authorized_withdrawer: Pubkey,
+    /// percentage (0-100) that represents what part of a rewards
+    /// payout should be given to this VoteAccount
+    pub commission: u8,
+
+    pub votes: VecDeque<Lockout>,
+
+    // This is usually the last Lockout which was popped from self.votes.
+    // However, it can be an arbitrary slot when used inside Tower.
+    pub root_slot: Option<Slot>,
+
+    /// the signer for vote transactions
+    authorized_voters: AuthorizedVoters,
+
+    /// history of prior authorized voters and the epochs for which
+    /// they were set, the bottom end of the range is inclusive,
+    /// the top of the range is exclusive
+    prior_voters: CircBuf<(Pubkey, Epoch, Epoch)>,
+
+    /// history of how many credits earned by the end of each epoch
+    /// each tuple is (Epoch, credits, prev_credits)
+    pub epoch_credits: Vec<(Epoch, u64, u64)>,
+
+    /// most recent timestamp submitted with a vote
+    pub last_timestamp: BlockTimestamp,
+}
+
+impl VoteState {
+    pub fn new(vote_init: &VoteInit, clock: &Clock) -> Self {
+        Self {
+            node_pubkey: vote_init.node_pubkey,
+            authorized_voters: AuthorizedVoters::new(clock.epoch, vote_init.authorized_voter),
+            authorized_withdrawer: vote_init.authorized_withdrawer,
+            commission: vote_init.commission,
+            ..VoteState::default()
+        }
+    }
+
+    pub fn get_authorized_voter(&self, epoch: Epoch) -> Option<Pubkey> {
+        self.authorized_voters.get_authorized_voter(epoch)
+    }
+
+    pub fn authorized_voters(&self) -> &AuthorizedVoters {
+        &self.authorized_voters
+    }
+
+    pub fn prior_voters(&mut self) -> &CircBuf<(Pubkey, Epoch, Epoch)> {
+        &self.prior_voters
+    }
+
+    pub fn get_rent_exempt_reserve(rent: &Rent) -> u64 {
+        rent.minimum_balance(VoteState::size_of())
+    }
+
+    /// Upper limit on the size of the Vote State
+    /// when votes.len() is MAX_LOCKOUT_HISTORY.
+    pub const fn size_of() -> usize {
+        3731 // see test_vote_state_size_of.
+    }
+
+    pub fn deserialize(input: &[u8]) -> Result<VoteState, InstructionError> {
+        deserialize::<VoteStateVersions>(input)
+            .map(|versioned| versioned.convert_to_current())
+            .map_err(|_| InstructionError::InvalidAccountData)
+    }
+
+    pub fn serialize(
+        versioned: &VoteStateVersions,
+        output: &mut [u8],
+    ) -> Result<(), InstructionError> {
+        serialize_into(output, versioned).map_err(|err| match *err {
+            ErrorKind::SizeLimit => InstructionError::AccountDataTooSmall,
+            _ => InstructionError::GenericError,
+        })
+    }
+
+    /// Returns the commission split as a (voter_portion, staker_portion, was_split) tuple.
+    ///
+    /// If the commission calculation is 100% one way or the other,
+    /// indicate with false for was_split.
+    pub fn commission_split(&self, on: u64) -> (u64, u64, bool) {
+        match self.commission.min(100) {
+            0 => (0, on, false),
+            100 => (on, 0, false),
+            split => {
+                let on = u128::from(on);
+                // Calculate mine and theirs independently and symmetrically instead of
+                // using the remainder of the other to treat them strictly equally.
+                // This is also to cancel the rewarding if either of the parties
+                // should receive only fractional lamports, resulting in not being rewarded at all.
+                // Thus, note that we intentionally discard any residual fractional lamports.
+                let mine = on * u128::from(split) / 100u128;
+                let theirs = on * u128::from(100 - split) / 100u128;
+
+                (mine as u64, theirs as u64, true)
+            }
+        }
+    }
+
+    /// Returns whether the vote state contains the slot `candidate_slot`
+    pub fn contains_slot(&self, candidate_slot: Slot) -> bool {
+        self.votes
+            .binary_search_by(|lockout| lockout.slot.cmp(&candidate_slot))
+            .is_ok()
+    }
+
+    #[cfg(test)]
+    fn get_max_sized_vote_state() -> VoteState {
+        let mut authorized_voters = AuthorizedVoters::default();
+        for i in 0..=MAX_LEADER_SCHEDULE_EPOCH_OFFSET {
+            authorized_voters.insert(i, Pubkey::new_unique());
+        }
+
+        VoteState {
+            votes: VecDeque::from(vec![Lockout::default(); MAX_LOCKOUT_HISTORY]),
+            root_slot: Some(std::u64::MAX),
+            epoch_credits: vec![(0, 0, 0); MAX_EPOCH_CREDITS_HISTORY],
+            authorized_voters,
+            ..Self::default()
+        }
+    }
+
+    pub fn process_next_vote_slot(&mut self, next_vote_slot: Slot, epoch: Epoch) {
+        // Ignore votes for slots earlier than we already have votes for
+        if self
+            .last_voted_slot()
+            .map_or(false, |last_voted_slot| next_vote_slot <= last_voted_slot)
+        {
+            return;
+        }
+
+        let vote = Lockout::new(next_vote_slot);
+
+        self.pop_expired_votes(next_vote_slot);
+
+        // Once the stack is full, pop the oldest lockout and distribute rewards
+        if self.votes.len() == MAX_LOCKOUT_HISTORY {
+            let vote = self.votes.pop_front().unwrap();
+            self.root_slot = Some(vote.slot);
+
+            self.increment_credits(epoch, 1);
+        }
+        self.votes.push_back(vote);
+        self.double_lockouts();
+    }
+
+    /// increment credits, record credits for last epoch if new epoch
+    pub fn increment_credits(&mut self, epoch: Epoch, credits: u64) {
+        // increment credits, record by epoch
+
+        // never seen a credit
+        if self.epoch_credits.is_empty() {
+            self.epoch_credits.push((epoch, 0, 0));
+        } else if epoch != self.epoch_credits.last().unwrap().0 {
+            let (_, credits, prev_credits) = *self.epoch_credits.last().unwrap();
+
+            if credits != prev_credits {
+                // if credits were earned previous epoch
+                // append entry at end of list for the new epoch
+                self.epoch_credits.push((epoch, credits, credits));
+            } else {
+                // else just move the current epoch
+                self.epoch_credits.last_mut().unwrap().0 = epoch;
+            }
+
+            // Remove too old epoch_credits
+            if self.epoch_credits.len() > MAX_EPOCH_CREDITS_HISTORY {
+                self.epoch_credits.remove(0);
+            }
+        }
+
+        self.epoch_credits.last_mut().unwrap().1 += credits;
+    }
+
+    pub fn nth_recent_vote(&self, position: usize) -> Option<&Lockout> {
+        if position < self.votes.len() {
+            let pos = self.votes.len() - 1 - position;
+            self.votes.get(pos)
+        } else {
+            None
+        }
+    }
+
+    pub fn last_lockout(&self) -> Option<&Lockout> {
+        self.votes.back()
+    }
+
+    pub fn last_voted_slot(&self) -> Option<Slot> {
+        self.last_lockout().map(|v| v.slot)
+    }
+
+    // Up to MAX_LOCKOUT_HISTORY many recent unexpired
+    // vote slots are pushed onto the stack.
+    pub fn tower(&self) -> Vec<Slot> {
+        self.votes.iter().map(|v| v.slot).collect()
+    }
+
+    pub fn current_epoch(&self) -> Epoch {
+        if self.epoch_credits.is_empty() {
+            0
+        } else {
+            self.epoch_credits.last().unwrap().0
+        }
+    }
+
+    /// Number of "credits" owed to this account from the mining pool. Submit this
+    /// VoteState to the Rewards program to trade credits for lamports.
+    pub fn credits(&self) -> u64 {
+        if self.epoch_credits.is_empty() {
+            0
+        } else {
+            self.epoch_credits.last().unwrap().1
+        }
+    }
+    /// Number of "credits" owed to this account from the mining pool on a per-epoch basis,
+    /// starting from credits observed.
+    /// Each tuple of (Epoch, u64, u64) is read as (epoch, credits, prev_credits), where
+    /// credits for each epoch is credits - prev_credits; while redundant this makes
+    /// calculating rewards over partial epochs nice and simple
+    pub fn epoch_credits(&self) -> &Vec<(Epoch, u64, u64)> {
+        &self.epoch_credits
+    }
+
+    pub fn set_new_authorized_voter<F>(
+        &mut self,
+        authorized_pubkey: &Pubkey,
+        current_epoch: Epoch,
+        target_epoch: Epoch,
+        verify: F,
+    ) -> Result<(), InstructionError>
+    where
+        F: Fn(Pubkey) -> Result<(), InstructionError>,
+    {
+        let epoch_authorized_voter = self.get_and_update_authorized_voter(current_epoch)?;
+        verify(epoch_authorized_voter)?;
+
+        // The offset in slots `n` on which the target_epoch
+        // (default value `DEFAULT_LEADER_SCHEDULE_SLOT_OFFSET`) is
+        // calculated is the number of slots available from the
+        // first slot `S` of an epoch in which to set a new voter for
+        // the epoch at `S` + `n`
+        if self.authorized_voters.contains(target_epoch) {
+            return Err(VoteError::TooSoonToReauthorize.into());
+        }
+
+        // Get the latest authorized_voter
+        let (latest_epoch, latest_authorized_pubkey) = self
+            .authorized_voters
+            .last()
+            .ok_or(InstructionError::InvalidAccountData)?;
+
+        // If we're not setting the same pubkey as authorized pubkey again,
+        // then update the list of prior voters to mark the expiration
+        // of the old authorized pubkey
+        if latest_authorized_pubkey != authorized_pubkey {
+            // Update the epoch ranges of authorized pubkeys that will be expired
+            let epoch_of_last_authorized_switch =
+                self.prior_voters.last().map(|range| range.2).unwrap_or(0);
+
+            // target_epoch must:
+            // 1) Be monotonically increasing due to the clock always
+            //    moving forward
+            // 2) not be equal to latest epoch otherwise this
+            //    function would have returned TooSoonToReauthorize error
+            //    above
+            assert!(target_epoch > *latest_epoch);
+
+            // Commit the new state
+            self.prior_voters.append((
+                *latest_authorized_pubkey,
+                epoch_of_last_authorized_switch,
+                target_epoch,
+            ));
+        }
+
+        self.authorized_voters
+            .insert(target_epoch, *authorized_pubkey);
+
+        Ok(())
+    }
+
+    pub fn get_and_update_authorized_voter(
+        &mut self,
+        current_epoch: Epoch,
+    ) -> Result<Pubkey, InstructionError> {
+        let pubkey = self
+            .authorized_voters
+            .get_and_cache_authorized_voter_for_epoch(current_epoch)
+            .ok_or(InstructionError::InvalidAccountData)?;
+        self.authorized_voters
+            .purge_authorized_voters(current_epoch);
+        Ok(pubkey)
+    }
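A rough in-module sketch of the tower mechanics described here, tying together `process_next_vote_slot` and the `pop_expired_votes`/`double_lockouts` helpers defined just below: voting on three consecutive slots doubles the oldest vote's lockout twice.

```rust
#[test]
fn consecutive_votes_double_lockouts() {
    let mut vote_state = VoteState::default();
    for slot in 0..3 {
        vote_state.process_next_vote_slot(slot, 0);
    }
    // The oldest vote has been confirmed twice more since it was cast,
    // so its lockout has doubled twice: 2^3 = 8 slots.
    assert_eq!(
        vote_state.votes[0],
        Lockout {
            slot: 0,
            confirmation_count: 3
        }
    );
    assert_eq!(vote_state.tower(), vec![0, 1, 2]);
}
```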
+    // Pop all recent votes that are not locked out at the next vote slot. This
+    // allows validators to switch forks once their votes for another fork have
+    // expired. This also allows validators to continue voting on recent blocks in
+    // the same fork without increasing lockouts.
+    pub fn pop_expired_votes(&mut self, next_vote_slot: Slot) {
+        while let Some(vote) = self.last_lockout() {
+            if !vote.is_locked_out_at_slot(next_vote_slot) {
+                self.votes.pop_back();
+            } else {
+                break;
+            }
+        }
+    }
+
+    pub fn double_lockouts(&mut self) {
+        let stack_depth = self.votes.len();
+        for (i, v) in self.votes.iter_mut().enumerate() {
+            // Don't increase the lockout for this vote until we get more confirmations
+            // than the max number of confirmations this vote has seen
+            if stack_depth > i + v.confirmation_count as usize {
+                v.confirmation_count += 1;
+            }
+        }
+    }
+
+    pub fn process_timestamp(
+        &mut self,
+        slot: Slot,
+        timestamp: UnixTimestamp,
+    ) -> Result<(), VoteError> {
+        if (slot < self.last_timestamp.slot || timestamp < self.last_timestamp.timestamp)
+            || (slot == self.last_timestamp.slot
+                && BlockTimestamp { slot, timestamp } != self.last_timestamp
+                && self.last_timestamp.slot != 0)
+        {
+            return Err(VoteError::TimestampTooOld);
+        }
+        self.last_timestamp = BlockTimestamp { slot, timestamp };
+        Ok(())
+    }
+
+    pub fn is_correct_size_and_initialized(data: &[u8]) -> bool {
+        const VERSION_OFFSET: usize = 4;
+        data.len() == VoteState::size_of()
+            && data[VERSION_OFFSET..VERSION_OFFSET + DEFAULT_PRIOR_VOTERS_OFFSET]
+                != [0; DEFAULT_PRIOR_VOTERS_OFFSET]
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_vote_serialize() {
+        let mut buffer: Vec<u8> = vec![0; VoteState::size_of()];
+        let mut vote_state = VoteState::default();
+        vote_state
+            .votes
+            .resize(MAX_LOCKOUT_HISTORY, Lockout::default());
+        vote_state.root_slot = Some(1);
+        let versioned = VoteStateVersions::new_current(vote_state);
+        assert!(VoteState::serialize(&versioned, &mut buffer[0..4]).is_err());
+        VoteState::serialize(&versioned, &mut buffer).unwrap();
+        assert_eq!(
+            VoteState::deserialize(&buffer).unwrap(),
+            versioned.convert_to_current()
+        );
+    }
+
+    #[test]
+    fn test_vote_state_commission_split() {
+        let vote_state = VoteState::default();
+
+        assert_eq!(vote_state.commission_split(1), (0, 1, false));
+
+        let mut vote_state = VoteState {
+            commission: std::u8::MAX,
+            ..VoteState::default()
+        };
+        assert_eq!(vote_state.commission_split(1), (1, 0, false));
+
+        vote_state.commission = 99;
+        assert_eq!(vote_state.commission_split(10), (9, 0, true));
+
+        vote_state.commission = 1;
+        assert_eq!(vote_state.commission_split(10), (0, 9, true));
+
+        vote_state.commission = 50;
+        let (voter_portion, staker_portion, was_split) = vote_state.commission_split(10);
+
+        assert_eq!((voter_portion, staker_portion, was_split), (5, 5, true));
+    }
+
+    #[test]
+    fn test_vote_state_epoch_credits() {
+        let mut vote_state = VoteState::default();
+
+        assert_eq!(vote_state.credits(), 0);
+        assert_eq!(vote_state.epoch_credits().clone(), vec![]);
+
+        let mut expected = vec![];
+        let mut credits = 0;
+        let epochs = (MAX_EPOCH_CREDITS_HISTORY + 2) as u64;
+        for epoch in 0..epochs {
+            for _j in 0..epoch {
+                vote_state.increment_credits(epoch, 1);
+                credits += 1;
+            }
+            expected.push((epoch, credits, credits - epoch));
+        }
+
+        while expected.len() > MAX_EPOCH_CREDITS_HISTORY {
+            expected.remove(0);
+        }
+
+        assert_eq!(vote_state.credits(), credits);
+        assert_eq!(vote_state.epoch_credits().clone(), expected);
+    }
+
+    #[test]
+    fn test_vote_state_epoch0_no_credits() {
+        let mut vote_state = VoteState::default();
+
+        assert_eq!(vote_state.epoch_credits().len(), 0);
+        vote_state.increment_credits(1, 1);
+        assert_eq!(vote_state.epoch_credits().len(), 1);
+
+        vote_state.increment_credits(2, 1);
+
assert_eq!(vote_state.epoch_credits().len(), 2); + } + + #[test] + fn test_vote_state_increment_credits() { + let mut vote_state = VoteState::default(); + + let credits = (MAX_EPOCH_CREDITS_HISTORY + 2) as u64; + for i in 0..credits { + vote_state.increment_credits(i as u64, 1); + } + assert_eq!(vote_state.credits(), credits); + assert!(vote_state.epoch_credits().len() <= MAX_EPOCH_CREDITS_HISTORY); + } + + #[test] + fn test_vote_process_timestamp() { + let (slot, timestamp) = (15, 1_575_412_285); + let mut vote_state = VoteState { + last_timestamp: BlockTimestamp { slot, timestamp }, + ..VoteState::default() + }; + + assert_eq!( + vote_state.process_timestamp(slot - 1, timestamp + 1), + Err(VoteError::TimestampTooOld) + ); + assert_eq!( + vote_state.last_timestamp, + BlockTimestamp { slot, timestamp } + ); + assert_eq!( + vote_state.process_timestamp(slot + 1, timestamp - 1), + Err(VoteError::TimestampTooOld) + ); + assert_eq!( + vote_state.process_timestamp(slot, timestamp + 1), + Err(VoteError::TimestampTooOld) + ); + assert_eq!(vote_state.process_timestamp(slot, timestamp), Ok(())); + assert_eq!( + vote_state.last_timestamp, + BlockTimestamp { slot, timestamp } + ); + assert_eq!(vote_state.process_timestamp(slot + 1, timestamp), Ok(())); + assert_eq!( + vote_state.last_timestamp, + BlockTimestamp { + slot: slot + 1, + timestamp + } + ); + assert_eq!( + vote_state.process_timestamp(slot + 2, timestamp + 1), + Ok(()) + ); + assert_eq!( + vote_state.last_timestamp, + BlockTimestamp { + slot: slot + 2, + timestamp: timestamp + 1 + } + ); + + // Test initial vote + vote_state.last_timestamp = BlockTimestamp::default(); + assert_eq!(vote_state.process_timestamp(0, timestamp), Ok(())); + } + + #[test] + fn test_get_and_update_authorized_voter() { + let original_voter = Pubkey::new_unique(); + let mut vote_state = VoteState::new( + &VoteInit { + node_pubkey: original_voter, + authorized_voter: original_voter, + authorized_withdrawer: original_voter, + commission: 0, + }, + &Clock::default(), + ); + + assert_eq!(vote_state.authorized_voters.len(), 1); + assert_eq!( + *vote_state.authorized_voters.first().unwrap().1, + original_voter + ); + + // If no new authorized voter was set, the same authorized voter + // is locked into the next epoch + assert_eq!( + vote_state.get_and_update_authorized_voter(1).unwrap(), + original_voter + ); + + // Try to get the authorized voter for epoch 5, implies + // the authorized voter for epochs 1-4 were unchanged + assert_eq!( + vote_state.get_and_update_authorized_voter(5).unwrap(), + original_voter + ); + + // Authorized voter for expired epoch 0..5 should have been + // purged and no longer queryable + assert_eq!(vote_state.authorized_voters.len(), 1); + for i in 0..5 { + assert!(vote_state + .authorized_voters + .get_authorized_voter(i) + .is_none()); + } + + // Set an authorized voter change at slot 7 + let new_authorized_voter = Pubkey::new_unique(); + vote_state + .set_new_authorized_voter(&new_authorized_voter, 5, 7, |_| Ok(())) + .unwrap(); + + // Try to get the authorized voter for epoch 6, unchanged + assert_eq!( + vote_state.get_and_update_authorized_voter(6).unwrap(), + original_voter + ); + + // Try to get the authorized voter for epoch 7 and onwards, should + // be the new authorized voter + for i in 7..10 { + assert_eq!( + vote_state.get_and_update_authorized_voter(i).unwrap(), + new_authorized_voter + ); + } + assert_eq!(vote_state.authorized_voters.len(), 1); + } + + #[test] + fn test_set_new_authorized_voter() { + let original_voter = 
Pubkey::new_unique(); + let epoch_offset = 15; + let mut vote_state = VoteState::new( + &VoteInit { + node_pubkey: original_voter, + authorized_voter: original_voter, + authorized_withdrawer: original_voter, + commission: 0, + }, + &Clock::default(), + ); + + assert!(vote_state.prior_voters.last().is_none()); + + let new_voter = Pubkey::new_unique(); + // Set a new authorized voter + vote_state + .set_new_authorized_voter(&new_voter, 0, epoch_offset, |_| Ok(())) + .unwrap(); + + assert_eq!(vote_state.prior_voters.idx, 0); + assert_eq!( + vote_state.prior_voters.last(), + Some(&(original_voter, 0, epoch_offset)) + ); + + // Trying to set authorized voter for same epoch again should fail + assert_eq!( + vote_state.set_new_authorized_voter(&new_voter, 0, epoch_offset, |_| Ok(())), + Err(VoteError::TooSoonToReauthorize.into()) + ); + + // Setting the same authorized voter again should succeed + vote_state + .set_new_authorized_voter(&new_voter, 2, 2 + epoch_offset, |_| Ok(())) + .unwrap(); + + // Set a third and fourth authorized voter + let new_voter2 = Pubkey::new_unique(); + vote_state + .set_new_authorized_voter(&new_voter2, 3, 3 + epoch_offset, |_| Ok(())) + .unwrap(); + assert_eq!(vote_state.prior_voters.idx, 1); + assert_eq!( + vote_state.prior_voters.last(), + Some(&(new_voter, epoch_offset, 3 + epoch_offset)) + ); + + let new_voter3 = Pubkey::new_unique(); + vote_state + .set_new_authorized_voter(&new_voter3, 6, 6 + epoch_offset, |_| Ok(())) + .unwrap(); + assert_eq!(vote_state.prior_voters.idx, 2); + assert_eq!( + vote_state.prior_voters.last(), + Some(&(new_voter2, 3 + epoch_offset, 6 + epoch_offset)) + ); + + // Check can set back to original voter + vote_state + .set_new_authorized_voter(&original_voter, 9, 9 + epoch_offset, |_| Ok(())) + .unwrap(); + + // Run with these voters for a while, check the ranges of authorized + // voters is correct + for i in 9..epoch_offset { + assert_eq!( + vote_state.get_and_update_authorized_voter(i).unwrap(), + original_voter + ); + } + for i in epoch_offset..3 + epoch_offset { + assert_eq!( + vote_state.get_and_update_authorized_voter(i).unwrap(), + new_voter + ); + } + for i in 3 + epoch_offset..6 + epoch_offset { + assert_eq!( + vote_state.get_and_update_authorized_voter(i).unwrap(), + new_voter2 + ); + } + for i in 6 + epoch_offset..9 + epoch_offset { + assert_eq!( + vote_state.get_and_update_authorized_voter(i).unwrap(), + new_voter3 + ); + } + for i in 9 + epoch_offset..=10 + epoch_offset { + assert_eq!( + vote_state.get_and_update_authorized_voter(i).unwrap(), + original_voter + ); + } + } + + #[test] + fn test_authorized_voter_is_locked_within_epoch() { + let original_voter = Pubkey::new_unique(); + let mut vote_state = VoteState::new( + &VoteInit { + node_pubkey: original_voter, + authorized_voter: original_voter, + authorized_withdrawer: original_voter, + commission: 0, + }, + &Clock::default(), + ); + + // Test that it's not possible to set a new authorized + // voter within the same epoch, even if none has been + // explicitly set before + let new_voter = Pubkey::new_unique(); + assert_eq!( + vote_state.set_new_authorized_voter(&new_voter, 1, 1, |_| Ok(())), + Err(VoteError::TooSoonToReauthorize.into()) + ); + + assert_eq!(vote_state.get_authorized_voter(1), Some(original_voter)); + + // Set a new authorized voter for a future epoch + assert_eq!( + vote_state.set_new_authorized_voter(&new_voter, 1, 2, |_| Ok(())), + Ok(()) + ); + + // Test that it's not possible to set a new authorized + // voter within the same epoch, even if none has 
been
+        // explicitly set before
+        assert_eq!(
+            vote_state.set_new_authorized_voter(&original_voter, 3, 3, |_| Ok(())),
+            Err(VoteError::TooSoonToReauthorize.into())
+        );
+
+        assert_eq!(vote_state.get_authorized_voter(3), Some(new_voter));
+    }
+
+    #[test]
+    fn test_vote_state_size_of() {
+        let vote_state = VoteState::get_max_sized_vote_state();
+        let vote_state = VoteStateVersions::new_current(vote_state);
+        let size = bincode::serialized_size(&vote_state).unwrap();
+        assert_eq!(VoteState::size_of() as u64, size);
+    }
+
+    #[test]
+    fn test_vote_state_max_size() {
+        let mut max_sized_data = vec![0; VoteState::size_of()];
+        let vote_state = VoteState::get_max_sized_vote_state();
+        let (start_leader_schedule_epoch, _) = vote_state.authorized_voters.last().unwrap();
+        let start_current_epoch =
+            start_leader_schedule_epoch - MAX_LEADER_SCHEDULE_EPOCH_OFFSET + 1;
+
+        let mut vote_state = Some(vote_state);
+        for i in start_current_epoch..start_current_epoch + 2 * MAX_LEADER_SCHEDULE_EPOCH_OFFSET {
+            vote_state.as_mut().map(|vote_state| {
+                vote_state.set_new_authorized_voter(
+                    &Pubkey::new_unique(),
+                    i,
+                    i + MAX_LEADER_SCHEDULE_EPOCH_OFFSET,
+                    |_| Ok(()),
+                )
+            });
+
+            let versioned = VoteStateVersions::new_current(vote_state.take().unwrap());
+            VoteState::serialize(&versioned, &mut max_sized_data).unwrap();
+            vote_state = Some(versioned.convert_to_current());
+        }
+    }
+
+    #[test]
+    fn test_default_vote_state_is_uninitialized() {
+        // The default `VoteState` is stored to de-initialize a zero-balance vote account,
+        // so must remain such that `VoteStateVersions::is_uninitialized()` returns true
+        // when called on a `VoteStateVersions` that stores it
+        assert!(VoteStateVersions::new_current(VoteState::default()).is_uninitialized());
+    }
+
+    #[test]
+    fn test_is_correct_size_and_initialized() {
+        // Check all zeroes
+        let mut vote_account_data = vec![0; VoteState::size_of()];
+        assert!(!VoteState::is_correct_size_and_initialized(
+            &vote_account_data
+        ));
+
+        // Check default VoteState
+        let default_account_state = VoteStateVersions::new_current(VoteState::default());
+        VoteState::serialize(&default_account_state, &mut vote_account_data).unwrap();
+        assert!(!VoteState::is_correct_size_and_initialized(
+            &vote_account_data
+        ));
+
+        // Check non-zero data shorter than offset index used
+        let short_data = vec![1; DEFAULT_PRIOR_VOTERS_OFFSET];
+        assert!(!VoteState::is_correct_size_and_initialized(&short_data));
+
+        // Check non-zero large account
+        let mut large_vote_data = vec![1; 2 * VoteState::size_of()];
+        let default_account_state = VoteStateVersions::new_current(VoteState::default());
+        VoteState::serialize(&default_account_state, &mut large_vote_data).unwrap();
+        assert!(!VoteState::is_correct_size_and_initialized(
+            &large_vote_data
+        ));
+
+        // Check populated VoteState
+        let account_state = VoteStateVersions::new_current(VoteState::new(
+            &VoteInit {
+                node_pubkey: Pubkey::new_unique(),
+                authorized_voter: Pubkey::new_unique(),
+                authorized_withdrawer: Pubkey::new_unique(),
+                commission: 0,
+            },
+            &Clock::default(),
+        ));
+        VoteState::serialize(&account_state, &mut vote_account_data).unwrap();
+        assert!(VoteState::is_correct_size_and_initialized(
+            &vote_account_data
+        ));
+    }
+
+    #[test]
+    fn test_minimum_balance() {
+        let rent = solana_program::rent::Rent::default();
+        let minimum_balance = rent.minimum_balance(VoteState::size_of());
+        // golden, may need updating when vote_state grows
+        assert!(minimum_balance as f64 / 10f64.powf(9.0) < 0.04)
+    }
+}
diff --git 
a/programs/vote/src/vote_state/vote_state_0_23_5.rs b/sdk/program/src/vote/state/vote_state_0_23_5.rs similarity index 97% rename from programs/vote/src/vote_state/vote_state_0_23_5.rs rename to sdk/program/src/vote/state/vote_state_0_23_5.rs index 89b99dc2f4..7ba4e361ee 100644 --- a/programs/vote/src/vote_state/vote_state_0_23_5.rs +++ b/sdk/program/src/vote/state/vote_state_0_23_5.rs @@ -1,3 +1,4 @@ +#![allow(clippy::integer_arithmetic)] use super::*; const MAX_ITEMS: usize = 32; diff --git a/programs/vote/src/vote_state/vote_state_versions.rs b/sdk/program/src/vote/state/vote_state_versions.rs similarity index 96% rename from programs/vote/src/vote_state/vote_state_versions.rs rename to sdk/program/src/vote/state/vote_state_versions.rs index 3f6b9ec14d..50bfed0521 100644 --- a/programs/vote/src/vote_state/vote_state_versions.rs +++ b/sdk/program/src/vote/state/vote_state_versions.rs @@ -1,4 +1,4 @@ -use {super::*, crate::vote_state::vote_state_0_23_5::VoteState0_23_5}; +use super::{vote_state_0_23_5::VoteState0_23_5, *}; #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] pub enum VoteStateVersions { diff --git a/sdk/src/builtins.rs b/sdk/src/builtins.rs index 9d412d6379..4c3b7c21f1 100644 --- a/sdk/src/builtins.rs +++ b/sdk/src/builtins.rs @@ -65,67 +65,6 @@ macro_rules! declare_builtin_name { /// entrypoint: Program's entrypoint, must be of `type Entrypoint` /// id: Path to the program id access function, used if this macro is not /// called in `src/lib` -/// -/// # Examples -/// -/// ``` -/// use std::str::FromStr; -/// // wrapper is used so that the macro invocation occurs in the item position -/// // rather than in the statement position which isn't allowed. -/// mod item_wrapper { -/// use solana_sdk::keyed_account::KeyedAccount; -/// use solana_sdk::instruction::InstructionError; -/// use solana_sdk::pubkey::Pubkey; -/// use solana_sdk::declare_builtin; -/// -/// fn my_process_instruction( -/// first_instruction_account: usize, -/// keyed_accounts: &[KeyedAccount], -/// ) -> Result<(), InstructionError> { -/// // Process an instruction -/// Ok(()) -/// } -/// -/// declare_builtin!( -/// "My11111111111111111111111111111111111111111", -/// solana_my_program, -/// my_process_instruction -/// ); -/// -/// # } -/// # use solana_sdk::pubkey::Pubkey; -/// # use item_wrapper::id; -/// let my_id = Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap(); -/// assert_eq!(id(), my_id); -/// ``` -/// ``` -/// use std::str::FromStr; -/// # // wrapper is used so that the macro invocation occurs in the item position -/// # // rather than in the statement position which isn't allowed. -/// # mod item_wrapper { -/// use solana_sdk::keyed_account::KeyedAccount; -/// use solana_sdk::instruction::InstructionError; -/// use solana_sdk::pubkey::Pubkey; -/// use solana_sdk::declare_builtin; -/// -/// fn my_process_instruction( -/// first_instruction_account: usize, -/// keyed_accounts: &[KeyedAccount], -/// ) -> Result<(), InstructionError> { -/// // Process an instruction -/// Ok(()) -/// } -/// -/// declare_builtin!( -/// solana_sdk::system_program::ID, -/// solana_my_program, -/// my_process_instruction -/// ); -/// } -/// -/// # use item_wrapper::id; -/// assert_eq!(id(), solana_sdk::system_program::ID); -/// ``` #[macro_export] macro_rules! 
declare_builtin { ($bs58_string:expr, $name:ident, $entrypoint:expr) => { diff --git a/sdk/src/bundle/error.rs b/sdk/src/bundle/error.rs index a856c90661..503446f97e 100644 --- a/sdk/src/bundle/error.rs +++ b/sdk/src/bundle/error.rs @@ -5,7 +5,7 @@ use { solana_sdk::transaction::TransactionError, std::time::Duration, thiserror::Error, }; -#[derive(Error, Debug, Clone, Serialize, Deserialize, PartialEq)] +#[derive(Error, Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub enum BundleExecutionError { #[error("PoH max height reached in the middle of a bundle.")] PohMaxHeightError, @@ -29,7 +29,7 @@ pub enum BundleExecutionError { LockError, } -#[derive(Error, Debug, Clone, Serialize, Deserialize, PartialEq)] +#[derive(Error, Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub enum TipPaymentError { #[error("account is missing from bank: {0}")] AccountMissing(Pubkey), diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index c1af6f70cc..2f77c22830 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -201,8 +201,6 @@ pub mod do_support_realloc { solana_sdk::declare_id!("75m6ysz33AfLA5DDEzWM1obBrnPQRSsdVQ2nRmc8Vuu1"); } -// Note: when this feature is cleaned up, also remove the secp256k1 program from -// the list of builtins and remove its files from /programs pub mod prevent_calling_precompiles_as_programs { solana_sdk::declare_id!("4ApgRX3ud6p7LNMJmsuaAcZY5HWctGPr5obAsjB3A54d"); } @@ -488,10 +486,22 @@ pub mod concurrent_replay_of_forks { solana_sdk::declare_id!("9F2Dcu8xkBPKxiiy65XKPZYdCG3VZDpjDTuSmeYLozJe"); } +pub mod check_ping_ancestor_requests { + solana_sdk::declare_id!("AXLB87anNaUQtqBSsxkm4gvNzYY985aLtNtpJC94uWLJ"); +} + pub mod incremental_snapshot_only_incremental_hash_calculation { solana_sdk::declare_id!("25vqsfjk7Nv1prsQJmA4Xu1bN61s8LXCBGUPp8Rfy1UF"); } +pub mod disable_cpi_setting_executable_and_rent_epoch { + solana_sdk::declare_id!("B9cdB55u4jQsDNsdTK525yE9dmSc5Ga7YBaBrDFvEhM9"); +} + +pub mod relax_authority_signer_check_for_lookup_table_creation { + solana_sdk::declare_id!("FKAcEvNgSY79RpqsPNUV5gDyumopH4cEHqUxyfm8b8Ap"); +} + lazy_static! { /// Map of feature identifiers to user-visible description pub static ref FEATURE_NAMES: HashMap = [ @@ -608,7 +618,10 @@ lazy_static! 
{ (compact_vote_state_updates::id(), "Compact vote state updates to lower block size"), (sign_repair_requests::id(), "sign repair requests #26834"), (concurrent_replay_of_forks::id(), "Allow slots from different forks to be replayed concurrently #26465"), + (check_ping_ancestor_requests::id(), "ancestor hash repair socket ping/pong support #26963"), (incremental_snapshot_only_incremental_hash_calculation::id(), "only hash accounts in incremental snapshot during incremental snapshot creation #26799"), + (disable_cpi_setting_executable_and_rent_epoch::id(), "disable setting is_executable and_rent_epoch in CPI #26987"), + (relax_authority_signer_check_for_lookup_table_creation::id(), "relax authority signer check for lookup table creation #27205"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() diff --git a/sdk/src/keyed_account.rs b/sdk/src/keyed_account.rs deleted file mode 100644 index f21876ddaa..0000000000 --- a/sdk/src/keyed_account.rs +++ /dev/null @@ -1,258 +0,0 @@ -#![deprecated( - since = "1.11.0", - note = "Please use BorrowedAccount instead of KeyedAccount" -)] -#![allow(deprecated)] -use { - crate::{ - account::{AccountSharedData, ReadableAccount}, - account_utils::{State, StateMut}, - }, - solana_program::{clock::Epoch, instruction::InstructionError, pubkey::Pubkey}, - std::{ - cell::{Ref, RefCell, RefMut}, - iter::FromIterator, - rc::Rc, - }, -}; - -#[repr(C)] -#[derive(Debug, Clone)] -pub struct KeyedAccount<'a> { - is_signer: bool, // Transaction was signed by this account's key - is_writable: bool, - key: &'a Pubkey, - pub account: &'a RefCell, -} - -impl<'a> KeyedAccount<'a> { - pub fn signer_key(&self) -> Option<&Pubkey> { - if self.is_signer { - Some(self.key) - } else { - None - } - } - - pub fn unsigned_key(&self) -> &Pubkey { - self.key - } - - pub fn is_writable(&self) -> bool { - self.is_writable - } - - pub fn lamports(&self) -> Result { - Ok(self.try_borrow()?.lamports()) - } - - pub fn data_len(&self) -> Result { - Ok(self.try_borrow()?.data().len()) - } - - pub fn data_is_empty(&self) -> Result { - Ok(self.try_borrow()?.data().is_empty()) - } - - pub fn owner(&self) -> Result { - Ok(*self.try_borrow()?.owner()) - } - - pub fn executable(&self) -> Result { - Ok(self.try_borrow()?.executable()) - } - - pub fn rent_epoch(&self) -> Result { - Ok(self.try_borrow()?.rent_epoch()) - } - - pub fn try_account_ref(&'a self) -> Result, InstructionError> { - self.try_borrow() - } - - pub fn try_account_ref_mut(&'a self) -> Result, InstructionError> { - self.try_borrow_mut() - } - - fn try_borrow(&self) -> Result, InstructionError> { - self.account - .try_borrow() - .map_err(|_| InstructionError::AccountBorrowFailed) - } - fn try_borrow_mut(&self) -> Result, InstructionError> { - self.account - .try_borrow_mut() - .map_err(|_| InstructionError::AccountBorrowFailed) - } - - pub fn new(key: &'a Pubkey, is_signer: bool, account: &'a RefCell) -> Self { - Self { - is_signer, - is_writable: true, - key, - account, - } - } - - pub fn new_readonly( - key: &'a Pubkey, - is_signer: bool, - account: &'a RefCell, - ) -> Self { - Self { - is_signer, - is_writable: false, - key, - account, - } - } -} - -impl<'a> PartialEq for KeyedAccount<'a> { - fn eq(&self, other: &Self) -> bool { - self.key == other.key - } -} - -impl<'a> From<(&'a Pubkey, &'a RefCell)> for KeyedAccount<'a> { - fn from((key, account): (&'a Pubkey, &'a RefCell)) -> Self { - Self { - is_signer: false, - is_writable: true, - key, - account, - } - } -} - -impl<'a> From<(&'a Pubkey, bool, &'a RefCell)> for 
KeyedAccount<'a> { - fn from((key, is_signer, account): (&'a Pubkey, bool, &'a RefCell)) -> Self { - Self { - is_signer, - is_writable: true, - key, - account, - } - } -} - -impl<'a> From<&'a (&'a Pubkey, &'a RefCell)> for KeyedAccount<'a> { - fn from((key, account): &'a (&'a Pubkey, &'a RefCell)) -> Self { - Self { - is_signer: false, - is_writable: true, - key, - account, - } - } -} - -pub fn create_keyed_accounts<'a>( - accounts: &'a [(&'a Pubkey, &'a RefCell)], -) -> Vec> { - accounts.iter().map(Into::into).collect() -} - -#[deprecated( - since = "1.7.0", - note = "Please use create_keyed_accounts_unified instead" -)] -pub fn create_keyed_is_signer_accounts<'a>( - accounts: &'a [(&'a Pubkey, bool, &'a RefCell)], -) -> Vec> { - accounts - .iter() - .map(|(key, is_signer, account)| KeyedAccount { - is_signer: *is_signer, - is_writable: false, - key, - account, - }) - .collect() -} - -#[deprecated( - since = "1.7.0", - note = "Please use create_keyed_accounts_unified instead" -)] -pub fn create_keyed_readonly_accounts( - accounts: &[(Pubkey, Rc>)], -) -> Vec { - accounts - .iter() - .map(|(key, account)| KeyedAccount { - is_signer: false, - is_writable: false, - key, - account, - }) - .collect() -} - -pub fn create_keyed_accounts_unified<'a>( - accounts: &[(bool, bool, &'a Pubkey, &'a RefCell)], -) -> Vec> { - accounts - .iter() - .map(|(is_signer, is_writable, key, account)| KeyedAccount { - is_signer: *is_signer, - is_writable: *is_writable, - key, - account, - }) - .collect() -} - -#[deprecated( - since = "1.11.0", - note = "Please use InstructionContext::get_signers() instead" -)] -/// Return all the signers from a set of KeyedAccounts -pub fn get_signers(keyed_accounts: &[KeyedAccount]) -> A -where - A: FromIterator, -{ - keyed_accounts - .iter() - .filter_map(|keyed_account| keyed_account.signer_key()) - .cloned() - .collect::() -} - -#[deprecated(since = "1.7.0", note = "Please use keyed_account_at_index instead")] -/// Return the next KeyedAccount or a NotEnoughAccountKeys error -pub fn next_keyed_account<'a, 'b, I: Iterator>>( - iter: &mut I, -) -> Result { - iter.next().ok_or(InstructionError::NotEnoughAccountKeys) -} - -/// Return the KeyedAccount at the specified index or a NotEnoughAccountKeys error -/// -/// Index zero starts at the chain of program accounts, followed by the instruction accounts. -pub fn keyed_account_at_index<'a>( - keyed_accounts: &'a [KeyedAccount], - index: usize, -) -> Result<&'a KeyedAccount<'a>, InstructionError> { - keyed_accounts - .get(index) - .ok_or(InstructionError::NotEnoughAccountKeys) -} - -/// Return true if the first keyed_account is executable, used to determine if -/// the loader should call a program's 'main' -pub fn is_executable(keyed_accounts: &[KeyedAccount]) -> Result { - Ok(!keyed_accounts.is_empty() && keyed_accounts[0].executable()?) 
-} - -impl<'a, T> State for crate::keyed_account::KeyedAccount<'a> -where - T: serde::Serialize + serde::de::DeserializeOwned, -{ - fn state(&self) -> Result { - self.try_account_ref()?.state() - } - fn set_state(&self, state: &T) -> Result<(), InstructionError> { - self.try_account_ref_mut()?.set_state(state) - } -} diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index 3e4230a33d..65ff4cda1b 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -7,7 +7,21 @@ extern crate self as solana_sdk; #[cfg(feature = "full")] pub use signer::signers; -pub use solana_program::*; +// These solana_program imports could be *-imported, but that causes a bunch of +// confusing duplication in the docs due to a rustdoc bug. #26211 +#[cfg(not(target_os = "solana"))] +pub use solana_program::program_stubs; +pub use solana_program::{ + account_info, address_lookup_table_account, blake3, borsh, bpf_loader, bpf_loader_deprecated, + bpf_loader_upgradeable, clock, clone_zeroed, config, copy_field, custom_heap_default, + custom_panic_default, debug_account_data, declare_deprecated_sysvar_id, declare_sysvar_id, + decode_error, ed25519_program, epoch_schedule, fee_calculator, impl_sysvar_get, incinerator, + instruction, keccak, lamports, loader_instruction, loader_upgradeable_instruction, message, + msg, native_token, nonce, program, program_error, program_memory, program_option, program_pack, + rent, sanitize, sdk_ids, secp256k1_program, secp256k1_recover, serialize_utils, short_vec, + slot_hashes, slot_history, stake, stake_history, syscalls, system_instruction, system_program, + sysvar, unchecked_div_by_const, vote, wasm_bindgen, +}; pub mod account; pub mod account_utils; @@ -31,7 +45,6 @@ pub mod genesis_config; pub mod hard_forks; pub mod hash; pub mod inflation; -pub mod keyed_account; pub mod log; pub mod native_loader; pub mod nonce_account; diff --git a/sdk/src/precompiles.rs b/sdk/src/precompiles.rs index 1f6149772c..e97474b25c 100644 --- a/sdk/src/precompiles.rs +++ b/sdk/src/precompiles.rs @@ -4,9 +4,7 @@ use { crate::{ - decode_error::DecodeError, - feature_set::{prevent_calling_precompiles_as_programs, FeatureSet}, - instruction::CompiledInstruction, + decode_error::DecodeError, feature_set::FeatureSet, instruction::CompiledInstruction, pubkey::Pubkey, }, lazy_static::lazy_static, @@ -81,12 +79,12 @@ lazy_static! { static ref PRECOMPILES: Vec = vec![ Precompile::new( crate::secp256k1_program::id(), - Some(prevent_calling_precompiles_as_programs::id()), + None, // always enabled crate::secp256k1_instruction::verify, ), Precompile::new( crate::ed25519_program::id(), - Some(prevent_calling_precompiles_as_programs::id()), + None, // always enabled crate::ed25519_instruction::verify, ), ]; diff --git a/sdk/src/quic.rs b/sdk/src/quic.rs index e00c4d7d70..1cd93be9c0 100644 --- a/sdk/src/quic.rs +++ b/sdk/src/quic.rs @@ -12,3 +12,15 @@ pub const QUIC_KEEP_ALIVE_MS: u64 = 1_000; // applications. 
Different applications vary, but most seem to // be in the 30-60 second range pub const QUIC_CONNECTION_HANDSHAKE_TIMEOUT_MS: u64 = 60_000; + +/// The receive window for QUIC connection from unstaked nodes is +/// set to this ratio times [`solana_sdk::packet::PACKET_DATA_SIZE`] +pub const QUIC_UNSTAKED_RECEIVE_WINDOW_RATIO: u64 = 1; + +/// The receive window for QUIC connection from minimum staked nodes is +/// set to this ratio times [`solana_sdk::packet::PACKET_DATA_SIZE`] +pub const QUIC_MIN_STAKED_RECEIVE_WINDOW_RATIO: u64 = 2; + +/// The receive window for QUIC connection from maximum staked nodes is +/// set to this ratio times [`solana_sdk::packet::PACKET_DATA_SIZE`] +pub const QUIC_MAX_STAKED_RECEIVE_WINDOW_RATIO: u64 = 10; diff --git a/sdk/src/transaction/error.rs b/sdk/src/transaction/error.rs index 2ff117dc52..2845a8e4b5 100644 --- a/sdk/src/transaction/error.rs +++ b/sdk/src/transaction/error.rs @@ -142,9 +142,9 @@ pub enum TransactionError { #[error("Transaction contains a duplicate instruction ({0}) that is not allowed")] DuplicateInstruction(u8), - /// Transaction results in an account without insufficient funds for rent + /// Transaction results in an account with insufficient funds for rent #[error( - "Transaction results in an account ({account_index}) without insufficient funds for rent" + "Transaction results in an account ({account_index}) with insufficient funds for rent" )] InsufficientFundsForRent { account_index: u8 }, diff --git a/sdk/src/transaction/sanitized.rs b/sdk/src/transaction/sanitized.rs index 4d314ef962..9a0e7b4af5 100644 --- a/sdk/src/transaction/sanitized.rs +++ b/sdk/src/transaction/sanitized.rs @@ -208,10 +208,13 @@ impl SanitizedTransaction { } /// Validate and return the account keys locked by this transaction - pub fn get_account_locks(&self) -> Result { + pub fn get_account_locks( + &self, + tx_account_lock_limit: usize, + ) -> Result { if self.message.has_duplicates() { Err(TransactionError::AccountLoadedTwice) - } else if self.message.account_keys().len() > MAX_TX_ACCOUNT_LOCKS { + } else if self.message.account_keys().len() > tx_account_lock_limit { Err(TransactionError::TooManyAccountLocks) } else { Ok(self.get_account_locks_unchecked()) diff --git a/sdk/src/transaction_context.rs b/sdk/src/transaction_context.rs index eabd59c176..0c944787cf 100644 --- a/sdk/src/transaction_context.rs +++ b/sdk/src/transaction_context.rs @@ -1,4 +1,4 @@ -//! Successors of instruction_context_context::StackFrame, KeyedAccount and AccountInfo +//! 
Data shared between program runtime and built-in programs as well as SBF programs use { crate::{ @@ -48,8 +48,7 @@ pub struct TransactionContext { account_touched_flags: RefCell>>, instruction_context_capacity: usize, instruction_stack: Vec, - number_of_instructions_at_transaction_level: usize, - instruction_trace: InstructionTrace, + instruction_trace: Vec, return_data: TransactionReturnData, accounts_resize_delta: RefCell, rent: Option, @@ -61,7 +60,7 @@ impl TransactionContext { transaction_accounts: Vec, rent: Option, instruction_context_capacity: usize, - number_of_instructions_at_transaction_level: usize, + _number_of_instructions_at_transaction_level: usize, ) -> Self { let (account_keys, accounts): (Vec, Vec>) = transaction_accounts @@ -75,8 +74,7 @@ impl TransactionContext { account_touched_flags: RefCell::new(Pin::new(account_touched_flags.into_boxed_slice())), instruction_context_capacity, instruction_stack: Vec::with_capacity(instruction_context_capacity), - number_of_instructions_at_transaction_level, - instruction_trace: Vec::with_capacity(number_of_instructions_at_transaction_level), + instruction_trace: Vec::new(), return_data: TransactionReturnData::default(), accounts_resize_delta: RefCell::new(0), rent, @@ -139,29 +137,32 @@ impl TransactionContext { self.account_keys.iter().rposition(|key| key == pubkey) } + /// Returns instruction trace length + pub fn get_instruction_trace_length(&self) -> usize { + self.instruction_trace.len() + } + + /// Gets an InstructionContext by its index in the trace + pub fn get_instruction_context_at_index_in_trace( + &self, + index_in_trace: usize, + ) -> Result<&InstructionContext, InstructionError> { + self.instruction_trace + .get(index_in_trace) + .ok_or(InstructionError::CallDepth) + } + /// Gets an InstructionContext by its nesting level in the stack - pub fn get_instruction_context_at( + pub fn get_instruction_context_at_nesting_level( &self, - level: usize, + nesting_level: usize, ) -> Result<&InstructionContext, InstructionError> { - let top_level_index = *self + let index_in_trace = *self .instruction_stack - .first() + .get(nesting_level) .ok_or(InstructionError::CallDepth)?; - let cpi_index = if level == 0 { - 0 - } else { - *self - .instruction_stack - .get(level) - .ok_or(InstructionError::CallDepth)? 
- }; - let instruction_context = self - .instruction_trace - .get(top_level_index) - .and_then(|instruction_trace| instruction_trace.get(cpi_index)) - .ok_or(InstructionError::CallDepth)?; - debug_assert_eq!(instruction_context.nesting_level, level); + let instruction_context = self.get_instruction_context_at_index_in_trace(index_in_trace)?; + debug_assert_eq!(instruction_context.nesting_level, nesting_level); Ok(instruction_context) } @@ -182,7 +183,7 @@ impl TransactionContext { .get_instruction_context_stack_height() .checked_sub(1) .ok_or(InstructionError::CallDepth)?; - self.get_instruction_context_at(level) + self.get_instruction_context_at_nesting_level(level) } /// Pushes a new InstructionContext @@ -193,49 +194,32 @@ impl TransactionContext { instruction_data: &[u8], ) -> Result<(), InstructionError> { let callee_instruction_accounts_lamport_sum = - self.instruction_accounts_lamport_sum(instruction_accounts)?; - let index_in_trace = if self.instruction_stack.is_empty() { - debug_assert!( - self.instruction_trace.len() < self.number_of_instructions_at_transaction_level - ); - let instruction_context = InstructionContext { - nesting_level: self.instruction_stack.len(), - instruction_accounts_lamport_sum: callee_instruction_accounts_lamport_sum, - program_accounts: program_accounts.to_vec(), - instruction_accounts: instruction_accounts.to_vec(), - instruction_data: instruction_data.to_vec(), - }; - self.instruction_trace.push(vec![instruction_context]); - self.instruction_trace.len().saturating_sub(1) - } else { - if self.is_early_verification_of_account_modifications_enabled() { - let caller_instruction_context = self.get_current_instruction_context()?; - let original_caller_instruction_accounts_lamport_sum = - caller_instruction_context.instruction_accounts_lamport_sum; - let current_caller_instruction_accounts_lamport_sum = self - .instruction_accounts_lamport_sum( - &caller_instruction_context.instruction_accounts, - )?; - if original_caller_instruction_accounts_lamport_sum - != current_caller_instruction_accounts_lamport_sum - { - return Err(InstructionError::UnbalancedInstruction); - } - } - if let Some(instruction_trace) = self.instruction_trace.last_mut() { - let instruction_context = InstructionContext { - nesting_level: self.instruction_stack.len(), - instruction_accounts_lamport_sum: callee_instruction_accounts_lamport_sum, - program_accounts: program_accounts.to_vec(), - instruction_accounts: instruction_accounts.to_vec(), - instruction_data: instruction_data.to_vec(), - }; - instruction_trace.push(instruction_context); - instruction_trace.len().saturating_sub(1) - } else { - return Err(InstructionError::CallDepth); + self.instruction_accounts_lamport_sum(instruction_accounts.iter())?; + if !self.instruction_stack.is_empty() + && self.is_early_verification_of_account_modifications_enabled() + { + let caller_instruction_context = self.get_current_instruction_context()?; + let original_caller_instruction_accounts_lamport_sum = + caller_instruction_context.instruction_accounts_lamport_sum; + let current_caller_instruction_accounts_lamport_sum = self + .instruction_accounts_lamport_sum( + caller_instruction_context.instruction_accounts.iter(), + )?; + if original_caller_instruction_accounts_lamport_sum + != current_caller_instruction_accounts_lamport_sum + { + return Err(InstructionError::UnbalancedInstruction); } - }; + } + let instruction_context = InstructionContext::new( + self.instruction_stack.len(), + callee_instruction_accounts_lamport_sum, + 
program_accounts.to_vec(), + instruction_accounts.to_vec(), + instruction_data.to_vec(), + ); + let index_in_trace = self.instruction_trace.len(); + self.instruction_trace.push(instruction_context); if self.instruction_stack.len() >= self.instruction_context_capacity { return Err(InstructionError::CallDepth); } @@ -249,26 +233,27 @@ impl TransactionContext { return Err(InstructionError::CallDepth); } // Verify (before we pop) that the total sum of all lamports in this instruction did not change - let detected_an_unbalanced_instruction = if self - .is_early_verification_of_account_modifications_enabled() - { - self.get_current_instruction_context() - .and_then(|instruction_context| { - // Verify all executable accounts have no outstanding refs - for account_index in instruction_context.program_accounts.iter() { - self.get_account_at_index(*account_index)? - .try_borrow_mut() - .map_err(|_| InstructionError::AccountBorrowOutstanding)?; - } - self.instruction_accounts_lamport_sum(&instruction_context.instruction_accounts) + let detected_an_unbalanced_instruction = + if self.is_early_verification_of_account_modifications_enabled() { + self.get_current_instruction_context() + .and_then(|instruction_context| { + // Verify all executable accounts have no outstanding refs + for account_index in instruction_context.program_accounts.iter() { + self.get_account_at_index(*account_index)? + .try_borrow_mut() + .map_err(|_| InstructionError::AccountBorrowOutstanding)?; + } + self.instruction_accounts_lamport_sum( + instruction_context.instruction_accounts.iter(), + ) .map(|instruction_accounts_lamport_sum| { instruction_context.instruction_accounts_lamport_sum != instruction_accounts_lamport_sum }) - }) - } else { - Ok(false) - }; + }) + } else { + Ok(false) + }; // Always pop, even if we `detected_an_unbalanced_instruction` self.instruction_stack.pop(); if detected_an_unbalanced_instruction? { @@ -293,23 +278,19 @@ impl TransactionContext { Ok(()) } - /// Returns instruction trace - pub fn get_instruction_trace(&self) -> &InstructionTrace { - &self.instruction_trace - } - /// Calculates the sum of all lamports within an instruction - fn instruction_accounts_lamport_sum( - &self, - instruction_accounts: &[InstructionAccount], - ) -> Result { + fn instruction_accounts_lamport_sum<'a, I>( + &'a self, + instruction_accounts: I, + ) -> Result + where + I: Iterator, + { if !self.is_early_verification_of_account_modifications_enabled() { return Ok(0); } let mut instruction_accounts_lamport_sum: u128 = 0; - for (instruction_account_index, instruction_account) in - instruction_accounts.iter().enumerate() - { + for (instruction_account_index, instruction_account) in instruction_accounts.enumerate() { if instruction_account_index != instruction_account.index_in_callee { continue; // Skip duplicate account } @@ -340,9 +321,6 @@ pub struct TransactionReturnData { pub data: Vec, } -/// List of (stack height, instruction) for each top-level instruction -pub type InstructionTrace = Vec>; - /// Loaded instruction shared between runtime and programs. /// /// This context is valid for the entire duration of a (possibly cross program) instruction being processed. 
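The hunks above flatten the instruction trace: the removed `InstructionTrace` alias (a `Vec<Vec<InstructionContext>>` keyed by top-level instruction and CPI depth) becomes a single `Vec<InstructionContext>` that only ever grows, while `instruction_stack` keeps indices into that trace for the frames that are currently live; `get_instruction_context_at_index_in_trace` and `get_instruction_context_at_nesting_level` then read through those indices. A minimal, self-contained sketch of the pattern, using simplified stand-in types rather than the real `TransactionContext` API:

// Sketch of the flat-trace design: frames are appended to `trace` permanently,
// while `stack` tracks only the live frames as indices into `trace`.
// Stand-in types for illustration; not the actual TransactionContext API.
struct Frame {
    nesting_level: usize,
}

struct Context {
    trace: Vec<Frame>,  // every frame ever pushed, in push order
    stack: Vec<usize>,  // indices into `trace` for the active call stack
}

impl Context {
    fn push(&mut self) {
        let frame = Frame {
            nesting_level: self.stack.len(),
        };
        let index_in_trace = self.trace.len();
        self.trace.push(frame); // the trace never shrinks
        self.stack.push(index_in_trace);
    }

    fn pop(&mut self) {
        self.stack.pop(); // the frame itself stays in `trace`
    }

    fn current(&self) -> Option<&Frame> {
        self.stack.last().map(|&index| &self.trace[index])
    }
}

fn main() {
    let mut ctx = Context { trace: Vec::new(), stack: Vec::new() };
    ctx.push(); // top-level instruction
    ctx.push(); // CPI at nesting level 1
    assert_eq!(ctx.current().unwrap().nesting_level, 1);
    ctx.pop();
    ctx.pop();
    assert_eq!(ctx.trace.len(), 2); // both frames remain in the trace
    assert!(ctx.current().is_none());
}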
@@ -357,19 +335,19 @@ pub struct InstructionContext { impl InstructionContext { /// New - pub fn new( + fn new( nesting_level: usize, instruction_accounts_lamport_sum: u128, - program_accounts: &[usize], - instruction_accounts: &[InstructionAccount], - instruction_data: &[u8], + program_accounts: Vec, + instruction_accounts: Vec, + instruction_data: Vec, ) -> Self { InstructionContext { nesting_level, instruction_accounts_lamport_sum, - program_accounts: program_accounts.to_vec(), - instruction_accounts: instruction_accounts.to_vec(), - instruction_data: instruction_data.to_vec(), + program_accounts, + instruction_accounts, + instruction_data, } } @@ -912,40 +890,23 @@ impl<'a> BorrowedAccount<'a> { /// Everything that needs to be recorded from a TransactionContext after execution pub struct ExecutionRecord { pub accounts: Vec, - pub instruction_trace: InstructionTrace, pub return_data: TransactionReturnData, - pub changed_account_count: u64, - pub total_size_of_all_accounts: u64, - pub total_size_of_touched_accounts: u64, + pub touched_account_count: u64, pub accounts_resize_delta: i64, } /// Used by the bank in the runtime to write back the processed accounts and recorded instructions impl From for ExecutionRecord { fn from(context: TransactionContext) -> Self { - let mut changed_account_count = 0u64; - let mut total_size_of_all_accounts = 0u64; - let mut total_size_of_touched_accounts = 0u64; let account_touched_flags = context .account_touched_flags .try_borrow() .expect("borrowing transaction_context.account_touched_flags failed"); - for (index_in_transaction, was_touched) in account_touched_flags.iter().enumerate() { - let account_data_size = context - .get_account_at_index(index_in_transaction) - .expect("index_in_transaction out of bounds") - .try_borrow() - .expect("borrowing a transaction_context.account failed") - .data() - .len() as u64; - total_size_of_all_accounts = - total_size_of_all_accounts.saturating_add(account_data_size); - if *was_touched { - changed_account_count = changed_account_count.saturating_add(1); - total_size_of_touched_accounts = - total_size_of_touched_accounts.saturating_add(account_data_size); - } - } + let touched_account_count = account_touched_flags + .iter() + .fold(0u64, |accumulator, was_touched| { + accumulator.saturating_add(*was_touched as u64) + }); Self { accounts: Vec::from(Pin::into_inner(context.account_keys)) .into_iter() @@ -955,11 +916,8 @@ impl From for ExecutionRecord { .map(|account| account.into_inner()), ) .collect(), - instruction_trace: context.instruction_trace, return_data: context.return_data, - changed_account_count, - total_size_of_all_accounts, - total_size_of_touched_accounts, + touched_account_count, accounts_resize_delta: RefCell::into_inner(context.accounts_resize_delta), } } diff --git a/send-transaction-service/src/send_transaction_service.rs b/send-transaction-service/src/send_transaction_service.rs index 70c685f838..628ef9d54c 100644 --- a/send-transaction-service/src/send_transaction_service.rs +++ b/send-transaction-service/src/send_transaction_service.rs @@ -411,7 +411,7 @@ impl SendTransactionService { config ); Builder::new() - .name("send-tx-receive".to_string()) + .name("solStxReceive".to_string()) .spawn(move || loop { let recv_timeout_ms = config.batch_send_rate_ms; let stats = &stats_report.stats; @@ -512,7 +512,7 @@ impl SendTransactionService { config ); Builder::new() - .name("send-tx-retry".to_string()) + .name("solStxRetry".to_string()) .spawn(move || loop { let retry_interval_ms = 
config.retry_rate_ms; let stats = &stats_report.stats; diff --git a/storage-bigtable/Cargo.toml b/storage-bigtable/Cargo.toml index c44b8176a7..0958288e3d 100644 --- a/storage-bigtable/Cargo.toml +++ b/storage-bigtable/Cargo.toml @@ -12,19 +12,19 @@ edition = "2021" [dependencies] backoff = { version = "0.4.0", features = ["tokio"] } bincode = "1.3.3" -bytes = "1.0" +bytes = "1.2" bzip2 = "0.4.3" enum-iterator = "0.8.1" flate2 = "1.0.24" -futures = "0.3.21" -goauth = "0.13.0" +futures = "0.3.23" +goauth = "0.13.1" http = "0.2.8" hyper = "0.14.20" hyper-proxy = "0.9.1" log = "0.4.17" prost = "0.11.0" prost-types = "0.11.1" -serde = "1.0.138" +serde = "1.0.143" serde_derive = "1.0.103" smpl_jwt = "0.7.1" solana-metrics = { path = "../metrics", version = "=1.12.0" } diff --git a/storage-bigtable/build-proto/Cargo.lock b/storage-bigtable/build-proto/Cargo.lock index f35a4ebbe3..110356f435 100644 --- a/storage-bigtable/build-proto/Cargo.lock +++ b/storage-bigtable/build-proto/Cargo.lock @@ -31,9 +31,9 @@ checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" [[package]] name = "bytes" -version = "1.0.1" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" +checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" [[package]] name = "cc" @@ -116,9 +116,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.126" +version = "0.2.131" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" +checksum = "04c3b4822ccebfa39c02fc03d1534441b22ead323fa0f48bb7ddd8e6ba076a40" [[package]] name = "log" diff --git a/storage-proto/Cargo.toml b/storage-proto/Cargo.toml index 7df7736f52..7c179ff0ae 100644 --- a/storage-proto/Cargo.toml +++ b/storage-proto/Cargo.toml @@ -13,7 +13,7 @@ edition = "2021" bincode = "1.3.3" bs58 = "0.4.0" prost = "0.11.0" -serde = "1.0.138" +serde = "1.0.143" solana-account-decoder = { path = "../account-decoder", version = "=1.12.0" } solana-sdk = { path = "../sdk", version = "=1.12.0" } solana-transaction-status = { path = "../transaction-status", version = "=1.12.0" } diff --git a/streamer/Cargo.toml b/streamer/Cargo.toml index 7aabcdd69a..7aa5c1f768 100644 --- a/streamer/Cargo.toml +++ b/streamer/Cargo.toml @@ -15,13 +15,16 @@ futures-util = "0.3.21" histogram = "0.6.9" indexmap = "1.9.1" itertools = "0.10.3" -libc = "0.2.126" +libc = "0.2.131" log = "0.4.17" -nix = "0.24.2" +nix = "0.25.0" pem = "1.0.2" percentage = "0.1.0" pkcs8 = { version = "0.8.0", features = ["alloc"] } -quinn = "0.8.3" +quinn = "0.8.4" +quinn-proto = "0.8.4" +quinn-udp = "0.1.3" + rand = "0.7.0" rcgen = "0.9.2" rustls = { version = "0.20.6", features = ["dangerous_configuration"] } diff --git a/streamer/src/nonblocking/quic.rs b/streamer/src/nonblocking/quic.rs index b0aefbef8d..05faa3f507 100644 --- a/streamer/src/nonblocking/quic.rs +++ b/streamer/src/nonblocking/quic.rs @@ -12,14 +12,16 @@ use { Connecting, Connection, Endpoint, EndpointConfig, Incoming, IncomingUniStreams, NewConnection, VarInt, }, + quinn_proto::VarIntBoundsExceeded, rand::{thread_rng, Rng}, solana_perf::packet::PacketBatch, solana_sdk::{ packet::{Packet, PACKET_DATA_SIZE}, pubkey::Pubkey, quic::{ - QUIC_CONNECTION_HANDSHAKE_TIMEOUT_MS, QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS, - QUIC_MIN_STAKED_CONCURRENT_STREAMS, + 
QUIC_CONNECTION_HANDSHAKE_TIMEOUT_MS, QUIC_MAX_STAKED_RECEIVE_WINDOW_RATIO, + QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS, QUIC_MIN_STAKED_CONCURRENT_STREAMS, + QUIC_MIN_STAKED_RECEIVE_WINDOW_RATIO, QUIC_UNSTAKED_RECEIVE_WINDOW_RATIO, }, signature::Keypair, timing, @@ -142,7 +144,7 @@ fn prune_unstaked_connection_table( fn get_connection_stake( connection: &Connection, staked_nodes: Arc<RwLock<StakedNodes>>, -) -> Option<(Pubkey, u64, u64)> { +) -> Option<(Pubkey, u64, u64, u64, u64)> { connection .peer_identity() .and_then(|der_cert_any| der_cert_any.downcast::<Vec<rustls::Certificate>>().ok()) @@ -152,10 +154,12 @@ fn get_connection_stake( let staked_nodes = staked_nodes.read().unwrap(); let total_stake = staked_nodes.total_stake; + let max_stake = staked_nodes.max_stake; + let min_stake = staked_nodes.min_stake; staked_nodes .pubkey_stake_map .get(&pubkey) - .map(|stake| (pubkey, *stake, total_stake)) + .map(|stake| (pubkey, *stake, total_stake, max_stake, min_stake)) }) }) } @@ -198,6 +202,8 @@ struct NewConnectionHandlerParams { total_stake: u64, max_connections_per_peer: usize, stats: Arc<StreamStats>, + max_stake: u64, + min_stake: u64, } impl NewConnectionHandlerParams { @@ -213,6 +219,8 @@ impl NewConnectionHandlerParams { total_stake: 0, max_connections_per_peer, stats, + max_stake: 0, + min_stake: 0, } } } @@ -236,16 +244,29 @@ fn handle_and_cache_new_connection( ) as u64) { connection.set_max_concurrent_uni_streams(max_uni_streams); - debug!( - "Peer type: {:?}, stake {}, total stake {}, max streams {}", + let receive_window = compute_receive_window( + params.max_stake, + params.min_stake, connection_table_l.peer_type, params.stake, - params.total_stake, - max_uni_streams.into_inner() ); + if let Ok(receive_window) = receive_window { + connection.set_receive_window(receive_window); + } + let remote_addr = connection.remote_address(); + debug!( + "Peer type: {:?}, stake {}, total stake {}, max streams {} receive_window {:?} from peer {}", + connection_table_l.peer_type, + params.stake, + params.total_stake, + max_uni_streams.into_inner(), + receive_window, + remote_addr, + ); + if let Some((last_update, stream_exit)) = connection_table_l.try_add_connection( ConnectionTableKey::new(remote_addr.ip(), params.remote_pubkey), remote_addr.port(), @@ -254,6 +275,7 @@ fn handle_and_cache_new_connection( timing::timestamp(), params.max_connections_per_peer, ) { + let peer_type = connection_table_l.peer_type; drop(connection_table_l); tokio::spawn(handle_connection( uni_streams, @@ -265,6 +287,7 @@ fn handle_and_cache_new_connection( stream_exit, params.stats.clone(), params.stake, + peer_type, )); Ok(()) } else { @@ -305,6 +328,50 @@ fn prune_unstaked_connections_and_add_new_connection( } } +/// Calculate the ratio for per connection receive window from a staked peer +fn compute_receive_window_ratio_for_staked_node(max_stake: u64, min_stake: u64, stake: u64) -> u64 { + // Testing shows the maximum throughput from a connection is achieved at receive_window = + // PACKET_DATA_SIZE * 10. Beyond that, there is not much gain. We linearly map the + // stake to the ratio range from QUIC_MIN_STAKED_RECEIVE_WINDOW_RATIO to + // QUIC_MAX_STAKED_RECEIVE_WINDOW_RATIO. The linear function mapping stake 's' to + // ratio 'r' is r(s) = a * s + b; given max_stake, min_stake, max_ratio, and + // min_ratio we can solve for a and b.
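// Worked example: with max_stake = 10_000, min_stake = 0, and the receive
// window ratio constants added in sdk/src/quic.rs (max_ratio = 10, min_ratio = 2),
//   a = (10 - 2) / 10_000 = 0.0008 and b = 10 - 10_000 * 0.0008 = 2,
// so r(5_000) = 0.0008 * 5_000 + 2 = 6, the midpoint of the two ratios,
// which is exactly what the test below asserts for a stake of max_stake / 2.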
+ + if stake > max_stake { + return QUIC_MAX_STAKED_RECEIVE_WINDOW_RATIO; + } + + let max_ratio = QUIC_MAX_STAKED_RECEIVE_WINDOW_RATIO; + let min_ratio = QUIC_MIN_STAKED_RECEIVE_WINDOW_RATIO; + if max_stake > min_stake { + let a = (max_ratio - min_ratio) as f64 / (max_stake - min_stake) as f64; + let b = max_ratio as f64 - ((max_stake as f64) * a); + let ratio = (a * stake as f64) + b; + ratio.round() as u64 + } else { + QUIC_MAX_STAKED_RECEIVE_WINDOW_RATIO + } +} + +fn compute_receive_window( + max_stake: u64, + min_stake: u64, + peer_type: ConnectionPeerType, + peer_stake: u64, +) -> Result<VarInt, VarIntBoundsExceeded> { + match peer_type { + ConnectionPeerType::Unstaked => { + VarInt::from_u64((PACKET_DATA_SIZE as u64 * QUIC_UNSTAKED_RECEIVE_WINDOW_RATIO) as u64) + } + ConnectionPeerType::Staked => { + let ratio = + compute_receive_window_ratio_for_staked_node(max_stake, min_stake, peer_stake); + VarInt::from_u64((PACKET_DATA_SIZE as u64 * ratio) as u64) + } + } +} + async fn setup_connection( connecting: Connecting, unstaked_connection_table: Arc<Mutex<ConnectionTable>>, @@ -333,13 +400,17 @@ async fn setup_connection( max_connections_per_peer, stats.clone(), ), - |(pubkey, stake, total_stake)| NewConnectionHandlerParams { - packet_sender, - remote_pubkey: Some(pubkey), - stake, - total_stake, - max_connections_per_peer, - stats: stats.clone(), + |(pubkey, stake, total_stake, max_stake, min_stake)| { + NewConnectionHandlerParams { + packet_sender, + remote_pubkey: Some(pubkey), + stake, + total_stake, + max_connections_per_peer, + stats: stats.clone(), + max_stake, + min_stake, + } }, ); @@ -409,6 +480,7 @@ async fn setup_connection( } } +#[allow(clippy::too_many_arguments)] async fn handle_connection( mut uni_streams: IncomingUniStreams, packet_sender: Sender<PacketBatch>, @@ -419,6 +491,7 @@ async fn handle_connection( stream_exit: Arc<AtomicBool>, stats: Arc<StreamStats>, stake: u64, + peer_type: ConnectionPeerType, ) { debug!( "quic new connection {} streams: {} connections: {}", @@ -458,6 +531,7 @@ async fn handle_connection( &packet_sender, stats.clone(), stake, + peer_type, ) { last_update.store(timing::timestamp(), Ordering::Relaxed); break; @@ -505,6 +579,7 @@ fn handle_chunk( packet_sender: &Sender<PacketBatch>, stats: Arc<StreamStats>, stake: u64, + peer_type: ConnectionPeerType, ) -> bool { match chunk { Ok(maybe_chunk) => { @@ -551,6 +626,18 @@ fn handle_chunk( .copy_from_slice(&chunk.bytes); batch[0].meta.size = std::cmp::max(batch[0].meta.size, end_of_chunk); stats.total_chunks_received.fetch_add(1, Ordering::Relaxed); + match peer_type { + ConnectionPeerType::Staked => { + stats + .total_staked_chunks_received + .fetch_add(1, Ordering::Relaxed); + } + ConnectionPeerType::Unstaked => { + stats + .total_unstaked_chunks_received + .fetch_add(1, Ordering::Relaxed); + } + } } } else { trace!("chunk is none"); @@ -1475,6 +1562,7 @@ pub mod test { } #[test] + fn test_max_allowed_uni_streams() { assert_eq!( compute_max_allowed_uni_streams(ConnectionPeerType::Unstaked, 0, 0), QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS ); @@ -1525,4 +1613,38 @@ pub mod test { QUIC_MAX_UNSTAKED_CONCURRENT_STREAMS ); } + + #[test] + fn test_calculate_receive_window_ratio_for_staked_node() { + let mut max_stake = 10000; + let mut min_stake = 0; + let ratio = compute_receive_window_ratio_for_staked_node(max_stake, min_stake, min_stake); + assert_eq!(ratio, QUIC_MIN_STAKED_RECEIVE_WINDOW_RATIO); + + let ratio = compute_receive_window_ratio_for_staked_node(max_stake, min_stake, max_stake); + let max_ratio = QUIC_MAX_STAKED_RECEIVE_WINDOW_RATIO; + assert_eq!(ratio, max_ratio); + + let ratio = + compute_receive_window_ratio_for_staked_node(max_stake, min_stake,
max_stake / 2); + let average_ratio = + (QUIC_MAX_STAKED_RECEIVE_WINDOW_RATIO + QUIC_MIN_STAKED_RECEIVE_WINDOW_RATIO) / 2; + assert_eq!(ratio, average_ratio); + + max_stake = 10000; + min_stake = 10000; + let ratio = compute_receive_window_ratio_for_staked_node(max_stake, min_stake, max_stake); + assert_eq!(ratio, max_ratio); + + max_stake = 0; + min_stake = 0; + let ratio = compute_receive_window_ratio_for_staked_node(max_stake, min_stake, max_stake); + assert_eq!(ratio, max_ratio); + + max_stake = 1000; + min_stake = 10; + let ratio = + compute_receive_window_ratio_for_staked_node(max_stake, min_stake, max_stake + 10); + assert_eq!(ratio, max_ratio); + } } diff --git a/streamer/src/quic.rs b/streamer/src/quic.rs index c350fc2430..39295fa82a 100644 --- a/streamer/src/quic.rs +++ b/streamer/src/quic.rs @@ -123,6 +123,8 @@ pub struct StreamStats { pub(crate) total_invalid_chunk_size: AtomicUsize, pub(crate) total_packets_allocated: AtomicUsize, pub(crate) total_chunks_received: AtomicUsize, + pub(crate) total_staked_chunks_received: AtomicUsize, + pub(crate) total_unstaked_chunks_received: AtomicUsize, pub(crate) total_packet_batch_send_err: AtomicUsize, pub(crate) total_packet_batches_sent: AtomicUsize, pub(crate) total_packet_batches_none: AtomicUsize, @@ -252,6 +254,17 @@ impl StreamStats { self.total_chunks_received.swap(0, Ordering::Relaxed), i64 ), + ( + "staked_chunks_received", + self.total_staked_chunks_received.swap(0, Ordering::Relaxed), + i64 + ), + ( + "unstaked_chunks_received", + self.total_unstaked_chunks_received + .swap(0, Ordering::Relaxed), + i64 + ), ( "packet_batch_send_error", self.total_packet_batch_send_err.swap(0, Ordering::Relaxed), @@ -310,11 +323,14 @@ pub fn spawn_server( stats, ) }?; - let handle = thread::spawn(move || { - if let Err(e) = runtime.block_on(task) { - warn!("error from runtime.block_on: {:?}", e); - } - }); + let handle = thread::Builder::new() + .name("solQuicServer".into()) + .spawn(move || { + if let Err(e) = runtime.block_on(task) { + warn!("error from runtime.block_on: {:?}", e); + } + }) + .unwrap(); Ok(handle) } diff --git a/streamer/src/streamer.rs b/streamer/src/streamer.rs index 4d8ee2d1c0..6269852433 100644 --- a/streamer/src/streamer.rs +++ b/streamer/src/streamer.rs @@ -28,6 +28,8 @@ use { #[derive(Default)] pub struct StakedNodes { pub total_stake: u64, + pub max_stake: u64, + pub min_stake: u64, pub ip_stake_map: HashMap, pub pubkey_stake_map: HashMap, } @@ -166,7 +168,7 @@ pub fn receiver( let res = socket.set_read_timeout(Some(Duration::new(1, 0))); assert!(res.is_ok(), "streamer::receiver set_read_timeout error"); Builder::new() - .name("solana-receiver".to_string()) + .name("solReceiver".to_string()) .spawn(move || { let _ = recv_loop( &socket, @@ -305,7 +307,7 @@ fn recv_send( let packets = packet_batch.iter().filter_map(|pkt| { let addr = pkt.meta.socket_addr(); let data = pkt.data(..)?; - socket_addr_space.check(&addr).then(|| (data, addr)) + socket_addr_space.check(&addr).then_some((data, addr)) }); batch_send(sock, &packets.collect::>())?; Ok(()) @@ -370,7 +372,7 @@ pub fn responder( stats_reporter_sender: Option>>, ) -> JoinHandle<()> { Builder::new() - .name(format!("solana-responder-{}", name)) + .name(format!("solRspndr{}", name)) .spawn(move || { let mut errors = 0; let mut last_error = None; @@ -475,7 +477,7 @@ mod test { let t_responder = { let (s_responder, r_responder) = unbounded(); let t_responder = responder( - "streamer_send_test", + "SendTest", Arc::new(send), r_responder, SocketAddrSpace::Unspecified, diff 
--git a/sys-tuner/Cargo.toml b/sys-tuner/Cargo.toml index 04f1f016c3..f1cc3c97e2 100644 --- a/sys-tuner/Cargo.toml +++ b/sys-tuner/Cargo.toml @@ -12,7 +12,7 @@ publish = true [dependencies] clap = "2.33.1" -libc = "0.2.126" +libc = "0.2.131" log = "0.4.17" solana-logger = { path = "../logger", version = "=1.12.0" } solana-version = { path = "../version", version = "=1.12.0" } @@ -20,7 +20,7 @@ solana-version = { path = "../version", version = "=1.12.0" } [target."cfg(unix)".dependencies] unix_socket2 = "0.5.4" users = "0.10.0" -nix = "0.24.2" +nix = "0.25.0" sysctl = "0.4.4" [lib] diff --git a/test-validator/Cargo.toml b/test-validator/Cargo.toml index 96274f120d..339ff46a1b 100644 --- a/test-validator/Cargo.toml +++ b/test-validator/Cargo.toml @@ -14,7 +14,7 @@ edition = "2021" base64 = "0.13.0" log = "0.4.17" serde_derive = "1.0.103" -serde_json = "1.0.81" +serde_json = "1.0.83" solana-cli-output = { path = "../cli-output", version = "=1.12.0" } solana-client = { path = "../client", version = "=1.12.0" } solana-core = { path = "../core", version = "=1.12.0" } diff --git a/test-validator/src/lib.rs b/test-validator/src/lib.rs index 73f4f70056..f2fb1f37a3 100644 --- a/test-validator/src/lib.rs +++ b/test-validator/src/lib.rs @@ -1,4 +1,5 @@ #![allow(clippy::integer_arithmetic)] + use { log::*, solana_cli_output::CliAccount, @@ -115,6 +116,7 @@ pub struct TestValidatorGenesis { pub validator_exit: Arc>, pub start_progress: Arc>, pub authorized_voter_keypairs: Arc>>>, + pub staked_nodes_overrides: Arc>>, pub max_ledger_shreds: Option, pub max_genesis_archive_unpacked_size: Option, pub geyser_plugin_config_files: Option>, @@ -122,6 +124,7 @@ pub struct TestValidatorGenesis { deactivate_feature_set: HashSet, compute_unit_limit: Option, pub log_messages_bytes_limit: Option, + pub transaction_account_lock_limit: Option, } impl Default for TestValidatorGenesis { @@ -144,6 +147,7 @@ impl Default for TestValidatorGenesis { validator_exit: Arc::>::default(), start_progress: Arc::>::default(), authorized_voter_keypairs: Arc::>>>::default(), + staked_nodes_overrides: Arc::new(RwLock::new(HashMap::new())), max_ledger_shreds: Option::::default(), max_genesis_archive_unpacked_size: Option::::default(), geyser_plugin_config_files: Option::>::default(), @@ -151,6 +155,7 @@ impl Default for TestValidatorGenesis { deactivate_feature_set: HashSet::::default(), compute_unit_limit: Option::::default(), log_messages_bytes_limit: Option::::default(), + transaction_account_lock_limit: Option::::default(), } } } @@ -279,7 +284,7 @@ impl TestValidatorGenesis { addresses: T, rpc_client: &RpcClient, skip_missing: bool, - ) -> &mut Self + ) -> Result<&mut Self, String> where T: IntoIterator, { @@ -291,20 +296,21 @@ impl TestValidatorGenesis { } else if skip_missing { warn!("Could not find {}, skipping.", address); } else { - error!("Failed to fetch {}: {}", address, res.unwrap_err()); - solana_core::validator::abort(); + return Err(format!("Failed to fetch {}: {}", address, res.unwrap_err())); } } - self + Ok(self) } - pub fn add_accounts_from_json_files(&mut self, accounts: &[AccountInfo]) -> &mut Self { + pub fn add_accounts_from_json_files( + &mut self, + accounts: &[AccountInfo], + ) -> Result<&mut Self, String> { for account in accounts { - let account_path = - solana_program_test::find_file(account.filename).unwrap_or_else(|| { - error!("Unable to locate {}", account.filename); - solana_core::validator::abort(); - }); + let account_path = match solana_program_test::find_file(account.filename) { + Some(path) => 
path, + None => return Err(format!("Unable to locate {}", account.filename)), + }; let mut file = File::open(&account_path).unwrap(); let mut account_info_raw = String::new(); file.read_to_string(&mut account_info_raw).unwrap(); @@ -312,12 +318,11 @@ impl TestValidatorGenesis { let result: serde_json::Result = serde_json::from_str(&account_info_raw); let account_info = match result { Err(err) => { - error!( + return Err(format!( "Unable to deserialize {}: {}", account_path.to_str().unwrap(), err - ); - solana_core::validator::abort(); + )); } Ok(deserialized) => deserialized, }; @@ -333,25 +338,24 @@ impl TestValidatorGenesis { self.add_account(address, account); } - self + Ok(self) } - pub fn add_accounts_from_directories(&mut self, dirs: T) -> &mut Self + pub fn add_accounts_from_directories(&mut self, dirs: T) -> Result<&mut Self, String> where T: IntoIterator, P: AsRef + Display, { let mut json_files: HashSet = HashSet::new(); for dir in dirs { - let matched_files = fs::read_dir(&dir) - .unwrap_or_else(|err| { - error!("Cannot read directory {}: {}", dir, err); - solana_core::validator::abort(); - }) - .flatten() - .map(|entry| entry.path()) - .filter(|path| path.is_file() && path.extension() == Some(OsStr::new("json"))) - .map(|path| String::from(path.to_string_lossy())); + let matched_files = match fs::read_dir(&dir) { + Ok(dir) => dir, + Err(e) => return Err(format!("Cannot read directory {}: {}", &dir, e)), + } + .flatten() + .map(|entry| entry.path()) + .filter(|path| path.is_file() && path.extension() == Some(OsStr::new("json"))) + .map(|path| String::from(path.to_string_lossy())); json_files.extend(matched_files); } @@ -366,9 +370,9 @@ impl TestValidatorGenesis { }) .collect(); - self.add_accounts_from_json_files(&accounts); + self.add_accounts_from_json_files(&accounts)?; - self + Ok(self) } /// Add an account to the test environment with the account data in the provided `filename` @@ -754,6 +758,7 @@ impl TestValidator { ..ComputeBudget::default() }), log_messages_bytes_limit: config.log_messages_bytes_limit, + transaction_account_lock_limit: config.transaction_account_lock_limit, }; let mut validator_config = ValidatorConfig { @@ -785,6 +790,7 @@ impl TestValidator { rocksdb_compaction_interval: Some(100), // Compact every 100 slots max_ledger_shreds: config.max_ledger_shreds, no_wait_for_vote_to_start_leader: true, + staked_nodes_overrides: config.staked_nodes_overrides.clone(), accounts_db_config, runtime_config, ..ValidatorConfig::default_for_test() @@ -806,7 +812,7 @@ impl TestValidator { socket_addr_space, DEFAULT_TPU_USE_QUIC, DEFAULT_TPU_CONNECTION_POOL_SIZE, - )); + )?); // Needed to avoid panics in `solana-responder-gossip` in tests that create a number of // test validators concurrently... 
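The test-validator hunks above replace process-aborting error handling (`error!(...)` followed by `solana_core::validator::abort()`) with methods returning `Result<&mut Self, String>`, so a failed account fetch or file load propagates to the caller instead of killing the process. A small self-contained sketch of that pattern with a hypothetical stand-in type (the real signatures, such as `add_accounts_from_json_files(&mut self, ...) -> Result<&mut Self, String>`, appear in the diff above):

// Sketch of the abort-to-Result migration, with a stand-in `Genesis` type.
struct Genesis {
    accounts: Vec<String>,
}

impl Genesis {
    // Before this change the equivalent method logged and aborted the whole
    // process on failure; returning Result lets the caller use `?` instead.
    fn add_accounts_from_files(&mut self, files: &[&str]) -> Result<&mut Self, String> {
        for file in files {
            if !file.ends_with(".json") {
                return Err(format!("Unable to locate {}", file));
            }
            self.accounts.push(file.to_string());
        }
        Ok(self)
    }
}

fn build() -> Result<Genesis, String> {
    let mut genesis = Genesis { accounts: Vec::new() };
    genesis.add_accounts_from_files(&["alice.json", "bob.json"])?;
    Ok(genesis)
}

fn main() {
    match build() {
        Ok(genesis) => println!("loaded {} accounts", genesis.accounts.len()),
        Err(err) => eprintln!("test validator genesis failed: {}", err),
    }
}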
diff --git a/tokens/src/commands.rs b/tokens/src/commands.rs index f7ce05aa47..7447704864 100644 --- a/tokens/src/commands.rs +++ b/tokens/src/commands.rs @@ -833,7 +833,11 @@ fn check_payer_balances( Ok(()) } -pub fn process_balances(client: &RpcClient, args: &BalancesArgs) -> Result<(), Error> { +pub fn process_balances( + client: &RpcClient, + args: &BalancesArgs, + exit: Arc, +) -> Result<(), Error> { let allocations: Vec = read_allocations(&args.input_csv, None, false, args.spl_token_args.is_some())?; let allocations = merge_allocations(&allocations); @@ -855,6 +859,10 @@ pub fn process_balances(client: &RpcClient, args: &BalancesArgs) -> Result<(), E ); for allocation in &allocations { + if exit.load(Ordering::SeqCst) { + return Err(Error::ExitSignal); + } + if let Some(spl_token_args) = &args.spl_token_args { print_token_balances(client, allocation, spl_token_args)?; } else { diff --git a/tokens/src/main.rs b/tokens/src/main.rs index bbd8c0e435..1bf81b01fe 100644 --- a/tokens/src/main.rs +++ b/tokens/src/main.rs @@ -44,7 +44,7 @@ fn main() -> Result<(), Box> { } Command::Balances(mut args) => { spl_token::update_decimals(&client, &mut args.spl_token_args)?; - commands::process_balances(&client, &args)?; + commands::process_balances(&client, &args, exit)?; } Command::TransactionLog(args) => { commands::process_transaction_log(&args)?; diff --git a/transaction-status/Cargo.toml b/transaction-status/Cargo.toml index f16fc76112..9d59696e00 100644 --- a/transaction-status/Cargo.toml +++ b/transaction-status/Cargo.toml @@ -17,9 +17,9 @@ borsh = "0.9.1" bs58 = "0.4.0" lazy_static = "1.4.0" log = "0.4.17" -serde = "1.0.138" +serde = "1.0.143" serde_derive = "1.0.103" -serde_json = "1.0.81" +serde_json = "1.0.83" solana-account-decoder = { path = "../account-decoder", version = "=1.12.0" } solana-measure = { path = "../measure", version = "=1.12.0" } solana-metrics = { path = "../metrics", version = "=1.12.0" } @@ -28,7 +28,7 @@ solana-vote-program = { path = "../programs/vote", version = "=1.12.0" } spl-associated-token-account = { version = "=1.1.1", features = ["no-entrypoint"] } spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] } spl-token = { version = "=3.5.0", features = ["no-entrypoint"] } -spl-token-2022 = { version = "=0.4.2", features = ["no-entrypoint"] } +spl-token-2022 = { version = "=0.4.3", features = ["no-entrypoint"] } thiserror = "1.0" [package.metadata.docs.rs] diff --git a/transaction-status/src/lib.rs b/transaction-status/src/lib.rs index 4a677606fc..9bf5eb9ffa 100644 --- a/transaction-status/src/lib.rs +++ b/transaction-status/src/lib.rs @@ -599,7 +599,7 @@ impl From for EncodedConfirmedBlock { } } -#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] #[serde(rename_all = "camelCase")] pub struct UiConfirmedBlock { pub previous_blockhash: String, diff --git a/transaction-status/src/parse_stake.rs b/transaction-status/src/parse_stake.rs index dfa3e9608a..d7da25b173 100644 --- a/transaction-status/src/parse_stake.rs +++ b/transaction-status/src/parse_stake.rs @@ -276,7 +276,7 @@ pub fn parse_stake( StakeInstruction::DeactivateDelinquent => { check_num_stake_accounts(&instruction.accounts, 3)?; Ok(ParsedInstructionEnum { - instruction_type: "deactivateDeactive".to_string(), + instruction_type: "deactivateDelinquent".to_string(), info: json!({ "stakeAccount": account_keys[instruction.accounts[0] as usize].to_string(), "voteAccount": account_keys[instruction.accounts[1] as usize].to_string(), diff --git 
a/transaction-status/src/parse_token.rs b/transaction-status/src/parse_token.rs index 3c43cfbf42..f820883c74 100644 --- a/transaction-status/src/parse_token.rs +++ b/transaction-status/src/parse_token.rs @@ -3,8 +3,8 @@ use { check_num_accounts, ParsableProgram, ParseInstructionError, ParsedInstructionEnum, }, extension::{ - default_account_state::*, interest_bearing_mint::*, memo_transfer::*, - mint_close_authority::*, reallocate::*, transfer_fee::*, + confidential_transfer::*, default_account_state::*, interest_bearing_mint::*, + memo_transfer::*, mint_close_authority::*, reallocate::*, transfer_fee::*, }, serde_json::{json, Map, Value}, solana_account_decoder::parse_token::{ @@ -510,8 +510,10 @@ pub fn parse_token( account_keys, ) } - TokenInstruction::ConfidentialTransferExtension => Err( - ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken), + TokenInstruction::ConfidentialTransferExtension => parse_confidential_transfer_instruction( + &instruction.data[1..], + &instruction.accounts, + account_keys, ), TokenInstruction::DefaultAccountStateExtension => { if instruction.data.len() <= 2 { diff --git a/transaction-status/src/parse_token/extension/confidential_transfer.rs b/transaction-status/src/parse_token/extension/confidential_transfer.rs new file mode 100644 index 0000000000..867f90e97b --- /dev/null +++ b/transaction-status/src/parse_token/extension/confidential_transfer.rs @@ -0,0 +1,399 @@ +use { + super::*, + solana_account_decoder::parse_token_extension::UiConfidentialTransferMint, + spl_token_2022::{ + extension::confidential_transfer::{instruction::*, ConfidentialTransferMint}, + instruction::{decode_instruction_data, decode_instruction_type}, + }, +}; + +pub(in crate::parse_token) fn parse_confidential_transfer_instruction( + instruction_data: &[u8], + account_indexes: &[u8], + account_keys: &AccountKeys, +) -> Result { + match decode_instruction_type(instruction_data) + .map_err(|_| ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken))? 
+ { + ConfidentialTransferInstruction::InitializeMint => { + check_num_token_accounts(account_indexes, 1)?; + let confidential_transfer_mint: ConfidentialTransferMint = + *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let confidential_transfer_mint: UiConfidentialTransferMint = + confidential_transfer_mint.into(); + let mut value = json!({ + "mint": account_keys[account_indexes[0] as usize].to_string(), + }); + let map = value.as_object_mut().unwrap(); + map.append(json!(confidential_transfer_mint).as_object_mut().unwrap()); + Ok(ParsedInstructionEnum { + instruction_type: "initializeConfidentialTransferMint".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::UpdateMint => { + check_num_token_accounts(account_indexes, 3)?; + let confidential_transfer_mint: ConfidentialTransferMint = + *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let confidential_transfer_mint: UiConfidentialTransferMint = + confidential_transfer_mint.into(); + let mut value = json!({ + "mint": account_keys[account_indexes[0] as usize].to_string(), + "confidentialTransferMintAuthority": account_keys[account_indexes[1] as usize].to_string(), + "newConfidentialTransferMintAuthority": account_keys[account_indexes[2] as usize].to_string(), + }); + let map = value.as_object_mut().unwrap(); + map.append(json!(confidential_transfer_mint).as_object_mut().unwrap()); + Ok(ParsedInstructionEnum { + instruction_type: "updateConfidentialTransferMint".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::ConfigureAccount => { + check_num_token_accounts(account_indexes, 3)?; + let configure_account_data: ConfigureAccountInstructionData = + *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let maximum_pending_balance_credit_counter: u64 = configure_account_data + .maximum_pending_balance_credit_counter + .into(); + let mut value = json!({ + "account": account_keys[account_indexes[0] as usize].to_string(), + "mint": account_keys[account_indexes[1] as usize].to_string(), + "encryptionPubkey": format!("{}", configure_account_data.encryption_pubkey), + "decryptableZeroBalance": format!("{}", configure_account_data.decryptable_zero_balance), + "maximumPendingBalanceCreditCounter": maximum_pending_balance_credit_counter, + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 2, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: "configureConfidentialTransferAccount".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::ApproveAccount => { + check_num_token_accounts(account_indexes, 3)?; + Ok(ParsedInstructionEnum { + instruction_type: "approveConfidentialTransferAccount".to_string(), + info: json!({ + "account": account_keys[account_indexes[0] as usize].to_string(), + "mint": account_keys[account_indexes[1] as usize].to_string(), + "confidentialTransferAuditorAuthority": account_keys[account_indexes[2] as usize].to_string(), + }), + }) + } + ConfidentialTransferInstruction::EmptyAccount => { + check_num_token_accounts(account_indexes, 3)?; + let empty_account_data: EmptyAccountInstructionData = + *decode_instruction_data(instruction_data).map_err(|_| { + 
ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let proof_instruction_offset: i8 = empty_account_data.proof_instruction_offset; + let mut value = json!({ + "account": account_keys[account_indexes[0] as usize].to_string(), + "instructionsSysvar": account_keys[account_indexes[1] as usize].to_string(), + "proofInstructionOffset": proof_instruction_offset, + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 2, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: "emptyConfidentialTransferAccount".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::Deposit => { + check_num_token_accounts(account_indexes, 4)?; + let deposit_data: DepositInstructionData = *decode_instruction_data(instruction_data) + .map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let amount: u64 = deposit_data.amount.into(); + let mut value = json!({ + "source": account_keys[account_indexes[0] as usize].to_string(), + "destination": account_keys[account_indexes[1] as usize].to_string(), + "mint": account_keys[account_indexes[2] as usize].to_string(), + "amount": amount, + "decimals": deposit_data.decimals, + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 3, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: "depositConfidentialTransfer".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::Withdraw => { + check_num_token_accounts(account_indexes, 5)?; + let withdrawal_data: WithdrawInstructionData = + *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let amount: u64 = withdrawal_data.amount.into(); + let proof_instruction_offset: i8 = withdrawal_data.proof_instruction_offset; + let mut value = json!({ + "source": account_keys[account_indexes[0] as usize].to_string(), + "destination": account_keys[account_indexes[1] as usize].to_string(), + "mint": account_keys[account_indexes[2] as usize].to_string(), + "instructionsSysvar": account_keys[account_indexes[3] as usize].to_string(), + "amount": amount, + "decimals": withdrawal_data.decimals, + "newDecryptableAvailableBalance": format!("{}", withdrawal_data.new_decryptable_available_balance), + "proofInstructionOffset": proof_instruction_offset, + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 4, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: "withdrawConfidentialTransfer".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::Transfer => { + check_num_token_accounts(account_indexes, 5)?; + let transfer_data: TransferInstructionData = *decode_instruction_data(instruction_data) + .map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let proof_instruction_offset: i8 = transfer_data.proof_instruction_offset; + let mut value = json!({ + "source": account_keys[account_indexes[0] as usize].to_string(), + "destination": account_keys[account_indexes[1] as usize].to_string(), + "mint": account_keys[account_indexes[2] as usize].to_string(), + "instructionsSysvar": account_keys[account_indexes[3] as usize].to_string(), + "newSourceDecryptableAvailableBalance": format!("{}", transfer_data.new_source_decryptable_available_balance), + 
"proofInstructionOffset": proof_instruction_offset, + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 4, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: "confidentialTransfer".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::TransferWithFee => { + check_num_token_accounts(account_indexes, 5)?; + let transfer_data: TransferInstructionData = *decode_instruction_data(instruction_data) + .map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let proof_instruction_offset: i8 = transfer_data.proof_instruction_offset; + let mut value = json!({ + "source": account_keys[account_indexes[0] as usize].to_string(), + "destination": account_keys[account_indexes[1] as usize].to_string(), + "mint": account_keys[account_indexes[2] as usize].to_string(), + "instructionsSysvar": account_keys[account_indexes[3] as usize].to_string(), + "newSourceDecryptableAvailableBalance": format!("{}", transfer_data.new_source_decryptable_available_balance), + "proofInstructionOffset": proof_instruction_offset, + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 4, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: "confidentialTransferWithFee".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::ApplyPendingBalance => { + check_num_token_accounts(account_indexes, 2)?; + let apply_pending_balance_data: ApplyPendingBalanceData = + *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let expected_pending_balance_credit_counter: u64 = apply_pending_balance_data + .expected_pending_balance_credit_counter + .into(); + let mut value = json!({ + "account": account_keys[account_indexes[0] as usize].to_string(), + "newDecryptableAvailableBalance": format!("{}", apply_pending_balance_data.new_decryptable_available_balance), + "expectedPendingBalanceCreditCounter": expected_pending_balance_credit_counter, + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 1, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: "applyPendingConfidentialTransferBalance".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::EnableBalanceCredits => { + check_num_token_accounts(account_indexes, 2)?; + let mut value = json!({ + "account": account_keys[account_indexes[0] as usize].to_string(), + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 1, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: "enableConfidentialTransferBalanceCredits".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::DisableBalanceCredits => { + check_num_token_accounts(account_indexes, 2)?; + let mut value = json!({ + "account": account_keys[account_indexes[0] as usize].to_string(), + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 1, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: "disableConfidentialTransferBalanceCredits".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::WithdrawWithheldTokensFromMint => { + check_num_token_accounts(account_indexes, 4)?; + let 
withdraw_withheld_data: WithdrawWithheldTokensFromMintData =
+                *decode_instruction_data(instruction_data).map_err(|_| {
+                    ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken)
+                })?;
+            let proof_instruction_offset: i8 = withdraw_withheld_data.proof_instruction_offset;
+            let mut value = json!({
+                "mint": account_keys[account_indexes[0] as usize].to_string(),
+                "feeRecipient": account_keys[account_indexes[1] as usize].to_string(),
+                "instructionsSysvar": account_keys[account_indexes[2] as usize].to_string(),
+                "proofInstructionOffset": proof_instruction_offset,
+
+            });
+            let map = value.as_object_mut().unwrap();
+            parse_signers(
+                map,
+                3,
+                account_keys,
+                account_indexes,
+                "withdrawWithheldAuthority",
+                "multisigWithdrawWithheldAuthority",
+            );
+            Ok(ParsedInstructionEnum {
+                instruction_type: "withdrawWithheldConfidentialTransferTokensFromMint".to_string(),
+                info: value,
+            })
+        }
+        ConfidentialTransferInstruction::WithdrawWithheldTokensFromAccounts => {
+            let withdraw_withheld_data: WithdrawWithheldTokensFromAccountsData =
+                *decode_instruction_data(instruction_data).map_err(|_| {
+                    ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken)
+                })?;
+            let num_token_accounts = withdraw_withheld_data.num_token_accounts;
+            check_num_token_accounts(account_indexes, 4 + num_token_accounts as usize)?;
+            let proof_instruction_offset: i8 = withdraw_withheld_data.proof_instruction_offset;
+            let mut value = json!({
+                "mint": account_keys[account_indexes[0] as usize].to_string(),
+                "feeRecipient": account_keys[account_indexes[1] as usize].to_string(),
+                "instructionsSysvar": account_keys[account_indexes[2] as usize].to_string(),
+                "proofInstructionOffset": proof_instruction_offset,
+            });
+            let map = value.as_object_mut().unwrap();
+            let mut source_accounts: Vec<String> = vec![];
+            let first_source_account_index = account_indexes
+                .len()
+                .saturating_sub(num_token_accounts as usize);
+            for i in account_indexes[first_source_account_index..].iter() {
+                source_accounts.push(account_keys[*i as usize].to_string());
+            }
+            map.insert("sourceAccounts".to_string(), json!(source_accounts));
+            parse_signers(
+                map,
+                3,
+                account_keys,
+                &account_indexes[..first_source_account_index],
+                "withdrawWithheldAuthority",
+                "multisigWithdrawWithheldAuthority",
+            );
+            Ok(ParsedInstructionEnum {
+                instruction_type: "withdrawWithheldConfidentialTransferTokensFromAccounts"
+                    .to_string(),
+                info: value,
+            })
+        }
+        ConfidentialTransferInstruction::HarvestWithheldTokensToMint => {
+            check_num_token_accounts(account_indexes, 1)?;
+            let mut value = json!({
+                "mint": account_keys[account_indexes[0] as usize].to_string(),
+
+            });
+            let map = value.as_object_mut().unwrap();
+            let mut source_accounts: Vec<String> = vec![];
+            for i in account_indexes.iter().skip(1) {
+                source_accounts.push(account_keys[*i as usize].to_string());
+            }
+            map.insert("sourceAccounts".to_string(), json!(source_accounts));
+            Ok(ParsedInstructionEnum {
+                instruction_type: "harvestWithheldConfidentialTransferTokensToMint".to_string(),
+                info: value,
+            })
+        }
+    }
+}
diff --git a/transaction-status/src/parse_token/extension/mod.rs b/transaction-status/src/parse_token/extension/mod.rs
index 3c84942651..f5d8e41f4a 100644
--- a/transaction-status/src/parse_token/extension/mod.rs
+++ b/transaction-status/src/parse_token/extension/mod.rs
@@ -1,5 +1,6 @@
 use super::*;
 
+pub(super) mod confidential_transfer;
 pub(super) mod default_account_state;
 pub(super) mod interest_bearing_mint;
 pub(super) mod memo_transfer;
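Note on the parser above: each match arm produces a ParsedInstructionEnum whose `info` is a JSON map assembled with json!() and, for owner-signed instructions, extended by parse_signers(). A minimal sketch of the output shape for the HarvestWithheldTokensToMint arm, assuming a single harvested source account; the pubkey strings are placeholders, not values from this patch:

    // Hypothetical illustration only; the keys mirror the json! blocks above.
    use serde_json::json;
    let expected_info = json!({
        "mint": "<base58 mint pubkey>",
        "sourceAccounts": ["<base58 source account pubkey>"],
    });
    // The accompanying instruction_type would be
    // "harvestWithheldConfidentialTransferTokensToMint".
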
diff --git a/upload-perf/Cargo.toml b/upload-perf/Cargo.toml
index 9b08199287..10a53e7186 100644
--- a/upload-perf/Cargo.toml
+++ b/upload-perf/Cargo.toml
@@ -10,7 +10,7 @@ homepage = "https://solana.com/"
 publish = false
 
 [dependencies]
-serde_json = "1.0.81"
+serde_json = "1.0.83"
 solana-metrics = { path = "../metrics", version = "=1.12.0" }
 
 [[bin]]
diff --git a/validator/Cargo.toml b/validator/Cargo.toml
index 601d8a0342..3d1b79478d 100644
--- a/validator/Cargo.toml
+++ b/validator/Cargo.toml
@@ -11,7 +11,7 @@ documentation = "https://docs.rs/solana-validator"
 default-run = "solana-validator"
 
 [dependencies]
-chrono = { version = "0.4.11", features = ["serde"] }
+chrono = { version = "0.4.21", features = ["serde"] }
 clap = "2.33.1"
 console = "0.15.0"
 core_affinity = "0.5.10"
@@ -26,8 +26,10 @@ jsonrpc-server-utils = "18.0.0"
 log = "0.4.17"
 num_cpus = "1.13.1"
 rand = "0.7.0"
-serde = "1.0.138"
-serde_json = "1.0.81"
+rayon = "1.5.3"
+serde = "1.0.143"
+serde_json = "1.0.83"
+serde_yaml = "0.8.26"
 solana-clap-utils = { path = "../clap-utils", version = "=1.12.0" }
 solana-cli-config = { path = "../cli-config", version = "=1.12.0" }
 solana-client = { path = "../client", version = "=1.12.0" }
@@ -58,7 +60,7 @@ symlink = "0.1.0"
 jemallocator = { package = "tikv-jemallocator", version = "0.4.1", features = ["unprefixed_malloc_on_supported_platforms"] }
 
 [target."cfg(unix)".dependencies]
-libc = "0.2.126"
+libc = "0.2.131"
 signal-hook = "0.3.14"
 
 [package.metadata.docs.rs]
diff --git a/validator/src/admin_rpc_service.rs b/validator/src/admin_rpc_service.rs
index 2c32cc8e24..75a3971c7c 100644
--- a/validator/src/admin_rpc_service.rs
+++ b/validator/src/admin_rpc_service.rs
@@ -5,7 +5,7 @@ use {
     jsonrpc_ipc_server::{RequestContext, ServerBuilder},
     jsonrpc_server_utils::tokio,
     log::*,
-    serde::{Deserialize, Serialize},
+    serde::{de::Deserializer, Deserialize, Serialize},
     solana_core::{
         consensus::Tower, tower_storage::TowerStorage, validator::ValidatorStartProgress,
     },
@@ -17,6 +17,8 @@ use {
         signature::{read_keypair_file, Keypair, Signer},
     },
     std::{
+        collections::HashMap,
+        error,
         fmt::{self, Display},
         net::SocketAddr,
         path::{Path, PathBuf},
@@ -41,6 +43,7 @@ pub struct AdminRpcRequestMetadata {
     pub validator_exit: Arc<RwLock<Exit>>,
     pub authorized_voter_keypairs: Arc<RwLock<Vec<Arc<Keypair>>>>,
     pub tower_storage: Arc<dyn TowerStorage>,
+    pub staked_nodes_overrides: Arc<RwLock<HashMap<Pubkey, u64>>>,
     pub post_init: Arc<RwLock<Option<AdminRpcRequestMetadataPostInit>>>,
 }
 impl Metadata for AdminRpcRequestMetadata {}
@@ -175,6 +178,9 @@ pub trait AdminRpc {
         require_tower: bool,
     ) -> Result<()>;
 
+    #[rpc(meta, name = "setStakedNodesOverrides")]
+    fn set_staked_nodes_overrides(&self, meta: Self::Metadata, path: String) -> Result<()>;
+
     #[rpc(meta, name = "contactInfo")]
     fn contact_info(&self, meta: Self::Metadata) -> Result<AdminRpcContactInfo>;
 }
@@ -186,22 +192,25 @@ impl AdminRpc for AdminRpcImpl {
     fn exit(&self, meta: Self::Metadata) -> Result<()> {
         debug!("exit admin rpc request received");
 
-        thread::spawn(move || {
-            // Delay exit signal until this RPC request completes, otherwise the caller of `exit` might
-            // receive a confusing error as the validator shuts down before a response is sent back.
-            thread::sleep(Duration::from_millis(100));
-
-            warn!("validator exit requested");
-            meta.validator_exit.write().unwrap().exit();
-
-            // TODO: Debug why Exit doesn't always cause the validator to fully exit
-            // (rocksdb background processing or some other stuck thread perhaps?).
-            //
-            // If the process is still alive after five seconds, exit harder
-            thread::sleep(Duration::from_secs(5));
-            warn!("validator exit timeout");
-            std::process::exit(0);
-        });
+        thread::Builder::new()
+            .name("solProcessExit".into())
+            .spawn(move || {
+                // Delay exit signal until this RPC request completes, otherwise the caller of `exit` might
+                // receive a confusing error as the validator shuts down before a response is sent back.
+                thread::sleep(Duration::from_millis(100));
+
+                warn!("validator exit requested");
+                meta.validator_exit.write().unwrap().exit();
+
+                // TODO: Debug why Exit doesn't always cause the validator to fully exit
+                // (rocksdb background processing or some other stuck thread perhaps?).
+                //
+                // If the process is still alive after five seconds, exit harder
+                thread::sleep(Duration::from_secs(5));
+                warn!("validator exit timeout");
+                std::process::exit(0);
+            })
+            .unwrap();
         Ok(())
     }
 
@@ -294,6 +303,24 @@ impl AdminRpc for AdminRpcImpl {
         AdminRpcImpl::set_identity_keypair(meta, identity_keypair, require_tower)
     }
 
+    fn set_staked_nodes_overrides(&self, meta: Self::Metadata, path: String) -> Result<()> {
+        let loaded_config = load_staked_nodes_overrides(&path)
+            .map_err(|err| {
+                error!(
+                    "Failed to load staked nodes overrides from {}: {}",
+                    &path, err
+                );
+                jsonrpc_core::error::Error::internal_error()
+            })?
+            .staked_map_id;
+        let mut write_staked_nodes = meta.staked_nodes_overrides.write().unwrap();
+        write_staked_nodes.clear();
+        write_staked_nodes.extend(loaded_config.into_iter());
+        info!("Staked nodes overrides loaded from {}", path);
+        debug!("overrides map: {:?}", write_staked_nodes);
+        Ok(())
+    }
+
     fn contact_info(&self, meta: Self::Metadata) -> Result<AdminRpcContactInfo> {
         meta.with_post_init(|post_init| Ok(post_init.cluster_info.my_contact_info().into()))
     }
@@ -351,14 +378,14 @@ pub fn run(ledger_path: &Path, metadata: AdminRpcRequestMetadata) {
     let admin_rpc_path = admin_rpc_path(ledger_path);
 
     let event_loop = tokio::runtime::Builder::new_multi_thread()
-        .thread_name("sol-adminrpc-el")
+        .thread_name("solAdminRpcEl")
        .worker_threads(3) // Three still seems like a lot, and better than the default of available core count
        .enable_all()
        .build()
        .unwrap();
 
     Builder::new()
-        .name("solana-adminrpc".to_string())
+        .name("solAdminRpc".to_string())
         .spawn(move || {
             let mut io = MetaIoHandler::default();
             io.extend_with(AdminRpcImpl.to_delegate());
@@ -426,3 +453,39 @@ pub async fn connect(ledger_path: &Path) -> std::result::Result<gen_client::Client, RpcError>
 pub fn runtime() -> jsonrpc_server_utils::tokio::runtime::Runtime {
     jsonrpc_server_utils::tokio::runtime::Runtime::new().expect("new tokio runtime")
 }
+
+#[derive(Default, Deserialize, Clone)]
+pub struct StakedNodesOverrides {
+    #[serde(deserialize_with = "deserialize_pubkey_map")]
+    pub staked_map_id: HashMap<Pubkey, u64>,
+}
+
+pub fn deserialize_pubkey_map<'de, D>(des: D) -> std::result::Result<HashMap<Pubkey, u64>, D::Error>
+where
+    D: Deserializer<'de>,
+{
+    let container: HashMap<String, u64> = serde::Deserialize::deserialize(des)?;
+    let mut container_typed: HashMap<Pubkey, u64> = HashMap::new();
+    for (key, value) in container.iter() {
+        let typed_key = Pubkey::try_from(key.as_str())
+            .map_err(|_| serde::de::Error::invalid_type(serde::de::Unexpected::Map, &"PubKey"))?;
+        container_typed.insert(typed_key, *value);
+    }
+    Ok(container_typed)
+}
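The deserializer above accepts a mapping of base58 pubkey strings to u64 stake amounts and produces a HashMap<Pubkey, u64>. A minimal sketch of a file that load_staked_nodes_overrides (next hunk) would parse, assuming the stake unit described by the --staked-nodes-overrides help text added in validator/src/main.rs below; the pubkey is a placeholder, not a value from this patch:

    # hypothetical overrides.yml
    staked_map_id:
        7cVfgArCheMR6Cs4t6vz5rfnqd56vZq4ndaBrY5xkxXy: 5000000000
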
+
+pub fn load_staked_nodes_overrides(
+    path: &String,
+) -> std::result::Result<StakedNodesOverrides, Box<dyn error::Error>> {
+    debug!("Loading staked nodes overrides configuration from {}", path);
+    if Path::new(&path).exists() {
+        let file = std::fs::File::open(path)?;
+        Ok(serde_yaml::from_reader(file)?)
+    } else {
+        Err(format!(
+            "Staked nodes overrides path '{}' does not exist.",
+            path
+        )
+        .into())
+    }
+}
diff --git a/validator/src/bin/solana-test-validator.rs b/validator/src/bin/solana-test-validator.rs
index 84f032e969..46092b651c 100644
--- a/validator/src/bin/solana-test-validator.rs
+++ b/validator/src/bin/solana-test-validator.rs
@@ -193,20 +193,20 @@ fn main() {
         .arg(
             Arg::with_name("bpf_program")
                 .long("bpf-program")
-                .value_name("ADDRESS_OR_PATH BPF_PROGRAM.SO")
+                .value_names(&["ADDRESS_OR_KEYPAIR", "BPF_PROGRAM.SO"])
                 .takes_value(true)
                 .number_of_values(2)
                 .multiple(true)
                 .help(
                     "Add a BPF program to the genesis configuration. \
                      If the ledger already exists then this parameter is silently ignored. \
-                     First argument can be a public key or path to file that can be parsed as a keypair",
+                     First argument can be a pubkey string or path to a keypair",
                 ),
         )
         .arg(
             Arg::with_name("account")
                 .long("account")
-                .value_name("ADDRESS FILENAME.JSON")
+                .value_names(&["ADDRESS", "DUMP.JSON"])
                 .takes_value(true)
                 .number_of_values(2)
                 .allow_hyphen_values(true)
@@ -420,6 +420,14 @@ fn main() {
                 .takes_value(true)
                 .help("Maximum number of bytes written to the program log before truncation")
         )
+        .arg(
+            Arg::with_name("transaction_account_lock_limit")
+                .long("transaction-account-lock-limit")
+                .value_name("NUM_ACCOUNTS")
+                .validator(is_parsable::<usize>)
+                .takes_value(true)
+                .help("Override the runtime's account lock limit per transaction")
+        )
         .get_matches();
 
     let output = if matches.is_present("quiet") {
@@ -687,6 +695,8 @@ fn main() {
     genesis.max_genesis_archive_unpacked_size = Some(u64::MAX);
     genesis.accounts_db_caching_enabled = !matches.is_present("no_accounts_db_caching");
     genesis.log_messages_bytes_limit = value_t!(matches, "log_messages_bytes_limit", usize).ok();
+    genesis.transaction_account_lock_limit =
+        value_t!(matches, "transaction_account_lock_limit", usize).ok();
 
     let tower_storage = Arc::new(FileTowerStorage::new(ledger_path.clone()));
 
@@ -702,6 +712,7 @@ fn main() {
             start_time: std::time::SystemTime::now(),
             validator_exit: genesis.validator_exit.clone(),
             authorized_voter_keypairs: genesis.authorized_voter_keypairs.clone(),
+            staked_nodes_overrides: genesis.staked_nodes_overrides.clone(),
             post_init: admin_service_post_init.clone(),
             tower_storage: tower_storage.clone(),
         },
@@ -756,27 +767,41 @@ fn main() {
         .rpc_port(rpc_port)
         .add_programs_with_path(&programs_to_load)
         .add_accounts_from_json_files(&accounts_to_load)
+        .unwrap_or_else(|e| {
+            println!("Error: add_accounts_from_json_files failed: {}", e);
+            exit(1);
+        })
         .add_accounts_from_directories(&accounts_from_dirs)
+        .unwrap_or_else(|e| {
+            println!("Error: add_accounts_from_directories failed: {}", e);
+            exit(1);
+        })
         .deactivate_features(&features_to_deactivate);
 
     if !accounts_to_clone.is_empty() {
-        genesis.clone_accounts(
+        if let Err(e) = genesis.clone_accounts(
             accounts_to_clone,
             cluster_rpc_client
                 .as_ref()
                 .expect("bug: --url argument missing?"),
             false,
-        );
+        ) {
+            println!("Error: clone_accounts failed: {}", e);
+            exit(1);
+        }
     }
 
     if !accounts_to_maybe_clone.is_empty() {
-        genesis.clone_accounts(
+        if let Err(e) = genesis.clone_accounts(
             accounts_to_maybe_clone,
             cluster_rpc_client
                 .as_ref()
                 .expect("bug: --url argument missing?"),
             true,
-        );
+        ) {
+            println!("Error: clone_accounts failed: {}", e);
+            exit(1);
+        }
     }
 
     if let Some(warp_slot) = warp_slot {
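Usage sketch for the two knobs introduced above, with placeholder paths; the flag, subcommand, and RPC names are the ones added by this patch, and the --ledger argument is assumed to point at the running validator's ledger:

    # load overrides at startup
    solana-validator --staked-nodes-overrides /path/to/overrides.yml ...
    # replace them at runtime via the new setStakedNodesOverrides admin RPC
    solana-validator --ledger /path/to/ledger staked-nodes-overrides /path/to/overrides.yml
    # test validator: raise the per-transaction account lock limit
    solana-test-validator --transaction-account-lock-limit 128
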
diff --git a/validator/src/bootstrap.rs b/validator/src/bootstrap.rs
index 9998b986c9..c5a4b65d4b 100644
--- a/validator/src/bootstrap.rs
+++ b/validator/src/bootstrap.rs
@@ -1,6 +1,7 @@
 use {
     log::*,
     rand::{seq::SliceRandom, thread_rng, Rng},
+    rayon::prelude::*,
     solana_client::rpc_client::RpcClient,
     solana_core::validator::{ValidatorConfig, ValidatorStartProgress},
     solana_download_utils::{download_snapshot_archive, DownloadProgressRecord},
@@ -28,7 +29,7 @@ use {
     },
     solana_streamer::socket::SocketAddrSpace,
     std::{
-        collections::{HashMap, HashSet},
+        collections::{hash_map::RandomState, HashMap, HashSet},
         net::{SocketAddr, TcpListener, UdpSocket},
         path::Path,
         process::exit,
@@ -36,11 +37,12 @@ use {
             atomic::{AtomicBool, Ordering},
             Arc, RwLock,
         },
-        thread::sleep,
         time::{Duration, Instant},
     },
 };
 
+pub const MAX_RPC_CONNECTIONS_EVALUATED_PER_ITERATION: usize = 32;
+
 #[derive(Debug)]
 pub struct RpcBootstrapConfig {
     pub no_genesis_fetch: bool,
@@ -264,7 +266,7 @@ fn check_vote_account(
         .value
         .ok_or_else(|| format!("identity account does not exist: {}", identity_pubkey))?;
 
-    let vote_state = solana_vote_program::vote_state::VoteState::from(&vote_account);
+    let vote_state = solana_vote_program::vote_state::from(&vote_account);
     if let Some(vote_state) = vote_state {
         if vote_state.authorized_voters().is_empty() {
             return Err("Vote account not yet initialized".to_string());
@@ -303,7 +305,7 @@ fn check_vote_account(
     Ok(())
 }
 
-/// Struct to wrap the return value from get_rpc_node(). The `rpc_contact_info` is the peer to
+/// Struct to wrap the return value from get_rpc_nodes(). The `rpc_contact_info` is the peer to
 /// download from, and `snapshot_hash` is the (optional) full and (optional) incremental
 /// snapshots to download.
 #[derive(Debug)]
@@ -322,11 +324,133 @@ struct PeerSnapshotHash {
 
 /// A snapshot hash. In this context (bootstrap *with* incremental snapshots), a snapshot hash
 /// is _both_ a full snapshot hash and an (optional) incremental snapshot hash.
 #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)]
-struct SnapshotHash {
+pub struct SnapshotHash {
     full: (Slot, Hash),
     incr: Option<(Slot, Hash)>,
 }
 
+pub fn fail_rpc_node(
+    err: String,
+    known_validators: &Option<HashSet<Pubkey, RandomState>>,
+    rpc_id: &Pubkey,
+    blacklisted_rpc_nodes: &mut HashSet<Pubkey>,
+) {
+    warn!("{}", err);
+    if let Some(ref known_validators) = known_validators {
+        if known_validators.contains(rpc_id) {
+            return;
+        }
+    }
+
+    info!("Excluding {} as a future RPC candidate", rpc_id);
+    blacklisted_rpc_nodes.insert(*rpc_id);
+}
+
+#[allow(clippy::too_many_arguments)]
+pub fn attempt_download_genesis_and_snapshot(
+    rpc_contact_info: &ContactInfo,
+    ledger_path: &Path,
+    validator_config: &mut ValidatorConfig,
+    bootstrap_config: &RpcBootstrapConfig,
+    use_progress_bar: bool,
+    gossip: &mut Option<(Arc<ClusterInfo>, Arc<AtomicBool>, GossipService)>,
+    rpc_client: &RpcClient,
+    full_snapshot_archives_dir: &Path,
+    incremental_snapshot_archives_dir: &Path,
+    maximum_local_snapshot_age: Slot,
+    start_progress: &Arc<RwLock<ValidatorStartProgress>>,
+    minimal_snapshot_download_speed: f32,
+    maximum_snapshot_download_abort: u64,
+    download_abort_count: &mut u64,
+    snapshot_hash: Option<SnapshotHash>,
+    identity_keypair: &Arc<Keypair>,
+    vote_account: &Pubkey,
+    authorized_voter_keypairs: Arc<RwLock<Vec<Arc<Keypair>>>>,
+) -> Result<(), String> {
+    let genesis_config = download_then_check_genesis_hash(
+        &rpc_contact_info.rpc,
+        ledger_path,
+        validator_config.expected_genesis_hash,
+        bootstrap_config.max_genesis_archive_unpacked_size,
+        bootstrap_config.no_genesis_fetch,
+        use_progress_bar,
+    );
+
+    if let Ok(genesis_config) = genesis_config {
+        let genesis_hash = genesis_config.hash();
+        if validator_config.expected_genesis_hash.is_none() {
+            info!("Expected genesis hash set to {}", genesis_hash);
+            validator_config.expected_genesis_hash = Some(genesis_hash);
+        }
+    }
+
+    if let Some(expected_genesis_hash) = validator_config.expected_genesis_hash {
+        // Sanity check that the RPC node is using the expected genesis hash before
+        // downloading a snapshot from it
+        let rpc_genesis_hash = rpc_client
+            .get_genesis_hash()
+            .map_err(|err| format!("Failed to get genesis hash: {}", err))?;
+
+        if expected_genesis_hash != rpc_genesis_hash {
+            return Err(format!(
+                "Genesis hash mismatch: expected {} but RPC node genesis hash is {}",
+                expected_genesis_hash, rpc_genesis_hash
+            ));
+        }
+    }
+
+    let (cluster_info, gossip_exit_flag, gossip_service) = gossip.take().unwrap();
+    cluster_info.save_contact_info();
+    gossip_exit_flag.store(true, Ordering::Relaxed);
+    gossip_service.join().unwrap();
+
+    let rpc_client_slot = rpc_client
+        .get_slot_with_commitment(CommitmentConfig::finalized())
+        .map_err(|err| format!("Failed to get RPC node slot: {}", err))?;
+    info!("RPC node root slot: {}", rpc_client_slot);
+
+    download_snapshots(
+        full_snapshot_archives_dir,
+        incremental_snapshot_archives_dir,
+        validator_config,
+        bootstrap_config,
+        use_progress_bar,
+        maximum_local_snapshot_age,
+        start_progress,
+        minimal_snapshot_download_speed,
+        maximum_snapshot_download_abort,
+        download_abort_count,
+        snapshot_hash,
+        rpc_contact_info,
+    )?;
+
+    if let Some(url) = bootstrap_config.check_vote_account.as_ref() {
+        let rpc_client = RpcClient::new(url);
+        check_vote_account(
+            &rpc_client,
+            &identity_keypair.pubkey(),
+            vote_account,
+            &authorized_voter_keypairs
+                .read()
+                .unwrap()
+                .iter()
+                .map(|k| k.pubkey())
+                .collect::<Vec<_>>(),
+        )
+        .unwrap_or_else(|err| {
+            // Consider failures here to be more likely due to user error (eg,
+            // incorrect `solana-validator` command-line arguments) rather than the
+            // RPC node
failing. + // + // Power users can always use the `--no-check-vote-account` option to + // bypass this check entirely + error!("{}", err); + exit(1); + }); + } + Ok(()) +} + #[allow(clippy::too_many_arguments)] pub fn rpc_bootstrap( node: &Node, @@ -367,8 +491,9 @@ pub fn rpc_bootstrap( return; } - let mut blacklisted_rpc_nodes = HashSet::new(); + let blacklisted_rpc_nodes = RwLock::new(HashSet::new()); let mut gossip = None; + let mut vetted_rpc_nodes: Vec<(ContactInfo, Option, RpcClient)> = vec![]; let mut download_abort_count = 0; loop { if gossip.is_none() { @@ -387,136 +512,91 @@ pub fn rpc_bootstrap( )); } - let rpc_node_details = get_rpc_node( - &gossip.as_ref().unwrap().0, - cluster_entrypoints, - validator_config, - &mut blacklisted_rpc_nodes, - &bootstrap_config, - ); - if rpc_node_details.is_none() { - return; - } - let GetRpcNodeResult { - rpc_contact_info, - snapshot_hash, - } = rpc_node_details.unwrap(); - - info!( - "Using RPC service from node {}: {:?}", - rpc_contact_info.id, rpc_contact_info.rpc - ); - let rpc_client = RpcClient::new_socket(rpc_contact_info.rpc); - - let result = match rpc_client.get_version() { - Ok(rpc_version) => { - info!("RPC node version: {}", rpc_version.solana_core); - Ok(()) - } - Err(err) => Err(format!("Failed to get RPC node version: {}", err)), - } - .and_then(|_| { - let genesis_config = download_then_check_genesis_hash( - &rpc_contact_info.rpc, - ledger_path, - validator_config.expected_genesis_hash, - bootstrap_config.max_genesis_archive_unpacked_size, - bootstrap_config.no_genesis_fetch, - use_progress_bar, - ); - - if let Ok(genesis_config) = genesis_config { - let genesis_hash = genesis_config.hash(); - if validator_config.expected_genesis_hash.is_none() { - info!("Expected genesis hash set to {}", genesis_hash); - validator_config.expected_genesis_hash = Some(genesis_hash); - } - } - - if let Some(expected_genesis_hash) = validator_config.expected_genesis_hash { - // Sanity check that the RPC node is using the expected genesis hash before - // downloading a snapshot from it - let rpc_genesis_hash = rpc_client - .get_genesis_hash() - .map_err(|err| format!("Failed to get genesis hash: {}", err))?; - - if expected_genesis_hash != rpc_genesis_hash { - return Err(format!( - "Genesis hash mismatch: expected {} but RPC node genesis hash is {}", - expected_genesis_hash, rpc_genesis_hash - )); - } - } - - let (cluster_info, gossip_exit_flag, gossip_service) = gossip.take().unwrap(); - cluster_info.save_contact_info(); - gossip_exit_flag.store(true, Ordering::Relaxed); - gossip_service.join().unwrap(); - - let rpc_client_slot = rpc_client - .get_slot_with_commitment(CommitmentConfig::finalized()) - .map_err(|err| format!("Failed to get RPC node slot: {}", err))?; - info!("RPC node root slot: {}", rpc_client_slot); - - download_snapshots( - full_snapshot_archives_dir, - incremental_snapshot_archives_dir, + while vetted_rpc_nodes.is_empty() { + let rpc_node_details_vec = get_rpc_nodes( + &gossip.as_ref().unwrap().0, + cluster_entrypoints, validator_config, + &mut blacklisted_rpc_nodes.write().unwrap(), &bootstrap_config, - use_progress_bar, - maximum_local_snapshot_age, - start_progress, - minimal_snapshot_download_speed, - maximum_snapshot_download_abort, - &mut download_abort_count, - snapshot_hash, - &rpc_contact_info, - ) - }) - .map(|_| { - if let Some(url) = bootstrap_config.check_vote_account.as_ref() { - let rpc_client = RpcClient::new(url); - check_vote_account( - &rpc_client, - &identity_keypair.pubkey(), - vote_account, - 
&authorized_voter_keypairs - .read() - .unwrap() - .iter() - .map(|k| k.pubkey()) - .collect::>(), - ) - .unwrap_or_else(|err| { - // Consider failures here to be more likely due to user error (eg, - // incorrect `solana-validator` command-line arguments) rather than the - // RPC node failing. - // - // Power users can always use the `--no-check-vote-account` option to - // bypass this check entirely - error!("{}", err); - exit(1); - }); + ); + if rpc_node_details_vec.is_empty() { + return; } - }); - if result.is_ok() { - break; + vetted_rpc_nodes = rpc_node_details_vec + .into_par_iter() + .map(|rpc_node_details| { + let GetRpcNodeResult { + rpc_contact_info, + snapshot_hash, + } = rpc_node_details; + + info!( + "Using RPC service from node {}: {:?}", + rpc_contact_info.id, rpc_contact_info.rpc + ); + let rpc_client = RpcClient::new_socket_with_timeout( + rpc_contact_info.rpc, + Duration::from_secs(5), + ); + + (rpc_contact_info, snapshot_hash, rpc_client) + }) + .filter(|(rpc_contact_info, _snapshot_hash, rpc_client)| { + match rpc_client.get_version() { + Ok(rpc_version) => { + info!("RPC node version: {}", rpc_version.solana_core); + true + } + Err(err) => { + fail_rpc_node( + format!("Failed to get RPC node version: {}", err), + &validator_config.known_validators, + &rpc_contact_info.id, + &mut blacklisted_rpc_nodes.write().unwrap(), + ); + false + } + } + }) + .collect(); } - warn!("{}", result.unwrap_err()); - if let Some(ref known_validators) = validator_config.known_validators { - if known_validators.contains(&rpc_contact_info.id) { - continue; // Never blacklist a known node + let (rpc_contact_info, snapshot_hash, rpc_client) = vetted_rpc_nodes.pop().unwrap(); + + match attempt_download_genesis_and_snapshot( + &rpc_contact_info, + ledger_path, + validator_config, + &bootstrap_config, + use_progress_bar, + &mut gossip, + &rpc_client, + full_snapshot_archives_dir, + incremental_snapshot_archives_dir, + maximum_local_snapshot_age, + start_progress, + minimal_snapshot_download_speed, + maximum_snapshot_download_abort, + &mut download_abort_count, + snapshot_hash, + identity_keypair, + vote_account, + authorized_voter_keypairs.clone(), + ) { + Ok(()) => break, + Err(err) => { + fail_rpc_node( + err, + &validator_config.known_validators, + &rpc_contact_info.id, + &mut blacklisted_rpc_nodes.write().unwrap(), + ); } } - - info!( - "Excluding {} as a future RPC candidate", - rpc_contact_info.id - ); - blacklisted_rpc_nodes.insert(rpc_contact_info.id); } + if let Some((cluster_info, gossip_exit_flag, gossip_service)) = gossip.take() { cluster_info.save_contact_info(); gossip_exit_flag.store(true, Ordering::Relaxed); @@ -524,22 +604,20 @@ pub fn rpc_bootstrap( } } -/// Get an RPC peer node to download from. +/// Get RPC peer node candidates to download from. /// -/// This function finds the highest compatible snapshots from the cluster, then picks one peer -/// at random to use (return). -fn get_rpc_node( +/// This function finds the highest compatible snapshots from the cluster and returns RPC peers. 
+fn get_rpc_nodes( cluster_info: &ClusterInfo, cluster_entrypoints: &[ContactInfo], validator_config: &ValidatorConfig, blacklisted_rpc_nodes: &mut HashSet, bootstrap_config: &RpcBootstrapConfig, -) -> Option { +) -> Vec { let mut blacklist_timeout = Instant::now(); let mut newer_cluster_snapshot_timeout = None; let mut retry_reason = None; loop { - sleep(Duration::from_secs(1)); info!("\n{}", cluster_info.rpc_info_trace()); let rpc_peers = get_rpc_peers( @@ -556,17 +634,16 @@ fn get_rpc_node( } let rpc_peers = rpc_peers.unwrap(); blacklist_timeout = Instant::now(); - if bootstrap_config.no_snapshot_fetch { if rpc_peers.is_empty() { retry_reason = Some("No RPC peers available.".to_owned()); continue; } else { let random_peer = &rpc_peers[thread_rng().gen_range(0, rpc_peers.len())]; - return Some(GetRpcNodeResult { + return vec![GetRpcNodeResult { rpc_contact_info: random_peer.clone(), snapshot_hash: None, - }); + }]; } } @@ -576,14 +653,13 @@ fn get_rpc_node( validator_config.known_validators.as_ref(), bootstrap_config.incremental_snapshot_fetch, ); - if peer_snapshot_hashes.is_empty() { match newer_cluster_snapshot_timeout { None => newer_cluster_snapshot_timeout = Some(Instant::now()), Some(newer_cluster_snapshot_timeout) => { if newer_cluster_snapshot_timeout.elapsed().as_secs() > 180 { warn!("Giving up, did not get newer snapshots from the cluster."); - return None; + return vec![]; } } } @@ -594,10 +670,7 @@ fn get_rpc_node( .iter() .map(|peer_snapshot_hash| peer_snapshot_hash.rpc_contact_info.id) .collect::>(); - let PeerSnapshotHash { - rpc_contact_info: final_rpc_contact_info, - snapshot_hash: final_snapshot_hash, - } = get_final_peer_snapshot_hash(&peer_snapshot_hashes); + let final_snapshot_hash = peer_snapshot_hashes[0].snapshot_hash; info!( "Highest available snapshot slot is {}, available from {} node{}: {:?}", final_snapshot_hash @@ -608,11 +681,15 @@ fn get_rpc_node( if rpc_peers.len() > 1 { "s" } else { "" }, rpc_peers, ); - - return Some(GetRpcNodeResult { - rpc_contact_info: final_rpc_contact_info, - snapshot_hash: Some(final_snapshot_hash), - }); + let rpc_node_results = peer_snapshot_hashes + .iter() + .map(|peer_snapshot_hash| GetRpcNodeResult { + rpc_contact_info: peer_snapshot_hash.rpc_contact_info.clone(), + snapshot_hash: Some(peer_snapshot_hash.snapshot_hash), + }) + .take(MAX_RPC_CONNECTIONS_EVALUATED_PER_ITERATION) + .collect(); + return rpc_node_results; } } } @@ -971,29 +1048,6 @@ fn retain_peer_snapshot_hashes_with_highest_incremental_snapshot_slot( ); } -/// Get a final peer from the remaining peer snapshot hashes. At this point all the snapshot -/// hashes should (must) be the same, and only the peers are different. Pick an element from -/// the slice at random and return it. -fn get_final_peer_snapshot_hash(peer_snapshot_hashes: &[PeerSnapshotHash]) -> PeerSnapshotHash { - assert!(!peer_snapshot_hashes.is_empty()); - - // pick a final rpc peer at random - let final_peer_snapshot_hash = - &peer_snapshot_hashes[thread_rng().gen_range(0, peer_snapshot_hashes.len())]; - - // It is a programmer bug if the assert fires! By the time this function is called, the - // only remaining `incremental_snapshot_hashes` should all be the same. 
- assert!( - peer_snapshot_hashes.iter().all(|peer_snapshot_hash| { - peer_snapshot_hash.snapshot_hash == final_peer_snapshot_hash.snapshot_hash - }), - "To safely pick a peer at random, all the snapshot hashes must be the same" - ); - - trace!("final peer snapshot hash: {:?}", final_peer_snapshot_hash); - final_peer_snapshot_hash.clone() -} - /// Check to see if we can use our local snapshots, otherwise download newer ones. #[allow(clippy::too_many_arguments)] fn download_snapshots( diff --git a/validator/src/lib.rs b/validator/src/lib.rs index fa40f4dfb0..3289a221e5 100644 --- a/validator/src/lib.rs +++ b/validator/src/lib.rs @@ -66,15 +66,20 @@ pub fn redirect_stderr_to_file(logfile: Option) -> Option solana_logger::setup_with_default(filter); redirect_stderr(&logfile); - Some(std::thread::spawn(move || { - for signal in signals.forever() { - info!( - "received SIGUSR1 ({}), reopening log file: {:?}", - signal, logfile - ); - redirect_stderr(&logfile); - } - })) + Some( + std::thread::Builder::new() + .name("solSigUsr1".into()) + .spawn(move || { + for signal in signals.forever() { + info!( + "received SIGUSR1 ({}), reopening log file: {:?}", + signal, logfile + ); + redirect_stderr(&logfile); + } + }) + .unwrap(), + ) } #[cfg(not(unix))] { diff --git a/validator/src/main.rs b/validator/src/main.rs index c68fa1fdeb..f7c06db82b 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -76,8 +76,12 @@ use { }, solana_streamer::socket::SocketAddrSpace, solana_validator::{ - admin_rpc_service, bootstrap, dashboard::Dashboard, ledger_lockfile, lock_ledger, - new_spinner_progress_bar, println_name_value, redirect_stderr_to_file, + admin_rpc_service, + admin_rpc_service::{load_staked_nodes_overrides, StakedNodesOverrides}, + bootstrap, + dashboard::Dashboard, + ledger_lockfile, lock_ledger, new_spinner_progress_bar, println_name_value, + redirect_stderr_to_file, }, std::{ collections::{HashSet, VecDeque}, @@ -1231,7 +1235,7 @@ pub fn main() { Arg::with_name("disable_quic_servers") .long("disable-quic-servers") .takes_value(false) - .help("Disable QUIC TPU servers"), + .hidden(true) ) .arg( Arg::with_name("enable_quic_servers") @@ -1244,7 +1248,18 @@ pub fn main() { .takes_value(true) .default_value(default_tpu_connection_pool_size) .validator(is_parsable::) - .help("Controls the TPU connection pool size per remote addresss"), + .help("Controls the TPU connection pool size per remote address"), + ) + .arg( + Arg::with_name("staked_nodes_overrides") + .long("staked-nodes-overrides") + .value_name("PATH") + .takes_value(true) + .help("Provide path to a yaml file with custom overrides for stakes of specific + identities. Overriding the amount of stake this validator considers + as valid for other peers in network. The stake amount is used for calculating + number of QUIC streams permitted from the peer and vote packet sender stage. 
+                    Format of the file: `staked_map_id: {<pubkey>: <SOL stake amount>}`"),
         )
         .arg(
             Arg::with_name("rocksdb_max_compaction_jitter")
@@ -2017,6 +2032,19 @@ pub fn main() {
                 )
                 .after_help("Note: the new filter only applies to the currently running validator instance")
         )
+        .subcommand(
+            SubCommand::with_name("staked-nodes-overrides")
+                .about("Overrides stakes of specific node identities.")
+                .arg(
+                    Arg::with_name("path")
+                        .value_name("PATH")
+                        .takes_value(true)
+                        .required(true)
+                        .help("Provide path to a file with custom overrides for stakes of specific validator identities."),
+                )
+                .after_help("Note: the new staked nodes overrides only apply to the \
+                    currently running validator instance")
+        )
         .subcommand(
             SubCommand::with_name("wait-for-restart-window")
                 .about("Monitor the validator for a good time to restart")
@@ -2201,6 +2229,30 @@ pub fn main() {
             monitor_validator(&ledger_path);
             return;
         }
+        ("staked-nodes-overrides", Some(subcommand_matches)) => {
+            if !subcommand_matches.is_present("path") {
+                println!(
+                    "staked-nodes-overrides requires the path of the configuration file"
+                );
+                exit(1);
+            }
+
+            let path = subcommand_matches.value_of("path").unwrap();
+
+            let admin_client = admin_rpc_service::connect(&ledger_path);
+            admin_rpc_service::runtime()
+                .block_on(async move {
+                    admin_client
+                        .await?
+                        .set_staked_nodes_overrides(path.to_string())
+                        .await
+                })
+                .unwrap_or_else(|err| {
+                    println!("setStakedNodesOverrides request failed: {}", err);
+                    exit(1);
+                });
+            return;
+        }
         ("set-identity", Some(subcommand_matches)) => {
             let require_tower = subcommand_matches.is_present("require_tower");
 
@@ -2331,6 +2383,24 @@ pub fn main() {
         });
     let authorized_voter_keypairs = Arc::new(RwLock::new(authorized_voter_keypairs));
 
+    let staked_nodes_overrides_path = matches
+        .value_of("staked_nodes_overrides")
+        .map(str::to_string);
+    let staked_nodes_overrides = Arc::new(RwLock::new(
+        match staked_nodes_overrides_path {
+            None => StakedNodesOverrides::default(),
+            Some(p) => load_staked_nodes_overrides(&p).unwrap_or_else(|err| {
+                error!("Failed to load staked-nodes-overrides from {}: {}", &p, err);
+                clap::Error::with_description(
+                    "Failed to load configuration of staked-nodes-overrides argument",
+                    clap::ErrorKind::InvalidValue,
+                )
+                .exit()
+            }),
+        }
+        .staked_map_id,
+    ));
+
     let init_complete_file = matches.value_of("init_complete_file");
 
     if matches.is_present("no_check_vote_account") {
@@ -2418,7 +2488,6 @@ pub fn main() {
     let accounts_shrink_optimize_total_space =
         value_t_or_exit!(matches, "accounts_shrink_optimize_total_space", bool);
     let tpu_use_quic = !matches.is_present("tpu_disable_quic");
-    let enable_quic_servers = !matches.is_present("disable_quic_servers");
     let tpu_connection_pool_size = value_t_or_exit!(matches, "tpu_connection_pool_size", usize);
 
     let shrink_ratio = value_t_or_exit!(matches, "accounts_shrink_ratio", f64);
@@ -2588,6 +2657,9 @@ pub fn main() {
     if matches.is_present("enable_quic_servers") {
         warn!("--enable-quic-servers is now the default behavior. This flag is deprecated and can be removed from the launch args");
     }
+    if matches.is_present("disable_quic_servers") {
+        warn!("--disable-quic-servers is deprecated.
The quic server cannot be disabled."); + } let rpc_bigtable_config = if matches.is_present("enable_rpc_bigtable_ledger_storage") || matches.is_present("enable_bigtable_ledger_upload") @@ -2800,12 +2872,12 @@ pub fn main() { log_messages_bytes_limit: value_of(&matches, "log_messages_bytes_limit"), ..RuntimeConfig::default() }, - enable_quic_servers, maybe_relayer_config, tip_manager_config, shred_receiver_address: matches .value_of("shred_receiver_address") .map(|address| SocketAddr::from_str(address).expect("shred_receiver_address invalid")), + staked_nodes_overrides: staked_nodes_overrides.clone(), ..ValidatorConfig::default() }; @@ -3076,6 +3148,7 @@ pub fn main() { authorized_voter_keypairs: authorized_voter_keypairs.clone(), post_init: admin_service_post_init.clone(), tower_storage: validator_config.tower_storage.clone(), + staked_nodes_overrides, }, ); @@ -3230,7 +3303,11 @@ pub fn main() { socket_addr_space, tpu_use_quic, tpu_connection_pool_size, - ); + ) + .unwrap_or_else(|e| { + error!("Failed to start validator: {:?}", e); + exit(1); + }); *admin_service_post_init.write().unwrap() = Some(admin_rpc_service::AdminRpcRequestMetadataPostInit { bank_forks: validator.bank_forks.clone(), diff --git a/version/Cargo.toml b/version/Cargo.toml index 9d43684e7d..5045f303c9 100644 --- a/version/Cargo.toml +++ b/version/Cargo.toml @@ -11,8 +11,8 @@ edition = "2021" [dependencies] log = "0.4.17" -semver = "1.0.10" -serde = "1.0.138" +semver = "1.0.13" +serde = "1.0.143" serde_derive = "1.0.103" solana-frozen-abi = { path = "../frozen-abi", version = "=1.12.0" } solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.12.0" } diff --git a/web3.js/package-lock.json b/web3.js/package-lock.json index e255c9b123..b57e1a2ae4 100644 --- a/web3.js/package-lock.json +++ b/web3.js/package-lock.json @@ -124,39 +124,39 @@ "dev": true }, "node_modules/@babel/code-frame": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.16.7.tgz", - "integrity": "sha512-iAXqUn8IIeBTNd72xsFlgaXHkMBMt6y4HJp1tIaK465CWLT/fG1aqB7ykr95gHHmlBdGbFeWWfyB4NJJ0nmeIg==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.18.6.tgz", + "integrity": "sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q==", "dependencies": { - "@babel/highlight": "^7.16.7" + "@babel/highlight": "^7.18.6" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/compat-data": { - "version": "7.17.10", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.17.10.tgz", - "integrity": "sha512-GZt/TCsG70Ms19gfZO1tM4CVnXsPgEPBCpJu+Qz3L0LUDsY5nZqFZglIoPC1kIYOtNBZlrnFT+klg12vFGZXrw==", + "version": "7.18.8", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.18.8.tgz", + "integrity": "sha512-HSmX4WZPPK3FUxYp7g2T6EyO8j96HlZJlxmKPSh6KAcqwyDrfx7hKjXpAW/0FhFfTJsR0Yt4lAjLI2coMptIHQ==", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/core": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.18.0.tgz", - "integrity": "sha512-Xyw74OlJwDijToNi0+6BBI5mLLR5+5R3bcSH80LXzjzEGEUlvNzujEE71BaD/ApEZHAvFI/Mlmp4M5lIkdeeWw==", + "version": "7.18.13", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.18.13.tgz", + "integrity": "sha512-ZisbOvRRusFktksHSG6pjj1CSvkPkcZq/KHD45LAkVP/oiHJkNBZWfpvlLmX8OtHDG8IuzsFlVRWo08w7Qxn0A==", "dependencies": { "@ampproject/remapping": "^2.1.0", - "@babel/code-frame": "^7.16.7", - 
"@babel/generator": "^7.18.0", - "@babel/helper-compilation-targets": "^7.17.10", - "@babel/helper-module-transforms": "^7.18.0", - "@babel/helpers": "^7.18.0", - "@babel/parser": "^7.18.0", - "@babel/template": "^7.16.7", - "@babel/traverse": "^7.18.0", - "@babel/types": "^7.18.0", + "@babel/code-frame": "^7.18.6", + "@babel/generator": "^7.18.13", + "@babel/helper-compilation-targets": "^7.18.9", + "@babel/helper-module-transforms": "^7.18.9", + "@babel/helpers": "^7.18.9", + "@babel/parser": "^7.18.13", + "@babel/template": "^7.18.10", + "@babel/traverse": "^7.18.13", + "@babel/types": "^7.18.13", "convert-source-map": "^1.7.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", @@ -172,12 +172,12 @@ } }, "node_modules/@babel/generator": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.18.0.tgz", - "integrity": "sha512-81YO9gGx6voPXlvYdZBliFXAZU8vZ9AZ6z+CjlmcnaeOcYSFbMTpdeDUO9xD9dh/68Vq03I8ZspfUTPfitcDHg==", + "version": "7.18.13", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.18.13.tgz", + "integrity": "sha512-CkPg8ySSPuHTYPJYo7IRALdqyjM9HCbt/3uOBEFbzyGVP6Mn8bwFPB0jX6982JVNBlYzM1nnPkfjuXSOPtQeEQ==", "dependencies": { - "@babel/types": "^7.18.0", - "@jridgewell/gen-mapping": "^0.3.0", + "@babel/types": "^7.18.13", + "@jridgewell/gen-mapping": "^0.3.2", "jsesc": "^2.5.1" }, "engines": { @@ -196,24 +196,24 @@ } }, "node_modules/@babel/helper-builder-binary-assignment-operator-visitor": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.16.7.tgz", - "integrity": "sha512-C6FdbRaxYjwVu/geKW4ZeQ0Q31AftgRcdSnZ5/jsH6BzCJbtvXvhpfkbkThYSuutZA7nCXpPR6AD9zd1dprMkA==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.18.9.tgz", + "integrity": "sha512-yFQ0YCHoIqarl8BCRwBL8ulYUaZpz3bNsA7oFepAzee+8/+ImtADXNOmO5vJvsPff3qi+hvpkY/NYBTrBQgdNw==", "dependencies": { - "@babel/helper-explode-assignable-expression": "^7.16.7", - "@babel/types": "^7.16.7" + "@babel/helper-explode-assignable-expression": "^7.18.6", + "@babel/types": "^7.18.9" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-compilation-targets": { - "version": "7.17.10", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.17.10.tgz", - "integrity": "sha512-gh3RxjWbauw/dFiU/7whjd0qN9K6nPJMqe6+Er7rOavFh0CQUSwhAE3IcTho2rywPJFxej6TUUHDkWcYI6gGqQ==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.18.9.tgz", + "integrity": "sha512-tzLCyVmqUiFlcFoAPLA/gL9TeYrF61VLNtb+hvkuVaB5SUjW7jcfrglBIX1vUIoT7CLP3bBlIMeyEsIl2eFQNg==", "dependencies": { - "@babel/compat-data": "^7.17.10", - "@babel/helper-validator-option": "^7.16.7", + "@babel/compat-data": "^7.18.8", + "@babel/helper-validator-option": "^7.18.6", "browserslist": "^4.20.2", "semver": "^6.3.0" }, @@ -225,17 +225,17 @@ } }, "node_modules/@babel/helper-create-class-features-plugin": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.18.0.tgz", - "integrity": "sha512-Kh8zTGR9de3J63e5nS0rQUdRs/kbtwoeQQ0sriS0lItjC96u8XXZN6lKpuyWd2coKSU13py/y+LTmThLuVX0Pg==", + "version": "7.18.9", + "resolved": 
"https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.18.9.tgz", + "integrity": "sha512-WvypNAYaVh23QcjpMR24CwZY2Nz6hqdOcFdPbNpV56hL5H6KiFheO7Xm1aPdlLQ7d5emYZX7VZwPp9x3z+2opw==", "dependencies": { - "@babel/helper-annotate-as-pure": "^7.16.7", - "@babel/helper-environment-visitor": "^7.16.7", - "@babel/helper-function-name": "^7.17.9", - "@babel/helper-member-expression-to-functions": "^7.17.7", - "@babel/helper-optimise-call-expression": "^7.16.7", - "@babel/helper-replace-supers": "^7.16.7", - "@babel/helper-split-export-declaration": "^7.16.7" + "@babel/helper-annotate-as-pure": "^7.18.6", + "@babel/helper-environment-visitor": "^7.18.9", + "@babel/helper-function-name": "^7.18.9", + "@babel/helper-member-expression-to-functions": "^7.18.9", + "@babel/helper-optimise-call-expression": "^7.18.6", + "@babel/helper-replace-supers": "^7.18.9", + "@babel/helper-split-export-declaration": "^7.18.6" }, "engines": { "node": ">=6.9.0" @@ -245,12 +245,12 @@ } }, "node_modules/@babel/helper-create-regexp-features-plugin": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.17.12.tgz", - "integrity": "sha512-b2aZrV4zvutr9AIa6/gA3wsZKRwTKYoDxYiFKcESS3Ug2GTXzwBEvMuuFLhCQpEnRXs1zng4ISAXSUxxKBIcxw==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.18.6.tgz", + "integrity": "sha512-7LcpH1wnQLGrI+4v+nPp+zUvIkF9x0ddv1Hkdue10tg3gmRnLy97DXh4STiOf1qeIInyD69Qv5kKSZzKD8B/7A==", "dependencies": { - "@babel/helper-annotate-as-pure": "^7.16.7", - "regexpu-core": "^5.0.1" + "@babel/helper-annotate-as-pure": "^7.18.6", + "regexpu-core": "^5.1.0" }, "engines": { "node": ">=6.9.0" @@ -260,14 +260,12 @@ } }, "node_modules/@babel/helper-define-polyfill-provider": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.3.1.tgz", - "integrity": "sha512-J9hGMpJQmtWmj46B3kBHmL38UhJGhYX7eqkcq+2gsstyYt341HmPeWspihX43yVRA0mS+8GGk2Gckc7bY/HCmA==", + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.3.2.tgz", + "integrity": "sha512-r9QJJ+uDWrd+94BSPcP6/de67ygLtvVy6cK4luE6MOuDsZIdoaPBnfSpbO/+LTifjPckbKXRuI9BB/Z2/y3iTg==", "dependencies": { - "@babel/helper-compilation-targets": "^7.13.0", - "@babel/helper-module-imports": "^7.12.13", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/traverse": "^7.13.0", + "@babel/helper-compilation-targets": "^7.17.7", + "@babel/helper-plugin-utils": "^7.16.7", "debug": "^4.1.1", "lodash.debounce": "^4.0.8", "resolve": "^1.14.2", @@ -278,56 +276,53 @@ } }, "node_modules/@babel/helper-environment-visitor": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.16.7.tgz", - "integrity": "sha512-SLLb0AAn6PkUeAfKJCCOl9e1R53pQlGAfc4y4XuMRZfqeMYLE0dM1LMhqbGAlGQY0lfw5/ohoYWAe9V1yibRag==", - "dependencies": { - "@babel/types": "^7.16.7" - }, + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.18.9.tgz", + "integrity": "sha512-3r/aACDJ3fhQ/EVgFy0hpj8oHyHpQc+LPtJoY9SzTThAsStm4Ptegq92vqKoE3vD706ZVFWITnMnxucw+S9Ipg==", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-explode-assignable-expression": { - 
"version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.16.7.tgz", - "integrity": "sha512-KyUenhWMC8VrxzkGP0Jizjo4/Zx+1nNZhgocs+gLzyZyB8SHidhoq9KK/8Ato4anhwsivfkBLftky7gvzbZMtQ==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.18.6.tgz", + "integrity": "sha512-eyAYAsQmB80jNfg4baAtLeWAQHfHFiR483rzFK+BhETlGZaQC9bsfrugfXDCbRHLQbIA7U5NxhhOxN7p/dWIcg==", "dependencies": { - "@babel/types": "^7.16.7" + "@babel/types": "^7.18.6" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-function-name": { - "version": "7.17.9", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.17.9.tgz", - "integrity": "sha512-7cRisGlVtiVqZ0MW0/yFB4atgpGLWEHUVYnb448hZK4x+vih0YO5UoS11XIYtZYqHd0dIPMdUSv8q5K4LdMnIg==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.18.9.tgz", + "integrity": "sha512-fJgWlZt7nxGksJS9a0XdSaI4XvpExnNIgRP+rVefWh5U7BL8pPuir6SJUmFKRfjWQ51OtWSzwOxhaH/EBWWc0A==", "dependencies": { - "@babel/template": "^7.16.7", - "@babel/types": "^7.17.0" + "@babel/template": "^7.18.6", + "@babel/types": "^7.18.9" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-hoist-variables": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.16.7.tgz", - "integrity": "sha512-m04d/0Op34H5v7pbZw6pSKP7weA6lsMvfiIAMeIvkY/R4xQtBSMFEigu9QTZ2qB/9l22vsxtM8a+Q8CzD255fg==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz", + "integrity": "sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q==", "dependencies": { - "@babel/types": "^7.16.7" + "@babel/types": "^7.18.6" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-member-expression-to-functions": { - "version": "7.17.7", - "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.17.7.tgz", - "integrity": "sha512-thxXgnQ8qQ11W2wVUObIqDL4p148VMxkt5T/qpN5k2fboRyzFGFmKsTGViquyM5QHKUy48OZoca8kw4ajaDPyw==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.18.9.tgz", + "integrity": "sha512-RxifAh2ZoVU67PyKIO4AMi1wTenGfMR/O/ae0CCRqwgBAt5v7xjdtRw7UoSbsreKrQn5t7r89eruK/9JjYHuDg==", "dependencies": { - "@babel/types": "^7.17.0" + "@babel/types": "^7.18.9" }, "engines": { "node": ">=6.9.0" @@ -345,29 +340,29 @@ } }, "node_modules/@babel/helper-module-transforms": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.18.0.tgz", - "integrity": "sha512-kclUYSUBIjlvnzN2++K9f2qzYKFgjmnmjwL4zlmU5f8ZtzgWe8s0rUPSTGy2HmK4P8T52MQsS+HTQAgZd3dMEA==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.18.9.tgz", + "integrity": "sha512-KYNqY0ICwfv19b31XzvmI/mfcylOzbLtowkw+mfvGPAQ3kfCnMLYbED3YecL5tPd8nAYFQFAd6JHp2LxZk/J1g==", "dependencies": { - "@babel/helper-environment-visitor": "^7.16.7", - "@babel/helper-module-imports": "^7.16.7", - "@babel/helper-simple-access": "^7.17.7", - "@babel/helper-split-export-declaration": "^7.16.7", - 
"@babel/helper-validator-identifier": "^7.16.7", - "@babel/template": "^7.16.7", - "@babel/traverse": "^7.18.0", - "@babel/types": "^7.18.0" + "@babel/helper-environment-visitor": "^7.18.9", + "@babel/helper-module-imports": "^7.18.6", + "@babel/helper-simple-access": "^7.18.6", + "@babel/helper-split-export-declaration": "^7.18.6", + "@babel/helper-validator-identifier": "^7.18.6", + "@babel/template": "^7.18.6", + "@babel/traverse": "^7.18.9", + "@babel/types": "^7.18.9" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-optimise-call-expression": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.16.7.tgz", - "integrity": "sha512-EtgBhg7rd/JcnpZFXpBy0ze1YRfdm7BnBX4uKMBd3ixa3RGAE002JZB66FJyNH7g0F38U05pXmA5P8cBh7z+1w==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.18.6.tgz", + "integrity": "sha512-HP59oD9/fEHQkdcbgFCnbmgH5vIQTJbxh2yf+CdM89/glUNnuzr87Q8GIjGEnOktTROemO0Pe0iPAYbqZuOUiA==", "dependencies": { - "@babel/types": "^7.16.7" + "@babel/types": "^7.18.6" }, "engines": { "node": ">=6.9.0" @@ -382,61 +377,65 @@ } }, "node_modules/@babel/helper-remap-async-to-generator": { - "version": "7.16.8", - "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.16.8.tgz", - "integrity": "sha512-fm0gH7Flb8H51LqJHy3HJ3wnE1+qtYR2A99K06ahwrawLdOFsCEWjZOrYricXJHoPSudNKxrMBUPEIPxiIIvBw==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.18.9.tgz", + "integrity": "sha512-dI7q50YKd8BAv3VEfgg7PS7yD3Rtbi2J1XMXaalXO0W0164hYLnh8zpjRS0mte9MfVp/tltvr/cfdXPvJr1opA==", "dependencies": { - "@babel/helper-annotate-as-pure": "^7.16.7", - "@babel/helper-wrap-function": "^7.16.8", - "@babel/types": "^7.16.8" + "@babel/helper-annotate-as-pure": "^7.18.6", + "@babel/helper-environment-visitor": "^7.18.9", + "@babel/helper-wrap-function": "^7.18.9", + "@babel/types": "^7.18.9" }, "engines": { "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" } }, "node_modules/@babel/helper-replace-supers": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.16.7.tgz", - "integrity": "sha512-y9vsWilTNaVnVh6xiJfABzsNpgDPKev9HnAgz6Gb1p6UUwf9NepdlsV7VXGCftJM+jqD5f7JIEubcpLjZj5dBw==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.18.9.tgz", + "integrity": "sha512-dNsWibVI4lNT6HiuOIBr1oyxo40HvIVmbwPUm3XZ7wMh4k2WxrxTqZwSqw/eEmXDS9np0ey5M2bz9tBmO9c+YQ==", "dependencies": { - "@babel/helper-environment-visitor": "^7.16.7", - "@babel/helper-member-expression-to-functions": "^7.16.7", - "@babel/helper-optimise-call-expression": "^7.16.7", - "@babel/traverse": "^7.16.7", - "@babel/types": "^7.16.7" + "@babel/helper-environment-visitor": "^7.18.9", + "@babel/helper-member-expression-to-functions": "^7.18.9", + "@babel/helper-optimise-call-expression": "^7.18.6", + "@babel/traverse": "^7.18.9", + "@babel/types": "^7.18.9" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-simple-access": { - "version": "7.17.7", - "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.17.7.tgz", - "integrity": 
"sha512-txyMCGroZ96i+Pxr3Je3lzEJjqwaRC9buMUgtomcrLe5Nd0+fk1h0LLA+ixUF5OW7AhHuQ7Es1WcQJZmZsz2XA==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.18.6.tgz", + "integrity": "sha512-iNpIgTgyAvDQpDj76POqg+YEt8fPxx3yaNBg3S30dxNKm2SWfYhD0TGrK/Eu9wHpUW63VQU894TsTg+GLbUa1g==", "dependencies": { - "@babel/types": "^7.17.0" + "@babel/types": "^7.18.6" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-skip-transparent-expression-wrappers": { - "version": "7.16.0", - "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.16.0.tgz", - "integrity": "sha512-+il1gTy0oHwUsBQZyJvukbB4vPMdcYBrFHa0Uc4AizLxbq6BOYC51Rv4tWocX9BLBDLZ4kc6qUFpQ6HRgL+3zw==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.18.9.tgz", + "integrity": "sha512-imytd2gHi3cJPsybLRbmFrF7u5BIEuI2cNheyKi3/iOBC63kNn3q8Crn2xVuESli0aM4KYsyEqKyS7lFL8YVtw==", "dependencies": { - "@babel/types": "^7.16.0" + "@babel/types": "^7.18.9" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-split-export-declaration": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.16.7.tgz", - "integrity": "sha512-xbWoy/PFoxSWazIToT9Sif+jJTlrMcndIsaOKvTA6u7QEo7ilkRZpjew18/W3c7nm8fXdUDXh02VXTbZ0pGDNw==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz", + "integrity": "sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA==", "dependencies": { - "@babel/types": "^7.16.7" + "@babel/types": "^7.18.6" }, "engines": { "node": ">=6.9.0" @@ -467,38 +466,38 @@ } }, "node_modules/@babel/helper-wrap-function": { - "version": "7.16.8", - "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.16.8.tgz", - "integrity": "sha512-8RpyRVIAW1RcDDGTA+GpPAwV22wXCfKOoM9bet6TLkGIFTkRQSkH1nMQ5Yet4MpoXe1ZwHPVtNasc2w0uZMqnw==", + "version": "7.18.11", + "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.18.11.tgz", + "integrity": "sha512-oBUlbv+rjZLh2Ks9SKi4aL7eKaAXBWleHzU89mP0G6BMUlRxSckk9tSIkgDGydhgFxHuGSlBQZfnaD47oBEB7w==", "dependencies": { - "@babel/helper-function-name": "^7.16.7", - "@babel/template": "^7.16.7", - "@babel/traverse": "^7.16.8", - "@babel/types": "^7.16.8" + "@babel/helper-function-name": "^7.18.9", + "@babel/template": "^7.18.10", + "@babel/traverse": "^7.18.11", + "@babel/types": "^7.18.10" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helpers": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.18.0.tgz", - "integrity": "sha512-AE+HMYhmlMIbho9nbvicHyxFwhrO+xhKB6AhRxzl8w46Yj0VXTZjEsAoBVC7rB2I0jzX+yWyVybnO08qkfx6kg==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.18.9.tgz", + "integrity": "sha512-Jf5a+rbrLoR4eNdUmnFu8cN5eNJT6qdTdOg5IHIzq87WwyRw9PwguLFOWYgktN/60IP4fgDUawJvs7PjQIzELQ==", "dependencies": { - "@babel/template": "^7.16.7", - "@babel/traverse": "^7.18.0", - "@babel/types": "^7.18.0" + "@babel/template": "^7.18.6", + "@babel/traverse": "^7.18.9", + "@babel/types": "^7.18.9" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/highlight": { - 
"version": "7.16.10", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.16.10.tgz", - "integrity": "sha512-5FnTQLSLswEj6IkgVw5KusNUUFY9ZGqe/TRFnP/BKYHYgfh7tc+C7mwiy95/yNP7Dh9x580Vv8r7u7ZfTBFxdw==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.18.6.tgz", + "integrity": "sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==", "dependencies": { - "@babel/helper-validator-identifier": "^7.16.7", + "@babel/helper-validator-identifier": "^7.18.6", "chalk": "^2.0.0", "js-tokens": "^4.0.0" }, @@ -507,9 +506,9 @@ } }, "node_modules/@babel/parser": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.18.0.tgz", - "integrity": "sha512-AqDccGC+m5O/iUStSJy3DGRIUFu7WbY/CppZYwrEUB4N0tZlnI8CSTsgL7v5fHVFmUbRv2sd+yy27o8Ydt4MGg==", + "version": "7.18.13", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.18.13.tgz", + "integrity": "sha512-dgXcIfMuQ0kgzLB2b9tRZs7TTFFaGM2AbtA4fJgUUYukzGH4jwsS7hzQHEGs67jdehpm22vkgKwvbU+aEflgwg==", "bin": { "parser": "bin/babel-parser.js" }, @@ -518,11 +517,11 @@ } }, "node_modules/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.17.12.tgz", - "integrity": "sha512-xCJQXl4EeQ3J9C4yOmpTrtVGmzpm2iSzyxbkZHw7UCnZBftHpF/hpII80uWVyVrc40ytIClHjgWGTG1g/yB+aw==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.18.6.tgz", + "integrity": "sha512-Dgxsyg54Fx1d4Nge8UnvTrED63vrwOdPmyvPzlNN/boaliRP54pm3pGzZD1SJUwrBA+Cs/xdG8kXX6Mn/RfISQ==", "dependencies": { - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.6" }, "engines": { "node": ">=6.9.0" @@ -532,13 +531,13 @@ } }, "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.17.12.tgz", - "integrity": "sha512-/vt0hpIw0x4b6BLKUkwlvEoiGZYYLNZ96CzyHYPbtG2jZGz6LBe7/V+drYrc/d+ovrF9NBi0pmtvmNb/FsWtRQ==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.18.9.tgz", + "integrity": "sha512-AHrP9jadvH7qlOj6PINbgSuphjQUAK7AOT7DPjBo9EHoLhQTnnK5u45e1Hd4DbSQEO9nqPWtQ89r+XEOWFScKg==", "dependencies": { - "@babel/helper-plugin-utils": "^7.17.12", - "@babel/helper-skip-transparent-expression-wrappers": "^7.16.0", - "@babel/plugin-proposal-optional-chaining": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.9", + "@babel/helper-skip-transparent-expression-wrappers": "^7.18.9", + "@babel/plugin-proposal-optional-chaining": "^7.18.9" }, "engines": { "node": ">=6.9.0" @@ -548,12 +547,13 @@ } }, "node_modules/@babel/plugin-proposal-async-generator-functions": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.17.12.tgz", - "integrity": 
"sha512-RWVvqD1ooLKP6IqWTA5GyFVX2isGEgC5iFxKzfYOIy/QEFdxYyCybBDtIGjipHpb9bDWHzcqGqFakf+mVmBTdQ==", + "version": "7.18.10", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.18.10.tgz", + "integrity": "sha512-1mFuY2TOsR1hxbjCo4QL+qlIjV07p4H4EUYw2J/WCqsvFV6V9X9z9YhXbWndc/4fw+hYGlDT7egYxliMp5O6Ew==", "dependencies": { - "@babel/helper-plugin-utils": "^7.17.12", - "@babel/helper-remap-async-to-generator": "^7.16.8", + "@babel/helper-environment-visitor": "^7.18.9", + "@babel/helper-plugin-utils": "^7.18.9", + "@babel/helper-remap-async-to-generator": "^7.18.9", "@babel/plugin-syntax-async-generators": "^7.8.4" }, "engines": { @@ -564,12 +564,12 @@ } }, "node_modules/@babel/plugin-proposal-class-properties": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.17.12.tgz", - "integrity": "sha512-U0mI9q8pW5Q9EaTHFPwSVusPMV/DV9Mm8p7csqROFLtIE9rBF5piLqyrBGigftALrBcsBGu4m38JneAe7ZDLXw==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.18.6.tgz", + "integrity": "sha512-cumfXOF0+nzZrrN8Rf0t7M+tF6sZc7vhQwYQck9q1/5w2OExlD+b4v4RpMJFaV1Z7WcDRgO6FqvxqxGlwo+RHQ==", "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.17.12", - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-create-class-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" }, "engines": { "node": ">=6.9.0" @@ -579,12 +579,12 @@ } }, "node_modules/@babel/plugin-proposal-class-static-block": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-static-block/-/plugin-proposal-class-static-block-7.18.0.tgz", - "integrity": "sha512-t+8LsRMMDE74c6sV7KShIw13sqbqd58tlqNrsWoWBTIMw7SVQ0cZ905wLNS/FBCy/3PyooRHLFFlfrUNyyz5lA==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-static-block/-/plugin-proposal-class-static-block-7.18.6.tgz", + "integrity": "sha512-+I3oIiNxrCpup3Gi8n5IGMwj0gOCAjcJUSQEcotNnCCPMEnixawOQ+KeJPlgfjzx+FKQ1QSyZOWe7wmoJp7vhw==", "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.18.0", - "@babel/helper-plugin-utils": "^7.17.12", + "@babel/helper-create-class-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6", "@babel/plugin-syntax-class-static-block": "^7.14.5" }, "engines": { @@ -595,11 +595,11 @@ } }, "node_modules/@babel/plugin-proposal-dynamic-import": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.16.7.tgz", - "integrity": "sha512-I8SW9Ho3/8DRSdmDdH3gORdyUuYnk1m4cMxUAdu5oy4n3OfN8flDEH+d60iG7dUfi0KkYwSvoalHzzdRzpWHTg==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.18.6.tgz", + "integrity": "sha512-1auuwmK+Rz13SJj36R+jqFPMJWyKEDd7lLSdOj4oJK0UTgGueSAtkrCvz9ewmgyU/P941Rv2fQwZJN8s6QruXw==", "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7", + "@babel/helper-plugin-utils": "^7.18.6", "@babel/plugin-syntax-dynamic-import": "^7.8.3" }, "engines": { @@ -626,11 +626,11 @@ } }, "node_modules/@babel/plugin-proposal-export-namespace-from": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.17.12.tgz", - 
"integrity": "sha512-j7Ye5EWdwoXOpRmo5QmRyHPsDIe6+u70ZYZrd7uz+ebPYFKfRcLcNu3Ro0vOlJ5zuv8rU7xa+GttNiRzX56snQ==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.18.9.tgz", + "integrity": "sha512-k1NtHyOMvlDDFeb9G5PhUXuGj8m/wiwojgQVEhJ/fsVsMCpLyOP4h0uGEjYJKrRI+EVPlb5Jk+Gt9P97lOGwtA==", "dependencies": { - "@babel/helper-plugin-utils": "^7.17.12", + "@babel/helper-plugin-utils": "^7.18.9", "@babel/plugin-syntax-export-namespace-from": "^7.8.3" }, "engines": { @@ -641,11 +641,11 @@ } }, "node_modules/@babel/plugin-proposal-json-strings": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.17.12.tgz", - "integrity": "sha512-rKJ+rKBoXwLnIn7n6o6fulViHMrOThz99ybH+hKHcOZbnN14VuMnH9fo2eHE69C8pO4uX1Q7t2HYYIDmv8VYkg==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.18.6.tgz", + "integrity": "sha512-lr1peyn9kOdbYc0xr0OdHTZ5FMqS6Di+H0Fz2I/JwMzGmzJETNeOFq2pBySw6X/KFL5EWDjlJuMsUGRFb8fQgQ==", "dependencies": { - "@babel/helper-plugin-utils": "^7.17.12", + "@babel/helper-plugin-utils": "^7.18.6", "@babel/plugin-syntax-json-strings": "^7.8.3" }, "engines": { @@ -656,11 +656,11 @@ } }, "node_modules/@babel/plugin-proposal-logical-assignment-operators": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-logical-assignment-operators/-/plugin-proposal-logical-assignment-operators-7.17.12.tgz", - "integrity": "sha512-EqFo2s1Z5yy+JeJu7SFfbIUtToJTVlC61/C7WLKDntSw4Sz6JNAIfL7zQ74VvirxpjB5kz/kIx0gCcb+5OEo2Q==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-logical-assignment-operators/-/plugin-proposal-logical-assignment-operators-7.18.9.tgz", + "integrity": "sha512-128YbMpjCrP35IOExw2Fq+x55LMP42DzhOhX2aNNIdI9avSWl2PI0yuBWarr3RYpZBSPtabfadkH2yeRiMD61Q==", "dependencies": { - "@babel/helper-plugin-utils": "^7.17.12", + "@babel/helper-plugin-utils": "^7.18.9", "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" }, "engines": { @@ -671,11 +671,11 @@ } }, "node_modules/@babel/plugin-proposal-nullish-coalescing-operator": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.17.12.tgz", - "integrity": "sha512-ws/g3FSGVzv+VH86+QvgtuJL/kR67xaEIF2x0iPqdDfYW6ra6JF3lKVBkWynRLcNtIC1oCTfDRVxmm2mKzy+ag==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.18.6.tgz", + "integrity": "sha512-wQxQzxYeJqHcfppzBDnm1yAY0jSRkUXR2z8RePZYrKwMKgMlE8+Z6LUno+bd6LvbGh8Gltvy74+9pIYkr+XkKA==", "dependencies": { - "@babel/helper-plugin-utils": "^7.17.12", + "@babel/helper-plugin-utils": "^7.18.6", "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" }, "engines": { @@ -686,11 +686,11 @@ } }, "node_modules/@babel/plugin-proposal-numeric-separator": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.16.7.tgz", - "integrity": "sha512-vQgPMknOIgiuVqbokToyXbkY/OmmjAzr/0lhSIbG/KmnzXPGwW/AdhdKpi+O4X/VkWiWjnkKOBiqJrTaC98VKw==", + "version": "7.18.6", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.18.6.tgz", + "integrity": "sha512-ozlZFogPqoLm8WBr5Z8UckIoE4YQ5KESVcNudyXOR8uqIkliTEgJ3RoketfG6pmzLdeZF0H/wjE9/cCEitBl7Q==", "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7", + "@babel/helper-plugin-utils": "^7.18.6", "@babel/plugin-syntax-numeric-separator": "^7.10.4" }, "engines": { @@ -701,15 +701,15 @@ } }, "node_modules/@babel/plugin-proposal-object-rest-spread": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.18.0.tgz", - "integrity": "sha512-nbTv371eTrFabDfHLElkn9oyf9VG+VKK6WMzhY2o4eHKaG19BToD9947zzGMO6I/Irstx9d8CwX6njPNIAR/yw==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.18.9.tgz", + "integrity": "sha512-kDDHQ5rflIeY5xl69CEqGEZ0KY369ehsCIEbTGb4siHG5BE9sga/T0r0OUwyZNLMmZE79E1kbsqAjwFCW4ds6Q==", "dependencies": { - "@babel/compat-data": "^7.17.10", - "@babel/helper-compilation-targets": "^7.17.10", - "@babel/helper-plugin-utils": "^7.17.12", + "@babel/compat-data": "^7.18.8", + "@babel/helper-compilation-targets": "^7.18.9", + "@babel/helper-plugin-utils": "^7.18.9", "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-transform-parameters": "^7.17.12" + "@babel/plugin-transform-parameters": "^7.18.8" }, "engines": { "node": ">=6.9.0" @@ -719,11 +719,11 @@ } }, "node_modules/@babel/plugin-proposal-optional-catch-binding": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.16.7.tgz", - "integrity": "sha512-eMOH/L4OvWSZAE1VkHbr1vckLG1WUcHGJSLqqQwl2GaUqG6QjddvrOaTUMNYiv77H5IKPMZ9U9P7EaHwvAShfA==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.18.6.tgz", + "integrity": "sha512-Q40HEhs9DJQyaZfUjjn6vE8Cv4GmMHCYuMGIWUnlxH6400VGxOuwWsPt4FxXxJkC/5eOzgn0z21M9gMT4MOhbw==", "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7", + "@babel/helper-plugin-utils": "^7.18.6", "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" }, "engines": { @@ -734,12 +734,12 @@ } }, "node_modules/@babel/plugin-proposal-optional-chaining": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.17.12.tgz", - "integrity": "sha512-7wigcOs/Z4YWlK7xxjkvaIw84vGhDv/P1dFGQap0nHkc8gFKY/r+hXc8Qzf5k1gY7CvGIcHqAnOagVKJJ1wVOQ==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.18.9.tgz", + "integrity": "sha512-v5nwt4IqBXihxGsW2QmCWMDS3B3bzGIk/EQVZz2ei7f3NJl8NzAJVvUmpDW5q1CRNY+Beb/k58UAH1Km1N411w==", "dependencies": { - "@babel/helper-plugin-utils": "^7.17.12", - "@babel/helper-skip-transparent-expression-wrappers": "^7.16.0", + "@babel/helper-plugin-utils": "^7.18.9", + "@babel/helper-skip-transparent-expression-wrappers": "^7.18.9", "@babel/plugin-syntax-optional-chaining": "^7.8.3" }, "engines": { @@ -750,12 +750,12 @@ } }, "node_modules/@babel/plugin-proposal-private-methods": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.17.12.tgz", - "integrity": 
"sha512-SllXoxo19HmxhDWm3luPz+cPhtoTSKLJE9PXshsfrOzBqs60QP0r8OaJItrPhAj0d7mZMnNF0Y1UUggCDgMz1A==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.18.6.tgz", + "integrity": "sha512-nutsvktDItsNn4rpGItSNV2sz1XwS+nfU0Rg8aCx3W3NOKVzdMjJRu0O5OkgDp3ZGICSTbgRpxZoWsxoKRvbeA==", "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.17.12", - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-create-class-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" }, "engines": { "node": ">=6.9.0" @@ -765,13 +765,13 @@ } }, "node_modules/@babel/plugin-proposal-private-property-in-object": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.17.12.tgz", - "integrity": "sha512-/6BtVi57CJfrtDNKfK5b66ydK2J5pXUKBKSPD2G1whamMuEnZWgoOIfO8Vf9F/DoD4izBLD/Au4NMQfruzzykg==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.18.6.tgz", + "integrity": "sha512-9Rysx7FOctvT5ouj5JODjAFAkgGoudQuLPamZb0v1TGLpapdNaftzifU8NTWQm0IRjqoYypdrSmyWgkocDQ8Dw==", "dependencies": { - "@babel/helper-annotate-as-pure": "^7.16.7", - "@babel/helper-create-class-features-plugin": "^7.17.12", - "@babel/helper-plugin-utils": "^7.17.12", + "@babel/helper-annotate-as-pure": "^7.18.6", + "@babel/helper-create-class-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6", "@babel/plugin-syntax-private-property-in-object": "^7.14.5" }, "engines": { @@ -782,12 +782,12 @@ } }, "node_modules/@babel/plugin-proposal-unicode-property-regex": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.17.12.tgz", - "integrity": "sha512-Wb9qLjXf3ZazqXA7IvI7ozqRIXIGPtSo+L5coFmEkhTQK18ao4UDDD0zdTGAarmbLj2urpRwrc6893cu5Bfh0A==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.18.6.tgz", + "integrity": "sha512-2BShG/d5yoZyXZfVePH91urL5wTG6ASZU9M4o03lKK8u8UW1y08OMttBSOADTcJrnPMpvDXRG3G8fyLh4ovs8w==", "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.17.12", - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-create-regexp-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" }, "engines": { "node": ">=4" @@ -885,11 +885,11 @@ } }, "node_modules/@babel/plugin-syntax-import-assertions": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.17.12.tgz", - "integrity": "sha512-n/loy2zkq9ZEM8tEOwON9wTQSTNDTDEz6NujPtJGLU7qObzT1N4c4YZZf8E6ATB2AjNQg/Ib2AIpO03EZaCehw==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.18.6.tgz", + "integrity": "sha512-/DU3RXad9+bZwrgWJQKbr39gYbJpLJHezqEzRzi/BHRlJ9zsQb4CK2CA/5apllXNomwA1qHwzvHl+AdEmC5krQ==", "dependencies": { - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.6" }, "engines": { "node": ">=6.9.0" @@ -1033,11 +1033,11 @@ } }, "node_modules/@babel/plugin-transform-arrow-functions": { - "version": "7.17.12", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.17.12.tgz", - "integrity": "sha512-PHln3CNi/49V+mza4xMwrg+WGYevSF1oaiXaC2EQfdp4HWlSjRsrDXWJiQBKpP7749u6vQ9mcry2uuFOv5CXvA==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.18.6.tgz", + "integrity": "sha512-9S9X9RUefzrsHZmKMbDXxweEH+YlE8JJEuat9FdvW9Qh1cw7W64jELCtWNkPBPX5En45uy28KGvA/AySqUh8CQ==", "dependencies": { - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.6" }, "engines": { "node": ">=6.9.0" @@ -1047,13 +1047,13 @@ } }, "node_modules/@babel/plugin-transform-async-to-generator": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.17.12.tgz", - "integrity": "sha512-J8dbrWIOO3orDzir57NRsjg4uxucvhby0L/KZuGsWDj0g7twWK3g7JhJhOrXtuXiw8MeiSdJ3E0OW9H8LYEzLQ==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.18.6.tgz", + "integrity": "sha512-ARE5wZLKnTgPW7/1ftQmSi1CmkqqHo2DNmtztFhvgtOWSDfq0Cq9/9L+KnZNYSNrydBekhW3rwShduf59RoXag==", "dependencies": { - "@babel/helper-module-imports": "^7.16.7", - "@babel/helper-plugin-utils": "^7.17.12", - "@babel/helper-remap-async-to-generator": "^7.16.8" + "@babel/helper-module-imports": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6", + "@babel/helper-remap-async-to-generator": "^7.18.6" }, "engines": { "node": ">=6.9.0" @@ -1063,11 +1063,11 @@ } }, "node_modules/@babel/plugin-transform-block-scoped-functions": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.16.7.tgz", - "integrity": "sha512-JUuzlzmF40Z9cXyytcbZEZKckgrQzChbQJw/5PuEHYeqzCsvebDx0K0jWnIIVcmmDOAVctCgnYs0pMcrYj2zJg==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.18.6.tgz", + "integrity": "sha512-ExUcOqpPWnliRcPqves5HJcJOvHvIIWfuS4sroBUenPuMdmW+SMHDakmtS7qOo13sVppmUijqeTv7qqGsvURpQ==", "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7" + "@babel/helper-plugin-utils": "^7.18.6" }, "engines": { "node": ">=6.9.0" @@ -1077,11 +1077,11 @@ } }, "node_modules/@babel/plugin-transform-block-scoping": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.17.12.tgz", - "integrity": "sha512-jw8XW/B1i7Lqwqj2CbrViPcZijSxfguBWZP2aN59NHgxUyO/OcO1mfdCxH13QhN5LbWhPkX+f+brKGhZTiqtZQ==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.18.9.tgz", + "integrity": "sha512-5sDIJRV1KtQVEbt/EIBwGy4T01uYIo4KRB3VUqzkhrAIOGx7AoctL9+Ux88btY0zXdDyPJ9mW+bg+v+XEkGmtw==", "dependencies": { - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.9" }, "engines": { "node": ">=6.9.0" @@ -1091,17 +1091,17 @@ } }, "node_modules/@babel/plugin-transform-classes": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.17.12.tgz", - "integrity": "sha512-cvO7lc7pZat6BsvH6l/EGaI8zpl8paICaoGk+7x7guvtfak/TbIf66nYmJOH13EuG0H+Xx3M+9LQDtSvZFKXKw==", - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.16.7", - 
"@babel/helper-environment-visitor": "^7.16.7", - "@babel/helper-function-name": "^7.17.9", - "@babel/helper-optimise-call-expression": "^7.16.7", - "@babel/helper-plugin-utils": "^7.17.12", - "@babel/helper-replace-supers": "^7.16.7", - "@babel/helper-split-export-declaration": "^7.16.7", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.18.9.tgz", + "integrity": "sha512-EkRQxsxoytpTlKJmSPYrsOMjCILacAjtSVkd4gChEe2kXjFCun3yohhW5I7plXJhCemM0gKsaGMcO8tinvCA5g==", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.18.6", + "@babel/helper-environment-visitor": "^7.18.9", + "@babel/helper-function-name": "^7.18.9", + "@babel/helper-optimise-call-expression": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.9", + "@babel/helper-replace-supers": "^7.18.9", + "@babel/helper-split-export-declaration": "^7.18.6", "globals": "^11.1.0" }, "engines": { @@ -1112,11 +1112,11 @@ } }, "node_modules/@babel/plugin-transform-computed-properties": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.17.12.tgz", - "integrity": "sha512-a7XINeplB5cQUWMg1E/GI1tFz3LfK021IjV1rj1ypE+R7jHm+pIHmHl25VNkZxtx9uuYp7ThGk8fur1HHG7PgQ==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.18.9.tgz", + "integrity": "sha512-+i0ZU1bCDymKakLxn5srGHrsAPRELC2WIbzwjLhHW9SIE1cPYkLCL0NlnXMZaM1vhfgA2+M7hySk42VBvrkBRw==", "dependencies": { - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.9" }, "engines": { "node": ">=6.9.0" @@ -1126,11 +1126,11 @@ } }, "node_modules/@babel/plugin-transform-destructuring": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.18.0.tgz", - "integrity": "sha512-Mo69klS79z6KEfrLg/1WkmVnB8javh75HX4pi2btjvlIoasuxilEyjtsQW6XPrubNd7AQy0MMaNIaQE4e7+PQw==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.18.9.tgz", + "integrity": "sha512-p5VCYNddPLkZTq4XymQIaIfZNJwT9YsjkPOhkVEqt6QIpQFZVM9IltqqYpOEkJoN1DPznmxUDyZ5CTZs/ZCuHA==", "dependencies": { - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.9" }, "engines": { "node": ">=6.9.0" @@ -1140,12 +1140,12 @@ } }, "node_modules/@babel/plugin-transform-dotall-regex": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.16.7.tgz", - "integrity": "sha512-Lyttaao2SjZF6Pf4vk1dVKv8YypMpomAbygW+mU5cYP3S5cWTfCJjG8xV6CFdzGFlfWK81IjL9viiTvpb6G7gQ==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.18.6.tgz", + "integrity": "sha512-6S3jpun1eEbAxq7TdjLotAsl4WpQI9DxfkycRcKrjhQYzU87qpXdknpBg/e+TdcMehqGnLFi7tnFUBR02Vq6wg==", "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.16.7", - "@babel/helper-plugin-utils": "^7.16.7" + "@babel/helper-create-regexp-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" }, "engines": { "node": ">=6.9.0" @@ -1155,11 +1155,11 @@ } }, "node_modules/@babel/plugin-transform-duplicate-keys": { - "version": "7.17.12", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.17.12.tgz", - "integrity": "sha512-EA5eYFUG6xeerdabina/xIoB95jJ17mAkR8ivx6ZSu9frKShBjpOGZPn511MTDTkiCO+zXnzNczvUM69YSf3Zw==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.18.9.tgz", + "integrity": "sha512-d2bmXCtZXYc59/0SanQKbiWINadaJXqtvIQIzd4+hNwkWBgyCd5F/2t1kXoUdvPMrxzPvhK6EMQRROxsue+mfw==", "dependencies": { - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.9" }, "engines": { "node": ">=6.9.0" @@ -1169,12 +1169,12 @@ } }, "node_modules/@babel/plugin-transform-exponentiation-operator": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.16.7.tgz", - "integrity": "sha512-8UYLSlyLgRixQvlYH3J2ekXFHDFLQutdy7FfFAMm3CPZ6q9wHCwnUyiXpQCe3gVVnQlHc5nsuiEVziteRNTXEA==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.18.6.tgz", + "integrity": "sha512-wzEtc0+2c88FVR34aQmiz56dxEkxr2g8DQb/KfaFa1JYXOFVsbhvAonFN6PwVWj++fKmku8NP80plJ5Et4wqHw==", "dependencies": { - "@babel/helper-builder-binary-assignment-operator-visitor": "^7.16.7", - "@babel/helper-plugin-utils": "^7.16.7" + "@babel/helper-builder-binary-assignment-operator-visitor": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" }, "engines": { "node": ">=6.9.0" @@ -1200,11 +1200,11 @@ } }, "node_modules/@babel/plugin-transform-for-of": { - "version": "7.18.1", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.18.1.tgz", - "integrity": "sha512-+TTB5XwvJ5hZbO8xvl2H4XaMDOAK57zF4miuC9qQJgysPNEAZZ9Z69rdF5LJkozGdZrjBIUAIyKUWRMmebI7vg==", + "version": "7.18.8", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.18.8.tgz", + "integrity": "sha512-yEfTRnjuskWYo0k1mHUqrVWaZwrdq8AYbfrpqULOJOaucGSp4mNMVps+YtA8byoevxS/urwU75vyhQIxcCgiBQ==", "dependencies": { - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.6" }, "engines": { "node": ">=6.9.0" @@ -1214,13 +1214,13 @@ } }, "node_modules/@babel/plugin-transform-function-name": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.16.7.tgz", - "integrity": "sha512-SU/C68YVwTRxqWj5kgsbKINakGag0KTgq9f2iZEXdStoAbOzLHEBRYzImmA6yFo8YZhJVflvXmIHUO7GWHmxxA==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.18.9.tgz", + "integrity": "sha512-WvIBoRPaJQ5yVHzcnJFor7oS5Ls0PYixlTYE63lCj2RtdQEl15M68FXQlxnG6wdraJIXRdR7KI+hQ7q/9QjrCQ==", "dependencies": { - "@babel/helper-compilation-targets": "^7.16.7", - "@babel/helper-function-name": "^7.16.7", - "@babel/helper-plugin-utils": "^7.16.7" + "@babel/helper-compilation-targets": "^7.18.9", + "@babel/helper-function-name": "^7.18.9", + "@babel/helper-plugin-utils": "^7.18.9" }, "engines": { "node": ">=6.9.0" @@ -1230,11 +1230,11 @@ } }, "node_modules/@babel/plugin-transform-literals": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.17.12.tgz", - "integrity": 
"sha512-8iRkvaTjJciWycPIZ9k9duu663FT7VrBdNqNgxnVXEFwOIp55JWcZd23VBRySYbnS3PwQ3rGiabJBBBGj5APmQ==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.18.9.tgz", + "integrity": "sha512-IFQDSRoTPnrAIrI5zoZv73IFeZu2dhu6irxQjY9rNjTT53VmKg9fenjvoiOWOkJ6mm4jKVPtdMzBY98Fp4Z4cg==", "dependencies": { - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.9" }, "engines": { "node": ">=6.9.0" @@ -1244,11 +1244,11 @@ } }, "node_modules/@babel/plugin-transform-member-expression-literals": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.16.7.tgz", - "integrity": "sha512-mBruRMbktKQwbxaJof32LT9KLy2f3gH+27a5XSuXo6h7R3vqltl0PgZ80C8ZMKw98Bf8bqt6BEVi3svOh2PzMw==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.18.6.tgz", + "integrity": "sha512-qSF1ihLGO3q+/g48k85tUjD033C29TNTVB2paCwZPVmOsjn9pClvYYrM2VeJpBY2bcNkuny0YUyTNRyRxJ54KA==", "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7" + "@babel/helper-plugin-utils": "^7.18.6" }, "engines": { "node": ">=6.9.0" @@ -1258,12 +1258,12 @@ } }, "node_modules/@babel/plugin-transform-modules-amd": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.18.0.tgz", - "integrity": "sha512-h8FjOlYmdZwl7Xm2Ug4iX2j7Qy63NANI+NQVWQzv6r25fqgg7k2dZl03p95kvqNclglHs4FZ+isv4p1uXMA+QA==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.18.6.tgz", + "integrity": "sha512-Pra5aXsmTsOnjM3IajS8rTaLCy++nGM4v3YR4esk5PCsyg9z8NA5oQLwxzMUtDBd8F+UmVza3VxoAaWCbzH1rg==", "dependencies": { - "@babel/helper-module-transforms": "^7.18.0", - "@babel/helper-plugin-utils": "^7.17.12", + "@babel/helper-module-transforms": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6", "babel-plugin-dynamic-import-node": "^2.3.3" }, "engines": { @@ -1274,13 +1274,13 @@ } }, "node_modules/@babel/plugin-transform-modules-commonjs": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.18.0.tgz", - "integrity": "sha512-cCeR0VZWtfxWS4YueAK2qtHtBPJRSaJcMlbS8jhSIm/A3E2Kpro4W1Dn4cqJtp59dtWfXjQwK7SPKF8ghs7rlw==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.18.6.tgz", + "integrity": "sha512-Qfv2ZOWikpvmedXQJDSbxNqy7Xr/j2Y8/KfijM0iJyKkBTmWuvCA1yeH1yDM7NJhBW/2aXxeucLj6i80/LAJ/Q==", "dependencies": { - "@babel/helper-module-transforms": "^7.18.0", - "@babel/helper-plugin-utils": "^7.17.12", - "@babel/helper-simple-access": "^7.17.7", + "@babel/helper-module-transforms": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6", + "@babel/helper-simple-access": "^7.18.6", "babel-plugin-dynamic-import-node": "^2.3.3" }, "engines": { @@ -1291,14 +1291,14 @@ } }, "node_modules/@babel/plugin-transform-modules-systemjs": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.18.0.tgz", - "integrity": "sha512-vwKpxdHnlM5tIrRt/eA0bzfbi7gUBLN08vLu38np1nZevlPySRe6yvuATJB5F/WPJ+ur4OXwpVYq9+BsxqAQuQ==", + "version": "7.18.9", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.18.9.tgz", + "integrity": "sha512-zY/VSIbbqtoRoJKo2cDTewL364jSlZGvn0LKOf9ntbfxOvjfmyrdtEEOAdswOswhZEb8UH3jDkCKHd1sPgsS0A==", "dependencies": { - "@babel/helper-hoist-variables": "^7.16.7", - "@babel/helper-module-transforms": "^7.18.0", - "@babel/helper-plugin-utils": "^7.17.12", - "@babel/helper-validator-identifier": "^7.16.7", + "@babel/helper-hoist-variables": "^7.18.6", + "@babel/helper-module-transforms": "^7.18.9", + "@babel/helper-plugin-utils": "^7.18.9", + "@babel/helper-validator-identifier": "^7.18.6", "babel-plugin-dynamic-import-node": "^2.3.3" }, "engines": { @@ -1309,12 +1309,12 @@ } }, "node_modules/@babel/plugin-transform-modules-umd": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.18.0.tgz", - "integrity": "sha512-d/zZ8I3BWli1tmROLxXLc9A6YXvGK8egMxHp+E/rRwMh1Kip0AP77VwZae3snEJ33iiWwvNv2+UIIhfalqhzZA==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.18.6.tgz", + "integrity": "sha512-dcegErExVeXcRqNtkRU/z8WlBLnvD4MRnHgNs3MytRO1Mn1sHRyhbcpYbVMGclAqOjdW+9cfkdZno9dFdfKLfQ==", "dependencies": { - "@babel/helper-module-transforms": "^7.18.0", - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-module-transforms": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" }, "engines": { "node": ">=6.9.0" @@ -1324,12 +1324,12 @@ } }, "node_modules/@babel/plugin-transform-named-capturing-groups-regex": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.17.12.tgz", - "integrity": "sha512-vWoWFM5CKaTeHrdUJ/3SIOTRV+MBVGybOC9mhJkaprGNt5demMymDW24yC74avb915/mIRe3TgNb/d8idvnCRA==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.18.6.tgz", + "integrity": "sha512-UmEOGF8XgaIqD74bC8g7iV3RYj8lMf0Bw7NJzvnS9qQhM4mg+1WHKotUIdjxgD2RGrgFLZZPCFPFj3P/kVDYhg==", "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.17.12", - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-create-regexp-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" }, "engines": { "node": ">=6.9.0" @@ -1339,11 +1339,11 @@ } }, "node_modules/@babel/plugin-transform-new-target": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.17.12.tgz", - "integrity": "sha512-CaOtzk2fDYisbjAD4Sd1MTKGVIpRtx9bWLyj24Y/k6p4s4gQ3CqDGJauFJxt8M/LEx003d0i3klVqnN73qvK3w==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.18.6.tgz", + "integrity": "sha512-DjwFA/9Iu3Z+vrAn+8pBUGcjhxKguSMlsFqeCKbhb9BAV756v0krzVK04CRDi/4aqmk8BsHb4a/gFcaA5joXRw==", "dependencies": { - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.6" }, "engines": { "node": ">=6.9.0" @@ -1353,12 +1353,12 @@ } }, "node_modules/@babel/plugin-transform-object-super": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.16.7.tgz", - "integrity": "sha512-14J1feiQVWaGvRxj2WjyMuXS2jsBkgB3MdSN5HuC2G5nRspa5RK9COcs82Pwy5BuGcjb+fYaUj94mYcOj7rCvw==", + 
"version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.18.6.tgz", + "integrity": "sha512-uvGz6zk+pZoS1aTZrOvrbj6Pp/kK2mp45t2B+bTDre2UgsZZ8EZLSJtUg7m/no0zOJUWgFONpB7Zv9W2tSaFlA==", "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7", - "@babel/helper-replace-supers": "^7.16.7" + "@babel/helper-plugin-utils": "^7.18.6", + "@babel/helper-replace-supers": "^7.18.6" }, "engines": { "node": ">=6.9.0" @@ -1368,11 +1368,11 @@ } }, "node_modules/@babel/plugin-transform-parameters": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.17.12.tgz", - "integrity": "sha512-6qW4rWo1cyCdq1FkYri7AHpauchbGLXpdwnYsfxFb+KtddHENfsY5JZb35xUwkK5opOLcJ3BNd2l7PhRYGlwIA==", + "version": "7.18.8", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.18.8.tgz", + "integrity": "sha512-ivfbE3X2Ss+Fj8nnXvKJS6sjRG4gzwPMsP+taZC+ZzEGjAYlvENixmt1sZ5Ca6tWls+BlKSGKPJ6OOXvXCbkFg==", "dependencies": { - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.6" }, "engines": { "node": ">=6.9.0" @@ -1382,11 +1382,11 @@ } }, "node_modules/@babel/plugin-transform-property-literals": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.16.7.tgz", - "integrity": "sha512-z4FGr9NMGdoIl1RqavCqGG+ZuYjfZ/hkCIeuH6Do7tXmSm0ls11nYVSJqFEUOSJbDab5wC6lRE/w6YjVcr6Hqw==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.18.6.tgz", + "integrity": "sha512-cYcs6qlgafTud3PAzrrRNbQtfpQ8+y/+M5tKmksS9+M1ckbH6kzY8MrexEM9mcA6JDsukE19iIRvAyYl463sMg==", "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7" + "@babel/helper-plugin-utils": "^7.18.6" }, "engines": { "node": ">=6.9.0" @@ -1460,11 +1460,11 @@ } }, "node_modules/@babel/plugin-transform-regenerator": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.18.0.tgz", - "integrity": "sha512-C8YdRw9uzx25HSIzwA7EM7YP0FhCe5wNvJbZzjVNHHPGVcDJ3Aie+qGYYdS1oVQgn+B3eAIJbWFLrJ4Jipv7nw==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.18.6.tgz", + "integrity": "sha512-poqRI2+qiSdeldcz4wTSTXBRryoq3Gc70ye7m7UD5Ww0nE29IXqMl6r7Nd15WBgRd74vloEMlShtH6CKxVzfmQ==", "dependencies": { - "@babel/helper-plugin-utils": "^7.17.12", + "@babel/helper-plugin-utils": "^7.18.6", "regenerator-transform": "^0.15.0" }, "engines": { @@ -1475,11 +1475,11 @@ } }, "node_modules/@babel/plugin-transform-reserved-words": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.17.12.tgz", - "integrity": "sha512-1KYqwbJV3Co03NIi14uEHW8P50Md6KqFgt0FfpHdK6oyAHQVTosgPuPSiWud1HX0oYJ1hGRRlk0fP87jFpqXZA==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.18.6.tgz", + "integrity": "sha512-oX/4MyMoypzHjFrT1CdivfKZ+XvIPMFXwwxHp/r0Ddy2Vuomt4HDFGmft1TAY2yiTKiNSsh3kjBAzcM8kSdsjA==", "dependencies": { - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.6" }, "engines": { "node": ">=6.9.0" @@ -1508,11 +1508,11 @@ } }, 
"node_modules/@babel/plugin-transform-shorthand-properties": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.16.7.tgz", - "integrity": "sha512-hah2+FEnoRoATdIb05IOXf+4GzXYTq75TVhIn1PewihbpyrNWUt2JbudKQOETWw6QpLe+AIUpJ5MVLYTQbeeUg==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.18.6.tgz", + "integrity": "sha512-eCLXXJqv8okzg86ywZJbRn19YJHU4XUa55oz2wbHhaQVn/MM+XhukiT7SYqp/7o00dg52Rj51Ny+Ecw4oyoygw==", "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7" + "@babel/helper-plugin-utils": "^7.18.6" }, "engines": { "node": ">=6.9.0" @@ -1522,12 +1522,12 @@ } }, "node_modules/@babel/plugin-transform-spread": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.17.12.tgz", - "integrity": "sha512-9pgmuQAtFi3lpNUstvG9nGfk9DkrdmWNp9KeKPFmuZCpEnxRzYlS8JgwPjYj+1AWDOSvoGN0H30p1cBOmT/Svg==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.18.9.tgz", + "integrity": "sha512-39Q814wyoOPtIB/qGopNIL9xDChOE1pNU0ZY5dO0owhiVt/5kFm4li+/bBtwc7QotG0u5EPzqhZdjMtmqBqyQA==", "dependencies": { - "@babel/helper-plugin-utils": "^7.17.12", - "@babel/helper-skip-transparent-expression-wrappers": "^7.16.0" + "@babel/helper-plugin-utils": "^7.18.9", + "@babel/helper-skip-transparent-expression-wrappers": "^7.18.9" }, "engines": { "node": ">=6.9.0" @@ -1537,11 +1537,11 @@ } }, "node_modules/@babel/plugin-transform-sticky-regex": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.16.7.tgz", - "integrity": "sha512-NJa0Bd/87QV5NZZzTuZG5BPJjLYadeSZ9fO6oOUoL4iQx+9EEuw/eEM92SrsT19Yc2jgB1u1hsjqDtH02c3Drw==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.18.6.tgz", + "integrity": "sha512-kfiDrDQ+PBsQDO85yj1icueWMfGfJFKN1KCkndygtu/C9+XUfydLC8Iv5UYJqRwy4zk8EcplRxEOeLyjq1gm6Q==", "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7" + "@babel/helper-plugin-utils": "^7.18.6" }, "engines": { "node": ">=6.9.0" @@ -1551,11 +1551,11 @@ } }, "node_modules/@babel/plugin-transform-template-literals": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.17.12.tgz", - "integrity": "sha512-kAKJ7DX1dSRa2s7WN1xUAuaQmkTpN+uig4wCKWivVXIObqGbVTUlSavHyfI2iZvz89GFAMGm9p2DBJ4Y1Tp0hw==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.18.9.tgz", + "integrity": "sha512-S8cOWfT82gTezpYOiVaGHrCbhlHgKhQt8XH5ES46P2XWmX92yisoZywf5km75wv5sYcXDUCLMmMxOLCtthDgMA==", "dependencies": { - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.9" }, "engines": { "node": ">=6.9.0" @@ -1565,11 +1565,11 @@ } }, "node_modules/@babel/plugin-transform-typeof-symbol": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.17.12.tgz", - "integrity": "sha512-Q8y+Jp7ZdtSPXCThB6zjQ74N3lj0f6TDh1Hnf5B+sYlzQ8i5Pjp8gW0My79iekSpT4WnI06blqP6DT0OmaXXmw==", + "version": "7.18.9", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.18.9.tgz", + "integrity": "sha512-SRfwTtF11G2aemAZWivL7PD+C9z52v9EvMqH9BuYbabyPuKUvSWks3oCg6041pT925L4zVFqaVBeECwsmlguEw==", "dependencies": { - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.9" }, "engines": { "node": ">=6.9.0" @@ -1595,11 +1595,11 @@ } }, "node_modules/@babel/plugin-transform-unicode-escapes": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.16.7.tgz", - "integrity": "sha512-TAV5IGahIz3yZ9/Hfv35TV2xEm+kaBDaZQCn2S/hG9/CZ0DktxJv9eKfPc7yYCvOYR4JGx1h8C+jcSOvgaaI/Q==", + "version": "7.18.10", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.18.10.tgz", + "integrity": "sha512-kKAdAI+YzPgGY/ftStBFXTI1LZFju38rYThnfMykS+IXy8BVx+res7s2fxf1l8I35DV2T97ezo6+SGrXz6B3iQ==", "dependencies": { - "@babel/helper-plugin-utils": "^7.16.7" + "@babel/helper-plugin-utils": "^7.18.9" }, "engines": { "node": ">=6.9.0" @@ -1609,12 +1609,12 @@ } }, "node_modules/@babel/plugin-transform-unicode-regex": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.16.7.tgz", - "integrity": "sha512-oC5tYYKw56HO75KZVLQ+R/Nl3Hro9kf8iG0hXoaHP7tjAyCpvqBiSNe6vGrZni1Z6MggmUOC6A7VP7AVmw225Q==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.18.6.tgz", + "integrity": "sha512-gE7A6Lt7YLnNOL3Pb9BNeZvi+d8l7tcRrG4+pwJjK9hD2xX4mEvjlQW60G9EEmfXVYRPv9VRQcyegIVHCql/AA==", "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.16.7", - "@babel/helper-plugin-utils": "^7.16.7" + "@babel/helper-create-regexp-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" }, "engines": { "node": ">=6.9.0" @@ -1624,37 +1624,37 @@ } }, "node_modules/@babel/preset-env": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.18.0.tgz", - "integrity": "sha512-cP74OMs7ECLPeG1reiCQ/D/ypyOxgfm8uR6HRYV23vTJ7Lu1nbgj9DQDo/vH59gnn7GOAwtTDPPYV4aXzsMKHA==", + "version": "7.18.10", + "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.18.10.tgz", + "integrity": "sha512-wVxs1yjFdW3Z/XkNfXKoblxoHgbtUF7/l3PvvP4m02Qz9TZ6uZGxRVYjSQeR87oQmHco9zWitW5J82DJ7sCjvA==", "dependencies": { - "@babel/compat-data": "^7.17.10", - "@babel/helper-compilation-targets": "^7.17.10", - "@babel/helper-plugin-utils": "^7.17.12", - "@babel/helper-validator-option": "^7.16.7", - "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.17.12", - "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.17.12", - "@babel/plugin-proposal-async-generator-functions": "^7.17.12", - "@babel/plugin-proposal-class-properties": "^7.17.12", - "@babel/plugin-proposal-class-static-block": "^7.18.0", - "@babel/plugin-proposal-dynamic-import": "^7.16.7", - "@babel/plugin-proposal-export-namespace-from": "^7.17.12", - "@babel/plugin-proposal-json-strings": "^7.17.12", - "@babel/plugin-proposal-logical-assignment-operators": "^7.17.12", - "@babel/plugin-proposal-nullish-coalescing-operator": "^7.17.12", - "@babel/plugin-proposal-numeric-separator": "^7.16.7", - "@babel/plugin-proposal-object-rest-spread": "^7.18.0", - "@babel/plugin-proposal-optional-catch-binding": "^7.16.7", - 
"@babel/plugin-proposal-optional-chaining": "^7.17.12", - "@babel/plugin-proposal-private-methods": "^7.17.12", - "@babel/plugin-proposal-private-property-in-object": "^7.17.12", - "@babel/plugin-proposal-unicode-property-regex": "^7.17.12", + "@babel/compat-data": "^7.18.8", + "@babel/helper-compilation-targets": "^7.18.9", + "@babel/helper-plugin-utils": "^7.18.9", + "@babel/helper-validator-option": "^7.18.6", + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.18.6", + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.18.9", + "@babel/plugin-proposal-async-generator-functions": "^7.18.10", + "@babel/plugin-proposal-class-properties": "^7.18.6", + "@babel/plugin-proposal-class-static-block": "^7.18.6", + "@babel/plugin-proposal-dynamic-import": "^7.18.6", + "@babel/plugin-proposal-export-namespace-from": "^7.18.9", + "@babel/plugin-proposal-json-strings": "^7.18.6", + "@babel/plugin-proposal-logical-assignment-operators": "^7.18.9", + "@babel/plugin-proposal-nullish-coalescing-operator": "^7.18.6", + "@babel/plugin-proposal-numeric-separator": "^7.18.6", + "@babel/plugin-proposal-object-rest-spread": "^7.18.9", + "@babel/plugin-proposal-optional-catch-binding": "^7.18.6", + "@babel/plugin-proposal-optional-chaining": "^7.18.9", + "@babel/plugin-proposal-private-methods": "^7.18.6", + "@babel/plugin-proposal-private-property-in-object": "^7.18.6", + "@babel/plugin-proposal-unicode-property-regex": "^7.18.6", "@babel/plugin-syntax-async-generators": "^7.8.4", "@babel/plugin-syntax-class-properties": "^7.12.13", "@babel/plugin-syntax-class-static-block": "^7.14.5", "@babel/plugin-syntax-dynamic-import": "^7.8.3", "@babel/plugin-syntax-export-namespace-from": "^7.8.3", - "@babel/plugin-syntax-import-assertions": "^7.17.12", + "@babel/plugin-syntax-import-assertions": "^7.18.6", "@babel/plugin-syntax-json-strings": "^7.8.3", "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", @@ -1664,43 +1664,43 @@ "@babel/plugin-syntax-optional-chaining": "^7.8.3", "@babel/plugin-syntax-private-property-in-object": "^7.14.5", "@babel/plugin-syntax-top-level-await": "^7.14.5", - "@babel/plugin-transform-arrow-functions": "^7.17.12", - "@babel/plugin-transform-async-to-generator": "^7.17.12", - "@babel/plugin-transform-block-scoped-functions": "^7.16.7", - "@babel/plugin-transform-block-scoping": "^7.17.12", - "@babel/plugin-transform-classes": "^7.17.12", - "@babel/plugin-transform-computed-properties": "^7.17.12", - "@babel/plugin-transform-destructuring": "^7.18.0", - "@babel/plugin-transform-dotall-regex": "^7.16.7", - "@babel/plugin-transform-duplicate-keys": "^7.17.12", - "@babel/plugin-transform-exponentiation-operator": "^7.16.7", - "@babel/plugin-transform-for-of": "^7.17.12", - "@babel/plugin-transform-function-name": "^7.16.7", - "@babel/plugin-transform-literals": "^7.17.12", - "@babel/plugin-transform-member-expression-literals": "^7.16.7", - "@babel/plugin-transform-modules-amd": "^7.18.0", - "@babel/plugin-transform-modules-commonjs": "^7.18.0", - "@babel/plugin-transform-modules-systemjs": "^7.18.0", - "@babel/plugin-transform-modules-umd": "^7.18.0", - "@babel/plugin-transform-named-capturing-groups-regex": "^7.17.12", - "@babel/plugin-transform-new-target": "^7.17.12", - "@babel/plugin-transform-object-super": "^7.16.7", - "@babel/plugin-transform-parameters": "^7.17.12", - "@babel/plugin-transform-property-literals": "^7.16.7", - 
"@babel/plugin-transform-regenerator": "^7.18.0", - "@babel/plugin-transform-reserved-words": "^7.17.12", - "@babel/plugin-transform-shorthand-properties": "^7.16.7", - "@babel/plugin-transform-spread": "^7.17.12", - "@babel/plugin-transform-sticky-regex": "^7.16.7", - "@babel/plugin-transform-template-literals": "^7.17.12", - "@babel/plugin-transform-typeof-symbol": "^7.17.12", - "@babel/plugin-transform-unicode-escapes": "^7.16.7", - "@babel/plugin-transform-unicode-regex": "^7.16.7", + "@babel/plugin-transform-arrow-functions": "^7.18.6", + "@babel/plugin-transform-async-to-generator": "^7.18.6", + "@babel/plugin-transform-block-scoped-functions": "^7.18.6", + "@babel/plugin-transform-block-scoping": "^7.18.9", + "@babel/plugin-transform-classes": "^7.18.9", + "@babel/plugin-transform-computed-properties": "^7.18.9", + "@babel/plugin-transform-destructuring": "^7.18.9", + "@babel/plugin-transform-dotall-regex": "^7.18.6", + "@babel/plugin-transform-duplicate-keys": "^7.18.9", + "@babel/plugin-transform-exponentiation-operator": "^7.18.6", + "@babel/plugin-transform-for-of": "^7.18.8", + "@babel/plugin-transform-function-name": "^7.18.9", + "@babel/plugin-transform-literals": "^7.18.9", + "@babel/plugin-transform-member-expression-literals": "^7.18.6", + "@babel/plugin-transform-modules-amd": "^7.18.6", + "@babel/plugin-transform-modules-commonjs": "^7.18.6", + "@babel/plugin-transform-modules-systemjs": "^7.18.9", + "@babel/plugin-transform-modules-umd": "^7.18.6", + "@babel/plugin-transform-named-capturing-groups-regex": "^7.18.6", + "@babel/plugin-transform-new-target": "^7.18.6", + "@babel/plugin-transform-object-super": "^7.18.6", + "@babel/plugin-transform-parameters": "^7.18.8", + "@babel/plugin-transform-property-literals": "^7.18.6", + "@babel/plugin-transform-regenerator": "^7.18.6", + "@babel/plugin-transform-reserved-words": "^7.18.6", + "@babel/plugin-transform-shorthand-properties": "^7.18.6", + "@babel/plugin-transform-spread": "^7.18.9", + "@babel/plugin-transform-sticky-regex": "^7.18.6", + "@babel/plugin-transform-template-literals": "^7.18.9", + "@babel/plugin-transform-typeof-symbol": "^7.18.9", + "@babel/plugin-transform-unicode-escapes": "^7.18.10", + "@babel/plugin-transform-unicode-regex": "^7.18.6", "@babel/preset-modules": "^0.1.5", - "@babel/types": "^7.18.0", - "babel-plugin-polyfill-corejs2": "^0.3.0", - "babel-plugin-polyfill-corejs3": "^0.5.0", - "babel-plugin-polyfill-regenerator": "^0.3.0", + "@babel/types": "^7.18.10", + "babel-plugin-polyfill-corejs2": "^0.3.2", + "babel-plugin-polyfill-corejs3": "^0.5.3", + "babel-plugin-polyfill-regenerator": "^0.4.0", "core-js-compat": "^3.22.1", "semver": "^6.3.0" }, @@ -1711,6 +1711,17 @@ "@babel/core": "^7.0.0-0" } }, + "node_modules/@babel/preset-env/node_modules/babel-plugin-polyfill-regenerator": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.4.0.tgz", + "integrity": "sha512-RW1cnryiADFeHmfLS+WW/G431p1PsW5qdRdz0SDRi7TKcUgc7Oh/uXkT7MZ/+tGsT1BkczEAmD5XjUyJ5SWDTw==", + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.3.2" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, "node_modules/@babel/preset-flow": { "version": "7.18.6", "resolved": "https://registry.npmjs.org/@babel/preset-flow/-/preset-flow-7.18.6.tgz", @@ -1760,9 +1771,9 @@ } }, "node_modules/@babel/register": { - "version": "7.17.7", - "resolved": "https://registry.npmjs.org/@babel/register/-/register-7.17.7.tgz", - "integrity": 
"sha512-fg56SwvXRifootQEDQAu1mKdjh5uthPzdO0N6t358FktfL4XjAVXuH58ULoiW8mesxiOgNIrxiImqEwv0+hRRA==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/register/-/register-7.18.9.tgz", + "integrity": "sha512-ZlbnXDcNYHMR25ITwwNKT88JiaukkdVj/nG7r3wnuXkOTHc60Uy05PwMCPre0hSkY68E6zK3xz+vUJSP2jWmcw==", "dependencies": { "clone-deep": "^4.0.1", "find-cache-dir": "^2.0.0", @@ -1789,31 +1800,31 @@ } }, "node_modules/@babel/template": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.16.7.tgz", - "integrity": "sha512-I8j/x8kHUrbYRTUxXrrMbfCa7jxkE7tZre39x3kjr9hvI82cK1FfqLygotcWN5kdPGWcLdWMHpSBavse5tWw3w==", + "version": "7.18.10", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.18.10.tgz", + "integrity": "sha512-TI+rCtooWHr3QJ27kJxfjutghu44DLnasDMwpDqCXVTal9RLp3RSYNh4NdBrRP2cQAoG9A8juOQl6P6oZG4JxA==", "dependencies": { - "@babel/code-frame": "^7.16.7", - "@babel/parser": "^7.16.7", - "@babel/types": "^7.16.7" + "@babel/code-frame": "^7.18.6", + "@babel/parser": "^7.18.10", + "@babel/types": "^7.18.10" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/traverse": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.18.0.tgz", - "integrity": "sha512-oNOO4vaoIQoGjDQ84LgtF/IAlxlyqL4TUuoQ7xLkQETFaHkY1F7yazhB4Kt3VcZGL0ZF/jhrEpnXqUb0M7V3sw==", - "dependencies": { - "@babel/code-frame": "^7.16.7", - "@babel/generator": "^7.18.0", - "@babel/helper-environment-visitor": "^7.16.7", - "@babel/helper-function-name": "^7.17.9", - "@babel/helper-hoist-variables": "^7.16.7", - "@babel/helper-split-export-declaration": "^7.16.7", - "@babel/parser": "^7.18.0", - "@babel/types": "^7.18.0", + "version": "7.18.13", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.18.13.tgz", + "integrity": "sha512-N6kt9X1jRMLPxxxPYWi7tgvJRH/rtoU+dbKAPDM44RFHiMH8igdsaSBgFeskhSl/kLWLDUvIh1RXCrTmg0/zvA==", + "dependencies": { + "@babel/code-frame": "^7.18.6", + "@babel/generator": "^7.18.13", + "@babel/helper-environment-visitor": "^7.18.9", + "@babel/helper-function-name": "^7.18.9", + "@babel/helper-hoist-variables": "^7.18.6", + "@babel/helper-split-export-declaration": "^7.18.6", + "@babel/parser": "^7.18.13", + "@babel/types": "^7.18.13", "debug": "^4.1.0", "globals": "^11.1.0" }, @@ -1822,9 +1833,9 @@ } }, "node_modules/@babel/types": { - "version": "7.18.10", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.18.10.tgz", - "integrity": "sha512-MJvnbEiiNkpjo+LknnmRrqbY1GPUUggjv+wQVjetM/AONoupqRALB7I6jGqNUAZsKcRIEu2J6FRFvsczljjsaQ==", + "version": "7.18.13", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.18.13.tgz", + "integrity": "sha512-ePqfTihzW0W6XAU+aMw2ykilisStJfDnsejDCXRchCcMJ4O0+8DhPXf2YUbZ6wjBlsEmZwLK/sPweWtu8hcJYQ==", "dependencies": { "@babel/helper-string-parser": "^7.18.10", "@babel/helper-validator-identifier": "^7.18.6", @@ -1845,16 +1856,17 @@ } }, "node_modules/@commitlint/cli": { - "version": "17.0.0", - "resolved": "https://registry.npmjs.org/@commitlint/cli/-/cli-17.0.0.tgz", - "integrity": "sha512-Np6slCdVVG1XwMvwbZrXIzS1INPAD5QmN4L6al04AmCd4nAPU63gxgxC5Mz0Fmx7va23Uvb0S7yEFV1JPhvPUQ==", + "version": "17.0.3", + "resolved": "https://registry.npmjs.org/@commitlint/cli/-/cli-17.0.3.tgz", + "integrity": "sha512-oAo2vi5d8QZnAbtU5+0cR2j+A7PO8zuccux65R/EycwvsZrDVyW518FFrnJK2UQxbRtHFFIG+NjQ6vOiJV0Q8A==", "dev": true, "dependencies": { "@commitlint/format": "^17.0.0", - "@commitlint/lint": "^17.0.0", - "@commitlint/load": "^17.0.0", 
+ "@commitlint/lint": "^17.0.3", + "@commitlint/load": "^17.0.3", "@commitlint/read": "^17.0.0", "@commitlint/types": "^17.0.0", + "execa": "^5.0.0", "lodash": "^4.17.19", "resolve-from": "5.0.0", "resolve-global": "1.0.0", @@ -1880,18 +1892,40 @@ } }, "node_modules/@commitlint/config-validator": { - "version": "17.0.0", - "resolved": "https://registry.npmjs.org/@commitlint/config-validator/-/config-validator-17.0.0.tgz", - "integrity": "sha512-78IQjoZWR4kDHp/U5y17euEWzswJpPkA9TDL5F6oZZZaLIEreWzrDZD5PWtM8MsSRl/K2LDU/UrzYju2bKLMpA==", + "version": "17.0.3", + "resolved": "https://registry.npmjs.org/@commitlint/config-validator/-/config-validator-17.0.3.tgz", + "integrity": "sha512-3tLRPQJKapksGE7Kee9axv+9z5I2GDHitDH4q63q7NmNA0wkB+DAorJ0RHz2/K00Zb1/MVdHzhCga34FJvDihQ==", "dev": true, "dependencies": { "@commitlint/types": "^17.0.0", - "ajv": "^6.12.6" + "ajv": "^8.11.0" }, "engines": { "node": ">=v14" } }, + "node_modules/@commitlint/config-validator/node_modules/ajv": { + "version": "8.11.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.11.0.tgz", + "integrity": "sha512-wGgprdCvMalC0BztXvitD2hC04YffAvtsUn93JbGXYLAtCUO4xd17mCCZQxUOItiBwZvJScWo8NIvQMQ71rdpg==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/@commitlint/config-validator/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "dev": true + }, "node_modules/@commitlint/ensure": { "version": "17.0.0", "resolved": "https://registry.npmjs.org/@commitlint/ensure/-/ensure-17.0.0.tgz", @@ -1998,9 +2032,9 @@ } }, "node_modules/@commitlint/is-ignored": { - "version": "17.0.0", - "resolved": "https://registry.npmjs.org/@commitlint/is-ignored/-/is-ignored-17.0.0.tgz", - "integrity": "sha512-UmacD0XM/wWykgdXn5CEWVS4XGuqzU+ZGvM2hwv85+SXGnIOaG88XHrt81u37ZeVt1riWW+YdOxcJW6+nd5v5w==", + "version": "17.0.3", + "resolved": "https://registry.npmjs.org/@commitlint/is-ignored/-/is-ignored-17.0.3.tgz", + "integrity": "sha512-/wgCXAvPtFTQZxsVxj7owLeRf5wwzcXLaYmrZPR4a87iD4sCvUIRl1/ogYrtOyUmHwWfQsvjqIB4mWE/SqWSnA==", "dev": true, "dependencies": { "@commitlint/types": "^17.0.0", @@ -2044,12 +2078,12 @@ "dev": true }, "node_modules/@commitlint/lint": { - "version": "17.0.0", - "resolved": "https://registry.npmjs.org/@commitlint/lint/-/lint-17.0.0.tgz", - "integrity": "sha512-5FL7VLvGJQby24q0pd4UdM8FNFcL+ER1T/UBf8A9KRL5+QXV1Rkl6Zhcl7+SGpGlVo6Yo0pm6aLW716LVKWLGg==", + "version": "17.0.3", + "resolved": "https://registry.npmjs.org/@commitlint/lint/-/lint-17.0.3.tgz", + "integrity": "sha512-2o1fk7JUdxBUgszyt41sHC/8Nd5PXNpkmuOo9jvGIjDHzOwXyV0PSdbEVTH3xGz9NEmjohFHr5l+N+T9fcxong==", "dev": true, "dependencies": { - "@commitlint/is-ignored": "^17.0.0", + "@commitlint/is-ignored": "^17.0.3", "@commitlint/parse": "^17.0.0", "@commitlint/rules": "^17.0.0", "@commitlint/types": "^17.0.0" @@ -2059,14 +2093,14 @@ } }, "node_modules/@commitlint/load": { - "version": "17.0.0", - "resolved": "https://registry.npmjs.org/@commitlint/load/-/load-17.0.0.tgz", - "integrity": "sha512-XaiHF4yWQOPAI0O6wXvk+NYLtJn/Xb7jgZEeKd4C1ZWd7vR7u8z5h0PkWxSr0uLZGQsElGxv3fiZ32C5+q6M8w==", + "version": "17.0.3", + "resolved": 
"https://registry.npmjs.org/@commitlint/load/-/load-17.0.3.tgz", + "integrity": "sha512-3Dhvr7GcKbKa/ey4QJ5MZH3+J7QFlARohUow6hftQyNjzoXXROm+RwpBes4dDFrXG1xDw9QPXA7uzrOShCd4bw==", "dev": true, "dependencies": { - "@commitlint/config-validator": "^17.0.0", + "@commitlint/config-validator": "^17.0.3", "@commitlint/execute-rule": "^17.0.0", - "@commitlint/resolve-extends": "^17.0.0", + "@commitlint/resolve-extends": "^17.0.3", "@commitlint/types": "^17.0.0", "@types/node": ">=12", "chalk": "^4.1.0", @@ -2189,12 +2223,12 @@ } }, "node_modules/@commitlint/resolve-extends": { - "version": "17.0.0", - "resolved": "https://registry.npmjs.org/@commitlint/resolve-extends/-/resolve-extends-17.0.0.tgz", - "integrity": "sha512-wi60WiJmwaQ7lzMXK8Vbc18Hq9tE2j/6iv2AFfPUGV7fvfY6Sf1iNKuUHirSqR0fquUyufIXe4y/K9A6LVIIvw==", + "version": "17.0.3", + "resolved": "https://registry.npmjs.org/@commitlint/resolve-extends/-/resolve-extends-17.0.3.tgz", + "integrity": "sha512-H/RFMvrcBeJCMdnVC4i8I94108UDccIHrTke2tyQEg9nXQnR5/Hd6MhyNWkREvcrxh9Y+33JLb+PiPiaBxCtBA==", "dev": true, "dependencies": { - "@commitlint/config-validator": "^17.0.0", + "@commitlint/config-validator": "^17.0.3", "@commitlint/types": "^17.0.0", "import-fresh": "^3.0.0", "lodash": "^4.17.19", @@ -2243,12 +2277,12 @@ } }, "node_modules/@commitlint/travis-cli": { - "version": "17.0.0", - "resolved": "https://registry.npmjs.org/@commitlint/travis-cli/-/travis-cli-17.0.0.tgz", - "integrity": "sha512-0SBUjEQAHeeIakuyo1Rm0YgEtDXY0qFZYpKWgNmRqZl/QfsUddm7nz5/9pYXxbOpSbSNUpuiPJOV+dPTBVF5bg==", + "version": "17.0.3", + "resolved": "https://registry.npmjs.org/@commitlint/travis-cli/-/travis-cli-17.0.3.tgz", + "integrity": "sha512-A2JUrh4kLxvm9UsG6LjUWlWbYJAZKM6oHUkBIRttrX/u/vnMuNkzzGvAj/Dso/C51l4TovkbFKz7hQyMgfBGRw==", "dev": true, "dependencies": { - "@commitlint/cli": "^17.0.0", + "@commitlint/cli": "^17.0.3", "execa": "^5.0.0" }, "bin": { @@ -2340,27 +2374,28 @@ "node": ">=8" } }, - "node_modules/@cspotcode/source-map-consumer": { - "version": "0.8.0", - "resolved": "https://registry.npmjs.org/@cspotcode/source-map-consumer/-/source-map-consumer-0.8.0.tgz", - "integrity": "sha512-41qniHzTU8yAGbCp04ohlmSrZf8bkf/iJsl3V0dRGsQN/5GFfx+LbCSsCpp2gqrqjTVg/K6O8ycoV35JIwAzAg==", - "dev": true, - "engines": { - "node": ">= 12" - } - }, "node_modules/@cspotcode/source-map-support": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.7.0.tgz", - "integrity": "sha512-X4xqRHqN8ACt2aHVe51OxeA2HjbcL4MqFqXkrmQszJ1NOUuUu5u6Vqx/0lZSVNku7velL5FC/s5uEAj1lsBMhA==", + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz", + "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==", "dev": true, "dependencies": { - "@cspotcode/source-map-consumer": "0.8.0" + "@jridgewell/trace-mapping": "0.3.9" }, "engines": { "node": ">=12" } }, + "node_modules/@cspotcode/source-map-support/node_modules/@jridgewell/trace-mapping": { + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz", + "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==", + "dev": true, + "dependencies": { + "@jridgewell/resolve-uri": "^3.0.3", + "@jridgewell/sourcemap-codec": "^1.4.10" + } + }, "node_modules/@eslint/eslintrc": { "version": "0.4.3", "resolved": 
"https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-0.4.3.tgz", @@ -2808,11 +2843,11 @@ } }, "node_modules/@jridgewell/gen-mapping": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.1.tgz", - "integrity": "sha512-GcHwniMlA2z+WFPWuY8lp3fsza0I8xPFMWL5+n8LYyP6PSvPrXf4+n8stDHZY2DM0zy9sVkRDy1jDI4XGzYVqg==", + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.2.tgz", + "integrity": "sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A==", "dependencies": { - "@jridgewell/set-array": "^1.0.0", + "@jridgewell/set-array": "^1.0.1", "@jridgewell/sourcemap-codec": "^1.4.10", "@jridgewell/trace-mapping": "^0.3.9" }, @@ -6229,12 +6264,12 @@ } }, "node_modules/babel-plugin-polyfill-corejs2": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.3.1.tgz", - "integrity": "sha512-v7/T6EQcNfVLfcN2X8Lulb7DjprieyLWJK/zOWH5DUYcAgex9sP3h25Q+DLsX9TloXe3y1O8l2q2Jv9q8UVB9w==", + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.3.2.tgz", + "integrity": "sha512-LPnodUl3lS0/4wN3Rb+m+UK8s7lj2jcLRrjho4gLw+OJs+I4bvGXshINesY5xx/apM+biTnQ9reDI8yj+0M5+Q==", "dependencies": { - "@babel/compat-data": "^7.13.11", - "@babel/helper-define-polyfill-provider": "^0.3.1", + "@babel/compat-data": "^7.17.7", + "@babel/helper-define-polyfill-provider": "^0.3.2", "semver": "^6.1.1" }, "peerDependencies": { @@ -6242,11 +6277,11 @@ } }, "node_modules/babel-plugin-polyfill-corejs3": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.5.2.tgz", - "integrity": "sha512-G3uJih0XWiID451fpeFaYGVuxHEjzKTHtc9uGFEjR6hHrvNzeS/PX+LLLcetJcytsB5m4j+K3o/EpXJNb/5IEQ==", + "version": "0.5.3", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.5.3.tgz", + "integrity": "sha512-zKsXDh0XjnrUEW0mxIHLfjBfnXSMr5Q/goMe/fxpQnLm07mcOZiIZHBNWCMx60HmdvjxfXcalac0tfFg0wqxyw==", "dependencies": { - "@babel/helper-define-polyfill-provider": "^0.3.1", + "@babel/helper-define-polyfill-provider": "^0.3.2", "core-js-compat": "^3.21.0" }, "peerDependencies": { @@ -7560,13 +7595,13 @@ } }, "node_modules/cosmiconfig-typescript-loader": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/cosmiconfig-typescript-loader/-/cosmiconfig-typescript-loader-2.0.0.tgz", - "integrity": "sha512-2NlGul/E3vTQEANqPziqkA01vfiuUU8vT0jZAuUIjEW8u3eCcnCQWLggapCjhbF76s7KQF0fM0kXSKmzaDaG1g==", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/cosmiconfig-typescript-loader/-/cosmiconfig-typescript-loader-2.0.2.tgz", + "integrity": "sha512-KmE+bMjWMXJbkWCeY4FJX/npHuZPNr9XF9q9CIQ/bpFwi1qHfCmSiKarrCcRa0LO4fWjk93pVoeRtJAkTGcYNw==", "dev": true, "dependencies": { "cosmiconfig": "^7", - "ts-node": "^10.7.0" + "ts-node": "^10.8.1" }, "engines": { "node": ">=12", @@ -8496,13 +8531,13 @@ "dev": true }, "node_modules/eslint-plugin-mocha": { - "version": "10.0.4", - "resolved": "https://registry.npmjs.org/eslint-plugin-mocha/-/eslint-plugin-mocha-10.0.4.tgz", - "integrity": "sha512-8wzAeepVY027oBHz/TmBmUr7vhVqoC1KTFeDybFLhbaWKx+aQ7fJJVuUsqcUy+L+G+XvgQBJY+cbAf7hl5DF7Q==", + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-mocha/-/eslint-plugin-mocha-10.1.0.tgz", + "integrity": 
"sha512-xLqqWUF17llsogVOC+8C6/jvQ+4IoOREbN7ZCHuOHuD6cT5cDD4h7f2LgsZuzMAiwswWE21tO7ExaknHVDrSkw==", "dev": true, "dependencies": { "eslint-utils": "^3.0.0", - "ramda": "^0.28.0" + "rambda": "^7.1.0" }, "engines": { "node": ">=14.0.0" @@ -9910,7 +9945,7 @@ "node_modules/global-dirs": { "version": "0.1.1", "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-0.1.1.tgz", - "integrity": "sha1-sxnA3UYH81PzvpzKTHL8FIxJ9EU=", + "integrity": "sha512-NknMLn7F2J7aflwFOlGdNIuCDpN3VGoSoB+aap3KABFWbHVn1TCgFC+np23J8W2BiZbjfEw3BFBycSMv1AFblg==", "dev": true, "dependencies": { "ini": "^1.3.4" @@ -18679,15 +18714,11 @@ "node": ">=8" } }, - "node_modules/ramda": { - "version": "0.28.0", - "resolved": "https://registry.npmjs.org/ramda/-/ramda-0.28.0.tgz", - "integrity": "sha512-9QnLuG/kPVgWvMQ4aODhsBUFKOUmnbUnsSXACv+NCQZcHbeb+v8Lodp8OVxtRULN1/xOyYLLaL6npE6dMq5QTA==", - "dev": true, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/ramda" - } + "node_modules/rambda": { + "version": "7.2.1", + "resolved": "https://registry.npmjs.org/rambda/-/rambda-7.2.1.tgz", + "integrity": "sha512-Wswj8ZvzdI3VhaGPkZAxaCTwuMmGtgWt7Zxsgyo4P+iTmVnkojvyWaOep5q3ZjMIecW0wtQa66GWxaKkZ24RAA==", + "dev": true }, "node_modules/randombytes": { "version": "2.1.0", @@ -19224,9 +19255,9 @@ } }, "node_modules/regexpu-core": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-5.0.1.tgz", - "integrity": "sha512-CriEZlrKK9VJw/xQGJpQM5rY88BtuL8DM+AEwvcThHilbxiTAy8vq4iJnd2tqq8wLmjbGZzP7ZcKFjbGkmEFrw==", + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-5.1.0.tgz", + "integrity": "sha512-bb6hk+xWd2PEOkj5It46A16zFMs2mv86Iwpdu94la4S3sJ7C973h2dHpYKwIBGaWSO7cIRJ+UX0IeMaWcO4qwA==", "dependencies": { "regenerate": "^1.4.2", "regenerate-unicode-properties": "^10.0.1", @@ -19270,7 +19301,7 @@ "node_modules/regjsparser/node_modules/jsesc": { "version": "0.5.0", "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", - "integrity": "sha1-597mbjXW/Bb3EP6R1c9p9w8IkR0=", + "integrity": "sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==", "bin": { "jsesc": "bin/jsesc" } @@ -21355,12 +21386,12 @@ } }, "node_modules/ts-node": { - "version": "10.7.0", - "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.7.0.tgz", - "integrity": "sha512-TbIGS4xgJoX2i3do417KSaep1uRAW/Lu+WAL2doDHC0D6ummjirVOXU5/7aiZotbQ5p1Zp9tP7U6cYhA0O7M8A==", + "version": "10.9.1", + "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.1.tgz", + "integrity": "sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==", "dev": true, "dependencies": { - "@cspotcode/source-map-support": "0.7.0", + "@cspotcode/source-map-support": "^0.8.0", "@tsconfig/node10": "^1.0.7", "@tsconfig/node12": "^1.0.7", "@tsconfig/node14": "^1.0.0", @@ -21371,7 +21402,7 @@ "create-require": "^1.1.0", "diff": "^4.0.1", "make-error": "^1.1.1", - "v8-compile-cache-lib": "^3.0.0", + "v8-compile-cache-lib": "^3.0.1", "yn": "3.1.1" }, "bin": { @@ -21908,9 +21939,9 @@ "dev": true }, "node_modules/v8-compile-cache-lib": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.0.tgz", - "integrity": "sha512-mpSYqfsFvASnSn5qMiwrr4VKfumbPyONLCOPmsR3A6pTY/r0+tSaVbgPWSAIuzbk3lCTa+FForeTiO+wBQGkjA==", + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz", + "integrity": 
"sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==", "dev": true }, "node_modules/validate-npm-package-license": { @@ -22433,9 +22464,9 @@ } }, "node_modules/yargs/node_modules/yargs-parser": { - "version": "21.0.1", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.0.1.tgz", - "integrity": "sha512-9BK1jFpLzJROCI5TzwZL/TU4gqjK5xiHV/RfWLOahrjAko/e4DJkRDZQXfvqAsiZzzYhgAzbgz6lg48jcm4GLg==", + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", "dev": true, "engines": { "node": ">=12" @@ -22495,33 +22526,33 @@ } }, "@babel/code-frame": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.16.7.tgz", - "integrity": "sha512-iAXqUn8IIeBTNd72xsFlgaXHkMBMt6y4HJp1tIaK465CWLT/fG1aqB7ykr95gHHmlBdGbFeWWfyB4NJJ0nmeIg==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.18.6.tgz", + "integrity": "sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q==", "requires": { - "@babel/highlight": "^7.16.7" + "@babel/highlight": "^7.18.6" } }, "@babel/compat-data": { - "version": "7.17.10", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.17.10.tgz", - "integrity": "sha512-GZt/TCsG70Ms19gfZO1tM4CVnXsPgEPBCpJu+Qz3L0LUDsY5nZqFZglIoPC1kIYOtNBZlrnFT+klg12vFGZXrw==" + "version": "7.18.8", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.18.8.tgz", + "integrity": "sha512-HSmX4WZPPK3FUxYp7g2T6EyO8j96HlZJlxmKPSh6KAcqwyDrfx7hKjXpAW/0FhFfTJsR0Yt4lAjLI2coMptIHQ==" }, "@babel/core": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.18.0.tgz", - "integrity": "sha512-Xyw74OlJwDijToNi0+6BBI5mLLR5+5R3bcSH80LXzjzEGEUlvNzujEE71BaD/ApEZHAvFI/Mlmp4M5lIkdeeWw==", + "version": "7.18.13", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.18.13.tgz", + "integrity": "sha512-ZisbOvRRusFktksHSG6pjj1CSvkPkcZq/KHD45LAkVP/oiHJkNBZWfpvlLmX8OtHDG8IuzsFlVRWo08w7Qxn0A==", "requires": { "@ampproject/remapping": "^2.1.0", - "@babel/code-frame": "^7.16.7", - "@babel/generator": "^7.18.0", - "@babel/helper-compilation-targets": "^7.17.10", - "@babel/helper-module-transforms": "^7.18.0", - "@babel/helpers": "^7.18.0", - "@babel/parser": "^7.18.0", - "@babel/template": "^7.16.7", - "@babel/traverse": "^7.18.0", - "@babel/types": "^7.18.0", + "@babel/code-frame": "^7.18.6", + "@babel/generator": "^7.18.13", + "@babel/helper-compilation-targets": "^7.18.9", + "@babel/helper-module-transforms": "^7.18.9", + "@babel/helpers": "^7.18.9", + "@babel/parser": "^7.18.13", + "@babel/template": "^7.18.10", + "@babel/traverse": "^7.18.13", + "@babel/types": "^7.18.13", "convert-source-map": "^1.7.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", @@ -22530,12 +22561,12 @@ } }, "@babel/generator": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.18.0.tgz", - "integrity": "sha512-81YO9gGx6voPXlvYdZBliFXAZU8vZ9AZ6z+CjlmcnaeOcYSFbMTpdeDUO9xD9dh/68Vq03I8ZspfUTPfitcDHg==", + "version": "7.18.13", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.18.13.tgz", + "integrity": "sha512-CkPg8ySSPuHTYPJYo7IRALdqyjM9HCbt/3uOBEFbzyGVP6Mn8bwFPB0jX6982JVNBlYzM1nnPkfjuXSOPtQeEQ==", "requires": { - "@babel/types": "^7.18.0", - 
"@jridgewell/gen-mapping": "^0.3.0", + "@babel/types": "^7.18.13", + "@jridgewell/gen-mapping": "^0.3.2", "jsesc": "^2.5.1" } }, @@ -22548,57 +22579,55 @@ } }, "@babel/helper-builder-binary-assignment-operator-visitor": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.16.7.tgz", - "integrity": "sha512-C6FdbRaxYjwVu/geKW4ZeQ0Q31AftgRcdSnZ5/jsH6BzCJbtvXvhpfkbkThYSuutZA7nCXpPR6AD9zd1dprMkA==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.18.9.tgz", + "integrity": "sha512-yFQ0YCHoIqarl8BCRwBL8ulYUaZpz3bNsA7oFepAzee+8/+ImtADXNOmO5vJvsPff3qi+hvpkY/NYBTrBQgdNw==", "requires": { - "@babel/helper-explode-assignable-expression": "^7.16.7", - "@babel/types": "^7.16.7" + "@babel/helper-explode-assignable-expression": "^7.18.6", + "@babel/types": "^7.18.9" } }, "@babel/helper-compilation-targets": { - "version": "7.17.10", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.17.10.tgz", - "integrity": "sha512-gh3RxjWbauw/dFiU/7whjd0qN9K6nPJMqe6+Er7rOavFh0CQUSwhAE3IcTho2rywPJFxej6TUUHDkWcYI6gGqQ==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.18.9.tgz", + "integrity": "sha512-tzLCyVmqUiFlcFoAPLA/gL9TeYrF61VLNtb+hvkuVaB5SUjW7jcfrglBIX1vUIoT7CLP3bBlIMeyEsIl2eFQNg==", "requires": { - "@babel/compat-data": "^7.17.10", - "@babel/helper-validator-option": "^7.16.7", + "@babel/compat-data": "^7.18.8", + "@babel/helper-validator-option": "^7.18.6", "browserslist": "^4.20.2", "semver": "^6.3.0" } }, "@babel/helper-create-class-features-plugin": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.18.0.tgz", - "integrity": "sha512-Kh8zTGR9de3J63e5nS0rQUdRs/kbtwoeQQ0sriS0lItjC96u8XXZN6lKpuyWd2coKSU13py/y+LTmThLuVX0Pg==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.18.9.tgz", + "integrity": "sha512-WvypNAYaVh23QcjpMR24CwZY2Nz6hqdOcFdPbNpV56hL5H6KiFheO7Xm1aPdlLQ7d5emYZX7VZwPp9x3z+2opw==", "requires": { - "@babel/helper-annotate-as-pure": "^7.16.7", - "@babel/helper-environment-visitor": "^7.16.7", - "@babel/helper-function-name": "^7.17.9", - "@babel/helper-member-expression-to-functions": "^7.17.7", - "@babel/helper-optimise-call-expression": "^7.16.7", - "@babel/helper-replace-supers": "^7.16.7", - "@babel/helper-split-export-declaration": "^7.16.7" + "@babel/helper-annotate-as-pure": "^7.18.6", + "@babel/helper-environment-visitor": "^7.18.9", + "@babel/helper-function-name": "^7.18.9", + "@babel/helper-member-expression-to-functions": "^7.18.9", + "@babel/helper-optimise-call-expression": "^7.18.6", + "@babel/helper-replace-supers": "^7.18.9", + "@babel/helper-split-export-declaration": "^7.18.6" } }, "@babel/helper-create-regexp-features-plugin": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.17.12.tgz", - "integrity": "sha512-b2aZrV4zvutr9AIa6/gA3wsZKRwTKYoDxYiFKcESS3Ug2GTXzwBEvMuuFLhCQpEnRXs1zng4ISAXSUxxKBIcxw==", + "version": "7.18.6", + "resolved": 
"https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.18.6.tgz", + "integrity": "sha512-7LcpH1wnQLGrI+4v+nPp+zUvIkF9x0ddv1Hkdue10tg3gmRnLy97DXh4STiOf1qeIInyD69Qv5kKSZzKD8B/7A==", "requires": { - "@babel/helper-annotate-as-pure": "^7.16.7", - "regexpu-core": "^5.0.1" + "@babel/helper-annotate-as-pure": "^7.18.6", + "regexpu-core": "^5.1.0" } }, "@babel/helper-define-polyfill-provider": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.3.1.tgz", - "integrity": "sha512-J9hGMpJQmtWmj46B3kBHmL38UhJGhYX7eqkcq+2gsstyYt341HmPeWspihX43yVRA0mS+8GGk2Gckc7bY/HCmA==", + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.3.2.tgz", + "integrity": "sha512-r9QJJ+uDWrd+94BSPcP6/de67ygLtvVy6cK4luE6MOuDsZIdoaPBnfSpbO/+LTifjPckbKXRuI9BB/Z2/y3iTg==", "requires": { - "@babel/helper-compilation-targets": "^7.13.0", - "@babel/helper-module-imports": "^7.12.13", - "@babel/helper-plugin-utils": "^7.13.0", - "@babel/traverse": "^7.13.0", + "@babel/helper-compilation-targets": "^7.17.7", + "@babel/helper-plugin-utils": "^7.16.7", "debug": "^4.1.1", "lodash.debounce": "^4.0.8", "resolve": "^1.14.2", @@ -22606,44 +22635,41 @@ } }, "@babel/helper-environment-visitor": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.16.7.tgz", - "integrity": "sha512-SLLb0AAn6PkUeAfKJCCOl9e1R53pQlGAfc4y4XuMRZfqeMYLE0dM1LMhqbGAlGQY0lfw5/ohoYWAe9V1yibRag==", - "requires": { - "@babel/types": "^7.16.7" - } + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.18.9.tgz", + "integrity": "sha512-3r/aACDJ3fhQ/EVgFy0hpj8oHyHpQc+LPtJoY9SzTThAsStm4Ptegq92vqKoE3vD706ZVFWITnMnxucw+S9Ipg==" }, "@babel/helper-explode-assignable-expression": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.16.7.tgz", - "integrity": "sha512-KyUenhWMC8VrxzkGP0Jizjo4/Zx+1nNZhgocs+gLzyZyB8SHidhoq9KK/8Ato4anhwsivfkBLftky7gvzbZMtQ==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.18.6.tgz", + "integrity": "sha512-eyAYAsQmB80jNfg4baAtLeWAQHfHFiR483rzFK+BhETlGZaQC9bsfrugfXDCbRHLQbIA7U5NxhhOxN7p/dWIcg==", "requires": { - "@babel/types": "^7.16.7" + "@babel/types": "^7.18.6" } }, "@babel/helper-function-name": { - "version": "7.17.9", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.17.9.tgz", - "integrity": "sha512-7cRisGlVtiVqZ0MW0/yFB4atgpGLWEHUVYnb448hZK4x+vih0YO5UoS11XIYtZYqHd0dIPMdUSv8q5K4LdMnIg==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.18.9.tgz", + "integrity": "sha512-fJgWlZt7nxGksJS9a0XdSaI4XvpExnNIgRP+rVefWh5U7BL8pPuir6SJUmFKRfjWQ51OtWSzwOxhaH/EBWWc0A==", "requires": { - "@babel/template": "^7.16.7", - "@babel/types": "^7.17.0" + "@babel/template": "^7.18.6", + "@babel/types": "^7.18.9" } }, "@babel/helper-hoist-variables": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.16.7.tgz", - "integrity": 
"sha512-m04d/0Op34H5v7pbZw6pSKP7weA6lsMvfiIAMeIvkY/R4xQtBSMFEigu9QTZ2qB/9l22vsxtM8a+Q8CzD255fg==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz", + "integrity": "sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q==", "requires": { - "@babel/types": "^7.16.7" + "@babel/types": "^7.18.6" } }, "@babel/helper-member-expression-to-functions": { - "version": "7.17.7", - "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.17.7.tgz", - "integrity": "sha512-thxXgnQ8qQ11W2wVUObIqDL4p148VMxkt5T/qpN5k2fboRyzFGFmKsTGViquyM5QHKUy48OZoca8kw4ajaDPyw==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.18.9.tgz", + "integrity": "sha512-RxifAh2ZoVU67PyKIO4AMi1wTenGfMR/O/ae0CCRqwgBAt5v7xjdtRw7UoSbsreKrQn5t7r89eruK/9JjYHuDg==", "requires": { - "@babel/types": "^7.17.0" + "@babel/types": "^7.18.9" } }, "@babel/helper-module-imports": { @@ -22655,26 +22681,26 @@ } }, "@babel/helper-module-transforms": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.18.0.tgz", - "integrity": "sha512-kclUYSUBIjlvnzN2++K9f2qzYKFgjmnmjwL4zlmU5f8ZtzgWe8s0rUPSTGy2HmK4P8T52MQsS+HTQAgZd3dMEA==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.18.9.tgz", + "integrity": "sha512-KYNqY0ICwfv19b31XzvmI/mfcylOzbLtowkw+mfvGPAQ3kfCnMLYbED3YecL5tPd8nAYFQFAd6JHp2LxZk/J1g==", "requires": { - "@babel/helper-environment-visitor": "^7.16.7", - "@babel/helper-module-imports": "^7.16.7", - "@babel/helper-simple-access": "^7.17.7", - "@babel/helper-split-export-declaration": "^7.16.7", - "@babel/helper-validator-identifier": "^7.16.7", - "@babel/template": "^7.16.7", - "@babel/traverse": "^7.18.0", - "@babel/types": "^7.18.0" + "@babel/helper-environment-visitor": "^7.18.9", + "@babel/helper-module-imports": "^7.18.6", + "@babel/helper-simple-access": "^7.18.6", + "@babel/helper-split-export-declaration": "^7.18.6", + "@babel/helper-validator-identifier": "^7.18.6", + "@babel/template": "^7.18.6", + "@babel/traverse": "^7.18.9", + "@babel/types": "^7.18.9" } }, "@babel/helper-optimise-call-expression": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.16.7.tgz", - "integrity": "sha512-EtgBhg7rd/JcnpZFXpBy0ze1YRfdm7BnBX4uKMBd3ixa3RGAE002JZB66FJyNH7g0F38U05pXmA5P8cBh7z+1w==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.18.6.tgz", + "integrity": "sha512-HP59oD9/fEHQkdcbgFCnbmgH5vIQTJbxh2yf+CdM89/glUNnuzr87Q8GIjGEnOktTROemO0Pe0iPAYbqZuOUiA==", "requires": { - "@babel/types": "^7.16.7" + "@babel/types": "^7.18.6" } }, "@babel/helper-plugin-utils": { @@ -22683,49 +22709,50 @@ "integrity": "sha512-aBXPT3bmtLryXaoJLyYPXPlSD4p1ld9aYeR+sJNOZjJJGiOpb+fKfh3NkcCu7J54nUJwCERPBExCCpyCOHnu/w==" }, "@babel/helper-remap-async-to-generator": { - "version": "7.16.8", - "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.16.8.tgz", - "integrity": "sha512-fm0gH7Flb8H51LqJHy3HJ3wnE1+qtYR2A99K06ahwrawLdOFsCEWjZOrYricXJHoPSudNKxrMBUPEIPxiIIvBw==", + "version": "7.18.9", + 
"resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.18.9.tgz", + "integrity": "sha512-dI7q50YKd8BAv3VEfgg7PS7yD3Rtbi2J1XMXaalXO0W0164hYLnh8zpjRS0mte9MfVp/tltvr/cfdXPvJr1opA==", "requires": { - "@babel/helper-annotate-as-pure": "^7.16.7", - "@babel/helper-wrap-function": "^7.16.8", - "@babel/types": "^7.16.8" + "@babel/helper-annotate-as-pure": "^7.18.6", + "@babel/helper-environment-visitor": "^7.18.9", + "@babel/helper-wrap-function": "^7.18.9", + "@babel/types": "^7.18.9" } }, "@babel/helper-replace-supers": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.16.7.tgz", - "integrity": "sha512-y9vsWilTNaVnVh6xiJfABzsNpgDPKev9HnAgz6Gb1p6UUwf9NepdlsV7VXGCftJM+jqD5f7JIEubcpLjZj5dBw==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.18.9.tgz", + "integrity": "sha512-dNsWibVI4lNT6HiuOIBr1oyxo40HvIVmbwPUm3XZ7wMh4k2WxrxTqZwSqw/eEmXDS9np0ey5M2bz9tBmO9c+YQ==", "requires": { - "@babel/helper-environment-visitor": "^7.16.7", - "@babel/helper-member-expression-to-functions": "^7.16.7", - "@babel/helper-optimise-call-expression": "^7.16.7", - "@babel/traverse": "^7.16.7", - "@babel/types": "^7.16.7" + "@babel/helper-environment-visitor": "^7.18.9", + "@babel/helper-member-expression-to-functions": "^7.18.9", + "@babel/helper-optimise-call-expression": "^7.18.6", + "@babel/traverse": "^7.18.9", + "@babel/types": "^7.18.9" } }, "@babel/helper-simple-access": { - "version": "7.17.7", - "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.17.7.tgz", - "integrity": "sha512-txyMCGroZ96i+Pxr3Je3lzEJjqwaRC9buMUgtomcrLe5Nd0+fk1h0LLA+ixUF5OW7AhHuQ7Es1WcQJZmZsz2XA==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.18.6.tgz", + "integrity": "sha512-iNpIgTgyAvDQpDj76POqg+YEt8fPxx3yaNBg3S30dxNKm2SWfYhD0TGrK/Eu9wHpUW63VQU894TsTg+GLbUa1g==", "requires": { - "@babel/types": "^7.17.0" + "@babel/types": "^7.18.6" } }, "@babel/helper-skip-transparent-expression-wrappers": { - "version": "7.16.0", - "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.16.0.tgz", - "integrity": "sha512-+il1gTy0oHwUsBQZyJvukbB4vPMdcYBrFHa0Uc4AizLxbq6BOYC51Rv4tWocX9BLBDLZ4kc6qUFpQ6HRgL+3zw==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.18.9.tgz", + "integrity": "sha512-imytd2gHi3cJPsybLRbmFrF7u5BIEuI2cNheyKi3/iOBC63kNn3q8Crn2xVuESli0aM4KYsyEqKyS7lFL8YVtw==", "requires": { - "@babel/types": "^7.16.0" + "@babel/types": "^7.18.9" } }, "@babel/helper-split-export-declaration": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.16.7.tgz", - "integrity": "sha512-xbWoy/PFoxSWazIToT9Sif+jJTlrMcndIsaOKvTA6u7QEo7ilkRZpjew18/W3c7nm8fXdUDXh02VXTbZ0pGDNw==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz", + "integrity": "sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA==", "requires": { - "@babel/types": "^7.16.7" + "@babel/types": "^7.18.6" } }, "@babel/helper-string-parser": { @@ -22744,94 +22771,95 @@ 
"integrity": "sha512-XO7gESt5ouv/LRJdrVjkShckw6STTaB7l9BrpBaAHDeF5YZT+01PCwmR0SJHnkW6i8OwW/EVWRShfi4j2x+KQw==" }, "@babel/helper-wrap-function": { - "version": "7.16.8", - "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.16.8.tgz", - "integrity": "sha512-8RpyRVIAW1RcDDGTA+GpPAwV22wXCfKOoM9bet6TLkGIFTkRQSkH1nMQ5Yet4MpoXe1ZwHPVtNasc2w0uZMqnw==", + "version": "7.18.11", + "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.18.11.tgz", + "integrity": "sha512-oBUlbv+rjZLh2Ks9SKi4aL7eKaAXBWleHzU89mP0G6BMUlRxSckk9tSIkgDGydhgFxHuGSlBQZfnaD47oBEB7w==", "requires": { - "@babel/helper-function-name": "^7.16.7", - "@babel/template": "^7.16.7", - "@babel/traverse": "^7.16.8", - "@babel/types": "^7.16.8" + "@babel/helper-function-name": "^7.18.9", + "@babel/template": "^7.18.10", + "@babel/traverse": "^7.18.11", + "@babel/types": "^7.18.10" } }, "@babel/helpers": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.18.0.tgz", - "integrity": "sha512-AE+HMYhmlMIbho9nbvicHyxFwhrO+xhKB6AhRxzl8w46Yj0VXTZjEsAoBVC7rB2I0jzX+yWyVybnO08qkfx6kg==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.18.9.tgz", + "integrity": "sha512-Jf5a+rbrLoR4eNdUmnFu8cN5eNJT6qdTdOg5IHIzq87WwyRw9PwguLFOWYgktN/60IP4fgDUawJvs7PjQIzELQ==", "requires": { - "@babel/template": "^7.16.7", - "@babel/traverse": "^7.18.0", - "@babel/types": "^7.18.0" + "@babel/template": "^7.18.6", + "@babel/traverse": "^7.18.9", + "@babel/types": "^7.18.9" } }, "@babel/highlight": { - "version": "7.16.10", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.16.10.tgz", - "integrity": "sha512-5FnTQLSLswEj6IkgVw5KusNUUFY9ZGqe/TRFnP/BKYHYgfh7tc+C7mwiy95/yNP7Dh9x580Vv8r7u7ZfTBFxdw==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.18.6.tgz", + "integrity": "sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==", "requires": { - "@babel/helper-validator-identifier": "^7.16.7", + "@babel/helper-validator-identifier": "^7.18.6", "chalk": "^2.0.0", "js-tokens": "^4.0.0" } }, "@babel/parser": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.18.0.tgz", - "integrity": "sha512-AqDccGC+m5O/iUStSJy3DGRIUFu7WbY/CppZYwrEUB4N0tZlnI8CSTsgL7v5fHVFmUbRv2sd+yy27o8Ydt4MGg==" + "version": "7.18.13", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.18.13.tgz", + "integrity": "sha512-dgXcIfMuQ0kgzLB2b9tRZs7TTFFaGM2AbtA4fJgUUYukzGH4jwsS7hzQHEGs67jdehpm22vkgKwvbU+aEflgwg==" }, "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.17.12.tgz", - "integrity": "sha512-xCJQXl4EeQ3J9C4yOmpTrtVGmzpm2iSzyxbkZHw7UCnZBftHpF/hpII80uWVyVrc40ytIClHjgWGTG1g/yB+aw==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.18.6.tgz", + "integrity": "sha512-Dgxsyg54Fx1d4Nge8UnvTrED63vrwOdPmyvPzlNN/boaliRP54pm3pGzZD1SJUwrBA+Cs/xdG8kXX6Mn/RfISQ==", "requires": { - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.6" } }, 
"@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.17.12.tgz", - "integrity": "sha512-/vt0hpIw0x4b6BLKUkwlvEoiGZYYLNZ96CzyHYPbtG2jZGz6LBe7/V+drYrc/d+ovrF9NBi0pmtvmNb/FsWtRQ==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.18.9.tgz", + "integrity": "sha512-AHrP9jadvH7qlOj6PINbgSuphjQUAK7AOT7DPjBo9EHoLhQTnnK5u45e1Hd4DbSQEO9nqPWtQ89r+XEOWFScKg==", "requires": { - "@babel/helper-plugin-utils": "^7.17.12", - "@babel/helper-skip-transparent-expression-wrappers": "^7.16.0", - "@babel/plugin-proposal-optional-chaining": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.9", + "@babel/helper-skip-transparent-expression-wrappers": "^7.18.9", + "@babel/plugin-proposal-optional-chaining": "^7.18.9" } }, "@babel/plugin-proposal-async-generator-functions": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.17.12.tgz", - "integrity": "sha512-RWVvqD1ooLKP6IqWTA5GyFVX2isGEgC5iFxKzfYOIy/QEFdxYyCybBDtIGjipHpb9bDWHzcqGqFakf+mVmBTdQ==", + "version": "7.18.10", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.18.10.tgz", + "integrity": "sha512-1mFuY2TOsR1hxbjCo4QL+qlIjV07p4H4EUYw2J/WCqsvFV6V9X9z9YhXbWndc/4fw+hYGlDT7egYxliMp5O6Ew==", "requires": { - "@babel/helper-plugin-utils": "^7.17.12", - "@babel/helper-remap-async-to-generator": "^7.16.8", + "@babel/helper-environment-visitor": "^7.18.9", + "@babel/helper-plugin-utils": "^7.18.9", + "@babel/helper-remap-async-to-generator": "^7.18.9", "@babel/plugin-syntax-async-generators": "^7.8.4" } }, "@babel/plugin-proposal-class-properties": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.17.12.tgz", - "integrity": "sha512-U0mI9q8pW5Q9EaTHFPwSVusPMV/DV9Mm8p7csqROFLtIE9rBF5piLqyrBGigftALrBcsBGu4m38JneAe7ZDLXw==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.18.6.tgz", + "integrity": "sha512-cumfXOF0+nzZrrN8Rf0t7M+tF6sZc7vhQwYQck9q1/5w2OExlD+b4v4RpMJFaV1Z7WcDRgO6FqvxqxGlwo+RHQ==", "requires": { - "@babel/helper-create-class-features-plugin": "^7.17.12", - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-create-class-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" } }, "@babel/plugin-proposal-class-static-block": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-static-block/-/plugin-proposal-class-static-block-7.18.0.tgz", - "integrity": "sha512-t+8LsRMMDE74c6sV7KShIw13sqbqd58tlqNrsWoWBTIMw7SVQ0cZ905wLNS/FBCy/3PyooRHLFFlfrUNyyz5lA==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-static-block/-/plugin-proposal-class-static-block-7.18.6.tgz", + "integrity": "sha512-+I3oIiNxrCpup3Gi8n5IGMwj0gOCAjcJUSQEcotNnCCPMEnixawOQ+KeJPlgfjzx+FKQ1QSyZOWe7wmoJp7vhw==", "requires": { - "@babel/helper-create-class-features-plugin": "^7.18.0", - "@babel/helper-plugin-utils": "^7.17.12", + "@babel/helper-create-class-features-plugin": 
"^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6", "@babel/plugin-syntax-class-static-block": "^7.14.5" } }, "@babel/plugin-proposal-dynamic-import": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.16.7.tgz", - "integrity": "sha512-I8SW9Ho3/8DRSdmDdH3gORdyUuYnk1m4cMxUAdu5oy4n3OfN8flDEH+d60iG7dUfi0KkYwSvoalHzzdRzpWHTg==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.18.6.tgz", + "integrity": "sha512-1auuwmK+Rz13SJj36R+jqFPMJWyKEDd7lLSdOj4oJK0UTgGueSAtkrCvz9ewmgyU/P941Rv2fQwZJN8s6QruXw==", "requires": { - "@babel/helper-plugin-utils": "^7.16.7", + "@babel/helper-plugin-utils": "^7.18.6", "@babel/plugin-syntax-dynamic-import": "^7.8.3" } }, @@ -22846,108 +22874,108 @@ } }, "@babel/plugin-proposal-export-namespace-from": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.17.12.tgz", - "integrity": "sha512-j7Ye5EWdwoXOpRmo5QmRyHPsDIe6+u70ZYZrd7uz+ebPYFKfRcLcNu3Ro0vOlJ5zuv8rU7xa+GttNiRzX56snQ==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.18.9.tgz", + "integrity": "sha512-k1NtHyOMvlDDFeb9G5PhUXuGj8m/wiwojgQVEhJ/fsVsMCpLyOP4h0uGEjYJKrRI+EVPlb5Jk+Gt9P97lOGwtA==", "requires": { - "@babel/helper-plugin-utils": "^7.17.12", + "@babel/helper-plugin-utils": "^7.18.9", "@babel/plugin-syntax-export-namespace-from": "^7.8.3" } }, "@babel/plugin-proposal-json-strings": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.17.12.tgz", - "integrity": "sha512-rKJ+rKBoXwLnIn7n6o6fulViHMrOThz99ybH+hKHcOZbnN14VuMnH9fo2eHE69C8pO4uX1Q7t2HYYIDmv8VYkg==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.18.6.tgz", + "integrity": "sha512-lr1peyn9kOdbYc0xr0OdHTZ5FMqS6Di+H0Fz2I/JwMzGmzJETNeOFq2pBySw6X/KFL5EWDjlJuMsUGRFb8fQgQ==", "requires": { - "@babel/helper-plugin-utils": "^7.17.12", + "@babel/helper-plugin-utils": "^7.18.6", "@babel/plugin-syntax-json-strings": "^7.8.3" } }, "@babel/plugin-proposal-logical-assignment-operators": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-logical-assignment-operators/-/plugin-proposal-logical-assignment-operators-7.17.12.tgz", - "integrity": "sha512-EqFo2s1Z5yy+JeJu7SFfbIUtToJTVlC61/C7WLKDntSw4Sz6JNAIfL7zQ74VvirxpjB5kz/kIx0gCcb+5OEo2Q==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-logical-assignment-operators/-/plugin-proposal-logical-assignment-operators-7.18.9.tgz", + "integrity": "sha512-128YbMpjCrP35IOExw2Fq+x55LMP42DzhOhX2aNNIdI9avSWl2PI0yuBWarr3RYpZBSPtabfadkH2yeRiMD61Q==", "requires": { - "@babel/helper-plugin-utils": "^7.17.12", + "@babel/helper-plugin-utils": "^7.18.9", "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" } }, "@babel/plugin-proposal-nullish-coalescing-operator": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.17.12.tgz", - "integrity": "sha512-ws/g3FSGVzv+VH86+QvgtuJL/kR67xaEIF2x0iPqdDfYW6ra6JF3lKVBkWynRLcNtIC1oCTfDRVxmm2mKzy+ag==", + "version": "7.18.6", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.18.6.tgz", + "integrity": "sha512-wQxQzxYeJqHcfppzBDnm1yAY0jSRkUXR2z8RePZYrKwMKgMlE8+Z6LUno+bd6LvbGh8Gltvy74+9pIYkr+XkKA==", "requires": { - "@babel/helper-plugin-utils": "^7.17.12", + "@babel/helper-plugin-utils": "^7.18.6", "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" } }, "@babel/plugin-proposal-numeric-separator": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.16.7.tgz", - "integrity": "sha512-vQgPMknOIgiuVqbokToyXbkY/OmmjAzr/0lhSIbG/KmnzXPGwW/AdhdKpi+O4X/VkWiWjnkKOBiqJrTaC98VKw==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.18.6.tgz", + "integrity": "sha512-ozlZFogPqoLm8WBr5Z8UckIoE4YQ5KESVcNudyXOR8uqIkliTEgJ3RoketfG6pmzLdeZF0H/wjE9/cCEitBl7Q==", "requires": { - "@babel/helper-plugin-utils": "^7.16.7", + "@babel/helper-plugin-utils": "^7.18.6", "@babel/plugin-syntax-numeric-separator": "^7.10.4" } }, "@babel/plugin-proposal-object-rest-spread": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.18.0.tgz", - "integrity": "sha512-nbTv371eTrFabDfHLElkn9oyf9VG+VKK6WMzhY2o4eHKaG19BToD9947zzGMO6I/Irstx9d8CwX6njPNIAR/yw==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.18.9.tgz", + "integrity": "sha512-kDDHQ5rflIeY5xl69CEqGEZ0KY369ehsCIEbTGb4siHG5BE9sga/T0r0OUwyZNLMmZE79E1kbsqAjwFCW4ds6Q==", "requires": { - "@babel/compat-data": "^7.17.10", - "@babel/helper-compilation-targets": "^7.17.10", - "@babel/helper-plugin-utils": "^7.17.12", + "@babel/compat-data": "^7.18.8", + "@babel/helper-compilation-targets": "^7.18.9", + "@babel/helper-plugin-utils": "^7.18.9", "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-transform-parameters": "^7.17.12" + "@babel/plugin-transform-parameters": "^7.18.8" } }, "@babel/plugin-proposal-optional-catch-binding": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.16.7.tgz", - "integrity": "sha512-eMOH/L4OvWSZAE1VkHbr1vckLG1WUcHGJSLqqQwl2GaUqG6QjddvrOaTUMNYiv77H5IKPMZ9U9P7EaHwvAShfA==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.18.6.tgz", + "integrity": "sha512-Q40HEhs9DJQyaZfUjjn6vE8Cv4GmMHCYuMGIWUnlxH6400VGxOuwWsPt4FxXxJkC/5eOzgn0z21M9gMT4MOhbw==", "requires": { - "@babel/helper-plugin-utils": "^7.16.7", + "@babel/helper-plugin-utils": "^7.18.6", "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" } }, "@babel/plugin-proposal-optional-chaining": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.17.12.tgz", - "integrity": "sha512-7wigcOs/Z4YWlK7xxjkvaIw84vGhDv/P1dFGQap0nHkc8gFKY/r+hXc8Qzf5k1gY7CvGIcHqAnOagVKJJ1wVOQ==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.18.9.tgz", + "integrity": "sha512-v5nwt4IqBXihxGsW2QmCWMDS3B3bzGIk/EQVZz2ei7f3NJl8NzAJVvUmpDW5q1CRNY+Beb/k58UAH1Km1N411w==", "requires": { - 
"@babel/helper-plugin-utils": "^7.17.12", - "@babel/helper-skip-transparent-expression-wrappers": "^7.16.0", + "@babel/helper-plugin-utils": "^7.18.9", + "@babel/helper-skip-transparent-expression-wrappers": "^7.18.9", "@babel/plugin-syntax-optional-chaining": "^7.8.3" } }, "@babel/plugin-proposal-private-methods": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.17.12.tgz", - "integrity": "sha512-SllXoxo19HmxhDWm3luPz+cPhtoTSKLJE9PXshsfrOzBqs60QP0r8OaJItrPhAj0d7mZMnNF0Y1UUggCDgMz1A==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.18.6.tgz", + "integrity": "sha512-nutsvktDItsNn4rpGItSNV2sz1XwS+nfU0Rg8aCx3W3NOKVzdMjJRu0O5OkgDp3ZGICSTbgRpxZoWsxoKRvbeA==", "requires": { - "@babel/helper-create-class-features-plugin": "^7.17.12", - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-create-class-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" } }, "@babel/plugin-proposal-private-property-in-object": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.17.12.tgz", - "integrity": "sha512-/6BtVi57CJfrtDNKfK5b66ydK2J5pXUKBKSPD2G1whamMuEnZWgoOIfO8Vf9F/DoD4izBLD/Au4NMQfruzzykg==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.18.6.tgz", + "integrity": "sha512-9Rysx7FOctvT5ouj5JODjAFAkgGoudQuLPamZb0v1TGLpapdNaftzifU8NTWQm0IRjqoYypdrSmyWgkocDQ8Dw==", "requires": { - "@babel/helper-annotate-as-pure": "^7.16.7", - "@babel/helper-create-class-features-plugin": "^7.17.12", - "@babel/helper-plugin-utils": "^7.17.12", + "@babel/helper-annotate-as-pure": "^7.18.6", + "@babel/helper-create-class-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6", "@babel/plugin-syntax-private-property-in-object": "^7.14.5" } }, "@babel/plugin-proposal-unicode-property-regex": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.17.12.tgz", - "integrity": "sha512-Wb9qLjXf3ZazqXA7IvI7ozqRIXIGPtSo+L5coFmEkhTQK18ao4UDDD0zdTGAarmbLj2urpRwrc6893cu5Bfh0A==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.18.6.tgz", + "integrity": "sha512-2BShG/d5yoZyXZfVePH91urL5wTG6ASZU9M4o03lKK8u8UW1y08OMttBSOADTcJrnPMpvDXRG3G8fyLh4ovs8w==", "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.17.12", - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-create-regexp-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" } }, "@babel/plugin-syntax-async-generators": { @@ -23009,11 +23037,11 @@ } }, "@babel/plugin-syntax-import-assertions": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.17.12.tgz", - "integrity": "sha512-n/loy2zkq9ZEM8tEOwON9wTQSTNDTDEz6NujPtJGLU7qObzT1N4c4YZZf8E6ATB2AjNQg/Ib2AIpO03EZaCehw==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.18.6.tgz", + "integrity": 
"sha512-/DU3RXad9+bZwrgWJQKbr39gYbJpLJHezqEzRzi/BHRlJ9zsQb4CK2CA/5apllXNomwA1qHwzvHl+AdEmC5krQ==", "requires": { - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.6" } }, "@babel/plugin-syntax-json-strings": { @@ -23106,94 +23134,94 @@ } }, "@babel/plugin-transform-arrow-functions": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.17.12.tgz", - "integrity": "sha512-PHln3CNi/49V+mza4xMwrg+WGYevSF1oaiXaC2EQfdp4HWlSjRsrDXWJiQBKpP7749u6vQ9mcry2uuFOv5CXvA==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.18.6.tgz", + "integrity": "sha512-9S9X9RUefzrsHZmKMbDXxweEH+YlE8JJEuat9FdvW9Qh1cw7W64jELCtWNkPBPX5En45uy28KGvA/AySqUh8CQ==", "requires": { - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.6" } }, "@babel/plugin-transform-async-to-generator": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.17.12.tgz", - "integrity": "sha512-J8dbrWIOO3orDzir57NRsjg4uxucvhby0L/KZuGsWDj0g7twWK3g7JhJhOrXtuXiw8MeiSdJ3E0OW9H8LYEzLQ==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.18.6.tgz", + "integrity": "sha512-ARE5wZLKnTgPW7/1ftQmSi1CmkqqHo2DNmtztFhvgtOWSDfq0Cq9/9L+KnZNYSNrydBekhW3rwShduf59RoXag==", "requires": { - "@babel/helper-module-imports": "^7.16.7", - "@babel/helper-plugin-utils": "^7.17.12", - "@babel/helper-remap-async-to-generator": "^7.16.8" + "@babel/helper-module-imports": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6", + "@babel/helper-remap-async-to-generator": "^7.18.6" } }, "@babel/plugin-transform-block-scoped-functions": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.16.7.tgz", - "integrity": "sha512-JUuzlzmF40Z9cXyytcbZEZKckgrQzChbQJw/5PuEHYeqzCsvebDx0K0jWnIIVcmmDOAVctCgnYs0pMcrYj2zJg==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.18.6.tgz", + "integrity": "sha512-ExUcOqpPWnliRcPqves5HJcJOvHvIIWfuS4sroBUenPuMdmW+SMHDakmtS7qOo13sVppmUijqeTv7qqGsvURpQ==", "requires": { - "@babel/helper-plugin-utils": "^7.16.7" + "@babel/helper-plugin-utils": "^7.18.6" } }, "@babel/plugin-transform-block-scoping": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.17.12.tgz", - "integrity": "sha512-jw8XW/B1i7Lqwqj2CbrViPcZijSxfguBWZP2aN59NHgxUyO/OcO1mfdCxH13QhN5LbWhPkX+f+brKGhZTiqtZQ==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.18.9.tgz", + "integrity": "sha512-5sDIJRV1KtQVEbt/EIBwGy4T01uYIo4KRB3VUqzkhrAIOGx7AoctL9+Ux88btY0zXdDyPJ9mW+bg+v+XEkGmtw==", "requires": { - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.9" } }, "@babel/plugin-transform-classes": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.17.12.tgz", - "integrity": "sha512-cvO7lc7pZat6BsvH6l/EGaI8zpl8paICaoGk+7x7guvtfak/TbIf66nYmJOH13EuG0H+Xx3M+9LQDtSvZFKXKw==", - "requires": 
{ - "@babel/helper-annotate-as-pure": "^7.16.7", - "@babel/helper-environment-visitor": "^7.16.7", - "@babel/helper-function-name": "^7.17.9", - "@babel/helper-optimise-call-expression": "^7.16.7", - "@babel/helper-plugin-utils": "^7.17.12", - "@babel/helper-replace-supers": "^7.16.7", - "@babel/helper-split-export-declaration": "^7.16.7", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.18.9.tgz", + "integrity": "sha512-EkRQxsxoytpTlKJmSPYrsOMjCILacAjtSVkd4gChEe2kXjFCun3yohhW5I7plXJhCemM0gKsaGMcO8tinvCA5g==", + "requires": { + "@babel/helper-annotate-as-pure": "^7.18.6", + "@babel/helper-environment-visitor": "^7.18.9", + "@babel/helper-function-name": "^7.18.9", + "@babel/helper-optimise-call-expression": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.9", + "@babel/helper-replace-supers": "^7.18.9", + "@babel/helper-split-export-declaration": "^7.18.6", "globals": "^11.1.0" } }, "@babel/plugin-transform-computed-properties": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.17.12.tgz", - "integrity": "sha512-a7XINeplB5cQUWMg1E/GI1tFz3LfK021IjV1rj1ypE+R7jHm+pIHmHl25VNkZxtx9uuYp7ThGk8fur1HHG7PgQ==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.18.9.tgz", + "integrity": "sha512-+i0ZU1bCDymKakLxn5srGHrsAPRELC2WIbzwjLhHW9SIE1cPYkLCL0NlnXMZaM1vhfgA2+M7hySk42VBvrkBRw==", "requires": { - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.9" } }, "@babel/plugin-transform-destructuring": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.18.0.tgz", - "integrity": "sha512-Mo69klS79z6KEfrLg/1WkmVnB8javh75HX4pi2btjvlIoasuxilEyjtsQW6XPrubNd7AQy0MMaNIaQE4e7+PQw==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.18.9.tgz", + "integrity": "sha512-p5VCYNddPLkZTq4XymQIaIfZNJwT9YsjkPOhkVEqt6QIpQFZVM9IltqqYpOEkJoN1DPznmxUDyZ5CTZs/ZCuHA==", "requires": { - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.9" } }, "@babel/plugin-transform-dotall-regex": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.16.7.tgz", - "integrity": "sha512-Lyttaao2SjZF6Pf4vk1dVKv8YypMpomAbygW+mU5cYP3S5cWTfCJjG8xV6CFdzGFlfWK81IjL9viiTvpb6G7gQ==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.18.6.tgz", + "integrity": "sha512-6S3jpun1eEbAxq7TdjLotAsl4WpQI9DxfkycRcKrjhQYzU87qpXdknpBg/e+TdcMehqGnLFi7tnFUBR02Vq6wg==", "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.16.7", - "@babel/helper-plugin-utils": "^7.16.7" + "@babel/helper-create-regexp-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" } }, "@babel/plugin-transform-duplicate-keys": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.17.12.tgz", - "integrity": "sha512-EA5eYFUG6xeerdabina/xIoB95jJ17mAkR8ivx6ZSu9frKShBjpOGZPn511MTDTkiCO+zXnzNczvUM69YSf3Zw==", + "version": "7.18.9", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.18.9.tgz", + "integrity": "sha512-d2bmXCtZXYc59/0SanQKbiWINadaJXqtvIQIzd4+hNwkWBgyCd5F/2t1kXoUdvPMrxzPvhK6EMQRROxsue+mfw==", "requires": { - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.9" } }, "@babel/plugin-transform-exponentiation-operator": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.16.7.tgz", - "integrity": "sha512-8UYLSlyLgRixQvlYH3J2ekXFHDFLQutdy7FfFAMm3CPZ6q9wHCwnUyiXpQCe3gVVnQlHc5nsuiEVziteRNTXEA==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.18.6.tgz", + "integrity": "sha512-wzEtc0+2c88FVR34aQmiz56dxEkxr2g8DQb/KfaFa1JYXOFVsbhvAonFN6PwVWj++fKmku8NP80plJ5Et4wqHw==", "requires": { - "@babel/helper-builder-binary-assignment-operator-visitor": "^7.16.7", - "@babel/helper-plugin-utils": "^7.16.7" + "@babel/helper-builder-binary-assignment-operator-visitor": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" } }, "@babel/plugin-transform-flow-strip-types": { @@ -23207,121 +23235,121 @@ } }, "@babel/plugin-transform-for-of": { - "version": "7.18.1", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.18.1.tgz", - "integrity": "sha512-+TTB5XwvJ5hZbO8xvl2H4XaMDOAK57zF4miuC9qQJgysPNEAZZ9Z69rdF5LJkozGdZrjBIUAIyKUWRMmebI7vg==", + "version": "7.18.8", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.18.8.tgz", + "integrity": "sha512-yEfTRnjuskWYo0k1mHUqrVWaZwrdq8AYbfrpqULOJOaucGSp4mNMVps+YtA8byoevxS/urwU75vyhQIxcCgiBQ==", "requires": { - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.6" } }, "@babel/plugin-transform-function-name": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.16.7.tgz", - "integrity": "sha512-SU/C68YVwTRxqWj5kgsbKINakGag0KTgq9f2iZEXdStoAbOzLHEBRYzImmA6yFo8YZhJVflvXmIHUO7GWHmxxA==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.18.9.tgz", + "integrity": "sha512-WvIBoRPaJQ5yVHzcnJFor7oS5Ls0PYixlTYE63lCj2RtdQEl15M68FXQlxnG6wdraJIXRdR7KI+hQ7q/9QjrCQ==", "requires": { - "@babel/helper-compilation-targets": "^7.16.7", - "@babel/helper-function-name": "^7.16.7", - "@babel/helper-plugin-utils": "^7.16.7" + "@babel/helper-compilation-targets": "^7.18.9", + "@babel/helper-function-name": "^7.18.9", + "@babel/helper-plugin-utils": "^7.18.9" } }, "@babel/plugin-transform-literals": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.17.12.tgz", - "integrity": "sha512-8iRkvaTjJciWycPIZ9k9duu663FT7VrBdNqNgxnVXEFwOIp55JWcZd23VBRySYbnS3PwQ3rGiabJBBBGj5APmQ==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.18.9.tgz", + "integrity": "sha512-IFQDSRoTPnrAIrI5zoZv73IFeZu2dhu6irxQjY9rNjTT53VmKg9fenjvoiOWOkJ6mm4jKVPtdMzBY98Fp4Z4cg==", "requires": { - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.9" } }, "@babel/plugin-transform-member-expression-literals": { - "version": "7.16.7", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.16.7.tgz", - "integrity": "sha512-mBruRMbktKQwbxaJof32LT9KLy2f3gH+27a5XSuXo6h7R3vqltl0PgZ80C8ZMKw98Bf8bqt6BEVi3svOh2PzMw==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.18.6.tgz", + "integrity": "sha512-qSF1ihLGO3q+/g48k85tUjD033C29TNTVB2paCwZPVmOsjn9pClvYYrM2VeJpBY2bcNkuny0YUyTNRyRxJ54KA==", "requires": { - "@babel/helper-plugin-utils": "^7.16.7" + "@babel/helper-plugin-utils": "^7.18.6" } }, "@babel/plugin-transform-modules-amd": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.18.0.tgz", - "integrity": "sha512-h8FjOlYmdZwl7Xm2Ug4iX2j7Qy63NANI+NQVWQzv6r25fqgg7k2dZl03p95kvqNclglHs4FZ+isv4p1uXMA+QA==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.18.6.tgz", + "integrity": "sha512-Pra5aXsmTsOnjM3IajS8rTaLCy++nGM4v3YR4esk5PCsyg9z8NA5oQLwxzMUtDBd8F+UmVza3VxoAaWCbzH1rg==", "requires": { - "@babel/helper-module-transforms": "^7.18.0", - "@babel/helper-plugin-utils": "^7.17.12", + "@babel/helper-module-transforms": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6", "babel-plugin-dynamic-import-node": "^2.3.3" } }, "@babel/plugin-transform-modules-commonjs": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.18.0.tgz", - "integrity": "sha512-cCeR0VZWtfxWS4YueAK2qtHtBPJRSaJcMlbS8jhSIm/A3E2Kpro4W1Dn4cqJtp59dtWfXjQwK7SPKF8ghs7rlw==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.18.6.tgz", + "integrity": "sha512-Qfv2ZOWikpvmedXQJDSbxNqy7Xr/j2Y8/KfijM0iJyKkBTmWuvCA1yeH1yDM7NJhBW/2aXxeucLj6i80/LAJ/Q==", "requires": { - "@babel/helper-module-transforms": "^7.18.0", - "@babel/helper-plugin-utils": "^7.17.12", - "@babel/helper-simple-access": "^7.17.7", + "@babel/helper-module-transforms": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6", + "@babel/helper-simple-access": "^7.18.6", "babel-plugin-dynamic-import-node": "^2.3.3" } }, "@babel/plugin-transform-modules-systemjs": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.18.0.tgz", - "integrity": "sha512-vwKpxdHnlM5tIrRt/eA0bzfbi7gUBLN08vLu38np1nZevlPySRe6yvuATJB5F/WPJ+ur4OXwpVYq9+BsxqAQuQ==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.18.9.tgz", + "integrity": "sha512-zY/VSIbbqtoRoJKo2cDTewL364jSlZGvn0LKOf9ntbfxOvjfmyrdtEEOAdswOswhZEb8UH3jDkCKHd1sPgsS0A==", "requires": { - "@babel/helper-hoist-variables": "^7.16.7", - "@babel/helper-module-transforms": "^7.18.0", - "@babel/helper-plugin-utils": "^7.17.12", - "@babel/helper-validator-identifier": "^7.16.7", + "@babel/helper-hoist-variables": "^7.18.6", + "@babel/helper-module-transforms": "^7.18.9", + "@babel/helper-plugin-utils": "^7.18.9", + "@babel/helper-validator-identifier": "^7.18.6", "babel-plugin-dynamic-import-node": "^2.3.3" } }, "@babel/plugin-transform-modules-umd": { - "version": "7.18.0", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.18.0.tgz", - "integrity": "sha512-d/zZ8I3BWli1tmROLxXLc9A6YXvGK8egMxHp+E/rRwMh1Kip0AP77VwZae3snEJ33iiWwvNv2+UIIhfalqhzZA==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.18.6.tgz", + "integrity": "sha512-dcegErExVeXcRqNtkRU/z8WlBLnvD4MRnHgNs3MytRO1Mn1sHRyhbcpYbVMGclAqOjdW+9cfkdZno9dFdfKLfQ==", "requires": { - "@babel/helper-module-transforms": "^7.18.0", - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-module-transforms": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" } }, "@babel/plugin-transform-named-capturing-groups-regex": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.17.12.tgz", - "integrity": "sha512-vWoWFM5CKaTeHrdUJ/3SIOTRV+MBVGybOC9mhJkaprGNt5demMymDW24yC74avb915/mIRe3TgNb/d8idvnCRA==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.18.6.tgz", + "integrity": "sha512-UmEOGF8XgaIqD74bC8g7iV3RYj8lMf0Bw7NJzvnS9qQhM4mg+1WHKotUIdjxgD2RGrgFLZZPCFPFj3P/kVDYhg==", "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.17.12", - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-create-regexp-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" } }, "@babel/plugin-transform-new-target": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.17.12.tgz", - "integrity": "sha512-CaOtzk2fDYisbjAD4Sd1MTKGVIpRtx9bWLyj24Y/k6p4s4gQ3CqDGJauFJxt8M/LEx003d0i3klVqnN73qvK3w==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.18.6.tgz", + "integrity": "sha512-DjwFA/9Iu3Z+vrAn+8pBUGcjhxKguSMlsFqeCKbhb9BAV756v0krzVK04CRDi/4aqmk8BsHb4a/gFcaA5joXRw==", "requires": { - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.6" } }, "@babel/plugin-transform-object-super": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.16.7.tgz", - "integrity": "sha512-14J1feiQVWaGvRxj2WjyMuXS2jsBkgB3MdSN5HuC2G5nRspa5RK9COcs82Pwy5BuGcjb+fYaUj94mYcOj7rCvw==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.18.6.tgz", + "integrity": "sha512-uvGz6zk+pZoS1aTZrOvrbj6Pp/kK2mp45t2B+bTDre2UgsZZ8EZLSJtUg7m/no0zOJUWgFONpB7Zv9W2tSaFlA==", "requires": { - "@babel/helper-plugin-utils": "^7.16.7", - "@babel/helper-replace-supers": "^7.16.7" + "@babel/helper-plugin-utils": "^7.18.6", + "@babel/helper-replace-supers": "^7.18.6" } }, "@babel/plugin-transform-parameters": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.17.12.tgz", - "integrity": "sha512-6qW4rWo1cyCdq1FkYri7AHpauchbGLXpdwnYsfxFb+KtddHENfsY5JZb35xUwkK5opOLcJ3BNd2l7PhRYGlwIA==", + "version": "7.18.8", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.18.8.tgz", + "integrity": "sha512-ivfbE3X2Ss+Fj8nnXvKJS6sjRG4gzwPMsP+taZC+ZzEGjAYlvENixmt1sZ5Ca6tWls+BlKSGKPJ6OOXvXCbkFg==", "requires": { - 
"@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.6" } }, "@babel/plugin-transform-property-literals": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.16.7.tgz", - "integrity": "sha512-z4FGr9NMGdoIl1RqavCqGG+ZuYjfZ/hkCIeuH6Do7tXmSm0ls11nYVSJqFEUOSJbDab5wC6lRE/w6YjVcr6Hqw==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.18.6.tgz", + "integrity": "sha512-cYcs6qlgafTud3PAzrrRNbQtfpQ8+y/+M5tKmksS9+M1ckbH6kzY8MrexEM9mcA6JDsukE19iIRvAyYl463sMg==", "requires": { - "@babel/helper-plugin-utils": "^7.16.7" + "@babel/helper-plugin-utils": "^7.18.6" } }, "@babel/plugin-transform-react-display-name": { @@ -23365,20 +23393,20 @@ } }, "@babel/plugin-transform-regenerator": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.18.0.tgz", - "integrity": "sha512-C8YdRw9uzx25HSIzwA7EM7YP0FhCe5wNvJbZzjVNHHPGVcDJ3Aie+qGYYdS1oVQgn+B3eAIJbWFLrJ4Jipv7nw==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.18.6.tgz", + "integrity": "sha512-poqRI2+qiSdeldcz4wTSTXBRryoq3Gc70ye7m7UD5Ww0nE29IXqMl6r7Nd15WBgRd74vloEMlShtH6CKxVzfmQ==", "requires": { - "@babel/helper-plugin-utils": "^7.17.12", + "@babel/helper-plugin-utils": "^7.18.6", "regenerator-transform": "^0.15.0" } }, "@babel/plugin-transform-reserved-words": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.17.12.tgz", - "integrity": "sha512-1KYqwbJV3Co03NIi14uEHW8P50Md6KqFgt0FfpHdK6oyAHQVTosgPuPSiWud1HX0oYJ1hGRRlk0fP87jFpqXZA==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.18.6.tgz", + "integrity": "sha512-oX/4MyMoypzHjFrT1CdivfKZ+XvIPMFXwwxHp/r0Ddy2Vuomt4HDFGmft1TAY2yiTKiNSsh3kjBAzcM8kSdsjA==", "requires": { - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.6" } }, "@babel/plugin-transform-runtime": { @@ -23395,44 +23423,44 @@ } }, "@babel/plugin-transform-shorthand-properties": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.16.7.tgz", - "integrity": "sha512-hah2+FEnoRoATdIb05IOXf+4GzXYTq75TVhIn1PewihbpyrNWUt2JbudKQOETWw6QpLe+AIUpJ5MVLYTQbeeUg==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.18.6.tgz", + "integrity": "sha512-eCLXXJqv8okzg86ywZJbRn19YJHU4XUa55oz2wbHhaQVn/MM+XhukiT7SYqp/7o00dg52Rj51Ny+Ecw4oyoygw==", "requires": { - "@babel/helper-plugin-utils": "^7.16.7" + "@babel/helper-plugin-utils": "^7.18.6" } }, "@babel/plugin-transform-spread": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.17.12.tgz", - "integrity": "sha512-9pgmuQAtFi3lpNUstvG9nGfk9DkrdmWNp9KeKPFmuZCpEnxRzYlS8JgwPjYj+1AWDOSvoGN0H30p1cBOmT/Svg==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.18.9.tgz", + "integrity": "sha512-39Q814wyoOPtIB/qGopNIL9xDChOE1pNU0ZY5dO0owhiVt/5kFm4li+/bBtwc7QotG0u5EPzqhZdjMtmqBqyQA==", 
"requires": { - "@babel/helper-plugin-utils": "^7.17.12", - "@babel/helper-skip-transparent-expression-wrappers": "^7.16.0" + "@babel/helper-plugin-utils": "^7.18.9", + "@babel/helper-skip-transparent-expression-wrappers": "^7.18.9" } }, "@babel/plugin-transform-sticky-regex": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.16.7.tgz", - "integrity": "sha512-NJa0Bd/87QV5NZZzTuZG5BPJjLYadeSZ9fO6oOUoL4iQx+9EEuw/eEM92SrsT19Yc2jgB1u1hsjqDtH02c3Drw==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.18.6.tgz", + "integrity": "sha512-kfiDrDQ+PBsQDO85yj1icueWMfGfJFKN1KCkndygtu/C9+XUfydLC8Iv5UYJqRwy4zk8EcplRxEOeLyjq1gm6Q==", "requires": { - "@babel/helper-plugin-utils": "^7.16.7" + "@babel/helper-plugin-utils": "^7.18.6" } }, "@babel/plugin-transform-template-literals": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.17.12.tgz", - "integrity": "sha512-kAKJ7DX1dSRa2s7WN1xUAuaQmkTpN+uig4wCKWivVXIObqGbVTUlSavHyfI2iZvz89GFAMGm9p2DBJ4Y1Tp0hw==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.18.9.tgz", + "integrity": "sha512-S8cOWfT82gTezpYOiVaGHrCbhlHgKhQt8XH5ES46P2XWmX92yisoZywf5km75wv5sYcXDUCLMmMxOLCtthDgMA==", "requires": { - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.9" } }, "@babel/plugin-transform-typeof-symbol": { - "version": "7.17.12", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.17.12.tgz", - "integrity": "sha512-Q8y+Jp7ZdtSPXCThB6zjQ74N3lj0f6TDh1Hnf5B+sYlzQ8i5Pjp8gW0My79iekSpT4WnI06blqP6DT0OmaXXmw==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.18.9.tgz", + "integrity": "sha512-SRfwTtF11G2aemAZWivL7PD+C9z52v9EvMqH9BuYbabyPuKUvSWks3oCg6041pT925L4zVFqaVBeECwsmlguEw==", "requires": { - "@babel/helper-plugin-utils": "^7.17.12" + "@babel/helper-plugin-utils": "^7.18.9" } }, "@babel/plugin-transform-typescript": { @@ -23446,54 +23474,54 @@ } }, "@babel/plugin-transform-unicode-escapes": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.16.7.tgz", - "integrity": "sha512-TAV5IGahIz3yZ9/Hfv35TV2xEm+kaBDaZQCn2S/hG9/CZ0DktxJv9eKfPc7yYCvOYR4JGx1h8C+jcSOvgaaI/Q==", + "version": "7.18.10", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.18.10.tgz", + "integrity": "sha512-kKAdAI+YzPgGY/ftStBFXTI1LZFju38rYThnfMykS+IXy8BVx+res7s2fxf1l8I35DV2T97ezo6+SGrXz6B3iQ==", "requires": { - "@babel/helper-plugin-utils": "^7.16.7" + "@babel/helper-plugin-utils": "^7.18.9" } }, "@babel/plugin-transform-unicode-regex": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.16.7.tgz", - "integrity": "sha512-oC5tYYKw56HO75KZVLQ+R/Nl3Hro9kf8iG0hXoaHP7tjAyCpvqBiSNe6vGrZni1Z6MggmUOC6A7VP7AVmw225Q==", + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.18.6.tgz", + "integrity": 
"sha512-gE7A6Lt7YLnNOL3Pb9BNeZvi+d8l7tcRrG4+pwJjK9hD2xX4mEvjlQW60G9EEmfXVYRPv9VRQcyegIVHCql/AA==", "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.16.7", - "@babel/helper-plugin-utils": "^7.16.7" + "@babel/helper-create-regexp-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" } }, "@babel/preset-env": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.18.0.tgz", - "integrity": "sha512-cP74OMs7ECLPeG1reiCQ/D/ypyOxgfm8uR6HRYV23vTJ7Lu1nbgj9DQDo/vH59gnn7GOAwtTDPPYV4aXzsMKHA==", + "version": "7.18.10", + "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.18.10.tgz", + "integrity": "sha512-wVxs1yjFdW3Z/XkNfXKoblxoHgbtUF7/l3PvvP4m02Qz9TZ6uZGxRVYjSQeR87oQmHco9zWitW5J82DJ7sCjvA==", "requires": { - "@babel/compat-data": "^7.17.10", - "@babel/helper-compilation-targets": "^7.17.10", - "@babel/helper-plugin-utils": "^7.17.12", - "@babel/helper-validator-option": "^7.16.7", - "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.17.12", - "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.17.12", - "@babel/plugin-proposal-async-generator-functions": "^7.17.12", - "@babel/plugin-proposal-class-properties": "^7.17.12", - "@babel/plugin-proposal-class-static-block": "^7.18.0", - "@babel/plugin-proposal-dynamic-import": "^7.16.7", - "@babel/plugin-proposal-export-namespace-from": "^7.17.12", - "@babel/plugin-proposal-json-strings": "^7.17.12", - "@babel/plugin-proposal-logical-assignment-operators": "^7.17.12", - "@babel/plugin-proposal-nullish-coalescing-operator": "^7.17.12", - "@babel/plugin-proposal-numeric-separator": "^7.16.7", - "@babel/plugin-proposal-object-rest-spread": "^7.18.0", - "@babel/plugin-proposal-optional-catch-binding": "^7.16.7", - "@babel/plugin-proposal-optional-chaining": "^7.17.12", - "@babel/plugin-proposal-private-methods": "^7.17.12", - "@babel/plugin-proposal-private-property-in-object": "^7.17.12", - "@babel/plugin-proposal-unicode-property-regex": "^7.17.12", + "@babel/compat-data": "^7.18.8", + "@babel/helper-compilation-targets": "^7.18.9", + "@babel/helper-plugin-utils": "^7.18.9", + "@babel/helper-validator-option": "^7.18.6", + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.18.6", + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.18.9", + "@babel/plugin-proposal-async-generator-functions": "^7.18.10", + "@babel/plugin-proposal-class-properties": "^7.18.6", + "@babel/plugin-proposal-class-static-block": "^7.18.6", + "@babel/plugin-proposal-dynamic-import": "^7.18.6", + "@babel/plugin-proposal-export-namespace-from": "^7.18.9", + "@babel/plugin-proposal-json-strings": "^7.18.6", + "@babel/plugin-proposal-logical-assignment-operators": "^7.18.9", + "@babel/plugin-proposal-nullish-coalescing-operator": "^7.18.6", + "@babel/plugin-proposal-numeric-separator": "^7.18.6", + "@babel/plugin-proposal-object-rest-spread": "^7.18.9", + "@babel/plugin-proposal-optional-catch-binding": "^7.18.6", + "@babel/plugin-proposal-optional-chaining": "^7.18.9", + "@babel/plugin-proposal-private-methods": "^7.18.6", + "@babel/plugin-proposal-private-property-in-object": "^7.18.6", + "@babel/plugin-proposal-unicode-property-regex": "^7.18.6", "@babel/plugin-syntax-async-generators": "^7.8.4", "@babel/plugin-syntax-class-properties": "^7.12.13", "@babel/plugin-syntax-class-static-block": "^7.14.5", "@babel/plugin-syntax-dynamic-import": "^7.8.3", 
"@babel/plugin-syntax-export-namespace-from": "^7.8.3", - "@babel/plugin-syntax-import-assertions": "^7.17.12", + "@babel/plugin-syntax-import-assertions": "^7.18.6", "@babel/plugin-syntax-json-strings": "^7.8.3", "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", @@ -23503,45 +23531,55 @@ "@babel/plugin-syntax-optional-chaining": "^7.8.3", "@babel/plugin-syntax-private-property-in-object": "^7.14.5", "@babel/plugin-syntax-top-level-await": "^7.14.5", - "@babel/plugin-transform-arrow-functions": "^7.17.12", - "@babel/plugin-transform-async-to-generator": "^7.17.12", - "@babel/plugin-transform-block-scoped-functions": "^7.16.7", - "@babel/plugin-transform-block-scoping": "^7.17.12", - "@babel/plugin-transform-classes": "^7.17.12", - "@babel/plugin-transform-computed-properties": "^7.17.12", - "@babel/plugin-transform-destructuring": "^7.18.0", - "@babel/plugin-transform-dotall-regex": "^7.16.7", - "@babel/plugin-transform-duplicate-keys": "^7.17.12", - "@babel/plugin-transform-exponentiation-operator": "^7.16.7", - "@babel/plugin-transform-for-of": "^7.17.12", - "@babel/plugin-transform-function-name": "^7.16.7", - "@babel/plugin-transform-literals": "^7.17.12", - "@babel/plugin-transform-member-expression-literals": "^7.16.7", - "@babel/plugin-transform-modules-amd": "^7.18.0", - "@babel/plugin-transform-modules-commonjs": "^7.18.0", - "@babel/plugin-transform-modules-systemjs": "^7.18.0", - "@babel/plugin-transform-modules-umd": "^7.18.0", - "@babel/plugin-transform-named-capturing-groups-regex": "^7.17.12", - "@babel/plugin-transform-new-target": "^7.17.12", - "@babel/plugin-transform-object-super": "^7.16.7", - "@babel/plugin-transform-parameters": "^7.17.12", - "@babel/plugin-transform-property-literals": "^7.16.7", - "@babel/plugin-transform-regenerator": "^7.18.0", - "@babel/plugin-transform-reserved-words": "^7.17.12", - "@babel/plugin-transform-shorthand-properties": "^7.16.7", - "@babel/plugin-transform-spread": "^7.17.12", - "@babel/plugin-transform-sticky-regex": "^7.16.7", - "@babel/plugin-transform-template-literals": "^7.17.12", - "@babel/plugin-transform-typeof-symbol": "^7.17.12", - "@babel/plugin-transform-unicode-escapes": "^7.16.7", - "@babel/plugin-transform-unicode-regex": "^7.16.7", + "@babel/plugin-transform-arrow-functions": "^7.18.6", + "@babel/plugin-transform-async-to-generator": "^7.18.6", + "@babel/plugin-transform-block-scoped-functions": "^7.18.6", + "@babel/plugin-transform-block-scoping": "^7.18.9", + "@babel/plugin-transform-classes": "^7.18.9", + "@babel/plugin-transform-computed-properties": "^7.18.9", + "@babel/plugin-transform-destructuring": "^7.18.9", + "@babel/plugin-transform-dotall-regex": "^7.18.6", + "@babel/plugin-transform-duplicate-keys": "^7.18.9", + "@babel/plugin-transform-exponentiation-operator": "^7.18.6", + "@babel/plugin-transform-for-of": "^7.18.8", + "@babel/plugin-transform-function-name": "^7.18.9", + "@babel/plugin-transform-literals": "^7.18.9", + "@babel/plugin-transform-member-expression-literals": "^7.18.6", + "@babel/plugin-transform-modules-amd": "^7.18.6", + "@babel/plugin-transform-modules-commonjs": "^7.18.6", + "@babel/plugin-transform-modules-systemjs": "^7.18.9", + "@babel/plugin-transform-modules-umd": "^7.18.6", + "@babel/plugin-transform-named-capturing-groups-regex": "^7.18.6", + "@babel/plugin-transform-new-target": "^7.18.6", + "@babel/plugin-transform-object-super": "^7.18.6", + "@babel/plugin-transform-parameters": "^7.18.8", + 
"@babel/plugin-transform-property-literals": "^7.18.6", + "@babel/plugin-transform-regenerator": "^7.18.6", + "@babel/plugin-transform-reserved-words": "^7.18.6", + "@babel/plugin-transform-shorthand-properties": "^7.18.6", + "@babel/plugin-transform-spread": "^7.18.9", + "@babel/plugin-transform-sticky-regex": "^7.18.6", + "@babel/plugin-transform-template-literals": "^7.18.9", + "@babel/plugin-transform-typeof-symbol": "^7.18.9", + "@babel/plugin-transform-unicode-escapes": "^7.18.10", + "@babel/plugin-transform-unicode-regex": "^7.18.6", "@babel/preset-modules": "^0.1.5", - "@babel/types": "^7.18.0", - "babel-plugin-polyfill-corejs2": "^0.3.0", - "babel-plugin-polyfill-corejs3": "^0.5.0", - "babel-plugin-polyfill-regenerator": "^0.3.0", + "@babel/types": "^7.18.10", + "babel-plugin-polyfill-corejs2": "^0.3.2", + "babel-plugin-polyfill-corejs3": "^0.5.3", + "babel-plugin-polyfill-regenerator": "^0.4.0", "core-js-compat": "^3.22.1", "semver": "^6.3.0" + }, + "dependencies": { + "babel-plugin-polyfill-regenerator": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.4.0.tgz", + "integrity": "sha512-RW1cnryiADFeHmfLS+WW/G431p1PsW5qdRdz0SDRi7TKcUgc7Oh/uXkT7MZ/+tGsT1BkczEAmD5XjUyJ5SWDTw==", + "requires": { + "@babel/helper-define-polyfill-provider": "^0.3.2" + } + } } }, "@babel/preset-flow": { @@ -23578,9 +23616,9 @@ } }, "@babel/register": { - "version": "7.17.7", - "resolved": "https://registry.npmjs.org/@babel/register/-/register-7.17.7.tgz", - "integrity": "sha512-fg56SwvXRifootQEDQAu1mKdjh5uthPzdO0N6t358FktfL4XjAVXuH58ULoiW8mesxiOgNIrxiImqEwv0+hRRA==", + "version": "7.18.9", + "resolved": "https://registry.npmjs.org/@babel/register/-/register-7.18.9.tgz", + "integrity": "sha512-ZlbnXDcNYHMR25ITwwNKT88JiaukkdVj/nG7r3wnuXkOTHc60Uy05PwMCPre0hSkY68E6zK3xz+vUJSP2jWmcw==", "requires": { "clone-deep": "^4.0.1", "find-cache-dir": "^2.0.0", @@ -23598,36 +23636,36 @@ } }, "@babel/template": { - "version": "7.16.7", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.16.7.tgz", - "integrity": "sha512-I8j/x8kHUrbYRTUxXrrMbfCa7jxkE7tZre39x3kjr9hvI82cK1FfqLygotcWN5kdPGWcLdWMHpSBavse5tWw3w==", + "version": "7.18.10", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.18.10.tgz", + "integrity": "sha512-TI+rCtooWHr3QJ27kJxfjutghu44DLnasDMwpDqCXVTal9RLp3RSYNh4NdBrRP2cQAoG9A8juOQl6P6oZG4JxA==", "requires": { - "@babel/code-frame": "^7.16.7", - "@babel/parser": "^7.16.7", - "@babel/types": "^7.16.7" + "@babel/code-frame": "^7.18.6", + "@babel/parser": "^7.18.10", + "@babel/types": "^7.18.10" } }, "@babel/traverse": { - "version": "7.18.0", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.18.0.tgz", - "integrity": "sha512-oNOO4vaoIQoGjDQ84LgtF/IAlxlyqL4TUuoQ7xLkQETFaHkY1F7yazhB4Kt3VcZGL0ZF/jhrEpnXqUb0M7V3sw==", - "requires": { - "@babel/code-frame": "^7.16.7", - "@babel/generator": "^7.18.0", - "@babel/helper-environment-visitor": "^7.16.7", - "@babel/helper-function-name": "^7.17.9", - "@babel/helper-hoist-variables": "^7.16.7", - "@babel/helper-split-export-declaration": "^7.16.7", - "@babel/parser": "^7.18.0", - "@babel/types": "^7.18.0", + "version": "7.18.13", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.18.13.tgz", + "integrity": "sha512-N6kt9X1jRMLPxxxPYWi7tgvJRH/rtoU+dbKAPDM44RFHiMH8igdsaSBgFeskhSl/kLWLDUvIh1RXCrTmg0/zvA==", + "requires": { + "@babel/code-frame": "^7.18.6", + "@babel/generator": "^7.18.13", + 
"@babel/helper-environment-visitor": "^7.18.9", + "@babel/helper-function-name": "^7.18.9", + "@babel/helper-hoist-variables": "^7.18.6", + "@babel/helper-split-export-declaration": "^7.18.6", + "@babel/parser": "^7.18.13", + "@babel/types": "^7.18.13", "debug": "^4.1.0", "globals": "^11.1.0" } }, "@babel/types": { - "version": "7.18.10", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.18.10.tgz", - "integrity": "sha512-MJvnbEiiNkpjo+LknnmRrqbY1GPUUggjv+wQVjetM/AONoupqRALB7I6jGqNUAZsKcRIEu2J6FRFvsczljjsaQ==", + "version": "7.18.13", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.18.13.tgz", + "integrity": "sha512-ePqfTihzW0W6XAU+aMw2ykilisStJfDnsejDCXRchCcMJ4O0+8DhPXf2YUbZ6wjBlsEmZwLK/sPweWtu8hcJYQ==", "requires": { "@babel/helper-string-parser": "^7.18.10", "@babel/helper-validator-identifier": "^7.18.6", @@ -23642,16 +23680,17 @@ "optional": true }, "@commitlint/cli": { - "version": "17.0.0", - "resolved": "https://registry.npmjs.org/@commitlint/cli/-/cli-17.0.0.tgz", - "integrity": "sha512-Np6slCdVVG1XwMvwbZrXIzS1INPAD5QmN4L6al04AmCd4nAPU63gxgxC5Mz0Fmx7va23Uvb0S7yEFV1JPhvPUQ==", + "version": "17.0.3", + "resolved": "https://registry.npmjs.org/@commitlint/cli/-/cli-17.0.3.tgz", + "integrity": "sha512-oAo2vi5d8QZnAbtU5+0cR2j+A7PO8zuccux65R/EycwvsZrDVyW518FFrnJK2UQxbRtHFFIG+NjQ6vOiJV0Q8A==", "dev": true, "requires": { "@commitlint/format": "^17.0.0", - "@commitlint/lint": "^17.0.0", - "@commitlint/load": "^17.0.0", + "@commitlint/lint": "^17.0.3", + "@commitlint/load": "^17.0.3", "@commitlint/read": "^17.0.0", "@commitlint/types": "^17.0.0", + "execa": "^5.0.0", "lodash": "^4.17.19", "resolve-from": "5.0.0", "resolve-global": "1.0.0", @@ -23668,13 +23707,33 @@ } }, "@commitlint/config-validator": { - "version": "17.0.0", - "resolved": "https://registry.npmjs.org/@commitlint/config-validator/-/config-validator-17.0.0.tgz", - "integrity": "sha512-78IQjoZWR4kDHp/U5y17euEWzswJpPkA9TDL5F6oZZZaLIEreWzrDZD5PWtM8MsSRl/K2LDU/UrzYju2bKLMpA==", + "version": "17.0.3", + "resolved": "https://registry.npmjs.org/@commitlint/config-validator/-/config-validator-17.0.3.tgz", + "integrity": "sha512-3tLRPQJKapksGE7Kee9axv+9z5I2GDHitDH4q63q7NmNA0wkB+DAorJ0RHz2/K00Zb1/MVdHzhCga34FJvDihQ==", "dev": true, "requires": { "@commitlint/types": "^17.0.0", - "ajv": "^6.12.6" + "ajv": "^8.11.0" + }, + "dependencies": { + "ajv": { + "version": "8.11.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.11.0.tgz", + "integrity": "sha512-wGgprdCvMalC0BztXvitD2hC04YffAvtsUn93JbGXYLAtCUO4xd17mCCZQxUOItiBwZvJScWo8NIvQMQ71rdpg==", + "dev": true, + "requires": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.2.2" + } + }, + "json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "dev": true + } } }, "@commitlint/ensure": { @@ -23755,9 +23814,9 @@ } }, "@commitlint/is-ignored": { - "version": "17.0.0", - "resolved": "https://registry.npmjs.org/@commitlint/is-ignored/-/is-ignored-17.0.0.tgz", - "integrity": "sha512-UmacD0XM/wWykgdXn5CEWVS4XGuqzU+ZGvM2hwv85+SXGnIOaG88XHrt81u37ZeVt1riWW+YdOxcJW6+nd5v5w==", + "version": "17.0.3", + "resolved": "https://registry.npmjs.org/@commitlint/is-ignored/-/is-ignored-17.0.3.tgz", + "integrity": 
"sha512-/wgCXAvPtFTQZxsVxj7owLeRf5wwzcXLaYmrZPR4a87iD4sCvUIRl1/ogYrtOyUmHwWfQsvjqIB4mWE/SqWSnA==", "dev": true, "requires": { "@commitlint/types": "^17.0.0", @@ -23791,26 +23850,26 @@ } }, "@commitlint/lint": { - "version": "17.0.0", - "resolved": "https://registry.npmjs.org/@commitlint/lint/-/lint-17.0.0.tgz", - "integrity": "sha512-5FL7VLvGJQby24q0pd4UdM8FNFcL+ER1T/UBf8A9KRL5+QXV1Rkl6Zhcl7+SGpGlVo6Yo0pm6aLW716LVKWLGg==", + "version": "17.0.3", + "resolved": "https://registry.npmjs.org/@commitlint/lint/-/lint-17.0.3.tgz", + "integrity": "sha512-2o1fk7JUdxBUgszyt41sHC/8Nd5PXNpkmuOo9jvGIjDHzOwXyV0PSdbEVTH3xGz9NEmjohFHr5l+N+T9fcxong==", "dev": true, "requires": { - "@commitlint/is-ignored": "^17.0.0", + "@commitlint/is-ignored": "^17.0.3", "@commitlint/parse": "^17.0.0", "@commitlint/rules": "^17.0.0", "@commitlint/types": "^17.0.0" } }, "@commitlint/load": { - "version": "17.0.0", - "resolved": "https://registry.npmjs.org/@commitlint/load/-/load-17.0.0.tgz", - "integrity": "sha512-XaiHF4yWQOPAI0O6wXvk+NYLtJn/Xb7jgZEeKd4C1ZWd7vR7u8z5h0PkWxSr0uLZGQsElGxv3fiZ32C5+q6M8w==", + "version": "17.0.3", + "resolved": "https://registry.npmjs.org/@commitlint/load/-/load-17.0.3.tgz", + "integrity": "sha512-3Dhvr7GcKbKa/ey4QJ5MZH3+J7QFlARohUow6hftQyNjzoXXROm+RwpBes4dDFrXG1xDw9QPXA7uzrOShCd4bw==", "dev": true, "requires": { - "@commitlint/config-validator": "^17.0.0", + "@commitlint/config-validator": "^17.0.3", "@commitlint/execute-rule": "^17.0.0", - "@commitlint/resolve-extends": "^17.0.0", + "@commitlint/resolve-extends": "^17.0.3", "@commitlint/types": "^17.0.0", "@types/node": ">=12", "chalk": "^4.1.0", @@ -23902,12 +23961,12 @@ } }, "@commitlint/resolve-extends": { - "version": "17.0.0", - "resolved": "https://registry.npmjs.org/@commitlint/resolve-extends/-/resolve-extends-17.0.0.tgz", - "integrity": "sha512-wi60WiJmwaQ7lzMXK8Vbc18Hq9tE2j/6iv2AFfPUGV7fvfY6Sf1iNKuUHirSqR0fquUyufIXe4y/K9A6LVIIvw==", + "version": "17.0.3", + "resolved": "https://registry.npmjs.org/@commitlint/resolve-extends/-/resolve-extends-17.0.3.tgz", + "integrity": "sha512-H/RFMvrcBeJCMdnVC4i8I94108UDccIHrTke2tyQEg9nXQnR5/Hd6MhyNWkREvcrxh9Y+33JLb+PiPiaBxCtBA==", "dev": true, "requires": { - "@commitlint/config-validator": "^17.0.0", + "@commitlint/config-validator": "^17.0.3", "@commitlint/types": "^17.0.0", "import-fresh": "^3.0.0", "lodash": "^4.17.19", @@ -23944,12 +24003,12 @@ } }, "@commitlint/travis-cli": { - "version": "17.0.0", - "resolved": "https://registry.npmjs.org/@commitlint/travis-cli/-/travis-cli-17.0.0.tgz", - "integrity": "sha512-0SBUjEQAHeeIakuyo1Rm0YgEtDXY0qFZYpKWgNmRqZl/QfsUddm7nz5/9pYXxbOpSbSNUpuiPJOV+dPTBVF5bg==", + "version": "17.0.3", + "resolved": "https://registry.npmjs.org/@commitlint/travis-cli/-/travis-cli-17.0.3.tgz", + "integrity": "sha512-A2JUrh4kLxvm9UsG6LjUWlWbYJAZKM6oHUkBIRttrX/u/vnMuNkzzGvAj/Dso/C51l4TovkbFKz7hQyMgfBGRw==", "dev": true, "requires": { - "@commitlint/cli": "^17.0.0", + "@commitlint/cli": "^17.0.3", "execa": "^5.0.0" } }, @@ -24013,19 +24072,25 @@ } } }, - "@cspotcode/source-map-consumer": { - "version": "0.8.0", - "resolved": "https://registry.npmjs.org/@cspotcode/source-map-consumer/-/source-map-consumer-0.8.0.tgz", - "integrity": "sha512-41qniHzTU8yAGbCp04ohlmSrZf8bkf/iJsl3V0dRGsQN/5GFfx+LbCSsCpp2gqrqjTVg/K6O8ycoV35JIwAzAg==", - "dev": true - }, "@cspotcode/source-map-support": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.7.0.tgz", - "integrity": 
"sha512-X4xqRHqN8ACt2aHVe51OxeA2HjbcL4MqFqXkrmQszJ1NOUuUu5u6Vqx/0lZSVNku7velL5FC/s5uEAj1lsBMhA==", + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz", + "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==", "dev": true, "requires": { - "@cspotcode/source-map-consumer": "0.8.0" + "@jridgewell/trace-mapping": "0.3.9" + }, + "dependencies": { + "@jridgewell/trace-mapping": { + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz", + "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==", + "dev": true, + "requires": { + "@jridgewell/resolve-uri": "^3.0.3", + "@jridgewell/sourcemap-codec": "^1.4.10" + } + } } }, "@eslint/eslintrc": { @@ -24372,11 +24437,11 @@ } }, "@jridgewell/gen-mapping": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.1.tgz", - "integrity": "sha512-GcHwniMlA2z+WFPWuY8lp3fsza0I8xPFMWL5+n8LYyP6PSvPrXf4+n8stDHZY2DM0zy9sVkRDy1jDI4XGzYVqg==", + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.2.tgz", + "integrity": "sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A==", "requires": { - "@jridgewell/set-array": "^1.0.0", + "@jridgewell/set-array": "^1.0.1", "@jridgewell/sourcemap-codec": "^1.4.10", "@jridgewell/trace-mapping": "^0.3.9" } @@ -27057,21 +27122,21 @@ } }, "babel-plugin-polyfill-corejs2": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.3.1.tgz", - "integrity": "sha512-v7/T6EQcNfVLfcN2X8Lulb7DjprieyLWJK/zOWH5DUYcAgex9sP3h25Q+DLsX9TloXe3y1O8l2q2Jv9q8UVB9w==", + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.3.2.tgz", + "integrity": "sha512-LPnodUl3lS0/4wN3Rb+m+UK8s7lj2jcLRrjho4gLw+OJs+I4bvGXshINesY5xx/apM+biTnQ9reDI8yj+0M5+Q==", "requires": { - "@babel/compat-data": "^7.13.11", - "@babel/helper-define-polyfill-provider": "^0.3.1", + "@babel/compat-data": "^7.17.7", + "@babel/helper-define-polyfill-provider": "^0.3.2", "semver": "^6.1.1" } }, "babel-plugin-polyfill-corejs3": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.5.2.tgz", - "integrity": "sha512-G3uJih0XWiID451fpeFaYGVuxHEjzKTHtc9uGFEjR6hHrvNzeS/PX+LLLcetJcytsB5m4j+K3o/EpXJNb/5IEQ==", + "version": "0.5.3", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.5.3.tgz", + "integrity": "sha512-zKsXDh0XjnrUEW0mxIHLfjBfnXSMr5Q/goMe/fxpQnLm07mcOZiIZHBNWCMx60HmdvjxfXcalac0tfFg0wqxyw==", "requires": { - "@babel/helper-define-polyfill-provider": "^0.3.1", + "@babel/helper-define-polyfill-provider": "^0.3.2", "core-js-compat": "^3.21.0" } }, @@ -28106,13 +28171,13 @@ } }, "cosmiconfig-typescript-loader": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/cosmiconfig-typescript-loader/-/cosmiconfig-typescript-loader-2.0.0.tgz", - "integrity": "sha512-2NlGul/E3vTQEANqPziqkA01vfiuUU8vT0jZAuUIjEW8u3eCcnCQWLggapCjhbF76s7KQF0fM0kXSKmzaDaG1g==", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/cosmiconfig-typescript-loader/-/cosmiconfig-typescript-loader-2.0.2.tgz", + "integrity": 
"sha512-KmE+bMjWMXJbkWCeY4FJX/npHuZPNr9XF9q9CIQ/bpFwi1qHfCmSiKarrCcRa0LO4fWjk93pVoeRtJAkTGcYNw==", "dev": true, "requires": { "cosmiconfig": "^7", - "ts-node": "^10.7.0" + "ts-node": "^10.8.1" } }, "create-require": { @@ -28970,13 +29035,13 @@ } }, "eslint-plugin-mocha": { - "version": "10.0.4", - "resolved": "https://registry.npmjs.org/eslint-plugin-mocha/-/eslint-plugin-mocha-10.0.4.tgz", - "integrity": "sha512-8wzAeepVY027oBHz/TmBmUr7vhVqoC1KTFeDybFLhbaWKx+aQ7fJJVuUsqcUy+L+G+XvgQBJY+cbAf7hl5DF7Q==", + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-mocha/-/eslint-plugin-mocha-10.1.0.tgz", + "integrity": "sha512-xLqqWUF17llsogVOC+8C6/jvQ+4IoOREbN7ZCHuOHuD6cT5cDD4h7f2LgsZuzMAiwswWE21tO7ExaknHVDrSkw==", "dev": true, "requires": { "eslint-utils": "^3.0.0", - "ramda": "^0.28.0" + "rambda": "^7.1.0" } }, "eslint-plugin-prettier": { @@ -29927,7 +29992,7 @@ "global-dirs": { "version": "0.1.1", "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-0.1.1.tgz", - "integrity": "sha1-sxnA3UYH81PzvpzKTHL8FIxJ9EU=", + "integrity": "sha512-NknMLn7F2J7aflwFOlGdNIuCDpN3VGoSoB+aap3KABFWbHVn1TCgFC+np23J8W2BiZbjfEw3BFBycSMv1AFblg==", "dev": true, "requires": { "ini": "^1.3.4" @@ -36584,10 +36649,10 @@ "integrity": "sha512-ARhCpm70fzdcvNQfPoy49IaanKkTlRWF2JMzqhcJbhSFRZv7nPTvZJdcY7301IPmvW+/p0RgIWnQDLJxifsQ7g==", "dev": true }, - "ramda": { - "version": "0.28.0", - "resolved": "https://registry.npmjs.org/ramda/-/ramda-0.28.0.tgz", - "integrity": "sha512-9QnLuG/kPVgWvMQ4aODhsBUFKOUmnbUnsSXACv+NCQZcHbeb+v8Lodp8OVxtRULN1/xOyYLLaL6npE6dMq5QTA==", + "rambda": { + "version": "7.2.1", + "resolved": "https://registry.npmjs.org/rambda/-/rambda-7.2.1.tgz", + "integrity": "sha512-Wswj8ZvzdI3VhaGPkZAxaCTwuMmGtgWt7Zxsgyo4P+iTmVnkojvyWaOep5q3ZjMIecW0wtQa66GWxaKkZ24RAA==", "dev": true }, "randombytes": { @@ -37034,9 +37099,9 @@ "dev": true }, "regexpu-core": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-5.0.1.tgz", - "integrity": "sha512-CriEZlrKK9VJw/xQGJpQM5rY88BtuL8DM+AEwvcThHilbxiTAy8vq4iJnd2tqq8wLmjbGZzP7ZcKFjbGkmEFrw==", + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-5.1.0.tgz", + "integrity": "sha512-bb6hk+xWd2PEOkj5It46A16zFMs2mv86Iwpdu94la4S3sJ7C973h2dHpYKwIBGaWSO7cIRJ+UX0IeMaWcO4qwA==", "requires": { "regenerate": "^1.4.2", "regenerate-unicode-properties": "^10.0.1", @@ -37071,7 +37136,7 @@ "jsesc": { "version": "0.5.0", "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", - "integrity": "sha1-597mbjXW/Bb3EP6R1c9p9w8IkR0=" + "integrity": "sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==" } } }, @@ -38715,12 +38780,12 @@ } }, "ts-node": { - "version": "10.7.0", - "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.7.0.tgz", - "integrity": "sha512-TbIGS4xgJoX2i3do417KSaep1uRAW/Lu+WAL2doDHC0D6ummjirVOXU5/7aiZotbQ5p1Zp9tP7U6cYhA0O7M8A==", + "version": "10.9.1", + "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.1.tgz", + "integrity": "sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==", "dev": true, "requires": { - "@cspotcode/source-map-support": "0.7.0", + "@cspotcode/source-map-support": "^0.8.0", "@tsconfig/node10": "^1.0.7", "@tsconfig/node12": "^1.0.7", "@tsconfig/node14": "^1.0.0", @@ -38731,7 +38796,7 @@ "create-require": "^1.1.0", "diff": "^4.0.1", "make-error": "^1.1.1", - "v8-compile-cache-lib": "^3.0.0", + "v8-compile-cache-lib": "^3.0.1", "yn": 
"3.1.1" }, "dependencies": { @@ -39126,9 +39191,9 @@ "dev": true }, "v8-compile-cache-lib": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.0.tgz", - "integrity": "sha512-mpSYqfsFvASnSn5qMiwrr4VKfumbPyONLCOPmsR3A6pTY/r0+tSaVbgPWSAIuzbk3lCTa+FForeTiO+wBQGkjA==", + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz", + "integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==", "dev": true }, "validate-npm-package-license": { @@ -39483,9 +39548,9 @@ }, "dependencies": { "yargs-parser": { - "version": "21.0.1", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.0.1.tgz", - "integrity": "sha512-9BK1jFpLzJROCI5TzwZL/TU4gqjK5xiHV/RfWLOahrjAko/e4DJkRDZQXfvqAsiZzzYhgAzbgz6lg48jcm4GLg==", + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", "dev": true } } diff --git a/web3.js/src/account-data.ts b/web3.js/src/account-data.ts new file mode 100644 index 0000000000..a61d695c64 --- /dev/null +++ b/web3.js/src/account-data.ts @@ -0,0 +1,39 @@ +import * as BufferLayout from '@solana/buffer-layout'; + +export interface IAccountStateData { + readonly typeIndex: number; +} + +/** + * @internal + */ +export type AccountType = { + /** The account type index (from solana upstream program) */ + index: number; + /** The BufferLayout to use to build data */ + layout: BufferLayout.Layout; +}; + +/** + * Decode account data buffer using an AccountType + * @internal + */ +export function decodeData( + type: AccountType, + data: Uint8Array, +): TAccountStateData { + let decoded: TAccountStateData; + try { + decoded = type.layout.decode(data); + } catch (err) { + throw new Error('invalid instruction; ' + err); + } + + if (decoded.typeIndex !== type.index) { + throw new Error( + `invalid account data; account type mismatch ${decoded.typeIndex} != ${type.index}`, + ); + } + + return decoded; +} diff --git a/web3.js/src/account.ts b/web3.js/src/account.ts index 13370e7835..66abe02f05 100644 --- a/web3.js/src/account.ts +++ b/web3.js/src/account.ts @@ -2,7 +2,7 @@ import nacl from 'tweetnacl'; import type {SignKeyPair as KeyPair} from 'tweetnacl'; import type {Buffer} from 'buffer'; -import {toBuffer} from './util/to-buffer'; +import {toBuffer} from './utils/to-buffer'; import {PublicKey} from './publickey'; /** diff --git a/web3.js/src/connection.ts b/web3.js/src/connection.ts index 8c3e286ab9..0fb2e5a9b1 100644 --- a/web3.js/src/connection.ts +++ b/web3.js/src/connection.ts @@ -24,7 +24,6 @@ import type {Struct} from 'superstruct'; import {Client as RpcWebSocketClient} from 'rpc-websockets'; import RpcClient from 'jayson/lib/client/browser'; -import {URL} from './util/url-impl'; import {AgentManager} from './agent-manager'; import {EpochSchedule} from './epoch-schedule'; import {SendTransactionError, SolanaJSONRPCError} from './errors'; @@ -35,14 +34,16 @@ import {Signer} from './keypair'; import {MS_PER_SLOT} from './timing'; import {Transaction, TransactionStatus} from './transaction'; import {Message} from './message'; -import assert from './util/assert'; -import {sleep} from './util/sleep'; -import {toBuffer} from './util/to-buffer'; +import {AddressLookupTableAccount} from './programs/address-lookup-table/state'; +import assert from 
'./utils/assert';
+import {sleep} from './utils/sleep';
+import {toBuffer} from './utils/to-buffer';
 import {
   TransactionExpiredBlockheightExceededError,
   TransactionExpiredTimeoutError,
-} from './util/tx-expiry-custom-errors';
-import {makeWebsocketUrl} from './util/makeWebsocketUrl';
+} from './transaction/expiry-custom-errors';
+import {makeWebsocketUrl} from './utils/makeWebsocketUrl';
+import {URL} from './utils/url-impl';
 import type {Blockhash} from './blockhash';
 import type {FeeCalculator} from './fee-calculator';
 import type {TransactionSignature} from './transaction';
@@ -804,6 +805,14 @@ export type TokenBalance = {
  */
 export type ParsedConfirmedTransactionMeta = ParsedTransactionMeta;
+/**
+ * Collection of addresses loaded by a transaction using address table lookups
+ */
+export type LoadedAddresses = {
+  writable: Array<PublicKey>;
+  readonly: Array<PublicKey>;
+};
+
 /**
  * Metadata for a parsed transaction on the ledger
  */
@@ -824,6 +833,8 @@ export type ParsedTransactionMeta = {
   postTokenBalances?: Array<TokenBalance> | null;
   /** The error result of transaction processing */
   err: TransactionError | null;
+  /** The collection of addresses loaded using address lookup tables */
+  loadedAddresses?: LoadedAddresses;
 };
 export type CompiledInnerInstruction = {
@@ -874,6 +885,8 @@ export type TransactionResponse = {
 /**
  * A confirmed transaction on the ledger
+ *
+ * @deprecated Deprecated since Solana v1.8.0.
  */
 export type ConfirmedTransaction = {
   /** The slot during which the transaction was processed */
@@ -1003,7 +1016,9 @@ export type BlockResponse = {
 };
 /**
- * A ConfirmedBlock on the ledger
+ * A confirmed block on the ledger
+ *
+ * @deprecated Deprecated since Solana v1.8.0.
  */
 export type ConfirmedBlock = {
   /** Blockhash of this block */
@@ -1794,6 +1809,11 @@ const TokenBalanceResult = pick({
   uiTokenAmount: TokenAmountResult,
 });
+const LoadedAddressesResult = pick({
+  writable: array(PublicKeyFromString),
+  readonly: array(PublicKeyFromString),
+});
+
 /**
  * @internal
  */
@@ -1821,6 +1841,7 @@ const ConfirmedTransactionMetaResult = pick({
   logMessages: optional(nullable(array(string()))),
   preTokenBalances: optional(nullable(array(TokenBalanceResult))),
   postTokenBalances: optional(nullable(array(TokenBalanceResult))),
+  loadedAddresses: optional(LoadedAddressesResult),
 });
 /**
@@ -1844,6 +1865,7 @@ const ParsedConfirmedTransactionMetaResult = pick({
   logMessages: optional(nullable(array(string()))),
   preTokenBalances: optional(nullable(array(TokenBalanceResult))),
   postTokenBalances: optional(nullable(array(TokenBalanceResult))),
+  loadedAddresses: optional(LoadedAddressesResult),
 });
 /**
@@ -2850,14 +2872,17 @@ export class Connection {
    */
   async getParsedAccountInfo(
     publicKey: PublicKey,
-    commitment?: Commitment,
+    commitmentOrConfig?: Commitment | GetAccountInfoConfig,
   ): Promise<
     RpcResponseAndContext<AccountInfo<ParsedAccountData> | null>
   > {
+    const {commitment, config} =
+      extractCommitmentFromConfig(commitmentOrConfig);
     const args = this._buildArgs(
       [publicKey.toBase58()],
       commitment,
       'jsonParsed',
+      config,
     );
     const unsafeRes = await this._rpcRequest('getAccountInfo', args);
     const res = create(
@@ -4194,6 +4219,29 @@
     return res.result;
   }
+  async getAddressLookupTable(
+    accountKey: PublicKey,
+    config?: GetAccountInfoConfig,
+  ): Promise<RpcResponseAndContext<AddressLookupTableAccount | null>> {
+    const {context, value: accountInfo} = await this.getAccountInfoAndContext(
+      accountKey,
+      config,
+    );
+
+    let value = null;
+    if (accountInfo !== null) {
+      value = new AddressLookupTableAccount({
+        key: accountKey,
+        state:
AddressLookupTableAccount.deserialize(accountInfo.data),
+      });
+    }
+
+    return {
+      context,
+      value,
+    };
+  }
+
   /**
    * Fetch the contents of a Nonce account from the cluster, return with context
    */
diff --git a/web3.js/src/fee-calculator.ts b/web3.js/src/fee-calculator.ts
index f4caacf7d9..eca886ac33 100644
--- a/web3.js/src/fee-calculator.ts
+++ b/web3.js/src/fee-calculator.ts
@@ -9,6 +9,8 @@ export const FeeCalculatorLayout = BufferLayout.nu64('lamportsPerSignature');
 /**
  * Calculator for transaction fees.
+ *
+ * @deprecated Deprecated since Solana v1.8.0.
  */
 export interface FeeCalculator {
   /** Cost in lamports to validate a signature. */
diff --git a/web3.js/src/index.ts b/web3.js/src/index.ts
index 0d6c382a90..5a0f705710 100644
--- a/web3.js/src/index.ts
+++ b/web3.js/src/index.ts
@@ -1,33 +1,22 @@
 export * from './account';
-export * from './address-lookup-table-program';
 export * from './blockhash';
 export * from './bpf-loader-deprecated';
 export * from './bpf-loader';
-export * from './compute-budget';
 export * from './connection';
 export * from './epoch-schedule';
-export * from './ed25519-program';
+export * from './errors';
 export * from './fee-calculator';
 export * from './keypair';
 export * from './loader';
 export * from './message';
 export * from './nonce-account';
+export * from './programs';
 export * from './publickey';
-export * from './stake-program';
-export * from './system-program';
-export * from './secp256k1-program';
 export * from './transaction';
-export * from './transaction-constants';
 export * from './validator-info';
 export * from './vote-account';
-export * from './vote-program';
 export * from './sysvar';
-export * from './errors';
-export * from './util/borsh-schema';
-export * from './util/send-and-confirm-transaction';
-export * from './util/send-and-confirm-raw-transaction';
-export * from './util/tx-expiry-custom-errors';
-export * from './util/cluster';
+export * from './utils';
 /**
  * There are 1-billion lamports in one SOL
diff --git a/web3.js/src/loader.ts b/web3.js/src/loader.ts
index 9412f0f6c4..df4a212e15 100644
--- a/web3.js/src/loader.ts
+++ b/web3.js/src/loader.ts
@@ -2,15 +2,14 @@ import {Buffer} from 'buffer';
 import * as BufferLayout from '@solana/buffer-layout';
 import {PublicKey} from './publickey';
-import {Transaction} from './transaction';
+import {Transaction, PACKET_DATA_SIZE} from './transaction';
 import {SYSVAR_RENT_PUBKEY} from './sysvar';
-import {sendAndConfirmTransaction} from './util/send-and-confirm-transaction';
-import {sleep} from './util/sleep';
+import {sendAndConfirmTransaction} from './utils/send-and-confirm-transaction';
+import {sleep} from './utils/sleep';
 import type {Connection} from './connection';
 import type {Signer} from './keypair';
-import {SystemProgram} from './system-program';
+import {SystemProgram} from './programs/system';
 import {IInstructionInputData} from './instruction';
-import {PACKET_DATA_SIZE} from './transaction-constants';
 // Keep program chunks under PACKET_DATA_SIZE, leaving enough room for the
 // rest of the Transaction fields
diff --git a/web3.js/src/message/index.ts b/web3.js/src/message/index.ts
new file mode 100644
index 0000000000..23a8ae60ad
--- /dev/null
+++ b/web3.js/src/message/index.ts
@@ -0,0 +1,32 @@
+export * from './legacy';
+
+/**
+ * The message header, identifying signed and read-only accounts
+ */
+export type MessageHeader = {
+  /**
+   * The number of signatures required for this message to be considered valid.
The
+   * signatures must match the first `numRequiredSignatures` of `accountKeys`.
+   */
+  numRequiredSignatures: number;
+  /** The last `numReadonlySignedAccounts` of the signed keys are read-only accounts */
+  numReadonlySignedAccounts: number;
+  /** The last `numReadonlyUnsignedAccounts` of the unsigned keys are read-only accounts */
+  numReadonlyUnsignedAccounts: number;
+};
+
+/**
+ * An instruction to be executed by a program
+ *
+ * @property {number} programIdIndex
+ * @property {number[]} accounts
+ * @property {string} data
+ */
+export type CompiledInstruction = {
+  /** Index into the transaction keys array indicating the program account that executes this instruction */
+  programIdIndex: number;
+  /** Ordered indices into the transaction keys array indicating which accounts to pass to the program */
+  accounts: number[];
+  /** The program input data encoded as base 58 */
+  data: string;
+};
diff --git a/web3.js/src/message.ts b/web3.js/src/message/legacy.ts
similarity index 81%
rename from web3.js/src/message.ts
rename to web3.js/src/message/legacy.ts
index 6e86867c70..ff6ea9a000 100644
--- a/web3.js/src/message.ts
+++ b/web3.js/src/message/legacy.ts
@@ -2,43 +2,13 @@ import bs58 from 'bs58';
 import {Buffer} from 'buffer';
 import * as BufferLayout from '@solana/buffer-layout';
-import {PublicKey} from './publickey';
-import type {Blockhash} from './blockhash';
-import * as Layout from './layout';
-import {PACKET_DATA_SIZE} from './transaction-constants';
-import * as shortvec from './util/shortvec-encoding';
-import {toBuffer} from './util/to-buffer';
-
-/**
- * The message header, identifying signed and read-only account
- */
-export type MessageHeader = {
-  /**
-   * The number of signatures required for this message to be considered valid. The
-   * signatures must match the first `numRequiredSignatures` of `accountKeys`.
- */ - numRequiredSignatures: number; - /** The last `numReadonlySignedAccounts` of the signed keys are read-only accounts */ - numReadonlySignedAccounts: number; - /** The last `numReadonlySignedAccounts` of the unsigned keys are read-only accounts */ - numReadonlyUnsignedAccounts: number; -}; - -/** - * An instruction to execute by a program - * - * @property {number} programIdIndex - * @property {number[]} accounts - * @property {string} data - */ -export type CompiledInstruction = { - /** Index into the transaction keys array indicating the program account that executes this instruction */ - programIdIndex: number; - /** Ordered indices into the transaction keys array indicating which accounts to pass to the program */ - accounts: number[]; - /** The program input data encoded as base 58 */ - data: string; -}; +import {PublicKey, PUBLIC_KEY_LENGTH} from '../publickey'; +import type {Blockhash} from '../blockhash'; +import * as Layout from '../layout'; +import {PACKET_DATA_SIZE} from '../transaction/constants'; +import * as shortvec from '../utils/shortvec-encoding'; +import {toBuffer} from '../utils/to-buffer'; +import {CompiledInstruction, MessageHeader} from './index'; /** * Message constructor arguments @@ -54,8 +24,6 @@ export type MessageArgs = { instructions: CompiledInstruction[]; }; -const PUBKEY_LENGTH = 32; - /** * List of instructions to be processed atomically */ @@ -229,13 +197,13 @@ export class Message { const accountCount = shortvec.decodeLength(byteArray); let accountKeys = []; for (let i = 0; i < accountCount; i++) { - const account = byteArray.slice(0, PUBKEY_LENGTH); - byteArray = byteArray.slice(PUBKEY_LENGTH); + const account = byteArray.slice(0, PUBLIC_KEY_LENGTH); + byteArray = byteArray.slice(PUBLIC_KEY_LENGTH); accountKeys.push(bs58.encode(Buffer.from(account))); } - const recentBlockhash = byteArray.slice(0, PUBKEY_LENGTH); - byteArray = byteArray.slice(PUBKEY_LENGTH); + const recentBlockhash = byteArray.slice(0, PUBLIC_KEY_LENGTH); + byteArray = byteArray.slice(PUBLIC_KEY_LENGTH); const instructionCount = shortvec.decodeLength(byteArray); let instructions: CompiledInstruction[] = []; diff --git a/web3.js/src/nonce-account.ts b/web3.js/src/nonce-account.ts index e3c7f41ff3..f7b5cb625b 100644 --- a/web3.js/src/nonce-account.ts +++ b/web3.js/src/nonce-account.ts @@ -6,7 +6,7 @@ import * as Layout from './layout'; import {PublicKey} from './publickey'; import type {FeeCalculator} from './fee-calculator'; import {FeeCalculatorLayout} from './fee-calculator'; -import {toBuffer} from './util/to-buffer'; +import {toBuffer} from './utils/to-buffer'; /** * See https://github.com/solana-labs/solana/blob/0ea2843ec9cdc517572b8e62c959f41b55cf4453/sdk/src/nonce_state.rs#L29-L32 diff --git a/web3.js/src/address-lookup-table-program.ts b/web3.js/src/programs/address-lookup-table/index.ts similarity index 96% rename from web3.js/src/address-lookup-table-program.ts rename to web3.js/src/programs/address-lookup-table/index.ts index b245161a5e..da752ddb4f 100644 --- a/web3.js/src/address-lookup-table-program.ts +++ b/web3.js/src/programs/address-lookup-table/index.ts @@ -1,12 +1,14 @@ import {toBufferLE} from 'bigint-buffer'; import * as BufferLayout from '@solana/buffer-layout'; -import * as Layout from './layout'; -import {PublicKey} from './publickey'; -import * as bigintLayout from './util/bigint'; -import {SystemProgram} from './system-program'; -import {TransactionInstruction} from './transaction'; -import {decodeData, encodeData, IInstructionInputData} from 
'./instruction';
+import * as Layout from '../../layout';
+import {PublicKey} from '../../publickey';
+import * as bigintLayout from '../../utils/bigint';
+import {SystemProgram} from '../system';
+import {TransactionInstruction} from '../../transaction';
+import {decodeData, encodeData, IInstructionInputData} from '../../instruction';
+
+export * from './state';
 export type CreateLookupTableParams = {
   /** Account used to derive and control the new address lookup table. */
diff --git a/web3.js/src/programs/address-lookup-table/state.ts b/web3.js/src/programs/address-lookup-table/state.ts
new file mode 100644
index 0000000000..6f4432b25f
--- /dev/null
+++ b/web3.js/src/programs/address-lookup-table/state.ts
@@ -0,0 +1,84 @@
+import * as BufferLayout from '@solana/buffer-layout';
+
+import assert from '../../utils/assert';
+import * as Layout from '../../layout';
+import {PublicKey} from '../../publickey';
+import {u64} from '../../utils/bigint';
+import {decodeData} from '../../account-data';
+
+export type AddressLookupTableState = {
+  deactivationSlot: bigint;
+  lastExtendedSlot: number;
+  lastExtendedSlotStartIndex: number;
+  authority?: PublicKey;
+  addresses: Array<PublicKey>;
+};
+
+export type AddressLookupTableAccountArgs = {
+  key: PublicKey;
+  state: AddressLookupTableState;
+};
+
+/// The serialized size of lookup table metadata
+const LOOKUP_TABLE_META_SIZE = 56;
+
+export class AddressLookupTableAccount {
+  key: PublicKey;
+  state: AddressLookupTableState;
+
+  constructor(args: AddressLookupTableAccountArgs) {
+    this.key = args.key;
+    this.state = args.state;
+  }
+
+  isActive(): boolean {
+    const U64_MAX = 2n ** 64n - 1n;
+    return this.state.deactivationSlot === U64_MAX;
+  }
+
+  static deserialize(accountData: Uint8Array): AddressLookupTableState {
+    const meta = decodeData(LookupTableMetaLayout, accountData);
+
+    const serializedAddressesLen = accountData.length - LOOKUP_TABLE_META_SIZE;
+    assert(serializedAddressesLen >= 0, 'lookup table is invalid');
+    assert(serializedAddressesLen % 32 === 0, 'lookup table is invalid');
+
+    const numSerializedAddresses = serializedAddressesLen / 32;
+    const {addresses} = BufferLayout.struct<{addresses: Array<Uint8Array>}>([
+      BufferLayout.seq(Layout.publicKey(), numSerializedAddresses, 'addresses'),
+    ]).decode(accountData.slice(LOOKUP_TABLE_META_SIZE));
+
+    return {
+      deactivationSlot: meta.deactivationSlot,
+      lastExtendedSlot: meta.lastExtendedSlot,
+      lastExtendedSlotStartIndex: meta.lastExtendedStartIndex,
+      authority:
+        meta.authority.length !== 0
+          ? 
new PublicKey(meta.authority[0])
          : undefined,
+      addresses: addresses.map(address => new PublicKey(address)),
+    };
+  }
+}
+
+const LookupTableMetaLayout = {
+  index: 1,
+  layout: BufferLayout.struct<{
+    typeIndex: number;
+    deactivationSlot: bigint;
+    lastExtendedSlot: number;
+    lastExtendedStartIndex: number;
+    authority: Array<Uint8Array>;
+  }>([
+    BufferLayout.u32('typeIndex'),
+    u64('deactivationSlot'),
+    BufferLayout.nu64('lastExtendedSlot'),
+    BufferLayout.u8('lastExtendedStartIndex'),
+    BufferLayout.u8(), // option
+    BufferLayout.seq(
+      Layout.publicKey(),
+      BufferLayout.offset(BufferLayout.u8(), -1),
+      'authority',
+    ),
+  ]),
+};
diff --git a/web3.js/src/compute-budget.ts b/web3.js/src/programs/compute-budget.ts
similarity index 97%
rename from web3.js/src/compute-budget.ts
rename to web3.js/src/programs/compute-budget.ts
index bd4ab3d632..ebf37daa10 100644
--- a/web3.js/src/compute-budget.ts
+++ b/web3.js/src/programs/compute-budget.ts
@@ -5,10 +5,10 @@ import {
   decodeData,
   InstructionType,
   IInstructionInputData,
-} from './instruction';
-import {PublicKey} from './publickey';
-import {TransactionInstruction} from './transaction';
-import {u64} from './util/bigint';
+} from '../instruction';
+import {PublicKey} from '../publickey';
+import {TransactionInstruction} from '../transaction';
+import {u64} from '../utils/bigint';
 /**
  * Compute Budget Instruction class
diff --git a/web3.js/src/ed25519-program.ts b/web3.js/src/programs/ed25519.ts
similarity index 96%
rename from web3.js/src/ed25519-program.ts
rename to web3.js/src/programs/ed25519.ts
index 4e201a8319..8d31a21b70 100644
--- a/web3.js/src/ed25519-program.ts
+++ b/web3.js/src/programs/ed25519.ts
@@ -2,10 +2,10 @@ import {Buffer} from 'buffer';
 import * as BufferLayout from '@solana/buffer-layout';
 import nacl from 'tweetnacl';
-import {Keypair} from './keypair';
-import {PublicKey} from './publickey';
-import {TransactionInstruction} from './transaction';
-import assert from './util/assert';
+import {Keypair} from '../keypair';
+import {PublicKey} from '../publickey';
+import {TransactionInstruction} from '../transaction';
+import assert from '../utils/assert';
 const PRIVATE_KEY_BYTES = 64;
 const PUBLIC_KEY_BYTES = 32;
diff --git a/web3.js/src/programs/index.ts b/web3.js/src/programs/index.ts
new file mode 100644
index 0000000000..f2dc21fcb2
--- /dev/null
+++ b/web3.js/src/programs/index.ts
@@ -0,0 +1,7 @@
+export * from './address-lookup-table';
+export * from './compute-budget';
+export * from './ed25519';
+export * from './secp256k1';
+export * from './stake';
+export * from './system';
+export * from './vote';
diff --git a/web3.js/src/secp256k1-program.ts b/web3.js/src/programs/secp256k1.ts
similarity index 97%
rename from web3.js/src/secp256k1-program.ts
rename to web3.js/src/programs/secp256k1.ts
index 0df5e40b41..b3a358d188 100644
--- a/web3.js/src/secp256k1-program.ts
+++ b/web3.js/src/programs/secp256k1.ts
@@ -3,10 +3,10 @@ import * as BufferLayout from '@solana/buffer-layout';
 import secp256k1 from 'secp256k1';
 import sha3 from 'js-sha3';
-import {PublicKey} from './publickey';
-import {TransactionInstruction} from './transaction';
-import assert from './util/assert';
-import {toBuffer} from './util/to-buffer';
+import {PublicKey} from '../publickey';
+import {TransactionInstruction} from '../transaction';
+import assert from '../utils/assert';
+import {toBuffer} from '../utils/to-buffer';
 const {publicKeyCreate, ecdsaSign} = secp256k1;
diff --git a/web3.js/src/stake-program.ts b/web3.js/src/programs/stake.ts
similarity index
98% rename from web3.js/src/stake-program.ts rename to web3.js/src/programs/stake.ts index cbfe850d12..587491b1af 100644 --- a/web3.js/src/stake-program.ts +++ b/web3.js/src/programs/stake.ts @@ -5,17 +5,17 @@ import { decodeData, InstructionType, IInstructionInputData, -} from './instruction'; -import * as Layout from './layout'; -import {PublicKey} from './publickey'; -import {SystemProgram} from './system-program'; +} from '../instruction'; +import * as Layout from '../layout'; +import {PublicKey} from '../publickey'; +import {SystemProgram} from './system'; import { SYSVAR_CLOCK_PUBKEY, SYSVAR_RENT_PUBKEY, SYSVAR_STAKE_HISTORY_PUBKEY, -} from './sysvar'; -import {Transaction, TransactionInstruction} from './transaction'; -import {toBuffer} from './util/to-buffer'; +} from '../sysvar'; +import {Transaction, TransactionInstruction} from '../transaction'; +import {toBuffer} from '../utils/to-buffer'; /** * Address of the stake config account which configures the rate diff --git a/web3.js/src/system-program.ts b/web3.js/src/programs/system.ts similarity index 98% rename from web3.js/src/system-program.ts rename to web3.js/src/programs/system.ts index 89e0b4c82b..e833664ea3 100644 --- a/web3.js/src/system-program.ts +++ b/web3.js/src/programs/system.ts @@ -5,14 +5,14 @@ import { decodeData, InstructionType, IInstructionInputData, -} from './instruction'; -import * as Layout from './layout'; -import {NONCE_ACCOUNT_LENGTH} from './nonce-account'; -import {PublicKey} from './publickey'; -import {SYSVAR_RECENT_BLOCKHASHES_PUBKEY, SYSVAR_RENT_PUBKEY} from './sysvar'; -import {Transaction, TransactionInstruction} from './transaction'; -import {toBuffer} from './util/to-buffer'; -import {u64} from './util/bigint'; +} from '../instruction'; +import * as Layout from '../layout'; +import {NONCE_ACCOUNT_LENGTH} from '../nonce-account'; +import {PublicKey} from '../publickey'; +import {SYSVAR_RECENT_BLOCKHASHES_PUBKEY, SYSVAR_RENT_PUBKEY} from '../sysvar'; +import {Transaction, TransactionInstruction} from '../transaction'; +import {toBuffer} from '../utils/to-buffer'; +import {u64} from '../utils/bigint'; /** * Create account system transaction params diff --git a/web3.js/src/vote-program.ts b/web3.js/src/programs/vote.ts similarity index 90% rename from web3.js/src/vote-program.ts rename to web3.js/src/programs/vote.ts index 3e33d0ba68..db1a111da9 100644 --- a/web3.js/src/vote-program.ts +++ b/web3.js/src/programs/vote.ts @@ -5,13 +5,13 @@ import { decodeData, InstructionType, IInstructionInputData, -} from './instruction'; -import * as Layout from './layout'; -import {PublicKey} from './publickey'; -import {SystemProgram} from './system-program'; -import {SYSVAR_CLOCK_PUBKEY, SYSVAR_RENT_PUBKEY} from './sysvar'; -import {Transaction, TransactionInstruction} from './transaction'; -import {toBuffer} from './util/to-buffer'; +} from '../instruction'; +import * as Layout from '../layout'; +import {PublicKey} from '../publickey'; +import {SystemProgram} from './system'; +import {SYSVAR_CLOCK_PUBKEY, SYSVAR_RENT_PUBKEY} from '../sysvar'; +import {Transaction, TransactionInstruction} from '../transaction'; +import {toBuffer} from '../utils/to-buffer'; /** * Vote account info @@ -410,4 +410,25 @@ export class VoteProgram { data, }); } + + /** + * Generate a transaction to withdraw safely from a Vote account. 
+ * + * This method is a safeguard for vote accounts running validators: `safeWithdraw` + * checks that the withdrawal will not drop the account balance below the specified + * rent-exempt minimum. If you wish to close the vote account by withdrawing the full + * amount, call the `withdraw` method directly. + */ + static safeWithdraw( + params: WithdrawFromVoteAccountParams, + currentVoteAccountBalance: number, + rentExemptMinimum: number, + ): Transaction { + if (params.lamports > currentVoteAccountBalance - rentExemptMinimum) { + throw new Error( + 'Withdraw will leave vote account with insufficient funds.', + ); + } + return VoteProgram.withdraw(params); + } } diff --git a/web3.js/src/publickey.ts b/web3.js/src/publickey.ts index 89b9680263..b50ebd1832 100644 --- a/web3.js/src/publickey.ts +++ b/web3.js/src/publickey.ts @@ -4,14 +4,19 @@ import {Buffer} from 'buffer'; import nacl from 'tweetnacl'; import {sha256} from '@ethersproject/sha2'; -import {Struct, SOLANA_SCHEMA} from './util/borsh-schema'; -import {toBuffer} from './util/to-buffer'; +import {Struct, SOLANA_SCHEMA} from './utils/borsh-schema'; +import {toBuffer} from './utils/to-buffer'; /** * Maximum length of derived pubkey seed */ export const MAX_SEED_LENGTH = 32; +/** + * Size of public key in bytes + */ +export const PUBLIC_KEY_LENGTH = 32; + /** * Value to be converted into public key */ @@ -54,7 +59,7 @@ export class PublicKey extends Struct { if (typeof value === 'string') { // assume base 58 encoding by default const decoded = bs58.decode(value); - if (decoded.length != 32) { + if (decoded.length != PUBLIC_KEY_LENGTH) { throw new Error(`Invalid public key input`); } this._bn = new BN(decoded); @@ -103,7 +108,7 @@ export class PublicKey extends Struct { */ toBuffer(): Buffer { const b = this._bn.toArrayLike(Buffer); - if (b.length === 32) { + if (b.length === PUBLIC_KEY_LENGTH) { return b; } diff --git a/web3.js/src/transaction-constants.ts b/web3.js/src/transaction/constants.ts similarity index 100% rename from web3.js/src/transaction-constants.ts rename to web3.js/src/transaction/constants.ts diff --git a/web3.js/src/util/tx-expiry-custom-errors.ts b/web3.js/src/transaction/expiry-custom-errors.ts similarity index 100% rename from web3.js/src/util/tx-expiry-custom-errors.ts rename to web3.js/src/transaction/expiry-custom-errors.ts diff --git a/web3.js/src/transaction/index.ts b/web3.js/src/transaction/index.ts new file mode 100644 index 0000000000..ed913f5124 --- /dev/null +++ b/web3.js/src/transaction/index.ts @@ -0,0 +1,3 @@ +export * from './constants'; +export * from './expiry-custom-errors'; +export * from './legacy'; diff --git a/web3.js/src/transaction.ts b/web3.js/src/transaction/legacy.ts similarity index 97% rename from web3.js/src/transaction.ts rename to web3.js/src/transaction/legacy.ts index 686dd587a3..f5e030099d 100644 --- a/web3.js/src/transaction.ts +++ b/web3.js/src/transaction/legacy.ts @@ -2,19 +2,16 @@ import nacl from 'tweetnacl'; import bs58 from 'bs58'; import {Buffer} from 'buffer'; -import { - PACKET_DATA_SIZE, - SIGNATURE_LENGTH_IN_BYTES, -} from './transaction-constants'; -import {Connection} from './connection'; -import {Message} from './message'; -import {PublicKey} from './publickey'; -import * as shortvec from './util/shortvec-encoding'; -import {toBuffer} from './util/to-buffer'; -import invariant from './util/assert'; -import type {Signer} from './keypair'; -import type {Blockhash} from './blockhash'; -import type {CompiledInstruction} from './message'; +import 
{PACKET_DATA_SIZE, SIGNATURE_LENGTH_IN_BYTES} from './constants'; +import {Connection} from '../connection'; +import {Message} from '../message'; +import {PublicKey} from '../publickey'; +import * as shortvec from '../utils/shortvec-encoding'; +import {toBuffer} from '../utils/to-buffer'; +import invariant from '../utils/assert'; +import type {Signer} from '../keypair'; +import type {Blockhash} from '../blockhash'; +import type {CompiledInstruction} from '../message'; /** * Transaction signature as base-58 encoded string diff --git a/web3.js/src/util/__forks__/react-native/url-impl.ts b/web3.js/src/utils/__forks__/react-native/url-impl.ts similarity index 100% rename from web3.js/src/util/__forks__/react-native/url-impl.ts rename to web3.js/src/utils/__forks__/react-native/url-impl.ts diff --git a/web3.js/src/util/assert.ts b/web3.js/src/utils/assert.ts similarity index 100% rename from web3.js/src/util/assert.ts rename to web3.js/src/utils/assert.ts diff --git a/web3.js/src/util/bigint.ts b/web3.js/src/utils/bigint.ts similarity index 100% rename from web3.js/src/util/bigint.ts rename to web3.js/src/utils/bigint.ts diff --git a/web3.js/src/util/borsh-schema.ts b/web3.js/src/utils/borsh-schema.ts similarity index 100% rename from web3.js/src/util/borsh-schema.ts rename to web3.js/src/utils/borsh-schema.ts diff --git a/web3.js/src/util/cluster.ts b/web3.js/src/utils/cluster.ts similarity index 100% rename from web3.js/src/util/cluster.ts rename to web3.js/src/utils/cluster.ts diff --git a/web3.js/src/utils/index.ts b/web3.js/src/utils/index.ts new file mode 100644 index 0000000000..0c28bd829b --- /dev/null +++ b/web3.js/src/utils/index.ts @@ -0,0 +1,4 @@ +export * from './borsh-schema'; +export * from './cluster'; +export * from './send-and-confirm-raw-transaction'; +export * from './send-and-confirm-transaction'; diff --git a/web3.js/src/util/makeWebsocketUrl.ts b/web3.js/src/utils/makeWebsocketUrl.ts similarity index 100% rename from web3.js/src/util/makeWebsocketUrl.ts rename to web3.js/src/utils/makeWebsocketUrl.ts diff --git a/web3.js/src/util/promise-timeout.ts b/web3.js/src/utils/promise-timeout.ts similarity index 100% rename from web3.js/src/util/promise-timeout.ts rename to web3.js/src/utils/promise-timeout.ts diff --git a/web3.js/src/util/send-and-confirm-raw-transaction.ts b/web3.js/src/utils/send-and-confirm-raw-transaction.ts similarity index 100% rename from web3.js/src/util/send-and-confirm-raw-transaction.ts rename to web3.js/src/utils/send-and-confirm-raw-transaction.ts diff --git a/web3.js/src/util/send-and-confirm-transaction.ts b/web3.js/src/utils/send-and-confirm-transaction.ts similarity index 100% rename from web3.js/src/util/send-and-confirm-transaction.ts rename to web3.js/src/utils/send-and-confirm-transaction.ts diff --git a/web3.js/src/util/shortvec-encoding.ts b/web3.js/src/utils/shortvec-encoding.ts similarity index 100% rename from web3.js/src/util/shortvec-encoding.ts rename to web3.js/src/utils/shortvec-encoding.ts diff --git a/web3.js/src/util/sleep.ts b/web3.js/src/utils/sleep.ts similarity index 100% rename from web3.js/src/util/sleep.ts rename to web3.js/src/utils/sleep.ts diff --git a/web3.js/src/util/to-buffer.ts b/web3.js/src/utils/to-buffer.ts similarity index 100% rename from web3.js/src/util/to-buffer.ts rename to web3.js/src/utils/to-buffer.ts diff --git a/web3.js/src/util/url-impl.ts b/web3.js/src/utils/url-impl.ts similarity index 100% rename from web3.js/src/util/url-impl.ts rename to web3.js/src/utils/url-impl.ts
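The validator-info.ts hunk below is the first consumer of the new PUBLIC_KEY_LENGTH constant exported from publickey.ts above; it replaces a file-local PUBKEY_LENGTH. A minimal sketch of the same pattern, assuming the package root re-exports the constant (the helper name is hypothetical, not part of this patch):

import {PublicKey, PUBLIC_KEY_LENGTH} from '@solana/web3.js';

// Hypothetical helper: read a fixed-width public key out of raw account
// data without hard-coding the magic number 32.
function readPublicKey(bytes: Uint8Array, offset = 0): PublicKey {
  return new PublicKey(bytes.slice(offset, offset + PUBLIC_KEY_LENGTH));
}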
diff --git a/web3.js/src/validator-info.ts b/web3.js/src/validator-info.ts index 3d70980fc7..7a1e0a868d 100644 --- a/web3.js/src/validator-info.ts +++ b/web3.js/src/validator-info.ts @@ -7,8 +7,8 @@ import { } from 'superstruct'; import * as Layout from './layout'; -import * as shortvec from './util/shortvec-encoding'; -import {PublicKey} from './publickey'; +import * as shortvec from './utils/shortvec-encoding'; +import {PublicKey, PUBLIC_KEY_LENGTH} from './publickey'; export const VALIDATOR_INFO_KEY = new PublicKey( 'Va1idator1nfo111111111111111111111111111111', @@ -77,16 +77,14 @@ export class ValidatorInfo { static fromConfigData( buffer: Buffer | Uint8Array | Array<number>, ): ValidatorInfo | null { - const PUBKEY_LENGTH = 32; - let byteArray = [...buffer]; const configKeyCount = shortvec.decodeLength(byteArray); if (configKeyCount !== 2) return null; const configKeys: Array<{publicKey: PublicKey; isSigner: boolean}> = []; for (let i = 0; i < 2; i++) { - const publicKey = new PublicKey(byteArray.slice(0, PUBKEY_LENGTH)); - byteArray = byteArray.slice(PUBKEY_LENGTH); + const publicKey = new PublicKey(byteArray.slice(0, PUBLIC_KEY_LENGTH)); + byteArray = byteArray.slice(PUBLIC_KEY_LENGTH); const isSigner = byteArray.slice(0, 1)[0] === 1; byteArray = byteArray.slice(1); configKeys.push({publicKey, isSigner}); diff --git a/web3.js/src/vote-account.ts b/web3.js/src/vote-account.ts index 0a824d43b7..e764379550 100644 --- a/web3.js/src/vote-account.ts +++ b/web3.js/src/vote-account.ts @@ -3,7 +3,7 @@ import type {Buffer} from 'buffer'; import * as Layout from './layout'; import {PublicKey} from './publickey'; -import {toBuffer} from './util/to-buffer'; +import {toBuffer} from './utils/to-buffer'; export const VOTE_PROGRAM_ID = new PublicKey( 'Vote111111111111111111111111111111111111111', diff --git a/web3.js/test/agent-manager.test.ts b/web3.js/test/agent-manager.test.ts index 071144458c..929296a3be 100644 --- a/web3.js/test/agent-manager.test.ts +++ b/web3.js/test/agent-manager.test.ts @@ -1,7 +1,7 @@ import {expect} from 'chai'; import {AgentManager, DESTROY_TIMEOUT_MS} from '../src/agent-manager'; -import {sleep} from '../src/util/sleep'; +import {sleep} from '../src/utils/sleep'; describe('AgentManager', () => { it('works', async () => { diff --git a/web3.js/test/cluster.test.ts b/web3.js/test/cluster.test.ts index 4e1b81edbc..8acf6c3707 100644 --- a/web3.js/test/cluster.test.ts +++ b/web3.js/test/cluster.test.ts @@ -1,6 +1,6 @@ import {expect} from 'chai'; -import {clusterApiUrl} from '../src/util/cluster'; +import {clusterApiUrl} from '../src/utils/cluster'; describe('Cluster Util', () => { it('invalid', () => { diff --git a/web3.js/test/connection.test.ts b/web3.js/test/connection.test.ts index 7cc7c987ed..10b62f04dd 100644 --- a/web3.js/test/connection.test.ts +++ b/web3.js/test/connection.test.ts @@ -18,8 +18,9 @@ import { sendAndConfirmTransaction, Keypair, Message, + AddressLookupTableProgram, } from '../src'; -import invariant from '../src/util/assert'; +import invariant from '../src/utils/assert'; import {MOCK_PORT, url} from './url'; import { AccountInfo, @@ -35,7 +36,7 @@ import { SignatureResult, SlotInfo, } from '../src/connection'; -import {sleep} from '../src/util/sleep'; +import {sleep} from '../src/utils/sleep'; import { helpers, mockErrorMessage, @@ -49,16 +50,17 @@ import { restoreRpcWebSocket, mockRpcMessage, } from './mocks/rpc-websockets'; -import {TransactionInstruction, TransactionSignature} from '../src/transaction'; +import { + TransactionInstruction, + TransactionSignature, + TransactionExpiredBlockheightExceededError, + 
TransactionExpiredTimeoutError, +} from '../src/transaction'; import type { SignatureStatus, TransactionError, KeyedAccountInfo, } from '../src/connection'; -import { - TransactionExpiredBlockheightExceededError, - TransactionExpiredTimeoutError, -} from '../src/util/tx-expiry-custom-errors'; use(chaiAsPromised); @@ -4242,5 +4244,90 @@ describe('Connection', function () { const version = await connection.getVersion(); expect(version['solana-core']).to.be.ok; }).timeout(20 * 1000); + + it('getAddressLookupTable', async () => { + const payer = Keypair.generate(); + + await helpers.airdrop({ + connection, + address: payer.publicKey, + amount: LAMPORTS_PER_SOL, + }); + + const lookupTableAddresses = new Array(10) + .fill(0) + .map(() => Keypair.generate().publicKey); + + const recentSlot = await connection.getSlot('finalized'); + const [createIx, lookupTableKey] = + AddressLookupTableProgram.createLookupTable({ + recentSlot, + payer: payer.publicKey, + authority: payer.publicKey, + }); + + // create, extend, and fetch + { + const transaction = new Transaction().add(createIx).add( + AddressLookupTableProgram.extendLookupTable({ + lookupTable: lookupTableKey, + addresses: lookupTableAddresses, + authority: payer.publicKey, + payer: payer.publicKey, + }), + ); + await helpers.processTransaction({ + connection, + transaction, + signers: [payer], + commitment: 'processed', + }); + + const lookupTableResponse = await connection.getAddressLookupTable( + lookupTableKey, + { + commitment: 'processed', + }, + ); + const lookupTableAccount = lookupTableResponse.value; + if (!lookupTableAccount) { + expect(lookupTableAccount).to.be.ok; + return; + } + expect(lookupTableAccount.isActive()).to.be.true; + expect(lookupTableAccount.state.authority).to.eql(payer.publicKey); + expect(lookupTableAccount.state.addresses).to.eql(lookupTableAddresses); + } + + // freeze and fetch + { + const transaction = new Transaction().add( + AddressLookupTableProgram.freezeLookupTable({ + lookupTable: lookupTableKey, + authority: payer.publicKey, + }), + ); + await helpers.processTransaction({ + connection, + transaction, + signers: [payer], + commitment: 'processed', + }); + + const lookupTableResponse = await connection.getAddressLookupTable( + lookupTableKey, + { + commitment: 'processed', + }, + ); + const lookupTableAccount = lookupTableResponse.value; + if (!lookupTableAccount) { + expect(lookupTableAccount).to.be.ok; + return; + } + expect(lookupTableAccount.isActive()).to.be.true; + expect(lookupTableAccount.state.authority).to.be.undefined; + } + }); } }); diff --git a/web3.js/test/mocks/rpc-http.ts b/web3.js/test/mocks/rpc-http.ts index c0735327b2..a56148fec3 100644 --- a/web3.js/test/mocks/rpc-http.ts +++ b/web3.js/test/mocks/rpc-http.ts @@ -4,7 +4,7 @@ import * as mockttp from 'mockttp'; import {mockRpcMessage} from './rpc-websockets'; import {Connection, PublicKey, Transaction, Signer} from '../../src'; -import invariant from '../../src/util/assert'; +import invariant from '../../src/utils/assert'; import type {Commitment, HttpHeaders, RpcParams} from '../../src/connection'; export const mockServer: mockttp.Mockttp | undefined =
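The getAddressLookupTable test above doubles as a reference for application code: create and extend the table, then poll it through the RPC client. A minimal consumer-side sketch, assuming the Connection and table address are supplied by the caller (only the API names come from this patch; the helper is hypothetical):

import {Connection, PublicKey} from '@solana/web3.js';

// Fetch a lookup table and report its status; `value` is null when no
// account exists at the address or it is not a lookup table.
async function inspectLookupTable(connection: Connection, tableKey: PublicKey) {
  const {value: table} = await connection.getAddressLookupTable(tableKey);
  if (table === null) {
    throw new Error(`no lookup table at ${tableKey.toBase58()}`);
  }
  console.log('active:', table.isActive());
  // After freezeLookupTable the authority is undefined (see the test above).
  console.log('authority:', table.state.authority?.toBase58() ?? 'none (frozen)');
  console.log('addresses:', table.state.addresses.length);
}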
diff --git a/web3.js/test/address-lookup-table-program.test.ts b/web3.js/test/program-tests/address-lookup-table.test.ts similarity index 98% rename from web3.js/test/address-lookup-table-program.test.ts rename to web3.js/test/program-tests/address-lookup-table.test.ts index 3102f34dde..5b5d486052 100644 --- a/web3.js/test/address-lookup-table-program.test.ts +++ b/web3.js/test/program-tests/address-lookup-table.test.ts @@ -8,10 +8,10 @@ import { AddressLookupTableInstruction, Connection, sendAndConfirmTransaction, -} from '../src'; -import {sleep} from '../src/util/sleep'; -import {helpers} from './mocks/rpc-http'; -import {url} from './url'; +} from '../../src'; +import {sleep} from '../../src/utils/sleep'; +import {helpers} from '../mocks/rpc-http'; +import {url} from '../url'; use(chaiAsPromised); diff --git a/web3.js/test/compute-budget.test.ts b/web3.js/test/program-tests/compute-budget.test.ts similarity index 98% rename from web3.js/test/compute-budget.test.ts rename to web3.js/test/program-tests/compute-budget.test.ts index 0f387e4c22..c3ae529026 100644 --- a/web3.js/test/compute-budget.test.ts +++ b/web3.js/test/program-tests/compute-budget.test.ts @@ -9,9 +9,9 @@ import { ComputeBudgetProgram, ComputeBudgetInstruction, sendAndConfirmTransaction, -} from '../src'; -import {helpers} from './mocks/rpc-http'; -import {url} from './url'; +} from '../../src'; +import {helpers} from '../mocks/rpc-http'; +import {url} from '../url'; use(chaiAsPromised); diff --git a/web3.js/test/ed25519-program.test.ts b/web3.js/test/program-tests/ed25519.test.ts similarity index 96% rename from web3.js/test/ed25519-program.test.ts rename to web3.js/test/program-tests/ed25519.test.ts index a2598f782b..3f4d2c6051 100644 --- a/web3.js/test/ed25519-program.test.ts +++ b/web3.js/test/program-tests/ed25519.test.ts @@ -8,8 +8,8 @@ import { Keypair, LAMPORTS_PER_SOL, Transaction, Ed25519Program, -} from '../src'; -import {url} from './url'; +} from '../../src'; +import {url} from '../url'; if (process.env.TEST_LIVE) { describe('ed25519', () => { diff --git a/web3.js/test/secp256k1-program.test.ts b/web3.js/test/program-tests/secp256k1.test.ts similarity index 98% rename from web3.js/test/secp256k1-program.test.ts rename to web3.js/test/program-tests/secp256k1.test.ts index 8db59ebc34..b2def5e2dd 100644 --- a/web3.js/test/secp256k1-program.test.ts +++ b/web3.js/test/program-tests/secp256k1.test.ts @@ -9,8 +9,8 @@ import { Keypair, LAMPORTS_PER_SOL, Transaction, Secp256k1Program, -} from '../src'; -import {url} from './url'; +} from '../../src'; +import {url} from '../url'; const randomPrivateKey = () => { let privateKey; diff --git a/web3.js/test/stake-program.test.ts b/web3.js/test/program-tests/stake.test.ts similarity index 99% rename from web3.js/test/stake-program.test.ts rename to web3.js/test/program-tests/stake.test.ts index 381ac20ff4..15c7625a2b 100644 --- a/web3.js/test/stake-program.test.ts +++ b/web3.js/test/program-tests/stake.test.ts @@ -14,9 +14,9 @@ import { StakeProgram, SystemInstruction, Transaction, -} from '../src'; -import {helpers} from './mocks/rpc-http'; -import {url} from './url'; +} from '../../src'; +import {helpers} from '../mocks/rpc-http'; +import {url} from '../url'; use(chaiAsPromised); diff --git a/web3.js/test/system-program.test.ts b/web3.js/test/program-tests/system.test.ts similarity index 98% rename from web3.js/test/system-program.test.ts rename to web3.js/test/program-tests/system.test.ts index b85344c006..cfdca891b3 100644 --- a/web3.js/test/system-program.test.ts +++ b/web3.js/test/program-tests/system.test.ts @@ -12,11 +12,11 @@ import { TransactionInstruction, sendAndConfirmTransaction, LAMPORTS_PER_SOL, -} from '../src'; -import {NONCE_ACCOUNT_LENGTH} from '../src/nonce-account'; -import {sleep} from '../src/util/sleep'; -import {helpers} from './mocks/rpc-http'; -import {url} from './url'; +} from '../../src'; +import {NONCE_ACCOUNT_LENGTH} 
from '../../src/nonce-account'; +import {sleep} from '../../src/utils/sleep'; +import {helpers} from '../mocks/rpc-http'; +import {url} from '../url'; describe('SystemProgram', () => { it('createAccount', () => { diff --git a/web3.js/test/vote-program.test.ts b/web3.js/test/program-tests/vote.test.ts similarity index 93% rename from web3.js/test/vote-program.test.ts rename to web3.js/test/program-tests/vote.test.ts index aab52b07f0..6cd349a0c3 100644 --- a/web3.js/test/vote-program.test.ts +++ b/web3.js/test/program-tests/vote.test.ts @@ -11,9 +11,9 @@ import { sendAndConfirmTransaction, SystemInstruction, Connection, -} from '../src'; -import {helpers} from './mocks/rpc-http'; -import {url} from './url'; +} from '../../src'; +import {helpers} from '../mocks/rpc-http'; +import {url} from '../url'; use(chaiAsPromised); @@ -167,6 +167,21 @@ describe('VoteProgram', () => { // Withdraw from Vote account let recipient = Keypair.generate(); + const voteBalance = await connection.getBalance(newVoteAccount.publicKey); + + expect(() => + VoteProgram.safeWithdraw( + { + votePubkey: newVoteAccount.publicKey, + authorizedWithdrawerPubkey: authorized.publicKey, + lamports: voteBalance - minimumAmount + 1, + toPubkey: recipient.publicKey, + }, + voteBalance, + minimumAmount, + ), + ).to.throw('Withdraw will leave vote account with insufficient funds.'); + let withdraw = VoteProgram.withdraw({ votePubkey: newVoteAccount.publicKey, authorizedWithdrawerPubkey: authorized.publicKey, diff --git a/web3.js/test/shortvec-encoding.test.ts b/web3.js/test/shortvec-encoding.test.ts index b4262d081f..68f8415862 100644 --- a/web3.js/test/shortvec-encoding.test.ts +++ b/web3.js/test/shortvec-encoding.test.ts @@ -1,6 +1,6 @@ import {expect} from 'chai'; -import {decodeLength, encodeLength} from '../src/util/shortvec-encoding'; +import {decodeLength, encodeLength} from '../src/utils/shortvec-encoding'; function checkDecodedArray(array: Array<number>, expectedValue: number) { expect(decodeLength(array)).to.eq(expectedValue); diff --git a/web3.js/test/transaction-payer.test.ts b/web3.js/test/transaction-payer.test.ts index 436d792300..f4127327ce 100644 --- a/web3.js/test/transaction-payer.test.ts +++ b/web3.js/test/transaction-payer.test.ts @@ -8,7 +8,7 @@ import { SystemProgram, LAMPORTS_PER_SOL, } from '../src'; -import invariant from '../src/util/assert'; +import invariant from '../src/utils/assert'; import {MOCK_PORT, url} from './url'; import {helpers, mockRpcResponse, mockServer} from './mocks/rpc-http'; import {stubRpcWebSocket, restoreRpcWebSocket} from './mocks/rpc-websockets'; diff --git a/web3.js/test/transaction.test.ts b/web3.js/test/transaction.test.ts index bcd648bbc7..dd40d1ecf6 100644 --- a/web3.js/test/transaction.test.ts +++ b/web3.js/test/transaction.test.ts @@ -7,11 +7,10 @@ import {Connection} from '../src/connection'; import {Keypair} from '../src/keypair'; import {PublicKey} from '../src/publickey'; import {Transaction, TransactionInstruction} from '../src/transaction'; -import {StakeProgram} from '../src/stake-program'; -import {SystemProgram} from '../src/system-program'; +import {StakeProgram, SystemProgram} from '../src/programs'; import {Message} from '../src/message'; -import invariant from '../src/util/assert'; -import {toBuffer} from '../src/util/to-buffer'; +import invariant from '../src/utils/assert'; +import {toBuffer} from '../src/utils/to-buffer'; import {helpers} from './mocks/rpc-http'; import {url} from './url'; 
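The vote test above exercises the new safeWithdraw guard by requesting one lamport more than the balance minus the rent-exempt minimum. A sketch of the happy path (the 3731-byte vote account size and the helper name are assumptions, not part of this patch):

import {Connection, PublicKey, VoteProgram} from '@solana/web3.js';

// Hypothetical helper: build a withdrawal that safeWithdraw will reject
// if it would leave the vote account below its rent-exempt minimum.
async function buildSafeWithdraw(
  connection: Connection,
  votePubkey: PublicKey,
  authorizedWithdrawerPubkey: PublicKey,
  toPubkey: PublicKey,
  lamports: number,
) {
  const balance = await connection.getBalance(votePubkey);
  const rentExemptMinimum =
    await connection.getMinimumBalanceForRentExemption(3731); // assumed size
  return VoteProgram.safeWithdraw(
    {votePubkey, authorizedWithdrawerPubkey, lamports, toPubkey},
    balance,
    rentExemptMinimum,
  );
}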
diff --git a/web3.js/test/websocket.test.ts b/web3.js/test/websocket.test.ts index 84aa9018c3..b1d9a1932f 100644 --- a/web3.js/test/websocket.test.ts +++ b/web3.js/test/websocket.test.ts @@ -5,7 +5,7 @@ import chaiAsPromised from 'chai-as-promised'; import {Connection} from '../src'; import {url, wsUrl} from './url'; -import {sleep} from '../src/util/sleep'; +import {sleep} from '../src/utils/sleep'; use(chaiAsPromised); diff --git a/web3.js/yarn.lock b/web3.js/yarn.lock index 09cd3e9a98..3bf5bfd343 100644 --- a/web3.js/yarn.lock +++ b/web3.js/yarn.lock @@ -23,74 +23,74 @@ dependencies: "@babel/highlight" "^7.10.4" -"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.10.4", "@babel/code-frame@^7.16.7": - version "7.16.7" - resolved "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.16.7.tgz" - integrity sha512-iAXqUn8IIeBTNd72xsFlgaXHkMBMt6y4HJp1tIaK465CWLT/fG1aqB7ykr95gHHmlBdGbFeWWfyB4NJJ0nmeIg== +"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.10.4", "@babel/code-frame@^7.16.7", "@babel/code-frame@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.18.6.tgz#3b25d38c89600baa2dcc219edfa88a74eb2c427a" + integrity sha512-TDCmlK5eOvH+eH7cdAFlNXeVJqWIQ7gW9tY1GJIpUtFb6CmjVyq2VM3u71bOyR8CRihcCgMUYoDNyLXao3+70Q== dependencies: - "@babel/highlight" "^7.16.7" + "@babel/highlight" "^7.18.6" -"@babel/compat-data@^7.13.11", "@babel/compat-data@^7.17.10": - version "7.17.10" - resolved "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.17.10.tgz" - integrity sha512-GZt/TCsG70Ms19gfZO1tM4CVnXsPgEPBCpJu+Qz3L0LUDsY5nZqFZglIoPC1kIYOtNBZlrnFT+klg12vFGZXrw== +"@babel/compat-data@^7.17.7", "@babel/compat-data@^7.18.8": + version "7.18.8" + resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.18.8.tgz#2483f565faca607b8535590e84e7de323f27764d" + integrity sha512-HSmX4WZPPK3FUxYp7g2T6EyO8j96HlZJlxmKPSh6KAcqwyDrfx7hKjXpAW/0FhFfTJsR0Yt4lAjLI2coMptIHQ== "@babel/core@^7.12.13", "@babel/core@^7.7.5": - version "7.18.0" - resolved "https://registry.npmjs.org/@babel/core/-/core-7.18.0.tgz" - integrity sha512-Xyw74OlJwDijToNi0+6BBI5mLLR5+5R3bcSH80LXzjzEGEUlvNzujEE71BaD/ApEZHAvFI/Mlmp4M5lIkdeeWw== + version "7.18.13" + resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.18.13.tgz#9be8c44512751b05094a4d3ab05fc53a47ce00ac" + integrity sha512-ZisbOvRRusFktksHSG6pjj1CSvkPkcZq/KHD45LAkVP/oiHJkNBZWfpvlLmX8OtHDG8IuzsFlVRWo08w7Qxn0A== dependencies: "@ampproject/remapping" "^2.1.0" - "@babel/code-frame" "^7.16.7" - "@babel/generator" "^7.18.0" - "@babel/helper-compilation-targets" "^7.17.10" - "@babel/helper-module-transforms" "^7.18.0" - "@babel/helpers" "^7.18.0" - "@babel/parser" "^7.18.0" - "@babel/template" "^7.16.7" - "@babel/traverse" "^7.18.0" - "@babel/types" "^7.18.0" + "@babel/code-frame" "^7.18.6" + "@babel/generator" "^7.18.13" + "@babel/helper-compilation-targets" "^7.18.9" + "@babel/helper-module-transforms" "^7.18.9" + "@babel/helpers" "^7.18.9" + "@babel/parser" "^7.18.13" + "@babel/template" "^7.18.10" + "@babel/traverse" "^7.18.13" + "@babel/types" "^7.18.13" convert-source-map "^1.7.0" debug "^4.1.0" gensync "^1.0.0-beta.2" json5 "^2.2.1" semver "^6.3.0" -"@babel/generator@^7.18.0": - version "7.18.0" - resolved "https://registry.npmjs.org/@babel/generator/-/generator-7.18.0.tgz" - integrity sha512-81YO9gGx6voPXlvYdZBliFXAZU8vZ9AZ6z+CjlmcnaeOcYSFbMTpdeDUO9xD9dh/68Vq03I8ZspfUTPfitcDHg== +"@babel/generator@^7.18.13": + version "7.18.13" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.18.13.tgz#59550cbb9ae79b8def15587bdfbaa388c4abf212" + integrity 
sha512-CkPg8ySSPuHTYPJYo7IRALdqyjM9HCbt/3uOBEFbzyGVP6Mn8bwFPB0jX6982JVNBlYzM1nnPkfjuXSOPtQeEQ== dependencies: - "@babel/types" "^7.18.0" - "@jridgewell/gen-mapping" "^0.3.0" + "@babel/types" "^7.18.13" + "@jridgewell/gen-mapping" "^0.3.2" jsesc "^2.5.1" -"@babel/helper-annotate-as-pure@^7.16.7": +"@babel/helper-annotate-as-pure@^7.16.7", "@babel/helper-annotate-as-pure@^7.18.6": version "7.18.6" resolved "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.18.6.tgz" integrity sha512-duORpUiYrEpzKIop6iNbjnwKLAKnJ47csTyRACyEmWj0QdUrm5aqNJGHSSEQSUAvNW0ojX0dOmK9dZduvkfeXA== dependencies: "@babel/types" "^7.18.6" -"@babel/helper-builder-binary-assignment-operator-visitor@^7.16.7": - version "7.16.7" - resolved "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.16.7.tgz" - integrity sha512-C6FdbRaxYjwVu/geKW4ZeQ0Q31AftgRcdSnZ5/jsH6BzCJbtvXvhpfkbkThYSuutZA7nCXpPR6AD9zd1dprMkA== +"@babel/helper-builder-binary-assignment-operator-visitor@^7.18.6": + version "7.18.9" + resolved "https://registry.yarnpkg.com/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.18.9.tgz#acd4edfd7a566d1d51ea975dff38fd52906981bb" + integrity sha512-yFQ0YCHoIqarl8BCRwBL8ulYUaZpz3bNsA7oFepAzee+8/+ImtADXNOmO5vJvsPff3qi+hvpkY/NYBTrBQgdNw== dependencies: - "@babel/helper-explode-assignable-expression" "^7.16.7" - "@babel/types" "^7.16.7" + "@babel/helper-explode-assignable-expression" "^7.18.6" + "@babel/types" "^7.18.9" -"@babel/helper-compilation-targets@^7.13.0", "@babel/helper-compilation-targets@^7.16.7", "@babel/helper-compilation-targets@^7.17.10": - version "7.17.10" - resolved "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.17.10.tgz" - integrity sha512-gh3RxjWbauw/dFiU/7whjd0qN9K6nPJMqe6+Er7rOavFh0CQUSwhAE3IcTho2rywPJFxej6TUUHDkWcYI6gGqQ== +"@babel/helper-compilation-targets@^7.13.0", "@babel/helper-compilation-targets@^7.17.7", "@babel/helper-compilation-targets@^7.18.9": + version "7.18.9" + resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.18.9.tgz#69e64f57b524cde3e5ff6cc5a9f4a387ee5563bf" + integrity sha512-tzLCyVmqUiFlcFoAPLA/gL9TeYrF61VLNtb+hvkuVaB5SUjW7jcfrglBIX1vUIoT7CLP3bBlIMeyEsIl2eFQNg== dependencies: - "@babel/compat-data" "^7.17.10" - "@babel/helper-validator-option" "^7.16.7" + "@babel/compat-data" "^7.18.8" + "@babel/helper-validator-option" "^7.18.6" browserslist "^4.20.2" semver "^6.3.0" -"@babel/helper-create-class-features-plugin@^7.16.7", "@babel/helper-create-class-features-plugin@^7.17.12", "@babel/helper-create-class-features-plugin@^7.18.0": +"@babel/helper-create-class-features-plugin@^7.16.7": version "7.18.0" resolved "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.18.0.tgz" integrity sha512-Kh8zTGR9de3J63e5nS0rQUdRs/kbtwoeQQ0sriS0lItjC96u8XXZN6lKpuyWd2coKSU13py/y+LTmThLuVX0Pg== @@ -103,13 +103,26 @@ "@babel/helper-replace-supers" "^7.16.7" "@babel/helper-split-export-declaration" "^7.16.7" -"@babel/helper-create-regexp-features-plugin@^7.16.7", "@babel/helper-create-regexp-features-plugin@^7.17.12": - version "7.17.12" - resolved "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.17.12.tgz" - integrity 
sha512-b2aZrV4zvutr9AIa6/gA3wsZKRwTKYoDxYiFKcESS3Ug2GTXzwBEvMuuFLhCQpEnRXs1zng4ISAXSUxxKBIcxw== +"@babel/helper-create-class-features-plugin@^7.18.6": + version "7.18.9" + resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.18.9.tgz#d802ee16a64a9e824fcbf0a2ffc92f19d58550ce" + integrity sha512-WvypNAYaVh23QcjpMR24CwZY2Nz6hqdOcFdPbNpV56hL5H6KiFheO7Xm1aPdlLQ7d5emYZX7VZwPp9x3z+2opw== + dependencies: + "@babel/helper-annotate-as-pure" "^7.18.6" + "@babel/helper-environment-visitor" "^7.18.9" + "@babel/helper-function-name" "^7.18.9" + "@babel/helper-member-expression-to-functions" "^7.18.9" + "@babel/helper-optimise-call-expression" "^7.18.6" + "@babel/helper-replace-supers" "^7.18.9" + "@babel/helper-split-export-declaration" "^7.18.6" + +"@babel/helper-create-regexp-features-plugin@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.18.6.tgz#3e35f4e04acbbf25f1b3534a657610a000543d3c" + integrity sha512-7LcpH1wnQLGrI+4v+nPp+zUvIkF9x0ddv1Hkdue10tg3gmRnLy97DXh4STiOf1qeIInyD69Qv5kKSZzKD8B/7A== dependencies: - "@babel/helper-annotate-as-pure" "^7.16.7" - regexpu-core "^5.0.1" + "@babel/helper-annotate-as-pure" "^7.18.6" + regexpu-core "^5.1.0" "@babel/helper-define-polyfill-provider@^0.3.1": version "0.3.1" @@ -125,6 +138,18 @@ resolve "^1.14.2" semver "^6.1.2" +"@babel/helper-define-polyfill-provider@^0.3.2": + version "0.3.2" + resolved "https://registry.yarnpkg.com/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.3.2.tgz#bd10d0aca18e8ce012755395b05a79f45eca5073" + integrity sha512-r9QJJ+uDWrd+94BSPcP6/de67ygLtvVy6cK4luE6MOuDsZIdoaPBnfSpbO/+LTifjPckbKXRuI9BB/Z2/y3iTg== + dependencies: + "@babel/helper-compilation-targets" "^7.17.7" + "@babel/helper-plugin-utils" "^7.16.7" + debug "^4.1.1" + lodash.debounce "^4.0.8" + resolve "^1.14.2" + semver "^6.1.2" + "@babel/helper-environment-visitor@^7.16.7": version "7.16.7" resolved "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.16.7.tgz" @@ -132,14 +157,19 @@ dependencies: "@babel/types" "^7.16.7" -"@babel/helper-explode-assignable-expression@^7.16.7": - version "7.16.7" - resolved "https://registry.npmjs.org/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.16.7.tgz" - integrity sha512-KyUenhWMC8VrxzkGP0Jizjo4/Zx+1nNZhgocs+gLzyZyB8SHidhoq9KK/8Ato4anhwsivfkBLftky7gvzbZMtQ== +"@babel/helper-environment-visitor@^7.18.9": + version "7.18.9" + resolved "https://registry.yarnpkg.com/@babel/helper-environment-visitor/-/helper-environment-visitor-7.18.9.tgz#0c0cee9b35d2ca190478756865bb3528422f51be" + integrity sha512-3r/aACDJ3fhQ/EVgFy0hpj8oHyHpQc+LPtJoY9SzTThAsStm4Ptegq92vqKoE3vD706ZVFWITnMnxucw+S9Ipg== + +"@babel/helper-explode-assignable-expression@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.18.6.tgz#41f8228ef0a6f1a036b8dfdfec7ce94f9a6bc096" + integrity sha512-eyAYAsQmB80jNfg4baAtLeWAQHfHFiR483rzFK+BhETlGZaQC9bsfrugfXDCbRHLQbIA7U5NxhhOxN7p/dWIcg== dependencies: - "@babel/types" "^7.16.7" + "@babel/types" "^7.18.6" -"@babel/helper-function-name@^7.16.7", "@babel/helper-function-name@^7.17.9": +"@babel/helper-function-name@^7.17.9": version "7.17.9" resolved "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.17.9.tgz" integrity 
sha512-7cRisGlVtiVqZ0MW0/yFB4atgpGLWEHUVYnb448hZK4x+vih0YO5UoS11XIYtZYqHd0dIPMdUSv8q5K4LdMnIg== @@ -147,12 +177,20 @@ "@babel/template" "^7.16.7" "@babel/types" "^7.17.0" -"@babel/helper-hoist-variables@^7.16.7": - version "7.16.7" - resolved "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.16.7.tgz" - integrity sha512-m04d/0Op34H5v7pbZw6pSKP7weA6lsMvfiIAMeIvkY/R4xQtBSMFEigu9QTZ2qB/9l22vsxtM8a+Q8CzD255fg== +"@babel/helper-function-name@^7.18.9": + version "7.18.9" + resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.18.9.tgz#940e6084a55dee867d33b4e487da2676365e86b0" + integrity sha512-fJgWlZt7nxGksJS9a0XdSaI4XvpExnNIgRP+rVefWh5U7BL8pPuir6SJUmFKRfjWQ51OtWSzwOxhaH/EBWWc0A== dependencies: - "@babel/types" "^7.16.7" + "@babel/template" "^7.18.6" + "@babel/types" "^7.18.9" + +"@babel/helper-hoist-variables@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz#d4d2c8fb4baeaa5c68b99cc8245c56554f926678" + integrity sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q== + dependencies: + "@babel/types" "^7.18.6" "@babel/helper-member-expression-to-functions@^7.16.7", "@babel/helper-member-expression-to-functions@^7.17.7": version "7.17.7" @@ -161,26 +199,33 @@ dependencies: "@babel/types" "^7.17.0" -"@babel/helper-module-imports@^7.10.4", "@babel/helper-module-imports@^7.12.13", "@babel/helper-module-imports@^7.16.7": +"@babel/helper-member-expression-to-functions@^7.18.9": + version "7.18.9" + resolved "https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.18.9.tgz#1531661e8375af843ad37ac692c132841e2fd815" + integrity sha512-RxifAh2ZoVU67PyKIO4AMi1wTenGfMR/O/ae0CCRqwgBAt5v7xjdtRw7UoSbsreKrQn5t7r89eruK/9JjYHuDg== + dependencies: + "@babel/types" "^7.18.9" + +"@babel/helper-module-imports@^7.10.4", "@babel/helper-module-imports@^7.12.13", "@babel/helper-module-imports@^7.16.7", "@babel/helper-module-imports@^7.18.6": version "7.18.6" resolved "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.18.6.tgz" integrity sha512-0NFvs3VkuSYbFi1x2Vd6tKrywq+z/cLeYC/RJNFrIX/30Bf5aiGYbtvGXolEktzJH8o5E5KJ3tT+nkxuuZFVlA== dependencies: "@babel/types" "^7.18.6" -"@babel/helper-module-transforms@^7.18.0": - version "7.18.0" - resolved "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.18.0.tgz" - integrity sha512-kclUYSUBIjlvnzN2++K9f2qzYKFgjmnmjwL4zlmU5f8ZtzgWe8s0rUPSTGy2HmK4P8T52MQsS+HTQAgZd3dMEA== +"@babel/helper-module-transforms@^7.18.6", "@babel/helper-module-transforms@^7.18.9": + version "7.18.9" + resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.18.9.tgz#5a1079c005135ed627442df31a42887e80fcb712" + integrity sha512-KYNqY0ICwfv19b31XzvmI/mfcylOzbLtowkw+mfvGPAQ3kfCnMLYbED3YecL5tPd8nAYFQFAd6JHp2LxZk/J1g== dependencies: - "@babel/helper-environment-visitor" "^7.16.7" - "@babel/helper-module-imports" "^7.16.7" - "@babel/helper-simple-access" "^7.17.7" - "@babel/helper-split-export-declaration" "^7.16.7" - "@babel/helper-validator-identifier" "^7.16.7" - "@babel/template" "^7.16.7" - "@babel/traverse" "^7.18.0" - "@babel/types" "^7.18.0" + "@babel/helper-environment-visitor" "^7.18.9" + "@babel/helper-module-imports" "^7.18.6" + "@babel/helper-simple-access" "^7.18.6" + "@babel/helper-split-export-declaration" "^7.18.6" + 
"@babel/helper-validator-identifier" "^7.18.6" + "@babel/template" "^7.18.6" + "@babel/traverse" "^7.18.9" + "@babel/types" "^7.18.9" "@babel/helper-optimise-call-expression@^7.16.7": version "7.16.7" @@ -189,19 +234,27 @@ dependencies: "@babel/types" "^7.16.7" -"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.13.0", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.16.7", "@babel/helper-plugin-utils@^7.17.12", "@babel/helper-plugin-utils@^7.8.0", "@babel/helper-plugin-utils@^7.8.3": +"@babel/helper-optimise-call-expression@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.18.6.tgz#9369aa943ee7da47edab2cb4e838acf09d290ffe" + integrity sha512-HP59oD9/fEHQkdcbgFCnbmgH5vIQTJbxh2yf+CdM89/glUNnuzr87Q8GIjGEnOktTROemO0Pe0iPAYbqZuOUiA== + dependencies: + "@babel/types" "^7.18.6" + +"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.13.0", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.16.7", "@babel/helper-plugin-utils@^7.18.6", "@babel/helper-plugin-utils@^7.18.9", "@babel/helper-plugin-utils@^7.8.0", "@babel/helper-plugin-utils@^7.8.3": version "7.18.9" resolved "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.18.9.tgz" integrity sha512-aBXPT3bmtLryXaoJLyYPXPlSD4p1ld9aYeR+sJNOZjJJGiOpb+fKfh3NkcCu7J54nUJwCERPBExCCpyCOHnu/w== -"@babel/helper-remap-async-to-generator@^7.16.8": - version "7.16.8" - resolved "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.16.8.tgz" - integrity sha512-fm0gH7Flb8H51LqJHy3HJ3wnE1+qtYR2A99K06ahwrawLdOFsCEWjZOrYricXJHoPSudNKxrMBUPEIPxiIIvBw== +"@babel/helper-remap-async-to-generator@^7.18.6", "@babel/helper-remap-async-to-generator@^7.18.9": + version "7.18.9" + resolved "https://registry.yarnpkg.com/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.18.9.tgz#997458a0e3357080e54e1d79ec347f8a8cd28519" + integrity sha512-dI7q50YKd8BAv3VEfgg7PS7yD3Rtbi2J1XMXaalXO0W0164hYLnh8zpjRS0mte9MfVp/tltvr/cfdXPvJr1opA== dependencies: - "@babel/helper-annotate-as-pure" "^7.16.7" - "@babel/helper-wrap-function" "^7.16.8" - "@babel/types" "^7.16.8" + "@babel/helper-annotate-as-pure" "^7.18.6" + "@babel/helper-environment-visitor" "^7.18.9" + "@babel/helper-wrap-function" "^7.18.9" + "@babel/types" "^7.18.9" "@babel/helper-replace-supers@^7.16.7": version "7.16.7" @@ -214,19 +267,30 @@ "@babel/traverse" "^7.16.7" "@babel/types" "^7.16.7" -"@babel/helper-simple-access@^7.17.7": - version "7.17.7" - resolved "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.17.7.tgz" - integrity sha512-txyMCGroZ96i+Pxr3Je3lzEJjqwaRC9buMUgtomcrLe5Nd0+fk1h0LLA+ixUF5OW7AhHuQ7Es1WcQJZmZsz2XA== +"@babel/helper-replace-supers@^7.18.6", "@babel/helper-replace-supers@^7.18.9": + version "7.18.9" + resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.18.9.tgz#1092e002feca980fbbb0bd4d51b74a65c6a500e6" + integrity sha512-dNsWibVI4lNT6HiuOIBr1oyxo40HvIVmbwPUm3XZ7wMh4k2WxrxTqZwSqw/eEmXDS9np0ey5M2bz9tBmO9c+YQ== dependencies: - "@babel/types" "^7.17.0" + "@babel/helper-environment-visitor" "^7.18.9" + "@babel/helper-member-expression-to-functions" "^7.18.9" + "@babel/helper-optimise-call-expression" "^7.18.6" + 
"@babel/traverse" "^7.18.9" + "@babel/types" "^7.18.9" -"@babel/helper-skip-transparent-expression-wrappers@^7.16.0": - version "7.16.0" - resolved "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.16.0.tgz" - integrity sha512-+il1gTy0oHwUsBQZyJvukbB4vPMdcYBrFHa0Uc4AizLxbq6BOYC51Rv4tWocX9BLBDLZ4kc6qUFpQ6HRgL+3zw== +"@babel/helper-simple-access@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.18.6.tgz#d6d8f51f4ac2978068df934b569f08f29788c7ea" + integrity sha512-iNpIgTgyAvDQpDj76POqg+YEt8fPxx3yaNBg3S30dxNKm2SWfYhD0TGrK/Eu9wHpUW63VQU894TsTg+GLbUa1g== dependencies: - "@babel/types" "^7.16.0" + "@babel/types" "^7.18.6" + +"@babel/helper-skip-transparent-expression-wrappers@^7.18.9": + version "7.18.9" + resolved "https://registry.yarnpkg.com/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.18.9.tgz#778d87b3a758d90b471e7b9918f34a9a02eb5818" + integrity sha512-imytd2gHi3cJPsybLRbmFrF7u5BIEuI2cNheyKi3/iOBC63kNn3q8Crn2xVuESli0aM4KYsyEqKyS7lFL8YVtw== + dependencies: + "@babel/types" "^7.18.9" "@babel/helper-split-export-declaration@^7.16.7": version "7.16.7" @@ -235,6 +299,13 @@ dependencies: "@babel/types" "^7.16.7" +"@babel/helper-split-export-declaration@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz#7367949bc75b20c6d5a5d4a97bba2824ae8ef075" + integrity sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA== + dependencies: + "@babel/types" "^7.18.6" + "@babel/helper-string-parser@^7.18.10": version "7.18.10" resolved "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.18.10.tgz" @@ -245,31 +316,31 @@ resolved "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.18.6.tgz" integrity sha512-MmetCkz9ej86nJQV+sFCxoGGrUbU3q02kgLciwkrt9QqEB7cP39oKEY0PakknEO0Gu20SskMRi+AYZ3b1TpN9g== -"@babel/helper-validator-option@^7.16.7": +"@babel/helper-validator-option@^7.16.7", "@babel/helper-validator-option@^7.18.6": version "7.18.6" resolved "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.18.6.tgz" integrity sha512-XO7gESt5ouv/LRJdrVjkShckw6STTaB7l9BrpBaAHDeF5YZT+01PCwmR0SJHnkW6i8OwW/EVWRShfi4j2x+KQw== -"@babel/helper-wrap-function@^7.16.8": - version "7.16.8" - resolved "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.16.8.tgz" - integrity sha512-8RpyRVIAW1RcDDGTA+GpPAwV22wXCfKOoM9bet6TLkGIFTkRQSkH1nMQ5Yet4MpoXe1ZwHPVtNasc2w0uZMqnw== +"@babel/helper-wrap-function@^7.18.9": + version "7.18.11" + resolved "https://registry.yarnpkg.com/@babel/helper-wrap-function/-/helper-wrap-function-7.18.11.tgz#bff23ace436e3f6aefb61f85ffae2291c80ed1fb" + integrity sha512-oBUlbv+rjZLh2Ks9SKi4aL7eKaAXBWleHzU89mP0G6BMUlRxSckk9tSIkgDGydhgFxHuGSlBQZfnaD47oBEB7w== dependencies: - "@babel/helper-function-name" "^7.16.7" - "@babel/template" "^7.16.7" - "@babel/traverse" "^7.16.8" - "@babel/types" "^7.16.8" + "@babel/helper-function-name" "^7.18.9" + "@babel/template" "^7.18.10" + "@babel/traverse" "^7.18.11" + "@babel/types" "^7.18.10" -"@babel/helpers@^7.18.0": - version "7.18.0" - resolved "https://registry.npmjs.org/@babel/helpers/-/helpers-7.18.0.tgz" - integrity 
sha512-AE+HMYhmlMIbho9nbvicHyxFwhrO+xhKB6AhRxzl8w46Yj0VXTZjEsAoBVC7rB2I0jzX+yWyVybnO08qkfx6kg== +"@babel/helpers@^7.18.9": + version "7.18.9" + resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.18.9.tgz#4bef3b893f253a1eced04516824ede94dcfe7ff9" + integrity sha512-Jf5a+rbrLoR4eNdUmnFu8cN5eNJT6qdTdOg5IHIzq87WwyRw9PwguLFOWYgktN/60IP4fgDUawJvs7PjQIzELQ== dependencies: - "@babel/template" "^7.16.7" - "@babel/traverse" "^7.18.0" - "@babel/types" "^7.18.0" + "@babel/template" "^7.18.6" + "@babel/traverse" "^7.18.9" + "@babel/types" "^7.18.9" -"@babel/highlight@^7.10.4", "@babel/highlight@^7.16.7": +"@babel/highlight@^7.10.4": version "7.16.10" resolved "https://registry.npmjs.org/@babel/highlight/-/highlight-7.16.10.tgz" integrity sha512-5FnTQLSLswEj6IkgVw5KusNUUFY9ZGqe/TRFnP/BKYHYgfh7tc+C7mwiy95/yNP7Dh9x580Vv8r7u7ZfTBFxdw== @@ -278,154 +349,164 @@ chalk "^2.0.0" js-tokens "^4.0.0" -"@babel/parser@^7.16.7", "@babel/parser@^7.18.0": - version "7.18.0" - resolved "https://registry.npmjs.org/@babel/parser/-/parser-7.18.0.tgz" - integrity sha512-AqDccGC+m5O/iUStSJy3DGRIUFu7WbY/CppZYwrEUB4N0tZlnI8CSTsgL7v5fHVFmUbRv2sd+yy27o8Ydt4MGg== +"@babel/highlight@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.18.6.tgz#81158601e93e2563795adcbfbdf5d64be3f2ecdf" + integrity sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g== + dependencies: + "@babel/helper-validator-identifier" "^7.18.6" + chalk "^2.0.0" + js-tokens "^4.0.0" -"@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@^7.17.12": - version "7.17.12" - resolved "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.17.12.tgz" - integrity sha512-xCJQXl4EeQ3J9C4yOmpTrtVGmzpm2iSzyxbkZHw7UCnZBftHpF/hpII80uWVyVrc40ytIClHjgWGTG1g/yB+aw== +"@babel/parser@^7.18.10", "@babel/parser@^7.18.13": + version "7.18.13" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.18.13.tgz#5b2dd21cae4a2c5145f1fbd8ca103f9313d3b7e4" + integrity sha512-dgXcIfMuQ0kgzLB2b9tRZs7TTFFaGM2AbtA4fJgUUYukzGH4jwsS7hzQHEGs67jdehpm22vkgKwvbU+aEflgwg== + +"@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.18.6.tgz#da5b8f9a580acdfbe53494dba45ea389fb09a4d2" + integrity sha512-Dgxsyg54Fx1d4Nge8UnvTrED63vrwOdPmyvPzlNN/boaliRP54pm3pGzZD1SJUwrBA+Cs/xdG8kXX6Mn/RfISQ== dependencies: - "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-plugin-utils" "^7.18.6" -"@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining@^7.17.12": - version "7.17.12" - resolved "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.17.12.tgz" - integrity sha512-/vt0hpIw0x4b6BLKUkwlvEoiGZYYLNZ96CzyHYPbtG2jZGz6LBe7/V+drYrc/d+ovrF9NBi0pmtvmNb/FsWtRQ== +"@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining@^7.18.9": + version "7.18.9" + resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.18.9.tgz#a11af19aa373d68d561f08e0a57242350ed0ec50" + integrity 
sha512-AHrP9jadvH7qlOj6PINbgSuphjQUAK7AOT7DPjBo9EHoLhQTnnK5u45e1Hd4DbSQEO9nqPWtQ89r+XEOWFScKg== dependencies: - "@babel/helper-plugin-utils" "^7.17.12" - "@babel/helper-skip-transparent-expression-wrappers" "^7.16.0" - "@babel/plugin-proposal-optional-chaining" "^7.17.12" + "@babel/helper-plugin-utils" "^7.18.9" + "@babel/helper-skip-transparent-expression-wrappers" "^7.18.9" + "@babel/plugin-proposal-optional-chaining" "^7.18.9" -"@babel/plugin-proposal-async-generator-functions@^7.17.12": - version "7.17.12" - resolved "https://registry.npmjs.org/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.17.12.tgz" - integrity sha512-RWVvqD1ooLKP6IqWTA5GyFVX2isGEgC5iFxKzfYOIy/QEFdxYyCybBDtIGjipHpb9bDWHzcqGqFakf+mVmBTdQ== +"@babel/plugin-proposal-async-generator-functions@^7.18.10": + version "7.18.10" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.18.10.tgz#85ea478c98b0095c3e4102bff3b67d306ed24952" + integrity sha512-1mFuY2TOsR1hxbjCo4QL+qlIjV07p4H4EUYw2J/WCqsvFV6V9X9z9YhXbWndc/4fw+hYGlDT7egYxliMp5O6Ew== dependencies: - "@babel/helper-plugin-utils" "^7.17.12" - "@babel/helper-remap-async-to-generator" "^7.16.8" + "@babel/helper-environment-visitor" "^7.18.9" + "@babel/helper-plugin-utils" "^7.18.9" + "@babel/helper-remap-async-to-generator" "^7.18.9" "@babel/plugin-syntax-async-generators" "^7.8.4" -"@babel/plugin-proposal-class-properties@^7.12.1", "@babel/plugin-proposal-class-properties@^7.17.12": - version "7.17.12" - resolved "https://registry.npmjs.org/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.17.12.tgz" - integrity sha512-U0mI9q8pW5Q9EaTHFPwSVusPMV/DV9Mm8p7csqROFLtIE9rBF5piLqyrBGigftALrBcsBGu4m38JneAe7ZDLXw== +"@babel/plugin-proposal-class-properties@^7.12.1", "@babel/plugin-proposal-class-properties@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.18.6.tgz#b110f59741895f7ec21a6fff696ec46265c446a3" + integrity sha512-cumfXOF0+nzZrrN8Rf0t7M+tF6sZc7vhQwYQck9q1/5w2OExlD+b4v4RpMJFaV1Z7WcDRgO6FqvxqxGlwo+RHQ== dependencies: - "@babel/helper-create-class-features-plugin" "^7.17.12" - "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-create-class-features-plugin" "^7.18.6" + "@babel/helper-plugin-utils" "^7.18.6" -"@babel/plugin-proposal-class-static-block@^7.18.0": - version "7.18.0" - resolved "https://registry.npmjs.org/@babel/plugin-proposal-class-static-block/-/plugin-proposal-class-static-block-7.18.0.tgz" - integrity sha512-t+8LsRMMDE74c6sV7KShIw13sqbqd58tlqNrsWoWBTIMw7SVQ0cZ905wLNS/FBCy/3PyooRHLFFlfrUNyyz5lA== +"@babel/plugin-proposal-class-static-block@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-class-static-block/-/plugin-proposal-class-static-block-7.18.6.tgz#8aa81d403ab72d3962fc06c26e222dacfc9b9020" + integrity sha512-+I3oIiNxrCpup3Gi8n5IGMwj0gOCAjcJUSQEcotNnCCPMEnixawOQ+KeJPlgfjzx+FKQ1QSyZOWe7wmoJp7vhw== dependencies: - "@babel/helper-create-class-features-plugin" "^7.18.0" - "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-create-class-features-plugin" "^7.18.6" + "@babel/helper-plugin-utils" "^7.18.6" "@babel/plugin-syntax-class-static-block" "^7.14.5" -"@babel/plugin-proposal-dynamic-import@^7.16.7": - version "7.16.7" - resolved "https://registry.npmjs.org/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.16.7.tgz" - integrity 
sha512-I8SW9Ho3/8DRSdmDdH3gORdyUuYnk1m4cMxUAdu5oy4n3OfN8flDEH+d60iG7dUfi0KkYwSvoalHzzdRzpWHTg== +"@babel/plugin-proposal-dynamic-import@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.18.6.tgz#72bcf8d408799f547d759298c3c27c7e7faa4d94" + integrity sha512-1auuwmK+Rz13SJj36R+jqFPMJWyKEDd7lLSdOj4oJK0UTgGueSAtkrCvz9ewmgyU/P941Rv2fQwZJN8s6QruXw== dependencies: - "@babel/helper-plugin-utils" "^7.16.7" + "@babel/helper-plugin-utils" "^7.18.6" "@babel/plugin-syntax-dynamic-import" "^7.8.3" -"@babel/plugin-proposal-export-namespace-from@^7.17.12": - version "7.17.12" - resolved "https://registry.npmjs.org/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.17.12.tgz" - integrity sha512-j7Ye5EWdwoXOpRmo5QmRyHPsDIe6+u70ZYZrd7uz+ebPYFKfRcLcNu3Ro0vOlJ5zuv8rU7xa+GttNiRzX56snQ== +"@babel/plugin-proposal-export-namespace-from@^7.18.9": + version "7.18.9" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.18.9.tgz#5f7313ab348cdb19d590145f9247540e94761203" + integrity sha512-k1NtHyOMvlDDFeb9G5PhUXuGj8m/wiwojgQVEhJ/fsVsMCpLyOP4h0uGEjYJKrRI+EVPlb5Jk+Gt9P97lOGwtA== dependencies: - "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-plugin-utils" "^7.18.9" "@babel/plugin-syntax-export-namespace-from" "^7.8.3" -"@babel/plugin-proposal-json-strings@^7.17.12": - version "7.17.12" - resolved "https://registry.npmjs.org/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.17.12.tgz" - integrity sha512-rKJ+rKBoXwLnIn7n6o6fulViHMrOThz99ybH+hKHcOZbnN14VuMnH9fo2eHE69C8pO4uX1Q7t2HYYIDmv8VYkg== +"@babel/plugin-proposal-json-strings@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.18.6.tgz#7e8788c1811c393aff762817e7dbf1ebd0c05f0b" + integrity sha512-lr1peyn9kOdbYc0xr0OdHTZ5FMqS6Di+H0Fz2I/JwMzGmzJETNeOFq2pBySw6X/KFL5EWDjlJuMsUGRFb8fQgQ== dependencies: - "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-plugin-utils" "^7.18.6" "@babel/plugin-syntax-json-strings" "^7.8.3" -"@babel/plugin-proposal-logical-assignment-operators@^7.17.12": - version "7.17.12" - resolved "https://registry.npmjs.org/@babel/plugin-proposal-logical-assignment-operators/-/plugin-proposal-logical-assignment-operators-7.17.12.tgz" - integrity sha512-EqFo2s1Z5yy+JeJu7SFfbIUtToJTVlC61/C7WLKDntSw4Sz6JNAIfL7zQ74VvirxpjB5kz/kIx0gCcb+5OEo2Q== +"@babel/plugin-proposal-logical-assignment-operators@^7.18.9": + version "7.18.9" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-logical-assignment-operators/-/plugin-proposal-logical-assignment-operators-7.18.9.tgz#8148cbb350483bf6220af06fa6db3690e14b2e23" + integrity sha512-128YbMpjCrP35IOExw2Fq+x55LMP42DzhOhX2aNNIdI9avSWl2PI0yuBWarr3RYpZBSPtabfadkH2yeRiMD61Q== dependencies: - "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-plugin-utils" "^7.18.9" "@babel/plugin-syntax-logical-assignment-operators" "^7.10.4" -"@babel/plugin-proposal-nullish-coalescing-operator@^7.17.12": - version "7.17.12" - resolved "https://registry.npmjs.org/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.17.12.tgz" - integrity sha512-ws/g3FSGVzv+VH86+QvgtuJL/kR67xaEIF2x0iPqdDfYW6ra6JF3lKVBkWynRLcNtIC1oCTfDRVxmm2mKzy+ag== +"@babel/plugin-proposal-nullish-coalescing-operator@^7.18.6": + version "7.18.6" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.18.6.tgz#fdd940a99a740e577d6c753ab6fbb43fdb9467e1" + integrity sha512-wQxQzxYeJqHcfppzBDnm1yAY0jSRkUXR2z8RePZYrKwMKgMlE8+Z6LUno+bd6LvbGh8Gltvy74+9pIYkr+XkKA== dependencies: - "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-plugin-utils" "^7.18.6" "@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.3" -"@babel/plugin-proposal-numeric-separator@^7.16.7": - version "7.16.7" - resolved "https://registry.npmjs.org/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.16.7.tgz" - integrity sha512-vQgPMknOIgiuVqbokToyXbkY/OmmjAzr/0lhSIbG/KmnzXPGwW/AdhdKpi+O4X/VkWiWjnkKOBiqJrTaC98VKw== +"@babel/plugin-proposal-numeric-separator@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.18.6.tgz#899b14fbafe87f053d2c5ff05b36029c62e13c75" + integrity sha512-ozlZFogPqoLm8WBr5Z8UckIoE4YQ5KESVcNudyXOR8uqIkliTEgJ3RoketfG6pmzLdeZF0H/wjE9/cCEitBl7Q== dependencies: - "@babel/helper-plugin-utils" "^7.16.7" + "@babel/helper-plugin-utils" "^7.18.6" "@babel/plugin-syntax-numeric-separator" "^7.10.4" -"@babel/plugin-proposal-object-rest-spread@^7.18.0": - version "7.18.0" - resolved "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.18.0.tgz" - integrity sha512-nbTv371eTrFabDfHLElkn9oyf9VG+VKK6WMzhY2o4eHKaG19BToD9947zzGMO6I/Irstx9d8CwX6njPNIAR/yw== +"@babel/plugin-proposal-object-rest-spread@^7.18.9": + version "7.18.9" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.18.9.tgz#f9434f6beb2c8cae9dfcf97d2a5941bbbf9ad4e7" + integrity sha512-kDDHQ5rflIeY5xl69CEqGEZ0KY369ehsCIEbTGb4siHG5BE9sga/T0r0OUwyZNLMmZE79E1kbsqAjwFCW4ds6Q== dependencies: - "@babel/compat-data" "^7.17.10" - "@babel/helper-compilation-targets" "^7.17.10" - "@babel/helper-plugin-utils" "^7.17.12" + "@babel/compat-data" "^7.18.8" + "@babel/helper-compilation-targets" "^7.18.9" + "@babel/helper-plugin-utils" "^7.18.9" "@babel/plugin-syntax-object-rest-spread" "^7.8.3" - "@babel/plugin-transform-parameters" "^7.17.12" + "@babel/plugin-transform-parameters" "^7.18.8" -"@babel/plugin-proposal-optional-catch-binding@^7.16.7": - version "7.16.7" - resolved "https://registry.npmjs.org/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.16.7.tgz" - integrity sha512-eMOH/L4OvWSZAE1VkHbr1vckLG1WUcHGJSLqqQwl2GaUqG6QjddvrOaTUMNYiv77H5IKPMZ9U9P7EaHwvAShfA== +"@babel/plugin-proposal-optional-catch-binding@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.18.6.tgz#f9400d0e6a3ea93ba9ef70b09e72dd6da638a2cb" + integrity sha512-Q40HEhs9DJQyaZfUjjn6vE8Cv4GmMHCYuMGIWUnlxH6400VGxOuwWsPt4FxXxJkC/5eOzgn0z21M9gMT4MOhbw== dependencies: - "@babel/helper-plugin-utils" "^7.16.7" + "@babel/helper-plugin-utils" "^7.18.6" "@babel/plugin-syntax-optional-catch-binding" "^7.8.3" -"@babel/plugin-proposal-optional-chaining@^7.17.12": - version "7.17.12" - resolved "https://registry.npmjs.org/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.17.12.tgz" - integrity sha512-7wigcOs/Z4YWlK7xxjkvaIw84vGhDv/P1dFGQap0nHkc8gFKY/r+hXc8Qzf5k1gY7CvGIcHqAnOagVKJJ1wVOQ== +"@babel/plugin-proposal-optional-chaining@^7.18.9": + version "7.18.9" + 
resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.18.9.tgz#e8e8fe0723f2563960e4bf5e9690933691915993" + integrity sha512-v5nwt4IqBXihxGsW2QmCWMDS3B3bzGIk/EQVZz2ei7f3NJl8NzAJVvUmpDW5q1CRNY+Beb/k58UAH1Km1N411w== dependencies: - "@babel/helper-plugin-utils" "^7.17.12" - "@babel/helper-skip-transparent-expression-wrappers" "^7.16.0" + "@babel/helper-plugin-utils" "^7.18.9" + "@babel/helper-skip-transparent-expression-wrappers" "^7.18.9" "@babel/plugin-syntax-optional-chaining" "^7.8.3" -"@babel/plugin-proposal-private-methods@^7.17.12": - version "7.17.12" - resolved "https://registry.npmjs.org/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.17.12.tgz" - integrity sha512-SllXoxo19HmxhDWm3luPz+cPhtoTSKLJE9PXshsfrOzBqs60QP0r8OaJItrPhAj0d7mZMnNF0Y1UUggCDgMz1A== +"@babel/plugin-proposal-private-methods@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.18.6.tgz#5209de7d213457548a98436fa2882f52f4be6bea" + integrity sha512-nutsvktDItsNn4rpGItSNV2sz1XwS+nfU0Rg8aCx3W3NOKVzdMjJRu0O5OkgDp3ZGICSTbgRpxZoWsxoKRvbeA== dependencies: - "@babel/helper-create-class-features-plugin" "^7.17.12" - "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-create-class-features-plugin" "^7.18.6" + "@babel/helper-plugin-utils" "^7.18.6" -"@babel/plugin-proposal-private-property-in-object@^7.17.12": - version "7.17.12" - resolved "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.17.12.tgz" - integrity sha512-/6BtVi57CJfrtDNKfK5b66ydK2J5pXUKBKSPD2G1whamMuEnZWgoOIfO8Vf9F/DoD4izBLD/Au4NMQfruzzykg== +"@babel/plugin-proposal-private-property-in-object@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.18.6.tgz#a64137b232f0aca3733a67eb1a144c192389c503" + integrity sha512-9Rysx7FOctvT5ouj5JODjAFAkgGoudQuLPamZb0v1TGLpapdNaftzifU8NTWQm0IRjqoYypdrSmyWgkocDQ8Dw== dependencies: - "@babel/helper-annotate-as-pure" "^7.16.7" - "@babel/helper-create-class-features-plugin" "^7.17.12" - "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-annotate-as-pure" "^7.18.6" + "@babel/helper-create-class-features-plugin" "^7.18.6" + "@babel/helper-plugin-utils" "^7.18.6" "@babel/plugin-syntax-private-property-in-object" "^7.14.5" -"@babel/plugin-proposal-unicode-property-regex@^7.17.12", "@babel/plugin-proposal-unicode-property-regex@^7.4.4": - version "7.17.12" - resolved "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.17.12.tgz" - integrity sha512-Wb9qLjXf3ZazqXA7IvI7ozqRIXIGPtSo+L5coFmEkhTQK18ao4UDDD0zdTGAarmbLj2urpRwrc6893cu5Bfh0A== +"@babel/plugin-proposal-unicode-property-regex@^7.18.6", "@babel/plugin-proposal-unicode-property-regex@^7.4.4": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.18.6.tgz#af613d2cd5e643643b65cded64207b15c85cb78e" + integrity sha512-2BShG/d5yoZyXZfVePH91urL5wTG6ASZU9M4o03lKK8u8UW1y08OMttBSOADTcJrnPMpvDXRG3G8fyLh4ovs8w== dependencies: - "@babel/helper-create-regexp-features-plugin" "^7.17.12" - "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-create-regexp-features-plugin" "^7.18.6" + "@babel/helper-plugin-utils" "^7.18.6" 
"@babel/plugin-syntax-async-generators@^7.8.4": version "7.8.4" @@ -462,12 +543,12 @@ dependencies: "@babel/helper-plugin-utils" "^7.8.3" -"@babel/plugin-syntax-import-assertions@^7.17.12": - version "7.17.12" - resolved "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.17.12.tgz" - integrity sha512-n/loy2zkq9ZEM8tEOwON9wTQSTNDTDEz6NujPtJGLU7qObzT1N4c4YZZf8E6ATB2AjNQg/Ib2AIpO03EZaCehw== +"@babel/plugin-syntax-import-assertions@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.18.6.tgz#cd6190500a4fa2fe31990a963ffab4b63e4505e4" + integrity sha512-/DU3RXad9+bZwrgWJQKbr39gYbJpLJHezqEzRzi/BHRlJ9zsQb4CK2CA/5apllXNomwA1qHwzvHl+AdEmC5krQ== dependencies: - "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-plugin-utils" "^7.18.6" "@babel/plugin-syntax-json-strings@^7.8.3": version "7.8.3" @@ -539,206 +620,206 @@ dependencies: "@babel/helper-plugin-utils" "^7.16.7" -"@babel/plugin-transform-arrow-functions@^7.17.12": - version "7.17.12" - resolved "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.17.12.tgz" - integrity sha512-PHln3CNi/49V+mza4xMwrg+WGYevSF1oaiXaC2EQfdp4HWlSjRsrDXWJiQBKpP7749u6vQ9mcry2uuFOv5CXvA== +"@babel/plugin-transform-arrow-functions@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.18.6.tgz#19063fcf8771ec7b31d742339dac62433d0611fe" + integrity sha512-9S9X9RUefzrsHZmKMbDXxweEH+YlE8JJEuat9FdvW9Qh1cw7W64jELCtWNkPBPX5En45uy28KGvA/AySqUh8CQ== dependencies: - "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-plugin-utils" "^7.18.6" -"@babel/plugin-transform-async-to-generator@^7.17.12": - version "7.17.12" - resolved "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.17.12.tgz" - integrity sha512-J8dbrWIOO3orDzir57NRsjg4uxucvhby0L/KZuGsWDj0g7twWK3g7JhJhOrXtuXiw8MeiSdJ3E0OW9H8LYEzLQ== +"@babel/plugin-transform-async-to-generator@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.18.6.tgz#ccda3d1ab9d5ced5265fdb13f1882d5476c71615" + integrity sha512-ARE5wZLKnTgPW7/1ftQmSi1CmkqqHo2DNmtztFhvgtOWSDfq0Cq9/9L+KnZNYSNrydBekhW3rwShduf59RoXag== dependencies: - "@babel/helper-module-imports" "^7.16.7" - "@babel/helper-plugin-utils" "^7.17.12" - "@babel/helper-remap-async-to-generator" "^7.16.8" + "@babel/helper-module-imports" "^7.18.6" + "@babel/helper-plugin-utils" "^7.18.6" + "@babel/helper-remap-async-to-generator" "^7.18.6" -"@babel/plugin-transform-block-scoped-functions@^7.16.7": - version "7.16.7" - resolved "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.16.7.tgz" - integrity sha512-JUuzlzmF40Z9cXyytcbZEZKckgrQzChbQJw/5PuEHYeqzCsvebDx0K0jWnIIVcmmDOAVctCgnYs0pMcrYj2zJg== +"@babel/plugin-transform-block-scoped-functions@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.18.6.tgz#9187bf4ba302635b9d70d986ad70f038726216a8" + integrity sha512-ExUcOqpPWnliRcPqves5HJcJOvHvIIWfuS4sroBUenPuMdmW+SMHDakmtS7qOo13sVppmUijqeTv7qqGsvURpQ== dependencies: - "@babel/helper-plugin-utils" "^7.16.7" + "@babel/helper-plugin-utils" "^7.18.6" 
-"@babel/plugin-transform-block-scoping@^7.17.12": - version "7.17.12" - resolved "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.17.12.tgz" - integrity sha512-jw8XW/B1i7Lqwqj2CbrViPcZijSxfguBWZP2aN59NHgxUyO/OcO1mfdCxH13QhN5LbWhPkX+f+brKGhZTiqtZQ== +"@babel/plugin-transform-block-scoping@^7.18.9": + version "7.18.9" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.18.9.tgz#f9b7e018ac3f373c81452d6ada8bd5a18928926d" + integrity sha512-5sDIJRV1KtQVEbt/EIBwGy4T01uYIo4KRB3VUqzkhrAIOGx7AoctL9+Ux88btY0zXdDyPJ9mW+bg+v+XEkGmtw== dependencies: - "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-plugin-utils" "^7.18.9" -"@babel/plugin-transform-classes@^7.17.12": - version "7.17.12" - resolved "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.17.12.tgz" - integrity sha512-cvO7lc7pZat6BsvH6l/EGaI8zpl8paICaoGk+7x7guvtfak/TbIf66nYmJOH13EuG0H+Xx3M+9LQDtSvZFKXKw== - dependencies: - "@babel/helper-annotate-as-pure" "^7.16.7" - "@babel/helper-environment-visitor" "^7.16.7" - "@babel/helper-function-name" "^7.17.9" - "@babel/helper-optimise-call-expression" "^7.16.7" - "@babel/helper-plugin-utils" "^7.17.12" - "@babel/helper-replace-supers" "^7.16.7" - "@babel/helper-split-export-declaration" "^7.16.7" +"@babel/plugin-transform-classes@^7.18.9": + version "7.18.9" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-classes/-/plugin-transform-classes-7.18.9.tgz#90818efc5b9746879b869d5ce83eb2aa48bbc3da" + integrity sha512-EkRQxsxoytpTlKJmSPYrsOMjCILacAjtSVkd4gChEe2kXjFCun3yohhW5I7plXJhCemM0gKsaGMcO8tinvCA5g== + dependencies: + "@babel/helper-annotate-as-pure" "^7.18.6" + "@babel/helper-environment-visitor" "^7.18.9" + "@babel/helper-function-name" "^7.18.9" + "@babel/helper-optimise-call-expression" "^7.18.6" + "@babel/helper-plugin-utils" "^7.18.9" + "@babel/helper-replace-supers" "^7.18.9" + "@babel/helper-split-export-declaration" "^7.18.6" globals "^11.1.0" -"@babel/plugin-transform-computed-properties@^7.17.12": - version "7.17.12" - resolved "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.17.12.tgz" - integrity sha512-a7XINeplB5cQUWMg1E/GI1tFz3LfK021IjV1rj1ypE+R7jHm+pIHmHl25VNkZxtx9uuYp7ThGk8fur1HHG7PgQ== +"@babel/plugin-transform-computed-properties@^7.18.9": + version "7.18.9" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.18.9.tgz#2357a8224d402dad623caf6259b611e56aec746e" + integrity sha512-+i0ZU1bCDymKakLxn5srGHrsAPRELC2WIbzwjLhHW9SIE1cPYkLCL0NlnXMZaM1vhfgA2+M7hySk42VBvrkBRw== dependencies: - "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-plugin-utils" "^7.18.9" -"@babel/plugin-transform-destructuring@^7.18.0": - version "7.18.0" - resolved "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.18.0.tgz" - integrity sha512-Mo69klS79z6KEfrLg/1WkmVnB8javh75HX4pi2btjvlIoasuxilEyjtsQW6XPrubNd7AQy0MMaNIaQE4e7+PQw== +"@babel/plugin-transform-destructuring@^7.18.9": + version "7.18.9" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.18.9.tgz#68906549c021cb231bee1db21d3b5b095f8ee292" + integrity sha512-p5VCYNddPLkZTq4XymQIaIfZNJwT9YsjkPOhkVEqt6QIpQFZVM9IltqqYpOEkJoN1DPznmxUDyZ5CTZs/ZCuHA== dependencies: - "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-plugin-utils" 
"^7.18.9" -"@babel/plugin-transform-dotall-regex@^7.16.7", "@babel/plugin-transform-dotall-regex@^7.4.4": - version "7.16.7" - resolved "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.16.7.tgz" - integrity sha512-Lyttaao2SjZF6Pf4vk1dVKv8YypMpomAbygW+mU5cYP3S5cWTfCJjG8xV6CFdzGFlfWK81IjL9viiTvpb6G7gQ== +"@babel/plugin-transform-dotall-regex@^7.18.6", "@babel/plugin-transform-dotall-regex@^7.4.4": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.18.6.tgz#b286b3e7aae6c7b861e45bed0a2fafd6b1a4fef8" + integrity sha512-6S3jpun1eEbAxq7TdjLotAsl4WpQI9DxfkycRcKrjhQYzU87qpXdknpBg/e+TdcMehqGnLFi7tnFUBR02Vq6wg== dependencies: - "@babel/helper-create-regexp-features-plugin" "^7.16.7" - "@babel/helper-plugin-utils" "^7.16.7" + "@babel/helper-create-regexp-features-plugin" "^7.18.6" + "@babel/helper-plugin-utils" "^7.18.6" -"@babel/plugin-transform-duplicate-keys@^7.17.12": - version "7.17.12" - resolved "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.17.12.tgz" - integrity sha512-EA5eYFUG6xeerdabina/xIoB95jJ17mAkR8ivx6ZSu9frKShBjpOGZPn511MTDTkiCO+zXnzNczvUM69YSf3Zw== +"@babel/plugin-transform-duplicate-keys@^7.18.9": + version "7.18.9" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.18.9.tgz#687f15ee3cdad6d85191eb2a372c4528eaa0ae0e" + integrity sha512-d2bmXCtZXYc59/0SanQKbiWINadaJXqtvIQIzd4+hNwkWBgyCd5F/2t1kXoUdvPMrxzPvhK6EMQRROxsue+mfw== dependencies: - "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-plugin-utils" "^7.18.9" -"@babel/plugin-transform-exponentiation-operator@^7.16.7": - version "7.16.7" - resolved "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.16.7.tgz" - integrity sha512-8UYLSlyLgRixQvlYH3J2ekXFHDFLQutdy7FfFAMm3CPZ6q9wHCwnUyiXpQCe3gVVnQlHc5nsuiEVziteRNTXEA== +"@babel/plugin-transform-exponentiation-operator@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.18.6.tgz#421c705f4521888c65e91fdd1af951bfefd4dacd" + integrity sha512-wzEtc0+2c88FVR34aQmiz56dxEkxr2g8DQb/KfaFa1JYXOFVsbhvAonFN6PwVWj++fKmku8NP80plJ5Et4wqHw== dependencies: - "@babel/helper-builder-binary-assignment-operator-visitor" "^7.16.7" - "@babel/helper-plugin-utils" "^7.16.7" + "@babel/helper-builder-binary-assignment-operator-visitor" "^7.18.6" + "@babel/helper-plugin-utils" "^7.18.6" -"@babel/plugin-transform-for-of@^7.17.12": - version "7.18.1" - resolved "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.18.1.tgz" - integrity sha512-+TTB5XwvJ5hZbO8xvl2H4XaMDOAK57zF4miuC9qQJgysPNEAZZ9Z69rdF5LJkozGdZrjBIUAIyKUWRMmebI7vg== +"@babel/plugin-transform-for-of@^7.18.8": + version "7.18.8" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.18.8.tgz#6ef8a50b244eb6a0bdbad0c7c61877e4e30097c1" + integrity sha512-yEfTRnjuskWYo0k1mHUqrVWaZwrdq8AYbfrpqULOJOaucGSp4mNMVps+YtA8byoevxS/urwU75vyhQIxcCgiBQ== dependencies: - "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-plugin-utils" "^7.18.6" -"@babel/plugin-transform-function-name@^7.16.7": - version "7.16.7" - resolved "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.16.7.tgz" - integrity 
sha512-SU/C68YVwTRxqWj5kgsbKINakGag0KTgq9f2iZEXdStoAbOzLHEBRYzImmA6yFo8YZhJVflvXmIHUO7GWHmxxA== +"@babel/plugin-transform-function-name@^7.18.9": + version "7.18.9" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.18.9.tgz#cc354f8234e62968946c61a46d6365440fc764e0" + integrity sha512-WvIBoRPaJQ5yVHzcnJFor7oS5Ls0PYixlTYE63lCj2RtdQEl15M68FXQlxnG6wdraJIXRdR7KI+hQ7q/9QjrCQ== dependencies: - "@babel/helper-compilation-targets" "^7.16.7" - "@babel/helper-function-name" "^7.16.7" - "@babel/helper-plugin-utils" "^7.16.7" + "@babel/helper-compilation-targets" "^7.18.9" + "@babel/helper-function-name" "^7.18.9" + "@babel/helper-plugin-utils" "^7.18.9" -"@babel/plugin-transform-literals@^7.17.12": - version "7.17.12" - resolved "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.17.12.tgz" - integrity sha512-8iRkvaTjJciWycPIZ9k9duu663FT7VrBdNqNgxnVXEFwOIp55JWcZd23VBRySYbnS3PwQ3rGiabJBBBGj5APmQ== +"@babel/plugin-transform-literals@^7.18.9": + version "7.18.9" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-literals/-/plugin-transform-literals-7.18.9.tgz#72796fdbef80e56fba3c6a699d54f0de557444bc" + integrity sha512-IFQDSRoTPnrAIrI5zoZv73IFeZu2dhu6irxQjY9rNjTT53VmKg9fenjvoiOWOkJ6mm4jKVPtdMzBY98Fp4Z4cg== dependencies: - "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-plugin-utils" "^7.18.9" -"@babel/plugin-transform-member-expression-literals@^7.16.7": - version "7.16.7" - resolved "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.16.7.tgz" - integrity sha512-mBruRMbktKQwbxaJof32LT9KLy2f3gH+27a5XSuXo6h7R3vqltl0PgZ80C8ZMKw98Bf8bqt6BEVi3svOh2PzMw== +"@babel/plugin-transform-member-expression-literals@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.18.6.tgz#ac9fdc1a118620ac49b7e7a5d2dc177a1bfee88e" + integrity sha512-qSF1ihLGO3q+/g48k85tUjD033C29TNTVB2paCwZPVmOsjn9pClvYYrM2VeJpBY2bcNkuny0YUyTNRyRxJ54KA== dependencies: - "@babel/helper-plugin-utils" "^7.16.7" + "@babel/helper-plugin-utils" "^7.18.6" -"@babel/plugin-transform-modules-amd@^7.18.0": - version "7.18.0" - resolved "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.18.0.tgz" - integrity sha512-h8FjOlYmdZwl7Xm2Ug4iX2j7Qy63NANI+NQVWQzv6r25fqgg7k2dZl03p95kvqNclglHs4FZ+isv4p1uXMA+QA== +"@babel/plugin-transform-modules-amd@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.18.6.tgz#8c91f8c5115d2202f277549848874027d7172d21" + integrity sha512-Pra5aXsmTsOnjM3IajS8rTaLCy++nGM4v3YR4esk5PCsyg9z8NA5oQLwxzMUtDBd8F+UmVza3VxoAaWCbzH1rg== dependencies: - "@babel/helper-module-transforms" "^7.18.0" - "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-module-transforms" "^7.18.6" + "@babel/helper-plugin-utils" "^7.18.6" babel-plugin-dynamic-import-node "^2.3.3" -"@babel/plugin-transform-modules-commonjs@^7.18.0": - version "7.18.0" - resolved "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.18.0.tgz" - integrity sha512-cCeR0VZWtfxWS4YueAK2qtHtBPJRSaJcMlbS8jhSIm/A3E2Kpro4W1Dn4cqJtp59dtWfXjQwK7SPKF8ghs7rlw== +"@babel/plugin-transform-modules-commonjs@^7.18.6": + version "7.18.6" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.18.6.tgz#afd243afba166cca69892e24a8fd8c9f2ca87883" + integrity sha512-Qfv2ZOWikpvmedXQJDSbxNqy7Xr/j2Y8/KfijM0iJyKkBTmWuvCA1yeH1yDM7NJhBW/2aXxeucLj6i80/LAJ/Q== dependencies: - "@babel/helper-module-transforms" "^7.18.0" - "@babel/helper-plugin-utils" "^7.17.12" - "@babel/helper-simple-access" "^7.17.7" + "@babel/helper-module-transforms" "^7.18.6" + "@babel/helper-plugin-utils" "^7.18.6" + "@babel/helper-simple-access" "^7.18.6" babel-plugin-dynamic-import-node "^2.3.3" -"@babel/plugin-transform-modules-systemjs@^7.18.0": - version "7.18.0" - resolved "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.18.0.tgz" - integrity sha512-vwKpxdHnlM5tIrRt/eA0bzfbi7gUBLN08vLu38np1nZevlPySRe6yvuATJB5F/WPJ+ur4OXwpVYq9+BsxqAQuQ== +"@babel/plugin-transform-modules-systemjs@^7.18.9": + version "7.18.9" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.18.9.tgz#545df284a7ac6a05125e3e405e536c5853099a06" + integrity sha512-zY/VSIbbqtoRoJKo2cDTewL364jSlZGvn0LKOf9ntbfxOvjfmyrdtEEOAdswOswhZEb8UH3jDkCKHd1sPgsS0A== dependencies: - "@babel/helper-hoist-variables" "^7.16.7" - "@babel/helper-module-transforms" "^7.18.0" - "@babel/helper-plugin-utils" "^7.17.12" - "@babel/helper-validator-identifier" "^7.16.7" + "@babel/helper-hoist-variables" "^7.18.6" + "@babel/helper-module-transforms" "^7.18.9" + "@babel/helper-plugin-utils" "^7.18.9" + "@babel/helper-validator-identifier" "^7.18.6" babel-plugin-dynamic-import-node "^2.3.3" -"@babel/plugin-transform-modules-umd@^7.18.0": - version "7.18.0" - resolved "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.18.0.tgz" - integrity sha512-d/zZ8I3BWli1tmROLxXLc9A6YXvGK8egMxHp+E/rRwMh1Kip0AP77VwZae3snEJ33iiWwvNv2+UIIhfalqhzZA== +"@babel/plugin-transform-modules-umd@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.18.6.tgz#81d3832d6034b75b54e62821ba58f28ed0aab4b9" + integrity sha512-dcegErExVeXcRqNtkRU/z8WlBLnvD4MRnHgNs3MytRO1Mn1sHRyhbcpYbVMGclAqOjdW+9cfkdZno9dFdfKLfQ== dependencies: - "@babel/helper-module-transforms" "^7.18.0" - "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-module-transforms" "^7.18.6" + "@babel/helper-plugin-utils" "^7.18.6" -"@babel/plugin-transform-named-capturing-groups-regex@^7.17.12": - version "7.17.12" - resolved "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.17.12.tgz" - integrity sha512-vWoWFM5CKaTeHrdUJ/3SIOTRV+MBVGybOC9mhJkaprGNt5demMymDW24yC74avb915/mIRe3TgNb/d8idvnCRA== +"@babel/plugin-transform-named-capturing-groups-regex@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.18.6.tgz#c89bfbc7cc6805d692f3a49bc5fc1b630007246d" + integrity sha512-UmEOGF8XgaIqD74bC8g7iV3RYj8lMf0Bw7NJzvnS9qQhM4mg+1WHKotUIdjxgD2RGrgFLZZPCFPFj3P/kVDYhg== dependencies: - "@babel/helper-create-regexp-features-plugin" "^7.17.12" - "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-create-regexp-features-plugin" "^7.18.6" + "@babel/helper-plugin-utils" "^7.18.6" -"@babel/plugin-transform-new-target@^7.17.12": - version "7.17.12" - resolved 
"https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.17.12.tgz" - integrity sha512-CaOtzk2fDYisbjAD4Sd1MTKGVIpRtx9bWLyj24Y/k6p4s4gQ3CqDGJauFJxt8M/LEx003d0i3klVqnN73qvK3w== +"@babel/plugin-transform-new-target@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.18.6.tgz#d128f376ae200477f37c4ddfcc722a8a1b3246a8" + integrity sha512-DjwFA/9Iu3Z+vrAn+8pBUGcjhxKguSMlsFqeCKbhb9BAV756v0krzVK04CRDi/4aqmk8BsHb4a/gFcaA5joXRw== dependencies: - "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-plugin-utils" "^7.18.6" -"@babel/plugin-transform-object-super@^7.16.7": - version "7.16.7" - resolved "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.16.7.tgz" - integrity sha512-14J1feiQVWaGvRxj2WjyMuXS2jsBkgB3MdSN5HuC2G5nRspa5RK9COcs82Pwy5BuGcjb+fYaUj94mYcOj7rCvw== +"@babel/plugin-transform-object-super@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.18.6.tgz#fb3c6ccdd15939b6ff7939944b51971ddc35912c" + integrity sha512-uvGz6zk+pZoS1aTZrOvrbj6Pp/kK2mp45t2B+bTDre2UgsZZ8EZLSJtUg7m/no0zOJUWgFONpB7Zv9W2tSaFlA== dependencies: - "@babel/helper-plugin-utils" "^7.16.7" - "@babel/helper-replace-supers" "^7.16.7" + "@babel/helper-plugin-utils" "^7.18.6" + "@babel/helper-replace-supers" "^7.18.6" -"@babel/plugin-transform-parameters@^7.17.12": - version "7.17.12" - resolved "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.17.12.tgz" - integrity sha512-6qW4rWo1cyCdq1FkYri7AHpauchbGLXpdwnYsfxFb+KtddHENfsY5JZb35xUwkK5opOLcJ3BNd2l7PhRYGlwIA== +"@babel/plugin-transform-parameters@^7.18.8": + version "7.18.8" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.18.8.tgz#ee9f1a0ce6d78af58d0956a9378ea3427cccb48a" + integrity sha512-ivfbE3X2Ss+Fj8nnXvKJS6sjRG4gzwPMsP+taZC+ZzEGjAYlvENixmt1sZ5Ca6tWls+BlKSGKPJ6OOXvXCbkFg== dependencies: - "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-plugin-utils" "^7.18.6" -"@babel/plugin-transform-property-literals@^7.16.7": - version "7.16.7" - resolved "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.16.7.tgz" - integrity sha512-z4FGr9NMGdoIl1RqavCqGG+ZuYjfZ/hkCIeuH6Do7tXmSm0ls11nYVSJqFEUOSJbDab5wC6lRE/w6YjVcr6Hqw== +"@babel/plugin-transform-property-literals@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.18.6.tgz#e22498903a483448e94e032e9bbb9c5ccbfc93a3" + integrity sha512-cYcs6qlgafTud3PAzrrRNbQtfpQ8+y/+M5tKmksS9+M1ckbH6kzY8MrexEM9mcA6JDsukE19iIRvAyYl463sMg== dependencies: - "@babel/helper-plugin-utils" "^7.16.7" + "@babel/helper-plugin-utils" "^7.18.6" -"@babel/plugin-transform-regenerator@^7.18.0": - version "7.18.0" - resolved "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.18.0.tgz" - integrity sha512-C8YdRw9uzx25HSIzwA7EM7YP0FhCe5wNvJbZzjVNHHPGVcDJ3Aie+qGYYdS1oVQgn+B3eAIJbWFLrJ4Jipv7nw== +"@babel/plugin-transform-regenerator@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.18.6.tgz#585c66cb84d4b4bf72519a34cfce761b8676ca73" + integrity 
sha512-poqRI2+qiSdeldcz4wTSTXBRryoq3Gc70ye7m7UD5Ww0nE29IXqMl6r7Nd15WBgRd74vloEMlShtH6CKxVzfmQ== dependencies: - "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-plugin-utils" "^7.18.6" regenerator-transform "^0.15.0" -"@babel/plugin-transform-reserved-words@^7.17.12": - version "7.17.12" - resolved "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.17.12.tgz" - integrity sha512-1KYqwbJV3Co03NIi14uEHW8P50Md6KqFgt0FfpHdK6oyAHQVTosgPuPSiWud1HX0oYJ1hGRRlk0fP87jFpqXZA== +"@babel/plugin-transform-reserved-words@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.18.6.tgz#b1abd8ebf8edaa5f7fe6bbb8d2133d23b6a6f76a" + integrity sha512-oX/4MyMoypzHjFrT1CdivfKZ+XvIPMFXwwxHp/r0Ddy2Vuomt4HDFGmft1TAY2yiTKiNSsh3kjBAzcM8kSdsjA== dependencies: - "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-plugin-utils" "^7.18.6" "@babel/plugin-transform-runtime@^7.12.10": version "7.17.0" @@ -752,41 +833,41 @@ babel-plugin-polyfill-regenerator "^0.3.0" semver "^6.3.0" -"@babel/plugin-transform-shorthand-properties@^7.16.7": - version "7.16.7" - resolved "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.16.7.tgz" - integrity sha512-hah2+FEnoRoATdIb05IOXf+4GzXYTq75TVhIn1PewihbpyrNWUt2JbudKQOETWw6QpLe+AIUpJ5MVLYTQbeeUg== +"@babel/plugin-transform-shorthand-properties@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.18.6.tgz#6d6df7983d67b195289be24909e3f12a8f664dc9" + integrity sha512-eCLXXJqv8okzg86ywZJbRn19YJHU4XUa55oz2wbHhaQVn/MM+XhukiT7SYqp/7o00dg52Rj51Ny+Ecw4oyoygw== dependencies: - "@babel/helper-plugin-utils" "^7.16.7" + "@babel/helper-plugin-utils" "^7.18.6" -"@babel/plugin-transform-spread@^7.17.12": - version "7.17.12" - resolved "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.17.12.tgz" - integrity sha512-9pgmuQAtFi3lpNUstvG9nGfk9DkrdmWNp9KeKPFmuZCpEnxRzYlS8JgwPjYj+1AWDOSvoGN0H30p1cBOmT/Svg== +"@babel/plugin-transform-spread@^7.18.9": + version "7.18.9" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-spread/-/plugin-transform-spread-7.18.9.tgz#6ea7a6297740f381c540ac56caf75b05b74fb664" + integrity sha512-39Q814wyoOPtIB/qGopNIL9xDChOE1pNU0ZY5dO0owhiVt/5kFm4li+/bBtwc7QotG0u5EPzqhZdjMtmqBqyQA== dependencies: - "@babel/helper-plugin-utils" "^7.17.12" - "@babel/helper-skip-transparent-expression-wrappers" "^7.16.0" + "@babel/helper-plugin-utils" "^7.18.9" + "@babel/helper-skip-transparent-expression-wrappers" "^7.18.9" -"@babel/plugin-transform-sticky-regex@^7.16.7": - version "7.16.7" - resolved "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.16.7.tgz" - integrity sha512-NJa0Bd/87QV5NZZzTuZG5BPJjLYadeSZ9fO6oOUoL4iQx+9EEuw/eEM92SrsT19Yc2jgB1u1hsjqDtH02c3Drw== +"@babel/plugin-transform-sticky-regex@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.18.6.tgz#c6706eb2b1524028e317720339583ad0f444adcc" + integrity sha512-kfiDrDQ+PBsQDO85yj1icueWMfGfJFKN1KCkndygtu/C9+XUfydLC8Iv5UYJqRwy4zk8EcplRxEOeLyjq1gm6Q== dependencies: - "@babel/helper-plugin-utils" "^7.16.7" + "@babel/helper-plugin-utils" "^7.18.6" -"@babel/plugin-transform-template-literals@^7.17.12": - version "7.17.12" - resolved 
"https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.17.12.tgz" - integrity sha512-kAKJ7DX1dSRa2s7WN1xUAuaQmkTpN+uig4wCKWivVXIObqGbVTUlSavHyfI2iZvz89GFAMGm9p2DBJ4Y1Tp0hw== +"@babel/plugin-transform-template-literals@^7.18.9": + version "7.18.9" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.18.9.tgz#04ec6f10acdaa81846689d63fae117dd9c243a5e" + integrity sha512-S8cOWfT82gTezpYOiVaGHrCbhlHgKhQt8XH5ES46P2XWmX92yisoZywf5km75wv5sYcXDUCLMmMxOLCtthDgMA== dependencies: - "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-plugin-utils" "^7.18.9" -"@babel/plugin-transform-typeof-symbol@^7.17.12": - version "7.17.12" - resolved "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.17.12.tgz" - integrity sha512-Q8y+Jp7ZdtSPXCThB6zjQ74N3lj0f6TDh1Hnf5B+sYlzQ8i5Pjp8gW0My79iekSpT4WnI06blqP6DT0OmaXXmw== +"@babel/plugin-transform-typeof-symbol@^7.18.9": + version "7.18.9" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.18.9.tgz#c8cea68263e45addcd6afc9091429f80925762c0" + integrity sha512-SRfwTtF11G2aemAZWivL7PD+C9z52v9EvMqH9BuYbabyPuKUvSWks3oCg6041pT925L4zVFqaVBeECwsmlguEw== dependencies: - "@babel/helper-plugin-utils" "^7.17.12" + "@babel/helper-plugin-utils" "^7.18.9" "@babel/plugin-transform-typescript@^7.16.7": version "7.16.8" @@ -797,53 +878,53 @@ "@babel/helper-plugin-utils" "^7.16.7" "@babel/plugin-syntax-typescript" "^7.16.7" -"@babel/plugin-transform-unicode-escapes@^7.16.7": - version "7.16.7" - resolved "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.16.7.tgz" - integrity sha512-TAV5IGahIz3yZ9/Hfv35TV2xEm+kaBDaZQCn2S/hG9/CZ0DktxJv9eKfPc7yYCvOYR4JGx1h8C+jcSOvgaaI/Q== +"@babel/plugin-transform-unicode-escapes@^7.18.10": + version "7.18.10" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.18.10.tgz#1ecfb0eda83d09bbcb77c09970c2dd55832aa246" + integrity sha512-kKAdAI+YzPgGY/ftStBFXTI1LZFju38rYThnfMykS+IXy8BVx+res7s2fxf1l8I35DV2T97ezo6+SGrXz6B3iQ== dependencies: - "@babel/helper-plugin-utils" "^7.16.7" + "@babel/helper-plugin-utils" "^7.18.9" -"@babel/plugin-transform-unicode-regex@^7.16.7": - version "7.16.7" - resolved "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.16.7.tgz" - integrity sha512-oC5tYYKw56HO75KZVLQ+R/Nl3Hro9kf8iG0hXoaHP7tjAyCpvqBiSNe6vGrZni1Z6MggmUOC6A7VP7AVmw225Q== +"@babel/plugin-transform-unicode-regex@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.18.6.tgz#194317225d8c201bbae103364ffe9e2cea36cdca" + integrity sha512-gE7A6Lt7YLnNOL3Pb9BNeZvi+d8l7tcRrG4+pwJjK9hD2xX4mEvjlQW60G9EEmfXVYRPv9VRQcyegIVHCql/AA== dependencies: - "@babel/helper-create-regexp-features-plugin" "^7.16.7" - "@babel/helper-plugin-utils" "^7.16.7" + "@babel/helper-create-regexp-features-plugin" "^7.18.6" + "@babel/helper-plugin-utils" "^7.18.6" "@babel/preset-env@^7.12.11": - version "7.18.0" - resolved "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.18.0.tgz" - integrity sha512-cP74OMs7ECLPeG1reiCQ/D/ypyOxgfm8uR6HRYV23vTJ7Lu1nbgj9DQDo/vH59gnn7GOAwtTDPPYV4aXzsMKHA== - dependencies: - "@babel/compat-data" "^7.17.10" - "@babel/helper-compilation-targets" "^7.17.10" - 
"@babel/helper-plugin-utils" "^7.17.12" - "@babel/helper-validator-option" "^7.16.7" - "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression" "^7.17.12" - "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining" "^7.17.12" - "@babel/plugin-proposal-async-generator-functions" "^7.17.12" - "@babel/plugin-proposal-class-properties" "^7.17.12" - "@babel/plugin-proposal-class-static-block" "^7.18.0" - "@babel/plugin-proposal-dynamic-import" "^7.16.7" - "@babel/plugin-proposal-export-namespace-from" "^7.17.12" - "@babel/plugin-proposal-json-strings" "^7.17.12" - "@babel/plugin-proposal-logical-assignment-operators" "^7.17.12" - "@babel/plugin-proposal-nullish-coalescing-operator" "^7.17.12" - "@babel/plugin-proposal-numeric-separator" "^7.16.7" - "@babel/plugin-proposal-object-rest-spread" "^7.18.0" - "@babel/plugin-proposal-optional-catch-binding" "^7.16.7" - "@babel/plugin-proposal-optional-chaining" "^7.17.12" - "@babel/plugin-proposal-private-methods" "^7.17.12" - "@babel/plugin-proposal-private-property-in-object" "^7.17.12" - "@babel/plugin-proposal-unicode-property-regex" "^7.17.12" + version "7.18.10" + resolved "https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.18.10.tgz#83b8dfe70d7eea1aae5a10635ab0a5fe60dfc0f4" + integrity sha512-wVxs1yjFdW3Z/XkNfXKoblxoHgbtUF7/l3PvvP4m02Qz9TZ6uZGxRVYjSQeR87oQmHco9zWitW5J82DJ7sCjvA== + dependencies: + "@babel/compat-data" "^7.18.8" + "@babel/helper-compilation-targets" "^7.18.9" + "@babel/helper-plugin-utils" "^7.18.9" + "@babel/helper-validator-option" "^7.18.6" + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression" "^7.18.6" + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining" "^7.18.9" + "@babel/plugin-proposal-async-generator-functions" "^7.18.10" + "@babel/plugin-proposal-class-properties" "^7.18.6" + "@babel/plugin-proposal-class-static-block" "^7.18.6" + "@babel/plugin-proposal-dynamic-import" "^7.18.6" + "@babel/plugin-proposal-export-namespace-from" "^7.18.9" + "@babel/plugin-proposal-json-strings" "^7.18.6" + "@babel/plugin-proposal-logical-assignment-operators" "^7.18.9" + "@babel/plugin-proposal-nullish-coalescing-operator" "^7.18.6" + "@babel/plugin-proposal-numeric-separator" "^7.18.6" + "@babel/plugin-proposal-object-rest-spread" "^7.18.9" + "@babel/plugin-proposal-optional-catch-binding" "^7.18.6" + "@babel/plugin-proposal-optional-chaining" "^7.18.9" + "@babel/plugin-proposal-private-methods" "^7.18.6" + "@babel/plugin-proposal-private-property-in-object" "^7.18.6" + "@babel/plugin-proposal-unicode-property-regex" "^7.18.6" "@babel/plugin-syntax-async-generators" "^7.8.4" "@babel/plugin-syntax-class-properties" "^7.12.13" "@babel/plugin-syntax-class-static-block" "^7.14.5" "@babel/plugin-syntax-dynamic-import" "^7.8.3" "@babel/plugin-syntax-export-namespace-from" "^7.8.3" - "@babel/plugin-syntax-import-assertions" "^7.17.12" + "@babel/plugin-syntax-import-assertions" "^7.18.6" "@babel/plugin-syntax-json-strings" "^7.8.3" "@babel/plugin-syntax-logical-assignment-operators" "^7.10.4" "@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.3" @@ -853,43 +934,43 @@ "@babel/plugin-syntax-optional-chaining" "^7.8.3" "@babel/plugin-syntax-private-property-in-object" "^7.14.5" "@babel/plugin-syntax-top-level-await" "^7.14.5" - "@babel/plugin-transform-arrow-functions" "^7.17.12" - "@babel/plugin-transform-async-to-generator" "^7.17.12" - "@babel/plugin-transform-block-scoped-functions" "^7.16.7" - "@babel/plugin-transform-block-scoping" "^7.17.12" - 
"@babel/plugin-transform-classes" "^7.17.12" - "@babel/plugin-transform-computed-properties" "^7.17.12" - "@babel/plugin-transform-destructuring" "^7.18.0" - "@babel/plugin-transform-dotall-regex" "^7.16.7" - "@babel/plugin-transform-duplicate-keys" "^7.17.12" - "@babel/plugin-transform-exponentiation-operator" "^7.16.7" - "@babel/plugin-transform-for-of" "^7.17.12" - "@babel/plugin-transform-function-name" "^7.16.7" - "@babel/plugin-transform-literals" "^7.17.12" - "@babel/plugin-transform-member-expression-literals" "^7.16.7" - "@babel/plugin-transform-modules-amd" "^7.18.0" - "@babel/plugin-transform-modules-commonjs" "^7.18.0" - "@babel/plugin-transform-modules-systemjs" "^7.18.0" - "@babel/plugin-transform-modules-umd" "^7.18.0" - "@babel/plugin-transform-named-capturing-groups-regex" "^7.17.12" - "@babel/plugin-transform-new-target" "^7.17.12" - "@babel/plugin-transform-object-super" "^7.16.7" - "@babel/plugin-transform-parameters" "^7.17.12" - "@babel/plugin-transform-property-literals" "^7.16.7" - "@babel/plugin-transform-regenerator" "^7.18.0" - "@babel/plugin-transform-reserved-words" "^7.17.12" - "@babel/plugin-transform-shorthand-properties" "^7.16.7" - "@babel/plugin-transform-spread" "^7.17.12" - "@babel/plugin-transform-sticky-regex" "^7.16.7" - "@babel/plugin-transform-template-literals" "^7.17.12" - "@babel/plugin-transform-typeof-symbol" "^7.17.12" - "@babel/plugin-transform-unicode-escapes" "^7.16.7" - "@babel/plugin-transform-unicode-regex" "^7.16.7" + "@babel/plugin-transform-arrow-functions" "^7.18.6" + "@babel/plugin-transform-async-to-generator" "^7.18.6" + "@babel/plugin-transform-block-scoped-functions" "^7.18.6" + "@babel/plugin-transform-block-scoping" "^7.18.9" + "@babel/plugin-transform-classes" "^7.18.9" + "@babel/plugin-transform-computed-properties" "^7.18.9" + "@babel/plugin-transform-destructuring" "^7.18.9" + "@babel/plugin-transform-dotall-regex" "^7.18.6" + "@babel/plugin-transform-duplicate-keys" "^7.18.9" + "@babel/plugin-transform-exponentiation-operator" "^7.18.6" + "@babel/plugin-transform-for-of" "^7.18.8" + "@babel/plugin-transform-function-name" "^7.18.9" + "@babel/plugin-transform-literals" "^7.18.9" + "@babel/plugin-transform-member-expression-literals" "^7.18.6" + "@babel/plugin-transform-modules-amd" "^7.18.6" + "@babel/plugin-transform-modules-commonjs" "^7.18.6" + "@babel/plugin-transform-modules-systemjs" "^7.18.9" + "@babel/plugin-transform-modules-umd" "^7.18.6" + "@babel/plugin-transform-named-capturing-groups-regex" "^7.18.6" + "@babel/plugin-transform-new-target" "^7.18.6" + "@babel/plugin-transform-object-super" "^7.18.6" + "@babel/plugin-transform-parameters" "^7.18.8" + "@babel/plugin-transform-property-literals" "^7.18.6" + "@babel/plugin-transform-regenerator" "^7.18.6" + "@babel/plugin-transform-reserved-words" "^7.18.6" + "@babel/plugin-transform-shorthand-properties" "^7.18.6" + "@babel/plugin-transform-spread" "^7.18.9" + "@babel/plugin-transform-sticky-regex" "^7.18.6" + "@babel/plugin-transform-template-literals" "^7.18.9" + "@babel/plugin-transform-typeof-symbol" "^7.18.9" + "@babel/plugin-transform-unicode-escapes" "^7.18.10" + "@babel/plugin-transform-unicode-regex" "^7.18.6" "@babel/preset-modules" "^0.1.5" - "@babel/types" "^7.18.0" - babel-plugin-polyfill-corejs2 "^0.3.0" - babel-plugin-polyfill-corejs3 "^0.5.0" - babel-plugin-polyfill-regenerator "^0.3.0" + "@babel/types" "^7.18.10" + babel-plugin-polyfill-corejs2 "^0.3.2" + babel-plugin-polyfill-corejs3 "^0.5.3" + babel-plugin-polyfill-regenerator "^0.4.0" 
core-js-compat "^3.22.1" semver "^6.3.0" @@ -914,9 +995,9 @@ "@babel/plugin-transform-typescript" "^7.16.7" "@babel/register@^7.12.13": - version "7.17.7" - resolved "https://registry.npmjs.org/@babel/register/-/register-7.17.7.tgz" - integrity sha512-fg56SwvXRifootQEDQAu1mKdjh5uthPzdO0N6t358FktfL4XjAVXuH58ULoiW8mesxiOgNIrxiImqEwv0+hRRA== + version "7.18.9" + resolved "https://registry.yarnpkg.com/@babel/register/-/register-7.18.9.tgz#1888b24bc28d5cc41c412feb015e9ff6b96e439c" + integrity sha512-ZlbnXDcNYHMR25ITwwNKT88JiaukkdVj/nG7r3wnuXkOTHc60Uy05PwMCPre0hSkY68E6zK3xz+vUJSP2jWmcw== dependencies: clone-deep "^4.0.1" find-cache-dir "^2.0.0" @@ -931,35 +1012,35 @@ dependencies: regenerator-runtime "^0.13.4" -"@babel/template@^7.16.7": - version "7.16.7" - resolved "https://registry.npmjs.org/@babel/template/-/template-7.16.7.tgz" - integrity sha512-I8j/x8kHUrbYRTUxXrrMbfCa7jxkE7tZre39x3kjr9hvI82cK1FfqLygotcWN5kdPGWcLdWMHpSBavse5tWw3w== - dependencies: - "@babel/code-frame" "^7.16.7" - "@babel/parser" "^7.16.7" - "@babel/types" "^7.16.7" - -"@babel/traverse@^7.13.0", "@babel/traverse@^7.16.7", "@babel/traverse@^7.16.8", "@babel/traverse@^7.18.0": - version "7.18.0" - resolved "https://registry.npmjs.org/@babel/traverse/-/traverse-7.18.0.tgz" - integrity sha512-oNOO4vaoIQoGjDQ84LgtF/IAlxlyqL4TUuoQ7xLkQETFaHkY1F7yazhB4Kt3VcZGL0ZF/jhrEpnXqUb0M7V3sw== - dependencies: - "@babel/code-frame" "^7.16.7" - "@babel/generator" "^7.18.0" - "@babel/helper-environment-visitor" "^7.16.7" - "@babel/helper-function-name" "^7.17.9" - "@babel/helper-hoist-variables" "^7.16.7" - "@babel/helper-split-export-declaration" "^7.16.7" - "@babel/parser" "^7.18.0" - "@babel/types" "^7.18.0" +"@babel/template@^7.16.7", "@babel/template@^7.18.10", "@babel/template@^7.18.6": + version "7.18.10" + resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.18.10.tgz#6f9134835970d1dbf0835c0d100c9f38de0c5e71" + integrity sha512-TI+rCtooWHr3QJ27kJxfjutghu44DLnasDMwpDqCXVTal9RLp3RSYNh4NdBrRP2cQAoG9A8juOQl6P6oZG4JxA== + dependencies: + "@babel/code-frame" "^7.18.6" + "@babel/parser" "^7.18.10" + "@babel/types" "^7.18.10" + +"@babel/traverse@^7.13.0", "@babel/traverse@^7.16.7", "@babel/traverse@^7.18.11", "@babel/traverse@^7.18.13", "@babel/traverse@^7.18.9": + version "7.18.13" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.18.13.tgz#5ab59ef51a997b3f10c4587d648b9696b6cb1a68" + integrity sha512-N6kt9X1jRMLPxxxPYWi7tgvJRH/rtoU+dbKAPDM44RFHiMH8igdsaSBgFeskhSl/kLWLDUvIh1RXCrTmg0/zvA== + dependencies: + "@babel/code-frame" "^7.18.6" + "@babel/generator" "^7.18.13" + "@babel/helper-environment-visitor" "^7.18.9" + "@babel/helper-function-name" "^7.18.9" + "@babel/helper-hoist-variables" "^7.18.6" + "@babel/helper-split-export-declaration" "^7.18.6" + "@babel/parser" "^7.18.13" + "@babel/types" "^7.18.13" debug "^4.1.0" globals "^11.1.0" -"@babel/types@^7.16.0", "@babel/types@^7.16.7", "@babel/types@^7.16.8", "@babel/types@^7.17.0", "@babel/types@^7.18.0", "@babel/types@^7.18.6", "@babel/types@^7.4.4": - version "7.18.10" - resolved "https://registry.npmjs.org/@babel/types/-/types-7.18.10.tgz" - integrity sha512-MJvnbEiiNkpjo+LknnmRrqbY1GPUUggjv+wQVjetM/AONoupqRALB7I6jGqNUAZsKcRIEu2J6FRFvsczljjsaQ== +"@babel/types@^7.16.7", "@babel/types@^7.17.0", "@babel/types@^7.18.10", "@babel/types@^7.18.13", "@babel/types@^7.18.6", "@babel/types@^7.18.9", "@babel/types@^7.4.4": + version "7.18.13" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.18.13.tgz#30aeb9e514f4100f7c1cb6e5ba472b30e48f519a" + 
integrity sha512-ePqfTihzW0W6XAU+aMw2ykilisStJfDnsejDCXRchCcMJ4O0+8DhPXf2YUbZ6wjBlsEmZwLK/sPweWtu8hcJYQ== dependencies: "@babel/helper-string-parser" "^7.18.10" "@babel/helper-validator-identifier" "^7.18.6" @@ -970,16 +1051,17 @@ resolved "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz" integrity sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ== -"@commitlint/cli@^17.0.0": - version "17.0.0" - resolved "https://registry.npmjs.org/@commitlint/cli/-/cli-17.0.0.tgz" - integrity sha512-Np6slCdVVG1XwMvwbZrXIzS1INPAD5QmN4L6al04AmCd4nAPU63gxgxC5Mz0Fmx7va23Uvb0S7yEFV1JPhvPUQ== +"@commitlint/cli@^17.0.3": + version "17.0.3" + resolved "https://registry.yarnpkg.com/@commitlint/cli/-/cli-17.0.3.tgz#50be9d9a8d79f6c47bfd2703638fe65215eb2526" + integrity sha512-oAo2vi5d8QZnAbtU5+0cR2j+A7PO8zuccux65R/EycwvsZrDVyW518FFrnJK2UQxbRtHFFIG+NjQ6vOiJV0Q8A== dependencies: "@commitlint/format" "^17.0.0" - "@commitlint/lint" "^17.0.0" - "@commitlint/load" "^17.0.0" + "@commitlint/lint" "^17.0.3" + "@commitlint/load" "^17.0.3" "@commitlint/read" "^17.0.0" "@commitlint/types" "^17.0.0" + execa "^5.0.0" lodash "^4.17.19" resolve-from "5.0.0" resolve-global "1.0.0" @@ -992,13 +1074,13 @@ dependencies: conventional-changelog-conventionalcommits "^5.0.0" -"@commitlint/config-validator@^17.0.0": - version "17.0.0" - resolved "https://registry.npmjs.org/@commitlint/config-validator/-/config-validator-17.0.0.tgz" - integrity sha512-78IQjoZWR4kDHp/U5y17euEWzswJpPkA9TDL5F6oZZZaLIEreWzrDZD5PWtM8MsSRl/K2LDU/UrzYju2bKLMpA== +"@commitlint/config-validator@^17.0.3": + version "17.0.3" + resolved "https://registry.yarnpkg.com/@commitlint/config-validator/-/config-validator-17.0.3.tgz#5d1ec17eece1f85a0d06c05d168a039b313eb5d7" + integrity sha512-3tLRPQJKapksGE7Kee9axv+9z5I2GDHitDH4q63q7NmNA0wkB+DAorJ0RHz2/K00Zb1/MVdHzhCga34FJvDihQ== dependencies: "@commitlint/types" "^17.0.0" - ajv "^6.12.6" + ajv "^8.11.0" "@commitlint/ensure@^17.0.0": version "17.0.0" @@ -1021,32 +1103,32 @@ "@commitlint/types" "^17.0.0" chalk "^4.1.0" -"@commitlint/is-ignored@^17.0.0": - version "17.0.0" - resolved "https://registry.npmjs.org/@commitlint/is-ignored/-/is-ignored-17.0.0.tgz" - integrity sha512-UmacD0XM/wWykgdXn5CEWVS4XGuqzU+ZGvM2hwv85+SXGnIOaG88XHrt81u37ZeVt1riWW+YdOxcJW6+nd5v5w== +"@commitlint/is-ignored@^17.0.3": + version "17.0.3" + resolved "https://registry.yarnpkg.com/@commitlint/is-ignored/-/is-ignored-17.0.3.tgz#0e1c725c1e50aea5852fb1260bc92b2ee1856425" + integrity sha512-/wgCXAvPtFTQZxsVxj7owLeRf5wwzcXLaYmrZPR4a87iD4sCvUIRl1/ogYrtOyUmHwWfQsvjqIB4mWE/SqWSnA== dependencies: "@commitlint/types" "^17.0.0" semver "7.3.7" -"@commitlint/lint@^17.0.0": - version "17.0.0" - resolved "https://registry.npmjs.org/@commitlint/lint/-/lint-17.0.0.tgz" - integrity sha512-5FL7VLvGJQby24q0pd4UdM8FNFcL+ER1T/UBf8A9KRL5+QXV1Rkl6Zhcl7+SGpGlVo6Yo0pm6aLW716LVKWLGg== +"@commitlint/lint@^17.0.3": + version "17.0.3" + resolved "https://registry.yarnpkg.com/@commitlint/lint/-/lint-17.0.3.tgz#98542a48f03b5c144309e24cbe1c032366ea75e2" + integrity sha512-2o1fk7JUdxBUgszyt41sHC/8Nd5PXNpkmuOo9jvGIjDHzOwXyV0PSdbEVTH3xGz9NEmjohFHr5l+N+T9fcxong== dependencies: - "@commitlint/is-ignored" "^17.0.0" + "@commitlint/is-ignored" "^17.0.3" "@commitlint/parse" "^17.0.0" "@commitlint/rules" "^17.0.0" "@commitlint/types" "^17.0.0" -"@commitlint/load@^17.0.0": - version "17.0.0" - resolved "https://registry.npmjs.org/@commitlint/load/-/load-17.0.0.tgz" - integrity 
sha512-XaiHF4yWQOPAI0O6wXvk+NYLtJn/Xb7jgZEeKd4C1ZWd7vR7u8z5h0PkWxSr0uLZGQsElGxv3fiZ32C5+q6M8w== +"@commitlint/load@^17.0.3": + version "17.0.3" + resolved "https://registry.yarnpkg.com/@commitlint/load/-/load-17.0.3.tgz#683aa484a5515714512e442f2f4b11f75e66097a" + integrity sha512-3Dhvr7GcKbKa/ey4QJ5MZH3+J7QFlARohUow6hftQyNjzoXXROm+RwpBes4dDFrXG1xDw9QPXA7uzrOShCd4bw== dependencies: - "@commitlint/config-validator" "^17.0.0" + "@commitlint/config-validator" "^17.0.3" "@commitlint/execute-rule" "^17.0.0" - "@commitlint/resolve-extends" "^17.0.0" + "@commitlint/resolve-extends" "^17.0.3" "@commitlint/types" "^17.0.0" "@types/node" ">=12" chalk "^4.1.0" @@ -1080,12 +1162,12 @@ fs-extra "^10.0.0" git-raw-commits "^2.0.0" -"@commitlint/resolve-extends@^17.0.0": - version "17.0.0" - resolved "https://registry.npmjs.org/@commitlint/resolve-extends/-/resolve-extends-17.0.0.tgz" - integrity sha512-wi60WiJmwaQ7lzMXK8Vbc18Hq9tE2j/6iv2AFfPUGV7fvfY6Sf1iNKuUHirSqR0fquUyufIXe4y/K9A6LVIIvw== +"@commitlint/resolve-extends@^17.0.3": + version "17.0.3" + resolved "https://registry.yarnpkg.com/@commitlint/resolve-extends/-/resolve-extends-17.0.3.tgz#43b237899e2abd59d16af091521b888c8a071412" + integrity sha512-H/RFMvrcBeJCMdnVC4i8I94108UDccIHrTke2tyQEg9nXQnR5/Hd6MhyNWkREvcrxh9Y+33JLb+PiPiaBxCtBA== dependencies: - "@commitlint/config-validator" "^17.0.0" + "@commitlint/config-validator" "^17.0.3" "@commitlint/types" "^17.0.0" import-fresh "^3.0.0" lodash "^4.17.19" @@ -1116,11 +1198,11 @@ find-up "^5.0.0" "@commitlint/travis-cli@^17.0.0": - version "17.0.0" - resolved "https://registry.npmjs.org/@commitlint/travis-cli/-/travis-cli-17.0.0.tgz" - integrity sha512-0SBUjEQAHeeIakuyo1Rm0YgEtDXY0qFZYpKWgNmRqZl/QfsUddm7nz5/9pYXxbOpSbSNUpuiPJOV+dPTBVF5bg== + version "17.0.3" + resolved "https://registry.yarnpkg.com/@commitlint/travis-cli/-/travis-cli-17.0.3.tgz#4c5b37dd03f1e938cf9d6fdbb9f240ac0f1dcf8d" + integrity sha512-A2JUrh4kLxvm9UsG6LjUWlWbYJAZKM6oHUkBIRttrX/u/vnMuNkzzGvAj/Dso/C51l4TovkbFKz7hQyMgfBGRw== dependencies: - "@commitlint/cli" "^17.0.0" + "@commitlint/cli" "^17.0.3" execa "^5.0.0" "@commitlint/types@^17.0.0": @@ -1312,6 +1394,15 @@ "@jridgewell/sourcemap-codec" "^1.4.10" "@jridgewell/trace-mapping" "^0.3.9" +"@jridgewell/gen-mapping@^0.3.2": + version "0.3.2" + resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.2.tgz#c1aedc61e853f2bb9f5dfe6d4442d3b565b253b9" + integrity sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A== + dependencies: + "@jridgewell/set-array" "^1.0.1" + "@jridgewell/sourcemap-codec" "^1.4.10" + "@jridgewell/trace-mapping" "^0.3.9" + "@jridgewell/resolve-uri@^3.0.3": version "3.0.5" resolved "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.0.5.tgz" @@ -1322,6 +1413,11 @@ resolved "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.1.tgz" integrity sha512-Ct5MqZkLGEXTVmQYbGtx9SVqD2fqwvdubdps5D3djjAkgkKwT918VNOz65pEHFaYTeWcukmJmH5SwsA9Tn2ObQ== +"@jridgewell/set-array@^1.0.1": + version "1.1.2" + resolved "https://registry.yarnpkg.com/@jridgewell/set-array/-/set-array-1.1.2.tgz#7c6cf998d6d20b914c0a55a91ae928ff25965e72" + integrity sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw== + "@jridgewell/source-map@^0.3.2": version "0.3.2" resolved "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.2.tgz" @@ -2251,7 +2347,7 @@ aggregate-error@^3.0.0: clean-stack "^2.0.0" indent-string "^4.0.0" -ajv@^6.10.0, ajv@^6.12.4, ajv@^6.12.6: 
+ajv@^6.10.0, ajv@^6.12.4: version "6.12.6" resolved "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz" integrity sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g== @@ -2261,7 +2357,7 @@ ajv@^6.10.0, ajv@^6.12.4, ajv@^6.12.6: json-schema-traverse "^0.4.1" uri-js "^4.2.2" -ajv@^8.0.1: +ajv@^8.0.1, ajv@^8.11.0: version "8.11.0" resolved "https://registry.npmjs.org/ajv/-/ajv-8.11.0.tgz" integrity sha512-wGgprdCvMalC0BztXvitD2hC04YffAvtsUn93JbGXYLAtCUO4xd17mCCZQxUOItiBwZvJScWo8NIvQMQ71rdpg== @@ -2465,21 +2561,21 @@ babel-plugin-dynamic-import-node@^2.3.3: dependencies: object.assign "^4.1.0" -babel-plugin-polyfill-corejs2@^0.3.0: - version "0.3.1" - resolved "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.3.1.tgz" - integrity sha512-v7/T6EQcNfVLfcN2X8Lulb7DjprieyLWJK/zOWH5DUYcAgex9sP3h25Q+DLsX9TloXe3y1O8l2q2Jv9q8UVB9w== +babel-plugin-polyfill-corejs2@^0.3.0, babel-plugin-polyfill-corejs2@^0.3.2: + version "0.3.2" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.3.2.tgz#e4c31d4c89b56f3cf85b92558954c66b54bd972d" + integrity sha512-LPnodUl3lS0/4wN3Rb+m+UK8s7lj2jcLRrjho4gLw+OJs+I4bvGXshINesY5xx/apM+biTnQ9reDI8yj+0M5+Q== dependencies: - "@babel/compat-data" "^7.13.11" - "@babel/helper-define-polyfill-provider" "^0.3.1" + "@babel/compat-data" "^7.17.7" + "@babel/helper-define-polyfill-provider" "^0.3.2" semver "^6.1.1" -babel-plugin-polyfill-corejs3@^0.5.0: - version "0.5.2" - resolved "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.5.2.tgz" - integrity sha512-G3uJih0XWiID451fpeFaYGVuxHEjzKTHtc9uGFEjR6hHrvNzeS/PX+LLLcetJcytsB5m4j+K3o/EpXJNb/5IEQ== +babel-plugin-polyfill-corejs3@^0.5.0, babel-plugin-polyfill-corejs3@^0.5.3: + version "0.5.3" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.5.3.tgz#d7e09c9a899079d71a8b670c6181af56ec19c5c7" + integrity sha512-zKsXDh0XjnrUEW0mxIHLfjBfnXSMr5Q/goMe/fxpQnLm07mcOZiIZHBNWCMx60HmdvjxfXcalac0tfFg0wqxyw== dependencies: - "@babel/helper-define-polyfill-provider" "^0.3.1" + "@babel/helper-define-polyfill-provider" "^0.3.2" core-js-compat "^3.21.0" babel-plugin-polyfill-regenerator@^0.3.0: @@ -2489,6 +2585,13 @@ babel-plugin-polyfill-regenerator@^0.3.0: dependencies: "@babel/helper-define-polyfill-provider" "^0.3.1" +babel-plugin-polyfill-regenerator@^0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.4.0.tgz#8f51809b6d5883e07e71548d75966ff7635527fe" + integrity sha512-RW1cnryiADFeHmfLS+WW/G431p1PsW5qdRdz0SDRi7TKcUgc7Oh/uXkT7MZ/+tGsT1BkczEAmD5XjUyJ5SWDTw== + dependencies: + "@babel/helper-define-polyfill-provider" "^0.3.2" + backo2@^1.0.2: version "1.0.2" resolved "https://registry.npmjs.org/backo2/-/backo2-1.0.2.tgz" @@ -3709,12 +3812,12 @@ eslint-plugin-import@2.26.0: tsconfig-paths "^3.14.1" eslint-plugin-mocha@^10.0.4: - version "10.0.4" - resolved "https://registry.npmjs.org/eslint-plugin-mocha/-/eslint-plugin-mocha-10.0.4.tgz" - integrity sha512-8wzAeepVY027oBHz/TmBmUr7vhVqoC1KTFeDybFLhbaWKx+aQ7fJJVuUsqcUy+L+G+XvgQBJY+cbAf7hl5DF7Q== + version "10.1.0" + resolved "https://registry.yarnpkg.com/eslint-plugin-mocha/-/eslint-plugin-mocha-10.1.0.tgz#69325414f875be87fb2cb00b2ef33168d4eb7c8d" + integrity sha512-xLqqWUF17llsogVOC+8C6/jvQ+4IoOREbN7ZCHuOHuD6cT5cDD4h7f2LgsZuzMAiwswWE21tO7ExaknHVDrSkw== dependencies: eslint-utils "^3.0.0" - ramda 
"^0.28.0" + rambda "^7.1.0" eslint-plugin-prettier@^4.0.0: version "4.0.0" @@ -6994,10 +7097,10 @@ quick-lru@^5.1.1: resolved "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz" integrity sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA== -ramda@^0.28.0: - version "0.28.0" - resolved "https://registry.npmjs.org/ramda/-/ramda-0.28.0.tgz" - integrity sha512-9QnLuG/kPVgWvMQ4aODhsBUFKOUmnbUnsSXACv+NCQZcHbeb+v8Lodp8OVxtRULN1/xOyYLLaL6npE6dMq5QTA== +rambda@^7.1.0: + version "7.2.1" + resolved "https://registry.yarnpkg.com/rambda/-/rambda-7.2.1.tgz#c533f6e2def4edcd59f967df938ace5dd6da56af" + integrity sha512-Wswj8ZvzdI3VhaGPkZAxaCTwuMmGtgWt7Zxsgyo4P+iTmVnkojvyWaOep5q3ZjMIecW0wtQa66GWxaKkZ24RAA== randombytes@^2.1.0: version "2.1.0" @@ -7189,10 +7292,10 @@ regexpp@^3.1.0: resolved "https://registry.npmjs.org/regexpp/-/regexpp-3.2.0.tgz" integrity sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg== -regexpu-core@^5.0.1: - version "5.0.1" - resolved "https://registry.npmjs.org/regexpu-core/-/regexpu-core-5.0.1.tgz" - integrity sha512-CriEZlrKK9VJw/xQGJpQM5rY88BtuL8DM+AEwvcThHilbxiTAy8vq4iJnd2tqq8wLmjbGZzP7ZcKFjbGkmEFrw== +regexpu-core@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-5.1.0.tgz#2f8504c3fd0ebe11215783a41541e21c79942c6d" + integrity sha512-bb6hk+xWd2PEOkj5It46A16zFMs2mv86Iwpdu94la4S3sJ7C973h2dHpYKwIBGaWSO7cIRJ+UX0IeMaWcO4qwA== dependencies: regenerate "^1.4.2" regenerate-unicode-properties "^10.0.1" @@ -7461,7 +7564,7 @@ semver@7.0.0: resolved "https://registry.npmjs.org/semver/-/semver-7.0.0.tgz" integrity sha512-+GB6zVA9LWh6zovYQLALHwv5rb2PHGlJi3lfiqIHxR0uuwCgefcOJc59v9fv1w8GbStwxuuqqAjI9NMAOOgq1A== -semver@7.3.7, semver@^7.0.0, semver@^7.1.1, semver@^7.1.2, semver@^7.3.7: +semver@7.3.7, semver@^7.0.0, semver@^7.1.1, semver@^7.1.2, semver@^7.2.1, semver@^7.3.2, semver@^7.3.4, semver@^7.3.5, semver@^7.3.7: version "7.3.7" resolved "https://registry.npmjs.org/semver/-/semver-7.3.7.tgz" integrity sha512-QlYTucUYOews+WeEujDoEGziz4K6c47V/Bd+LjSSYcA94p+DmINdf7ncaUinThfvZyu13lN9OY1XDxt8C0Tw0g== @@ -7473,13 +7576,6 @@ semver@^6.0.0, semver@^6.1.1, semver@^6.1.2, semver@^6.3.0: resolved "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz" integrity sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw== -semver@^7.2.1, semver@^7.3.2, semver@^7.3.4, semver@^7.3.5: - version "7.3.5" - resolved "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz" - integrity sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ== - dependencies: - lru-cache "^6.0.0" - send@0.17.2: version "0.17.2" resolved "https://registry.npmjs.org/send/-/send-0.17.2.tgz" diff --git a/zk-token-sdk/src/instruction/close_account.rs b/zk-token-sdk/src/instruction/close_account.rs index 4525f87901..b6702e3051 100644 --- a/zk-token-sdk/src/instruction/close_account.rs +++ b/zk-token-sdk/src/instruction/close_account.rs @@ -41,7 +41,7 @@ impl CloseAccountData { keypair: &ElGamalKeypair, ciphertext: &ElGamalCiphertext, ) -> Result { - let pod_pubkey = pod::ElGamalPubkey((&keypair.public).to_bytes()); + let pod_pubkey = pod::ElGamalPubkey(keypair.public.to_bytes()); let pod_ciphertext = pod::ElGamalCiphertext(ciphertext.to_bytes()); let mut transcript = CloseAccountProof::transcript_new(&pod_pubkey, &pod_ciphertext); diff --git a/zk-token-sdk/src/instruction/withdraw.rs b/zk-token-sdk/src/instruction/withdraw.rs 
index 9aa606e8ca..64f540a591 100644 --- a/zk-token-sdk/src/instruction/withdraw.rs +++ b/zk-token-sdk/src/instruction/withdraw.rs @@ -62,7 +62,7 @@ impl WithdrawData { // current source balance let final_ciphertext = current_ciphertext - &ElGamal::encode(amount); - let pod_pubkey = pod::ElGamalPubkey((&keypair.public).to_bytes()); + let pod_pubkey = pod::ElGamalPubkey(keypair.public.to_bytes()); let pod_final_ciphertext: pod::ElGamalCiphertext = final_ciphertext.into(); let mut transcript = WithdrawProof::transcript_new(&pod_pubkey, &pod_final_ciphertext); let proof = WithdrawProof::new(keypair, final_balance, &final_ciphertext, &mut transcript);
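Note on the two zk-token-sdk hunks above: the explicit borrow in `(&keypair.public).to_bytes()` is redundant, because Rust's method-call syntax auto-references the receiver, so `keypair.public.to_bytes()` resolves to the same `&self` call. A minimal standalone sketch of that equivalence, using hypothetical stand-in types rather than the SDK's actual ElGamal definitions:

    // Illustrative stand-ins only; not the zk-token-sdk types.
    #[derive(Clone, Copy)]
    struct Pubkey([u8; 32]);

    impl Pubkey {
        // `to_bytes` takes `&self`, so the compiler inserts the borrow itself.
        fn to_bytes(&self) -> [u8; 32] {
            self.0
        }
    }

    struct Keypair {
        public: Pubkey,
    }

    fn main() {
        let keypair = Keypair { public: Pubkey([7u8; 32]) };
        // Both forms compile to `Pubkey::to_bytes(&keypair.public)`.
        let explicit = (&keypair.public).to_bytes();
        let auto = keypair.public.to_bytes();
        assert_eq!(explicit, auto);
    }

Dropping the parenthesized borrow is purely cosmetic; behavior is unchanged.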