From 52424de6b4da936f8b309965ff82531007789158 Mon Sep 17 00:00:00 2001
From: Waclaw Banasik
Date: Tue, 29 Oct 2024 17:02:27 +0000
Subject: [PATCH 01/13] fix(nayduck) - fix congestion control test flakiness (#12331)

The test became flaky since relaxing the congestion control parameters made it harder to congest the chain. It turns out that the test was really inefficient in sending transactions because it was querying for the current head block for every transaction. The fix is to store the block hash at the beginning of the test and reuse it for all transactions.

Sped up the test by breaking early when the expected conditions are met (some transactions are rejected and the chain finishes processing all of the load).

Added some cosmetic changes to make it easier to tweak the load. The load can be adjusted by changing the number of accounts or the sleep time in the load method.
---
 pytest/tests/sanity/congestion_control.py | 98 +++++++++++------------
 1 file changed, 49 insertions(+), 49 deletions(-)

diff --git a/pytest/tests/sanity/congestion_control.py b/pytest/tests/sanity/congestion_control.py
index 153b3376b46..3ec57b05019 100644
--- a/pytest/tests/sanity/congestion_control.py
+++ b/pytest/tests/sanity/congestion_control.py
@@ -5,6 +5,7 @@
 # Usage:
 # python3 pytest/tests/sanity/congestion_control.py

+import logging
 import unittest
 import pathlib
 import sys
@@ -13,7 +14,7 @@

 sys.path.append(str(pathlib.Path(__file__).resolve().parents[2] / 'lib'))

-from configured_logger import logger
+from configured_logger import new_logger
 from cluster import BaseNode, init_cluster, load_config, spin_up_node, Key
 from utils import load_test_contract, poll_blocks, wait_for_blocks
 from transaction import sign_create_account_with_full_access_key_and_balance_tx, sign_deploy_contract_tx, sign_function_call_tx
@@ -25,15 +26,15 @@

 GOOD_FINAL_EXECUTION_STATUS = ['FINAL', 'EXECUTED', 'EXECUTED_OPTIMISTIC']

+# boundary accounts for the shard layout
+BOUNDARY_ACCOUNT_LIST = ["fff", "kkk", "ppp", "uuu"]
+# account prefix for each shard
+SHARD_ACCOUNT_LIST = ["aaa", "ggg", "lll", "rrr", "vvv"]
+
 # Shard layout with 5 roughly equal size shards for convenience.
 SHARD_LAYOUT = {
     "V1": {
-        "boundary_accounts": [
-            "fff",
-            "kkk",
-            "ppp",
-            "uuu",
-        ],
+        "boundary_accounts": BOUNDARY_ACCOUNT_LIST,
         "version": 2,
         "shards_split_map": [],
         "to_parent_shard_map": [],
@@ -42,23 +43,11 @@

 NUM_SHARDS = len(SHARD_LAYOUT["V1"]["boundary_accounts"]) + 1

-ACCOUNT_SHARD_0 = "aaa.test0"
-ACCOUNT_SHARD_1 = "ggg.test0"
-ACCOUNT_SHARD_2 = "lll.test0"
-ACCOUNT_SHARD_3 = "rrr.test0"
-ACCOUNT_SHARD_4 = "vvv.test0"
-
-ALL_ACCOUNTS = [
-    ACCOUNT_SHARD_0,
-    ACCOUNT_SHARD_1,
-    ACCOUNT_SHARD_2,
-    ACCOUNT_SHARD_3,
-    ACCOUNT_SHARD_4,
-]
-
 TxHash = str
 AccountId = str

+logger = new_logger("congestion_control_test", logging.INFO)
+

 class CongestionControlTest(unittest.TestCase):

@@ -72,7 +61,7 @@ def test(self):

         self.nonce = 1

-        accounts = self.__prepare_accounts()
+        accounts = self.__prepare_accounts(10)

         node = self.__setup_node()

@@ -116,7 +105,8 @@ def __check_setup(self, node, accounts):
     def __run_under_congestion(self, node):
         logger.info("Checking the chain under congestion")
         (start_height, _) = node.get_latest_block()
-        for height, hash in poll_blocks(node, __target=30):
+        target = start_height + 20
+        for height, hash in poll_blocks(node, __target=target):
             # Wait for a few blocks to congest the chain.
if height < start_height + 5: continue @@ -145,15 +135,15 @@ def __run_under_congestion(self, node): gas_used = chunk['header']['gas_used'] congestion_info = chunk['header']['congestion_info'] - self.assertEqual(int(congestion_info['buffered_receipts_gas']), 0) self.assertEqual(int(congestion_info['delayed_receipts_gas']), 0) - self.assertEqual(congestion_info['receipt_bytes'], 0) logger.info( - f"#{height} other gas used: {gas_used} congestion info {congestion_info}" + f"#{height} other gas used: {gas_used} congestion info {congestion_info}" ) def __run_after_congestion(self, node): + empty_chunk_count = 0 + logger.info("Checking the chain after congestion") for height, hash in poll_blocks(node, __target=100): chunk = self.__get_chunk(node, hash, 0) @@ -165,6 +155,12 @@ def __run_after_congestion(self, node): f"#{height} gas used: {gas_used} congestion info {congestion_info}" ) + if gas_used == 0: + empty_chunk_count += 1 + + if empty_chunk_count > 5: + break + chunk = self.__get_chunk(node, hash, 0) gas_used = chunk['header']['gas_used'] congestion_info = chunk['header']['congestion_info'] @@ -199,8 +195,11 @@ def __check_txs(self, node: BaseNode): logger.info( f"Checking transactions under way, {checked}/{total}") + if accepted_count > 0 and rejected_count > 0: + break + logger.info( - f"Checking transactions done, total {len(self.txs)}, accepted {accepted_count}, rejected {rejected_count}" + f"Checking transactions done, total {len(self.txs)}, checked {checked}, accepted {accepted_count}, rejected {rejected_count}" ) self.assertGreater(accepted_count, 0) self.assertGreater(rejected_count, 0) @@ -221,15 +220,14 @@ def __check_tx(self, result): "The transaction failed, please check that the contract was built with `test_features` enabled." ) - def __start_load(self, node: BaseNode, accounts): + def __start_load(self, node: BaseNode, accounts: list[Key]): logger.info("Starting load threads") self.finished = False self.lock = threading.Lock() self.txs = [] target_account = accounts[0] - # Spawn two thread per each account to get more transactions in. - for account in accounts + accounts: + for account in accounts: thread = threading.Thread( target=self.__load, args=[node, account, target_account], @@ -245,7 +243,7 @@ def __stop_load(self): for thread in self.threads: thread.join() - def __load(self, node: BaseNode, sender_account, target_account): + def __load(self, node: BaseNode, sender_account: Key, target_account: Key): logger.debug( f"Starting load thread {sender_account.account_id} -> {target_account.account_id}" ) @@ -254,10 +252,7 @@ def __load(self, node: BaseNode, sender_account, target_account): with self.lock: self.txs.append((sender_account.account_id, tx_hash)) - # This sleep here is more a formality, the call_contract call is - # slower. This is also the reason for sending transactions from - # multiple threads. - time.sleep(0.1) + time.sleep(0.5) logger.debug( f"Stopped load thread {sender_account.account_id} -> {target_account.account_id}" @@ -287,15 +282,24 @@ def __setup_node(self) -> BaseNode: ) node = spin_up_node(config, near_root, node_dir, 0) + + # Save a block hash to use for creating transactions. Querying it every + # time when creating a new transaction is really slow. + self.tx_block_hash = node.get_latest_block() + return node - def __prepare_accounts(self): + def __prepare_accounts(self, n: int) -> list[Key]: logger.info("Preparing accounts") + # Each prefix belongs to a different shard. 
accounts = [] - for account_id in ALL_ACCOUNTS: + for i in range(n): + prefix = SHARD_ACCOUNT_LIST[i % NUM_SHARDS] + account_id = f"{prefix}_{i:02}.test0" account_key = Key.from_random(account_id) accounts.append(account_key) + return accounts def __create_accounts(self, node: BaseNode, accounts: list[Key]): @@ -312,10 +316,11 @@ def __create_accounts(self, node: BaseNode, accounts: list[Key]): def __deploy_contracts(self, node: BaseNode, accounts: list[Key]): logger.info("Deploying contracts") + contract = load_test_contract('test_contract_rs.wasm') deploy_contract_tx_list = list() - for account_key in accounts: - tx_hash = self.__deploy_contract(node, account_key) - deploy_contract_tx_list.append((account_key.account_id, tx_hash)) + for account in accounts: + tx_hash = self.__deploy_contract(node, account, contract) + deploy_contract_tx_list.append((account.account_id, tx_hash)) self.__wait_for_txs(node, deploy_contract_tx_list) @@ -340,17 +345,14 @@ def __create_account(self, node: BaseNode, account_key, balance): logger.debug(f"Create account {account_key.account_id}: {result}") return result['result'] - def __deploy_contract(self, node: BaseNode, account_key): + def __deploy_contract(self, node: BaseNode, account_key: Key, contract): logger.debug("Deploying contract.") - block_hash = node.get_latest_block().hash_bytes - contract = load_test_contract('test_contract_rs.wasm') - tx = sign_deploy_contract_tx( account_key, contract, self.nonce, - block_hash, + self.tx_block_hash.hash_bytes, ) self.nonce += 1 result = node.send_tx(tx) @@ -374,9 +376,7 @@ def __call_contract_sync(self, node: BaseNode, sender: Key, receiver: Key): result = node.send_tx_and_wait(tx, 5) return result - def __prepare_tx(self, node, sender, receiver): - block_hash = node.get_latest_block().hash_bytes - + def __prepare_tx(self, node: BaseNode, sender: Key, receiver: Key): gas_amount = 250 * TGAS gas_bytes = gas_amount.to_bytes(8, byteorder="little") @@ -388,7 +388,7 @@ def __prepare_tx(self, node, sender, receiver): 300 * TGAS, 0, self.nonce, - block_hash, + self.tx_block_hash.hash_bytes, ) self.nonce += 1 return tx From 79a1ea480d0c632b82ccd8e9a70b7043ad0fc426 Mon Sep 17 00:00:00 2001 From: Jan Malinowski <149345204+jancionear@users.noreply.github.com> Date: Tue, 29 Oct 2024 18:27:36 +0000 Subject: [PATCH 02/13] Check that header version is valid for the current protocol version in witness validation (#12327) Add one more check in `pre_validate_chunk_state_witness` - make sure that the chunk header is valid in this protocol version. We already have a `valid_for` function, but it looks like it's only used in shards manager actor. AFAIU not having this check is not a problem at the moment - only the newest chunk header will match the chunk extra with congestion control info, so other versions of chunk headers won't be endorsed. But there's the concern that there could be a new version of chunk header which can match older chunk extra. For example for bandwidth scheduler I added a new version of `ShardChunkHeaderInner` and accepted cases where `ChunkExtra` had `None` while the list of requests in `ShardChunkHeaderInner` was empty. It was needed to handle protocol upgrades. Chunk validators would endorse the newer chunk header, even though it's not supposed to be used in the current protocol version. This is no longer a problem after https://github.com/near/nearcore/pull/12307, but there could be a similar case in the future. 
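As a minimal, self-contained sketch of the validation pattern this patch adopts (the type names mirror the real ones, but the version boundaries and error fields here are simplified placeholders, not the real feature versions):

    #[derive(Debug)]
    struct BadHeaderForProtocolVersionError {
        protocol_version: u32,
        header_version: u64,
    }

    enum ShardChunkHeader {
        V1,
        V2,
        V3,
    }

    impl ShardChunkHeader {
        // Each header version is only valid for a range of protocol versions.
        // Returning a typed error instead of the old boolean `valid_for`
        // lets callers propagate a descriptive message with `?`.
        fn validate_version(
            &self,
            version: u32,
        ) -> Result<(), BadHeaderForProtocolVersionError> {
            let is_valid = match self {
                ShardChunkHeader::V1 => version < 10,
                ShardChunkHeader::V2 => (10..20).contains(&version),
                ShardChunkHeader::V3 => version >= 20,
            };
            if is_valid {
                Ok(())
            } else {
                let header_version: u64 = match self {
                    ShardChunkHeader::V1 => 1,
                    ShardChunkHeader::V2 => 2,
                    ShardChunkHeader::V3 => 3,
                };
                Err(BadHeaderForProtocolVersionError { protocol_version: version, header_version })
            }
        }
    }

    fn main() {
        // A v1 header under a protocol version that requires v3 must be rejected.
        assert!(ShardChunkHeader::V1.validate_version(25).is_err());
    }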
Let's add a check and make sure that chunk validators don't endorse headers with the wrong version, just to be safe. --- chain/chain-primitives/src/error.rs | 9 ++- .../stateless_validation/chunk_validation.rs | 5 ++ chain/chunks/src/shards_manager_actor.rs | 2 +- core/primitives/src/sharding.rs | 63 +++++++++++++++++-- .../src/sharding/shard_chunk_header_inner.rs | 11 ++++ 5 files changed, 82 insertions(+), 8 deletions(-) diff --git a/chain/chain-primitives/src/error.rs b/chain/chain-primitives/src/error.rs index 995120014de..b62dcec4a15 100644 --- a/chain/chain-primitives/src/error.rs +++ b/chain/chain-primitives/src/error.rs @@ -2,7 +2,7 @@ use near_primitives::block::BlockValidityError; use near_primitives::challenge::{ChunkProofs, ChunkState}; use near_primitives::errors::{ChunkAccessError, EpochError, StorageError}; use near_primitives::shard_layout::ShardLayoutError; -use near_primitives::sharding::{ChunkHash, ShardChunkHeader}; +use near_primitives::sharding::{BadHeaderForProtocolVersionError, ChunkHash, ShardChunkHeader}; use near_primitives::types::{BlockHeight, EpochId, ShardId}; use near_time::Utc; use std::io; @@ -241,6 +241,9 @@ pub enum Error { /// EpochSyncProof validation error. #[error("EpochSyncProof Validation Error: {0}")] InvalidEpochSyncProof(String), + /// Invalid chunk header version for protocol version + #[error(transparent)] + BadHeaderForProtocolVersion(#[from] BadHeaderForProtocolVersionError), /// Anything else #[error("Other Error: {0}")] Other(String), @@ -326,7 +329,8 @@ impl Error { | Error::InvalidProtocolVersion | Error::NotAValidator(_) | Error::NotAChunkValidator - | Error::InvalidChallengeRoot => true, + | Error::InvalidChallengeRoot + | Error::BadHeaderForProtocolVersion(_) => true, } } @@ -407,6 +411,7 @@ impl Error { Error::NotAChunkValidator => "not_a_chunk_validator", Error::InvalidChallengeRoot => "invalid_challenge_root", Error::ReshardingError(_) => "resharding_error", + Error::BadHeaderForProtocolVersion(_) => "bad_header_for_protocol_version", } } } diff --git a/chain/chain/src/stateless_validation/chunk_validation.rs b/chain/chain/src/stateless_validation/chunk_validation.rs index 3820d9cfce4..64d77fce36a 100644 --- a/chain/chain/src/stateless_validation/chunk_validation.rs +++ b/chain/chain/src/stateless_validation/chunk_validation.rs @@ -191,6 +191,11 @@ pub fn pre_validate_chunk_state_witness( ) -> Result { let store = chain.chain_store(); + // Ensure that the chunk header version is supported in this protocol version + let protocol_version = + epoch_manager.get_epoch_info(&state_witness.epoch_id)?.protocol_version(); + state_witness.chunk_header.validate_version(protocol_version)?; + // First, go back through the blockchain history to locate the last new chunk // and last last new chunk for the shard. let StateWitnessBlockRange { diff --git a/chain/chunks/src/shards_manager_actor.rs b/chain/chunks/src/shards_manager_actor.rs index 8c1a1d2c913..8952fcbbfbf 100644 --- a/chain/chunks/src/shards_manager_actor.rs +++ b/chain/chunks/src/shards_manager_actor.rs @@ -1398,7 +1398,7 @@ impl ShardsManagerActor { // 2. 
check protocol version let protocol_version = self.epoch_manager.get_epoch_protocol_version(&epoch_id)?; - if header.valid_for(protocol_version) { + if header.validate_version(protocol_version).is_ok() { Ok(()) } else if epoch_id_confirmed { Err(Error::InvalidChunkHeader) diff --git a/core/primitives/src/sharding.rs b/core/primitives/src/sharding.rs index 1ea0ab1e898..c9be7e063ae 100644 --- a/core/primitives/src/sharding.rs +++ b/core/primitives/src/sharding.rs @@ -583,7 +583,10 @@ impl ShardChunkHeader { } /// Returns whether the header is valid for given `ProtocolVersion`. - pub fn valid_for(&self, version: ProtocolVersion) -> bool { + pub fn validate_version( + &self, + version: ProtocolVersion, + ) -> Result<(), BadHeaderForProtocolVersionError> { const BLOCK_HEADER_V3_VERSION: ProtocolVersion = ProtocolFeature::BlockHeaderV3.protocol_version(); const CONGESTION_CONTROL_VERSION: ProtocolVersion = @@ -591,7 +594,7 @@ impl ShardChunkHeader { const BANDWIDTH_SCHEDULER_VERSION: ProtocolVersion = ProtocolFeature::BandwidthScheduler.protocol_version(); - match &self { + let is_valid = match &self { ShardChunkHeader::V1(_) => version < SHARD_CHUNK_HEADER_UPGRADE_VERSION, ShardChunkHeader::V2(_) => { SHARD_CHUNK_HEADER_UPGRADE_VERSION <= version && version < BLOCK_HEADER_V3_VERSION @@ -606,12 +609,54 @@ impl ShardChunkHeader { // In bandwidth scheduler version v3 and v4 are allowed. The first chunk in // the bandwidth scheduler version will be v3 because the chunk extra for the // last chunk of previous version doesn't have bandwidth requests. - ShardChunkHeaderInner::V2(_) => { - version >= BLOCK_HEADER_V3_VERSION && version < BANDWIDTH_SCHEDULER_VERSION - } + // v2 is also allowed in the bandwidth scheduler version because there + // are multiple tests which upgrade from an old version directly to the + // latest version. TODO(#12328) - don't allow InnerV2 in bandwidth scheduler version. + ShardChunkHeaderInner::V2(_) => version >= BLOCK_HEADER_V3_VERSION, ShardChunkHeaderInner::V3(_) => version >= CONGESTION_CONTROL_VERSION, ShardChunkHeaderInner::V4(_) => version >= BANDWIDTH_SCHEDULER_VERSION, }, + }; + + if is_valid { + Ok(()) + } else { + Err(BadHeaderForProtocolVersionError { + protocol_version: version, + header_version: self.header_version_number(), + header_inner_version: self.inner_version_number(), + }) + } + } + + /// Used for error messages, use `match` for other code. + #[inline] + pub(crate) fn header_version_number(&self) -> u64 { + match self { + ShardChunkHeader::V1(_) => 1, + ShardChunkHeader::V2(_) => 2, + ShardChunkHeader::V3(_) => 3, + } + } + + /// Used for error messages, use `match` for other code. + #[inline] + pub(crate) fn inner_version_number(&self) -> u64 { + match self { + ShardChunkHeader::V1(v1) => { + // Shows that Header V1 contains Inner V1 + let _inner_v1: &ShardChunkHeaderInnerV1 = &v1.inner; + 1 + } + ShardChunkHeader::V2(v2) => { + // Shows that Header V2 also contains Inner V1, not Inner V2 + let _inner_v1: &ShardChunkHeaderInnerV1 = &v2.inner; + 1 + } + ShardChunkHeader::V3(v3) => { + let inner_enum: &ShardChunkHeaderInner = &v3.inner; + inner_enum.version_number() + } } } @@ -624,6 +669,14 @@ impl ShardChunkHeader { } } +#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)] +#[error("Invalid chunk header version for protocol version {protocol_version}. 
(header: {header_version}, inner: {header_inner_version})")]
+pub struct BadHeaderForProtocolVersionError {
+    pub protocol_version: ProtocolVersion,
+    pub header_version: u64,
+    pub header_inner_version: u64,
+}
+
 #[derive(
     BorshSerialize, BorshDeserialize, Hash, Eq, PartialEq, Clone, Debug, Default, ProtocolSchema,
 )]
diff --git a/core/primitives/src/sharding/shard_chunk_header_inner.rs b/core/primitives/src/sharding/shard_chunk_header_inner.rs
index a09c40ccc86..40f693a35e6 100644
--- a/core/primitives/src/sharding/shard_chunk_header_inner.rs
+++ b/core/primitives/src/sharding/shard_chunk_header_inner.rs
@@ -164,6 +164,17 @@ impl ShardChunkHeaderInner {
             Self::V4(inner) => Some(&inner.bandwidth_requests),
         }
     }
+
+    /// Used for error messages, use `match` for other code.
+    #[inline]
+    pub(crate) fn version_number(&self) -> u64 {
+        match self {
+            Self::V1(_) => 1,
+            Self::V2(_) => 2,
+            Self::V3(_) => 3,
+            Self::V4(_) => 4,
+        }
+    }
 }

 #[derive(BorshSerialize, BorshDeserialize, Clone, PartialEq, Eq, Debug, ProtocolSchema)]

From daa67a3771d76088695a73196ada89b0ae486d67 Mon Sep 17 00:00:00 2001
From: robin-near <111538878+robin-near@users.noreply.github.com>
Date: Tue, 29 Oct 2024 12:12:15 -0700
Subject: [PATCH 03/13] [Epoch Sync] Only run epoch sync if we're at genesis. (#12334)

Closes #12309
---
 chain/client/src/sync/epoch.rs | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/chain/client/src/sync/epoch.rs b/chain/client/src/sync/epoch.rs
index 6f3900275a7..592f48a0d0a 100644
--- a/chain/client/src/sync/epoch.rs
+++ b/chain/client/src/sync/epoch.rs
@@ -594,6 +594,13 @@ impl EpochSync {
             return Ok(());
         }
         let tip_height = chain.chain_store().header_head()?.height;
+        if tip_height != chain.genesis().height() {
+            // Epoch Sync only supports bootstrapping at genesis. This is because there is no reason
+            // to use Epoch Sync on an already existing node; we would have to carefully delete old
+            // data and then the result would be the same as if we just started the node from
+            // scratch.
+            return Ok(());
+        }
         if tip_height + self.config.epoch_sync_horizon >= highest_height {
             return Ok(());
         }

From 0e776feb9a76b5a799a2c8df3a7cfef16b1b6549 Mon Sep 17 00:00:00 2001
From: Shreyan Gupta
Date: Wed, 30 Oct 2024 14:39:56 +0530
Subject: [PATCH 04/13] [fix] Set BLOCK_CACHE_SIZE to 1 in epoch manager for no_cache feature (#12338)

Auto fixed issue https://github.com/near/nearcore/issues/5080
---
 chain/epoch-manager/src/lib.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/chain/epoch-manager/src/lib.rs b/chain/epoch-manager/src/lib.rs
index d0721aaf73a..f34e60c4796 100644
--- a/chain/epoch-manager/src/lib.rs
+++ b/chain/epoch-manager/src/lib.rs
@@ -60,7 +60,7 @@ mod validator_selection;
 mod validator_stats;

 const EPOCH_CACHE_SIZE: usize = if cfg!(feature = "no_cache") { 1 } else { 50 };
-const BLOCK_CACHE_SIZE: usize = if cfg!(feature = "no_cache") { 5 } else { 1000 }; // TODO(#5080): fix this
+const BLOCK_CACHE_SIZE: usize = if cfg!(feature = "no_cache") { 1 } else { 1000 };
 const AGGREGATOR_SAVE_PERIOD: u64 = 1000;

 /// In the current architecture, various components have access to the same

From 1e41b11ce754239d6d9af4a8160a6d98eff9cb5f Mon Sep 17 00:00:00 2001
From: Tayfun Elmas
Date: Wed, 30 Oct 2024 12:11:05 +0300
Subject: [PATCH 05/13] refactor: Combine contract accesses and deployments into ContractUpdates (#12326)

The propagation of contract accesses (calls) and deployments is done together.
Thus, this change introduces a wrapper called `ContractUpdates` that contains the accesses and deployments; it is passed through the call stack from chunk application up to the point where we send the messages.

Also address a TODO by changing the `BTreeSet` that contains the code hashes to a `HashSet`. We do NOT change the representation of code hashes in database and network messages, but only the internal data structures.
---
 chain/chain/src/chain_update.rs               |  6 +-
 chain/chain/src/resharding/manager.rs         |  5 +-
 chain/chain/src/runtime/mod.rs                |  3 +-
 chain/chain/src/store/mod.rs                  |  8 +-
 chain/chain/src/test_utils/kv_runtime.rs      |  3 +-
 chain/chain/src/types.rs                      | 10 +--
 .../partial_witness/partial_witness_actor.rs  |  4 +-
 .../partial_witness_tracker.rs                | 12 +--
 .../state_witness_producer.rs                 | 75 ++++++++--------
 .../contract_distribution.rs                  | 29 ++++---
 core/store/src/contract.rs                    | 22 ++---
 core/store/src/trie/update.rs                 | 23 ++---
 runtime/runtime/src/lib.rs                    | 25 ++----
 runtime/runtime/src/tests/apply.rs            | 87 ++++++++++++------
 14 files changed, 151 insertions(+), 161 deletions(-)

diff --git a/chain/chain/src/chain_update.rs b/chain/chain/src/chain_update.rs
index 9d9b9bb592a..b03946b0947 100644
--- a/chain/chain/src/chain_update.rs
+++ b/chain/chain/src/chain_update.rs
@@ -157,8 +157,7 @@ impl<'a> ChainUpdate<'a> {
                 shard_id,
                 apply_result.proof,
                 apply_result.applied_receipts_hash,
-                apply_result.contract_accesses,
-                apply_result.contract_deploys,
+                apply_result.contract_updates,
             );
         }
     }
@@ -188,8 +187,7 @@ impl<'a> ChainUpdate<'a> {
                 shard_uid.shard_id(),
                 apply_result.proof,
                 apply_result.applied_receipts_hash,
-                apply_result.contract_accesses,
-                apply_result.contract_deploys,
+                apply_result.contract_updates,
             );
         }
     }
diff --git a/chain/chain/src/resharding/manager.rs b/chain/chain/src/resharding/manager.rs
index 0d3f5318ae5..049be53f932 100644
--- a/chain/chain/src/resharding/manager.rs
+++ b/chain/chain/src/resharding/manager.rs
@@ -226,10 +226,7 @@ impl ReshardingManager {
             new_shard_uid.shard_id(),
             Some(partial_storage),
             CryptoHash::default(),
-            // No contract code is accessed during resharding.
-            // TODO(#11099): Confirm if sending no contracts is ok here.
-            Default::default(),
-            // No contract code is deployed during resharding.
+            // No contract code is accessed or deployed during resharding.
             // TODO(#11099): Confirm if sending no contracts is ok here.
Default::default(), ); diff --git a/chain/chain/src/runtime/mod.rs b/chain/chain/src/runtime/mod.rs index 7bf7ceac6a4..7bec806078e 100644 --- a/chain/chain/src/runtime/mod.rs +++ b/chain/chain/src/runtime/mod.rs @@ -468,10 +468,9 @@ impl NightshadeRuntime { processed_yield_timeouts: apply_result.processed_yield_timeouts, applied_receipts_hash: hash(&borsh::to_vec(receipts).unwrap()), congestion_info: apply_result.congestion_info, - contract_accesses: apply_result.contract_accesses, bandwidth_requests: apply_result.bandwidth_requests, bandwidth_scheduler_state_hash: apply_result.bandwidth_scheduler_state_hash, - contract_deploys: apply_result.contract_deploys, + contract_updates: apply_result.contract_updates, }; Ok(result) diff --git a/chain/chain/src/store/mod.rs b/chain/chain/src/store/mod.rs index ecf60cf6dd2..918beccc626 100644 --- a/chain/chain/src/store/mod.rs +++ b/chain/chain/src/store/mod.rs @@ -1,5 +1,5 @@ use std::collections::hash_map::Entry; -use std::collections::{BTreeSet, HashMap, HashSet}; +use std::collections::{HashMap, HashSet}; use std::io; use borsh::{BorshDeserialize, BorshSerialize}; @@ -23,7 +23,7 @@ use near_primitives::sharding::{ use near_primitives::state_sync::{ ReceiptProofResponse, ShardStateSyncResponseHeader, StateHeaderKey, StateSyncDumpProgress, }; -use near_primitives::stateless_validation::contract_distribution::CodeHash; +use near_primitives::stateless_validation::contract_distribution::ContractUpdates; use near_primitives::stateless_validation::stored_chunk_state_transition_data::{ StoredChunkStateTransitionData, StoredChunkStateTransitionDataV2, }; @@ -2010,10 +2010,10 @@ impl<'a> ChainStoreUpdate<'a> { shard_id: ShardId, partial_storage: Option, applied_receipts_hash: CryptoHash, - contract_accesses: BTreeSet, - contract_deploys: BTreeSet, + contract_updates: ContractUpdates, ) { if let Some(partial_storage) = partial_storage { + let ContractUpdates { contract_accesses, contract_deploys } = contract_updates; self.state_transition_data.insert( (block_hash, shard_id), StoredChunkStateTransitionData::V2(StoredChunkStateTransitionDataV2 { diff --git a/chain/chain/src/test_utils/kv_runtime.rs b/chain/chain/src/test_utils/kv_runtime.rs index dc68100afdf..cf8046adff4 100644 --- a/chain/chain/src/test_utils/kv_runtime.rs +++ b/chain/chain/src/test_utils/kv_runtime.rs @@ -1388,8 +1388,7 @@ impl RuntimeAdapter for KeyValueRuntime { congestion_info: Self::get_congestion_info(PROTOCOL_VERSION), bandwidth_requests: BandwidthRequests::default_for_protocol_version(PROTOCOL_VERSION), bandwidth_scheduler_state_hash: CryptoHash::default(), - contract_accesses: Default::default(), - contract_deploys: Default::default(), + contract_updates: Default::default(), }) } diff --git a/chain/chain/src/types.rs b/chain/chain/src/types.rs index 599b7a90582..087b1b4b2b0 100644 --- a/chain/chain/src/types.rs +++ b/chain/chain/src/types.rs @@ -1,5 +1,3 @@ -use std::collections::BTreeSet; - use borsh::{BorshDeserialize, BorshSerialize}; use near_async::time::{Duration, Utc}; use near_chain_configs::GenesisConfig; @@ -26,7 +24,7 @@ use near_primitives::receipt::{PromiseYieldTimeout, Receipt}; use near_primitives::sandbox::state_patch::SandboxStatePatch; use near_primitives::shard_layout::ShardUId; use near_primitives::state_part::PartId; -use near_primitives::stateless_validation::contract_distribution::CodeHash; +use near_primitives::stateless_validation::contract_distribution::ContractUpdates; use near_primitives::transaction::{ExecutionOutcomeWithId, SignedTransaction}; use 
near_primitives::types::validator_stake::{ValidatorStake, ValidatorStakeIter}; use near_primitives::types::{ @@ -112,10 +110,8 @@ pub struct ApplyChunkResult { pub bandwidth_requests: Option, /// Used only for a sanity check. pub bandwidth_scheduler_state_hash: CryptoHash, - /// Code-hashes of the contracts accessed (called) while applying the chunk. - pub contract_accesses: BTreeSet, - /// Code-hashes of the contracts deployed while applying the chunk. - pub contract_deploys: BTreeSet, + /// Contracts accessed and deployed while applying the chunk. + pub contract_updates: ContractUpdates, } impl ApplyChunkResult { diff --git a/chain/client/src/stateless_validation/partial_witness/partial_witness_actor.rs b/chain/client/src/stateless_validation/partial_witness/partial_witness_actor.rs index 84ed9f590a4..98ce8fa047d 100644 --- a/chain/client/src/stateless_validation/partial_witness/partial_witness_actor.rs +++ b/chain/client/src/stateless_validation/partial_witness/partial_witness_actor.rs @@ -1,4 +1,4 @@ -use std::collections::BTreeSet; +use std::collections::HashSet; use std::sync::Arc; use itertools::Itertools; @@ -368,7 +368,7 @@ impl PartialWitnessActor { let runtime_config = self .runtime .get_runtime_config(self.epoch_manager.get_epoch_protocol_version(&key.epoch_id)?)?; - let missing_contract_hashes = BTreeSet::from_iter( + let missing_contract_hashes = HashSet::from_iter( accesses .contracts() .iter() diff --git a/chain/client/src/stateless_validation/partial_witness/partial_witness_tracker.rs b/chain/client/src/stateless_validation/partial_witness/partial_witness_tracker.rs index 8bfe0d4a208..28588de9eec 100644 --- a/chain/client/src/stateless_validation/partial_witness/partial_witness_tracker.rs +++ b/chain/client/src/stateless_validation/partial_witness/partial_witness_tracker.rs @@ -1,4 +1,4 @@ -use std::collections::BTreeSet; +use std::collections::HashSet; use std::num::NonZeroUsize; use std::sync::Arc; @@ -43,7 +43,7 @@ enum AccessedContractsState { Unknown, /// Received `ChunkContractAccesses` and sent `ContractCodeRequest`, /// waiting for response from the chunk producer. - Requested { contract_hashes: BTreeSet, requested_at: Instant }, + Requested { contract_hashes: HashSet, requested_at: Instant }, /// Received a valid `ContractCodeResponse`. 
Received(Vec), } @@ -145,7 +145,7 @@ struct CacheEntry { enum CacheUpdate { WitnessPart(PartialEncodedStateWitness, Arc), - AccessedContractHashes(BTreeSet), + AccessedContractHashes(HashSet), AccessedContractCodes(Vec), } @@ -234,7 +234,7 @@ impl CacheEntry { } } - fn set_requested_contracts(&mut self, contract_hashes: BTreeSet) { + fn set_requested_contracts(&mut self, contract_hashes: HashSet) { match &self.accessed_contracts { AccessedContractsState::Unknown => { self.accessed_contracts = AccessedContractsState::Requested { @@ -251,7 +251,7 @@ impl CacheEntry { fn set_received_contracts(&mut self, contract_codes: Vec) { match &self.accessed_contracts { AccessedContractsState::Requested { contract_hashes, requested_at } => { - let actual = BTreeSet::from_iter( + let actual = HashSet::from_iter( contract_codes.iter().map(|code| CodeHash(CryptoHash::hash_bytes(&code.0))), ); let expected = contract_hashes; @@ -380,7 +380,7 @@ impl PartialEncodedStateWitnessTracker { pub fn store_accessed_contract_hashes( &mut self, key: ChunkProductionKey, - hashes: BTreeSet, + hashes: HashSet, ) -> Result<(), Error> { tracing::debug!(target: "client", ?key, ?hashes, "store_accessed_contract_hashes"); let update = CacheUpdate::AccessedContractHashes(hashes); diff --git a/chain/client/src/stateless_validation/state_witness_producer.rs b/chain/client/src/stateless_validation/state_witness_producer.rs index cc73c3d1fec..b31cfe8b50b 100644 --- a/chain/client/src/stateless_validation/state_witness_producer.rs +++ b/chain/client/src/stateless_validation/state_witness_producer.rs @@ -1,4 +1,4 @@ -use std::collections::{BTreeSet, HashMap, HashSet}; +use std::collections::{HashMap, HashSet}; use std::sync::Arc; use near_async::messaging::{CanSend, IntoSender}; @@ -12,7 +12,7 @@ use near_primitives::hash::{hash, CryptoHash}; use near_primitives::receipt::Receipt; use near_primitives::sharding::{ChunkHash, ReceiptProof, ShardChunk, ShardChunkHeader}; use near_primitives::stateless_validation::contract_distribution::{ - ChunkContractAccesses, CodeHash, + ChunkContractAccesses, ContractUpdates, }; use near_primitives::stateless_validation::state_witness::{ ChunkStateTransition, ChunkStateWitness, @@ -37,8 +37,7 @@ struct StateTransitionData { main_transition: ChunkStateTransition, implicit_transitions: Vec, applied_receipts_hash: CryptoHash, - contract_accesses: BTreeSet, - contract_deploys: BTreeSet, + contract_updates: ContractUpdates, } /// Result of creating witness. @@ -48,10 +47,8 @@ struct StateTransitionData { pub(crate) struct CreateWitnessResult { /// State witness created. pub(crate) state_witness: ChunkStateWitness, - /// Code-hashes of contracts accessed while applying the previous chunk. - pub(crate) contract_accesses: BTreeSet, - /// Code-hashes of contracts deployed while applying the previous chunk. - pub(crate) contract_deploys: BTreeSet, + /// Contracts accessed and deployed while applying the chunk. 
+ pub(crate) contract_updates: ContractUpdates, } impl Client { @@ -76,14 +73,13 @@ impl Client { let my_signer = validator_signer.as_ref().ok_or(Error::NotAValidator(format!("send state witness")))?; - let CreateWitnessResult { state_witness, contract_accesses, contract_deploys } = self - .create_state_witness( - my_signer.validator_id().clone(), - prev_block_header, - prev_chunk_header, - chunk, - transactions_storage_proof, - )?; + let CreateWitnessResult { state_witness, contract_updates } = self.create_state_witness( + my_signer.validator_id().clone(), + prev_block_header, + prev_chunk_header, + chunk, + transactions_storage_proof, + )?; if self.config.save_latest_witnesses { self.chain.chain_store.save_latest_chunk_state_witness(&state_witness)?; @@ -108,12 +104,12 @@ impl Client { if ProtocolFeature::ExcludeContractCodeFromStateWitness.enabled(protocol_version) { // TODO(#11099): Currently we consume contract_deploys only for the following log message. Distribute it to validators // that will not validate the current witness so that they can follow-up with requesting the contract code. - tracing::debug!(target: "client", ?contract_accesses, ?contract_deploys, "Contract accesses and deploys while sending state witness"); - if !contract_accesses.is_empty() { + tracing::debug!(target: "client", ?contract_updates, "Contract accesses and deploys while sending state witness"); + if !contract_updates.contract_accesses.is_empty() { self.send_contract_accesses_to_chunk_validators( epoch_id, &chunk_header, - contract_accesses, + contract_updates, my_signer.as_ref(), ); } @@ -143,8 +139,7 @@ impl Client { main_transition, implicit_transitions, applied_receipts_hash, - contract_accesses, - contract_deploys, + contract_updates, } = self.collect_state_transition_data(&chunk_header, prev_chunk_header)?; let new_transactions = chunk.transactions().to_vec(); @@ -178,7 +173,7 @@ impl Client { new_transactions, new_transactions_validation_state, ); - Ok(CreateWitnessResult { state_witness, contract_accesses, contract_deploys }) + Ok(CreateWitnessResult { state_witness, contract_updates }) } /// Collect state transition data necessary to produce state witness for @@ -219,7 +214,7 @@ impl Client { if current_shard_id != next_shard_id { // If shard id changes, we need to get implicit state // transition from current shard id to the next shard id. - let (chunk_state_transition, _, _, _) = + let (chunk_state_transition, _, _) = self.get_state_transition(¤t_block_hash, &next_epoch_id, next_shard_id)?; implicit_transitions.push(chunk_state_transition); } @@ -231,7 +226,7 @@ impl Client { } // Add implicit state transition. - let (chunk_state_transition, _, _, _) = self.get_state_transition( + let (chunk_state_transition, _, _) = self.get_state_transition( ¤t_block_hash, ¤t_epoch_id, current_shard_id, @@ -247,19 +242,17 @@ impl Client { implicit_transitions.reverse(); // Get the main state transition. - let (main_transition, receipts_hash, contract_accesses, contract_deploys) = - if prev_chunk_header.is_genesis() { - self.get_genesis_state_transition(&main_block, &epoch_id, shard_id)? - } else { - self.get_state_transition(&main_block, &epoch_id, shard_id)? - }; + let (main_transition, receipts_hash, contract_updates) = if prev_chunk_header.is_genesis() { + self.get_genesis_state_transition(&main_block, &epoch_id, shard_id)? + } else { + self.get_state_transition(&main_block, &epoch_id, shard_id)? 
+ }; Ok(StateTransitionData { main_transition, implicit_transitions, applied_receipts_hash: receipts_hash, - contract_accesses: BTreeSet::from_iter(contract_accesses.into_iter()), - contract_deploys: BTreeSet::from_iter(contract_deploys.into_iter()), + contract_updates, }) } @@ -269,8 +262,7 @@ impl Client { block_hash: &CryptoHash, epoch_id: &EpochId, shard_id: ShardId, - ) -> Result<(ChunkStateTransition, CryptoHash, BTreeSet, BTreeSet), Error> - { + ) -> Result<(ChunkStateTransition, CryptoHash, ContractUpdates), Error> { let shard_uid = self.chain.epoch_manager.shard_id_to_uid(shard_id, epoch_id)?; let stored_chunk_state_transition_data = self .chain @@ -303,6 +295,10 @@ impl Client { contract_deploys, }) => (base_state, receipts_hash, contract_accesses, contract_deploys), }; + let contract_updates = ContractUpdates { + contract_accesses: contract_accesses.into_iter().collect(), + contract_deploys: contract_deploys.into_iter().collect(), + }; Ok(( ChunkStateTransition { block_hash: *block_hash, @@ -310,8 +306,7 @@ impl Client { post_state_root: *self.chain.get_chunk_extra(block_hash, &shard_uid)?.state_root(), }, receipts_hash, - BTreeSet::from_iter(contract_accesses.into_iter()), - BTreeSet::from_iter(contract_deploys.into_iter()), + contract_updates, )) } @@ -320,8 +315,7 @@ impl Client { block_hash: &CryptoHash, epoch_id: &EpochId, shard_id: ShardId, - ) -> Result<(ChunkStateTransition, CryptoHash, BTreeSet, BTreeSet), Error> - { + ) -> Result<(ChunkStateTransition, CryptoHash, ContractUpdates), Error> { let shard_uid = self.epoch_manager.shard_id_to_uid(shard_id, &epoch_id)?; Ok(( ChunkStateTransition { @@ -331,7 +325,6 @@ impl Client { }, hash(&borsh::to_vec::<[Receipt]>(&[]).unwrap()), Default::default(), - Default::default(), )) } @@ -426,9 +419,11 @@ impl Client { &self, epoch_id: &EpochId, chunk_header: &ShardChunkHeader, - contract_accesses: BTreeSet, + contract_updates: ContractUpdates, my_signer: &ValidatorSigner, ) { + let ContractUpdates { contract_accesses, .. 
} = contract_updates; + let chunk_production_key = ChunkProductionKey { epoch_id: *epoch_id, shard_id: chunk_header.shard_id(), diff --git a/core/primitives/src/stateless_validation/contract_distribution.rs b/core/primitives/src/stateless_validation/contract_distribution.rs index 4ad7dd92acd..912fb867698 100644 --- a/core/primitives/src/stateless_validation/contract_distribution.rs +++ b/core/primitives/src/stateless_validation/contract_distribution.rs @@ -1,4 +1,4 @@ -use std::collections::BTreeSet; +use std::collections::HashSet; use borsh::{BorshDeserialize, BorshSerialize}; use bytesize::ByteSize; @@ -24,7 +24,7 @@ pub enum ChunkContractAccesses { impl ChunkContractAccesses { pub fn new( next_chunk: ChunkProductionKey, - contracts: BTreeSet, + contracts: HashSet, signer: &ValidatorSigner, ) -> Self { Self::V1(ChunkContractAccessesV1::new(next_chunk, contracts, signer)) @@ -59,7 +59,7 @@ pub struct ChunkContractAccessesV1 { impl ChunkContractAccessesV1 { fn new( next_chunk: ChunkProductionKey, - contracts: BTreeSet, + contracts: HashSet, signer: &ValidatorSigner, ) -> Self { let inner = ChunkContractAccessesInner::new(next_chunk, contracts); @@ -85,7 +85,7 @@ pub struct ChunkContractAccessesInner { } impl ChunkContractAccessesInner { - fn new(next_chunk: ChunkProductionKey, contracts: BTreeSet) -> Self { + fn new(next_chunk: ChunkProductionKey, contracts: HashSet) -> Self { Self { next_chunk, contracts: contracts.into_iter().collect(), @@ -107,7 +107,7 @@ pub enum ChunkContractDeployments { impl ChunkContractDeployments { pub fn new( next_chunk: ChunkProductionKey, - contracts: BTreeSet, + contracts: HashSet, signer: &ValidatorSigner, ) -> Self { Self::V1(ChunkContractDeploymentsV1::new(next_chunk, contracts, signer)) @@ -136,7 +136,7 @@ pub struct ChunkContractDeploymentsV1 { impl ChunkContractDeploymentsV1 { fn new( next_chunk: ChunkProductionKey, - contracts: BTreeSet, + contracts: HashSet, signer: &ValidatorSigner, ) -> Self { let inner = ChunkContractDeploymentsInner::new(next_chunk, contracts); @@ -158,7 +158,7 @@ pub struct ChunkContractDeploymentsInner { } impl ChunkContractDeploymentsInner { - fn new(next_chunk: ChunkProductionKey, contracts: BTreeSet) -> Self { + fn new(next_chunk: ChunkProductionKey, contracts: HashSet) -> Self { Self { next_chunk, contracts: contracts.into_iter().collect(), @@ -179,7 +179,7 @@ pub enum ContractCodeRequest { impl ContractCodeRequest { pub fn new( next_chunk: ChunkProductionKey, - contracts: BTreeSet, + contracts: HashSet, signer: &ValidatorSigner, ) -> Self { Self::V1(ContractCodeRequestV1::new(next_chunk, contracts, signer)) @@ -214,7 +214,7 @@ pub struct ContractCodeRequestV1 { impl ContractCodeRequestV1 { fn new( next_chunk: ChunkProductionKey, - contracts: BTreeSet, + contracts: HashSet, signer: &ValidatorSigner, ) -> Self { let inner = @@ -243,7 +243,7 @@ impl ContractCodeRequestInner { fn new( requester: AccountId, next_chunk: ChunkProductionKey, - contracts: BTreeSet, + contracts: HashSet, ) -> Self { Self { requester, @@ -382,3 +382,12 @@ impl Into for CodeHash { /// Raw bytes of the (uncompiled) contract code. #[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, ProtocolSchema)] pub struct CodeBytes(pub std::sync::Arc<[u8]>); + +/// Contains the accesses and changes (eg. deployments) to the contracts while applying a chunk. +#[derive(Debug, Default)] +pub struct ContractUpdates { + /// Code-hashes of the contracts accessed (called) while applying the chunk. 
+ pub contract_accesses: HashSet, + /// Code-hashes of the contracts deployed while applying the chunk. + pub contract_deploys: HashSet, +} diff --git a/core/store/src/contract.rs b/core/store/src/contract.rs index 10854be9250..a5abff262be 100644 --- a/core/store/src/contract.rs +++ b/core/store/src/contract.rs @@ -1,9 +1,9 @@ use crate::{metrics, TrieStorage}; use near_primitives::errors::StorageError; use near_primitives::hash::CryptoHash; -use near_primitives::stateless_validation::contract_distribution::CodeHash; +use near_primitives::stateless_validation::contract_distribution::{CodeHash, ContractUpdates}; use near_vm_runner::ContractCode; -use std::collections::{BTreeMap, BTreeSet}; +use std::collections::{BTreeMap, HashSet}; use std::sync::{Arc, Mutex}; /// Tracks the uncommitted and committed deployments and calls to contracts, while applying the receipts in a chunk. @@ -35,7 +35,7 @@ struct ContractsTracker { /// /// We do not distinguish between committed and uncommitted calls, because we need /// to record all calls to validate both successful and failing function calls. - contract_calls: BTreeSet, + contract_calls: HashSet, } impl ContractsTracker { @@ -68,22 +68,14 @@ impl ContractsTracker { } /// Finalizes this tracker and returns the calls and committed deployments. - fn finalize(mut self) -> ContractStorageResult { - ContractStorageResult { - contract_calls: std::mem::take(&mut self.contract_calls), + fn finalize(mut self) -> ContractUpdates { + ContractUpdates { + contract_accesses: std::mem::take(&mut self.contract_calls), contract_deploys: self.committed_deploys.into_keys().collect(), } } } -/// Result of finalizing the contract storage, containing the contract calls and committed deployments. -pub struct ContractStorageResult { - /// List of code-hashes for the contract calls while applying the chunk. - pub contract_calls: BTreeSet, - /// List of code-hashes for the (committed) contract deployments while applying the chunk. - pub contract_deploys: BTreeSet, -} - /// Reads contract code from the trie by its hash. /// /// Cloning is cheap. @@ -178,7 +170,7 @@ impl ContractStorage { /// /// It also finalizes and destructs the inner`ContractsTracker` so there must be no other deployments or /// calls to contracts after this returns. 
- pub(crate) fn finalize(self) -> ContractStorageResult { + pub(crate) fn finalize(self) -> ContractUpdates { let mut guard = self.tracker.lock().expect("no panics"); let tracker = guard.take().expect("finalize must be called only once"); tracker.finalize() diff --git a/core/store/src/trie/update.rs b/core/store/src/trie/update.rs index 0ee0440e1cf..563b287c559 100644 --- a/core/store/src/trie/update.rs +++ b/core/store/src/trie/update.rs @@ -1,16 +1,16 @@ pub use self::iterator::TrieUpdateIterator; use super::accounting_cache::TrieAccountingCacheSwitch; use super::{OptimizedValueRef, Trie, TrieWithReadLock}; -use crate::contract::{ContractStorage, ContractStorageResult}; +use crate::contract::ContractStorage; use crate::trie::{KeyLookupMode, TrieChanges}; use crate::StorageError; -use near_primitives::stateless_validation::contract_distribution::CodeHash; +use near_primitives::stateless_validation::contract_distribution::ContractUpdates; use near_primitives::trie_key::TrieKey; use near_primitives::types::{ RawStateChange, RawStateChanges, RawStateChangesWithTrieKey, StateChangeCause, StateRoot, TrieCacheMode, }; -use std::collections::{BTreeMap, BTreeSet}; +use std::collections::BTreeMap; mod iterator; @@ -58,10 +58,8 @@ pub struct TrieUpdateResult { pub trie: Trie, pub trie_changes: TrieChanges, pub state_changes: Vec, - /// Code-hashes of the contracts accessed (called). - pub contract_accesses: BTreeSet, - /// Code-hashes of the contracts deployed. - pub contract_deploys: BTreeSet, + /// Contracts accessed and deployed while applying the chunk. + pub contract_updates: ContractUpdates, } impl TrieUpdate { @@ -192,15 +190,8 @@ impl TrieUpdate { span.record("mem_reads", iops_delta.mem_reads); span.record("db_reads", iops_delta.db_reads); } - let ContractStorageResult { contract_calls, contract_deploys } = - contract_storage.finalize(); - Ok(TrieUpdateResult { - trie, - trie_changes, - state_changes, - contract_accesses: contract_calls, - contract_deploys, - }) + let contract_updates = contract_storage.finalize(); + Ok(TrieUpdateResult { trie, trie_changes, state_changes, contract_updates }) } /// Returns Error if the underlying storage fails diff --git a/runtime/runtime/src/lib.rs b/runtime/runtime/src/lib.rs index 75e2f9931d2..16ca49e96d6 100644 --- a/runtime/runtime/src/lib.rs +++ b/runtime/runtime/src/lib.rs @@ -36,7 +36,7 @@ use near_primitives::receipt::{ use near_primitives::runtime::migration_data::{MigrationData, MigrationFlags}; use near_primitives::sandbox::state_patch::SandboxStatePatch; use near_primitives::state_record::StateRecord; -use near_primitives::stateless_validation::contract_distribution::CodeHash; +use near_primitives::stateless_validation::contract_distribution::ContractUpdates; #[cfg(feature = "protocol_feature_nonrefundable_transfer_nep491")] use near_primitives::transaction::NonrefundableStorageTransferAction; use near_primitives::transaction::{ @@ -72,7 +72,7 @@ use near_vm_runner::ContractRuntimeCache; use near_vm_runner::ProfileDataV3; use pipelining::ReceiptPreparationPipeline; use std::cmp::max; -use std::collections::{BTreeSet, HashMap, HashSet, VecDeque}; +use std::collections::{HashMap, HashSet, VecDeque}; use std::sync::Arc; use tracing::{debug, instrument}; @@ -202,8 +202,8 @@ pub struct ApplyResult { pub bandwidth_requests: Option, /// Used only for a sanity check. pub bandwidth_scheduler_state_hash: CryptoHash, - pub contract_accesses: BTreeSet, - pub contract_deploys: BTreeSet, + /// Contracts accessed and deployed while applying the chunk. 
+ pub contract_updates: ContractUpdates, } #[derive(Debug)] @@ -2080,13 +2080,8 @@ impl Runtime { metrics::CHUNK_RECORDED_SIZE_UPPER_BOUND .with_label_values(&[shard_id_str.as_str()]) .observe(chunk_recorded_size_upper_bound); - let TrieUpdateResult { - trie, - trie_changes, - state_changes, - contract_accesses, - contract_deploys, - } = state_update.finalize()?; + let TrieUpdateResult { trie, trie_changes, state_changes, contract_updates } = + state_update.finalize()?; if let Some(prefetcher) = &processing_state.prefetcher { // Only clear the prefetcher queue after finalize is done because as part of receipt @@ -2146,8 +2141,7 @@ impl Runtime { .as_ref() .map(|o| o.scheduler_state_hash) .unwrap_or_default(), - contract_accesses, - contract_deploys, + contract_updates, }) } } @@ -2235,7 +2229,7 @@ fn missing_chunk_apply_result( processing_state: ApplyProcessingState, bandwidth_scheduler_output: &Option, ) -> Result { - let TrieUpdateResult { trie, trie_changes, state_changes, contract_accesses, contract_deploys } = + let TrieUpdateResult { trie, trie_changes, state_changes, contract_updates } = processing_state.state_update.finalize()?; let proof = trie.recorded_storage(); @@ -2276,8 +2270,7 @@ fn missing_chunk_apply_result( .as_ref() .map(|o| o.scheduler_state_hash) .unwrap_or_default(), - contract_accesses, - contract_deploys, + contract_updates, }); } diff --git a/runtime/runtime/src/tests/apply.rs b/runtime/runtime/src/tests/apply.rs index f77d929c1d1..0067f939ee9 100644 --- a/runtime/runtime/src/tests/apply.rs +++ b/runtime/runtime/src/tests/apply.rs @@ -38,7 +38,7 @@ use near_store::{ Trie, }; use near_vm_runner::{ContractCode, FilesystemContractRuntimeCache}; -use std::collections::{BTreeSet, HashMap}; +use std::collections::{HashMap, HashSet}; use std::sync::Arc; use testlib::runtime_utils::{alice_account, bob_account}; @@ -1272,9 +1272,12 @@ fn test_exclude_contract_code_from_witness() { .unwrap(); assert_eq!(apply_result.delayed_receipts_count, 0); - assert_eq!(apply_result.contract_accesses, BTreeSet::new()); + assert_eq!(apply_result.contract_updates.contract_accesses, HashSet::new()); // Since both accounts deploy the same contract, we expect only one contract deploy. - assert_eq!(apply_result.contract_deploys, BTreeSet::from([CodeHash(*contract_code.hash())])); + assert_eq!( + apply_result.contract_updates.contract_deploys, + HashSet::from([CodeHash(*contract_code.hash())]) + ); let mut store_update = tries.store_update(); let root = @@ -1311,8 +1314,11 @@ fn test_exclude_contract_code_from_witness() { assert_eq!(apply_result.delayed_receipts_count, 0); // Since both accounts call the same contract, we expect only one contract access. - assert_eq!(apply_result.contract_accesses, BTreeSet::from([CodeHash(*contract_code.hash())])); - assert_eq!(apply_result.contract_deploys, BTreeSet::new()); + assert_eq!( + apply_result.contract_updates.contract_accesses, + HashSet::from([CodeHash(*contract_code.hash())]) + ); + assert_eq!(apply_result.contract_updates.contract_deploys, HashSet::new()); // Check that both contracts are excluded from the storage proof. let partial_storage = apply_result.proof.unwrap(); @@ -1374,9 +1380,12 @@ fn test_exclude_contract_code_from_witness_with_failed_call() { .unwrap(); assert_eq!(apply_result.delayed_receipts_count, 0); - assert_eq!(apply_result.contract_accesses, BTreeSet::new()); + assert_eq!(apply_result.contract_updates.contract_accesses, HashSet::new()); // Since both accounts deploy the same contract, we expect only one contract deploy. 
- assert_eq!(apply_result.contract_deploys, BTreeSet::from([CodeHash(*contract_code.hash())])); + assert_eq!( + apply_result.contract_updates.contract_deploys, + HashSet::from([CodeHash(*contract_code.hash())]) + ); let mut store_update = tries.store_update(); let root = @@ -1413,8 +1422,11 @@ fn test_exclude_contract_code_from_witness_with_failed_call() { assert_eq!(apply_result.delayed_receipts_count, 1); // Since both accounts call the same contract, we expect only one contract access. - assert_eq!(apply_result.contract_accesses, BTreeSet::from([CodeHash(*contract_code.hash())])); - assert_eq!(apply_result.contract_deploys, BTreeSet::new()); + assert_eq!( + apply_result.contract_updates.contract_accesses, + HashSet::from([CodeHash(*contract_code.hash())]) + ); + assert_eq!(apply_result.contract_updates.contract_deploys, HashSet::new()); // Check that both contracts are excluded from the storage proof. let partial_storage = apply_result.proof.unwrap(); @@ -1502,10 +1514,10 @@ fn test_deploy_and_call_different_contracts() { .unwrap(); assert_eq!(apply_result.delayed_receipts_count, 0); - assert_eq!(apply_result.contract_accesses, BTreeSet::new()); + assert_eq!(apply_result.contract_updates.contract_accesses, HashSet::new()); assert_eq!( - apply_result.contract_deploys, - BTreeSet::from([ + apply_result.contract_updates.contract_deploys, + HashSet::from([ CodeHash(*first_contract_code.hash()), CodeHash(*second_contract_code.hash()) ]) @@ -1530,13 +1542,13 @@ fn test_deploy_and_call_different_contracts() { assert_eq!(apply_result.delayed_receipts_count, 0); assert_eq!( - apply_result.contract_accesses, - BTreeSet::from([ + apply_result.contract_updates.contract_accesses, + HashSet::from([ CodeHash(*first_contract_code.hash()), CodeHash(*second_contract_code.hash()) ]) ); - assert_eq!(apply_result.contract_deploys, BTreeSet::new()); + assert_eq!(apply_result.contract_updates.contract_deploys, HashSet::new()); } // Similar to test_deploy_and_call_different_contracts, but one of the function calls fails. 
@@ -1611,10 +1623,10 @@ fn test_deploy_and_call_different_contracts_with_failed_call() { .unwrap(); assert_eq!(apply_result.delayed_receipts_count, 0); - assert_eq!(apply_result.contract_accesses, BTreeSet::new()); + assert_eq!(apply_result.contract_updates.contract_accesses, HashSet::new()); assert_eq!( - apply_result.contract_deploys, - BTreeSet::from([ + apply_result.contract_updates.contract_deploys, + HashSet::from([ CodeHash(*first_contract_code.hash()), CodeHash(*second_contract_code.hash()) ]) @@ -1639,10 +1651,10 @@ fn test_deploy_and_call_different_contracts_with_failed_call() { assert_eq!(apply_result.delayed_receipts_count, 1); assert_eq!( - apply_result.contract_accesses, - BTreeSet::from([CodeHash(*first_contract_code.hash())]) + apply_result.contract_updates.contract_accesses, + HashSet::from([CodeHash(*first_contract_code.hash())]) ); - assert_eq!(apply_result.contract_deploys, BTreeSet::new()); + assert_eq!(apply_result.contract_updates.contract_deploys, HashSet::new()); } // Tests excluding contract code from state witness and recording of contract deployments and function calls @@ -1718,15 +1730,15 @@ fn test_deploy_and_call_in_apply() { assert_eq!(apply_result.delayed_receipts_count, 0); assert_eq!( - apply_result.contract_accesses, - BTreeSet::from([ + apply_result.contract_updates.contract_accesses, + HashSet::from([ CodeHash(*first_contract_code.hash()), CodeHash(*second_contract_code.hash()) ]) ); assert_eq!( - apply_result.contract_deploys, - BTreeSet::from([ + apply_result.contract_updates.contract_deploys, + HashSet::from([ CodeHash(*first_contract_code.hash()), CodeHash(*second_contract_code.hash()) ]) @@ -1806,13 +1818,13 @@ fn test_deploy_and_call_in_apply_with_failed_call() { assert_eq!(apply_result.delayed_receipts_count, 1); assert_eq!( - apply_result.contract_accesses, - BTreeSet::from([CodeHash(*first_contract_code.hash())]) + apply_result.contract_updates.contract_accesses, + HashSet::from([CodeHash(*first_contract_code.hash())]) ); // We record both deployments even if the function call to one of them fails. assert_eq!( - apply_result.contract_deploys, - BTreeSet::from([ + apply_result.contract_updates.contract_deploys, + HashSet::from([ CodeHash(*first_contract_code.hash()), CodeHash(*second_contract_code.hash()) ]) @@ -1862,8 +1874,14 @@ fn test_deploy_and_call_in_same_receipt() { .unwrap(); assert_eq!(apply_result.delayed_receipts_count, 0); - assert_eq!(apply_result.contract_accesses, BTreeSet::from([CodeHash(*contract_code.hash())])); - assert_eq!(apply_result.contract_deploys, BTreeSet::from([CodeHash(*contract_code.hash()),])); + assert_eq!( + apply_result.contract_updates.contract_accesses, + HashSet::from([CodeHash(*contract_code.hash())]) + ); + assert_eq!( + apply_result.contract_updates.contract_deploys, + HashSet::from([CodeHash(*contract_code.hash()),]) + ); } // Tests the case in which deploy and call are contained in the same receipt and function call fails due to exceeding gas limit. 
@@ -1911,8 +1929,11 @@ fn test_deploy_and_call_in_same_receipt_with_failed_call() {
         .unwrap();

     assert_eq!(apply_result.delayed_receipts_count, 0);
-    assert_eq!(apply_result.contract_accesses, BTreeSet::from([CodeHash(*contract_code.hash())]));
-    assert_eq!(apply_result.contract_deploys, BTreeSet::new());
+    assert_eq!(
+        apply_result.contract_updates.contract_accesses,
+        HashSet::from([CodeHash(*contract_code.hash())])
+    );
+    assert_eq!(apply_result.contract_updates.contract_deploys, HashSet::new());
 }

 /// Check that applying nothing does not change the state trie.

From 76268e31c17e3c2d2ea25a9c0b48b0b43668dd31 Mon Sep 17 00:00:00 2001
From: Tayfun Elmas
Date: Wed, 30 Oct 2024 14:19:28 +0300
Subject: [PATCH 06/13] feat(contract-distribution): Filter new deploys from accesses and add a new testloop test (#12298)

- Exclude the code-hashes of the newly deployed contracts from the message sent from the chunk-producer to the chunk-validators. This is because the code for the newly deployed contracts is available in the state witness inside the receipts (they are not excluded from the witness), and the witness is already sent to the validators. The chunk validators will then update their contract cache while applying the deploy actions, so they do not need to request code for these contracts.
- Add two testloop tests for contract distribution. A simple test deploys and calls a contract. Another test deploys multiple contracts to two different accounts/shards and calls them from different accounts. We call the contracts before and after clearing the cache. We promote some of the helper functions from the `congestion_control` test to the utils to share with the new test.
- To support the test, we add a test-only method to the contract cache to clear the cache. This allows us to test that the chain can still work if the cache is empty. Note that we only add the cleaning functionality for testing, since reasoning about cleaning the cache while there are ongoing accesses is not trivial (for example, see [this comment](https://github.com/near/nearcore/pull/12300#discussion_r1814637453)).
- We also change `env.rs` because the `warmup` step needs one more block to ensure there is no missing chunk. The changes in the block heights in `test_view_requests_to_archival_node` are related to this change.
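As a minimal illustration of the filtering described in the first bullet (plain strings stand in for the real `CodeHash` values):

    use std::collections::HashSet;

    fn main() {
        let contract_accesses: HashSet<&str> = HashSet::from(["a", "b", "c"]);
        let contract_deploys: HashSet<&str> = HashSet::from(["b"]);

        // Drop the hashes of freshly deployed contracts from the access list;
        // validators already receive those contracts via the state witness.
        let predeployed_contract_accesses: HashSet<&str> =
            contract_accesses.difference(&contract_deploys).cloned().collect();

        assert_eq!(predeployed_contract_accesses, HashSet::from(["a", "c"]));
    }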
--------- Co-authored-by: Andrea --- .../state_witness_producer.rs | 14 +- integration-tests/src/test_loop/builder.rs | 4 +- integration-tests/src/test_loop/env.rs | 4 +- .../src/test_loop/tests/congestion_control.rs | 68 +++----- .../contract_distribution_cross_shard.rs | 160 ++++++++++++++++++ .../tests/contract_distribution_simple.rs | 131 ++++++++++++++ integration-tests/src/test_loop/tests/mod.rs | 2 + .../tests/view_requests_to_archival_node.rs | 26 +-- .../src/test_loop/utils/transactions.rs | 74 ++++++-- runtime/near-vm-runner/src/cache.rs | 87 ++++++++++ 10 files changed, 485 insertions(+), 85 deletions(-) create mode 100644 integration-tests/src/test_loop/tests/contract_distribution_cross_shard.rs create mode 100644 integration-tests/src/test_loop/tests/contract_distribution_simple.rs diff --git a/chain/client/src/stateless_validation/state_witness_producer.rs b/chain/client/src/stateless_validation/state_witness_producer.rs index b31cfe8b50b..9c72c90fcbb 100644 --- a/chain/client/src/stateless_validation/state_witness_producer.rs +++ b/chain/client/src/stateless_validation/state_witness_producer.rs @@ -422,7 +422,7 @@ impl Client { contract_updates: ContractUpdates, my_signer: &ValidatorSigner, ) { - let ContractUpdates { contract_accesses, .. } = contract_updates; + let ContractUpdates { contract_accesses, contract_deploys } = contract_updates; let chunk_production_key = ChunkProductionKey { epoch_id: *epoch_id, @@ -450,13 +450,21 @@ impl Client { .into_iter() .collect(); + // Since chunk validators will receive the newly deployed contracts as part of the state witness (as DeployActions in receipts), + // they will update their contract cache while applying these deploy actions, thus we can exclude code-hash for these contracts from the message. + let predeployed_contract_accesses = + contract_accesses.difference(&contract_deploys).cloned().collect(); + // Exclude chunk producers that track the same shard from the target list, since they track the state that contains the respective code. let target_chunk_validators = chunk_validators.difference(&chunk_producers).cloned().collect(); - // TODO(#11099): Exclude new deployments from the list of contract accesses. 
self.network_adapter.send(PeerManagerMessageRequest::NetworkRequests( NetworkRequests::ChunkContractAccesses( target_chunk_validators, - ChunkContractAccesses::new(chunk_production_key, contract_accesses, my_signer), + ChunkContractAccesses::new( + chunk_production_key, + predeployed_contract_accesses, + my_signer, + ), ), )); } diff --git a/integration-tests/src/test_loop/builder.rs b/integration-tests/src/test_loop/builder.rs index f32fe6182fa..62d4e3e6319 100644 --- a/integration-tests/src/test_loop/builder.rs +++ b/integration-tests/src/test_loop/builder.rs @@ -546,8 +546,8 @@ impl TestLoopBuilder { let shard_tracker = ShardTracker::new(TrackedConfig::from_config(&client_config), epoch_manager.clone()); - let contract_cache = FilesystemContractRuntimeCache::new(&homedir, None::<&str>) - .expect("filesystem contract cache"); + let contract_cache = + FilesystemContractRuntimeCache::test().expect("filesystem contract cache"); let runtime_adapter = NightshadeRuntime::test_with_trie_config( &homedir, store.clone(), diff --git a/integration-tests/src/test_loop/env.rs b/integration-tests/src/test_loop/env.rs index b4251ca875f..5e202c40ffb 100644 --- a/integration-tests/src/test_loop/env.rs +++ b/integration-tests/src/test_loop/env.rs @@ -42,7 +42,7 @@ impl TestLoopEnv { test_loop.run_until( |test_loop_data| { let client_actor = test_loop_data.get(&client_handle); - client_actor.client.chain.head().unwrap().height == genesis_height + 3 + client_actor.client.chain.head().unwrap().height == genesis_height + 4 }, Duration::seconds(5), ); @@ -51,7 +51,7 @@ impl TestLoopEnv { let event = move |test_loop_data: &mut TestLoopData| { let client_actor = test_loop_data.get(&client_handle); let block = - client_actor.client.chain.get_block_by_height(genesis_height + 2).unwrap(); + client_actor.client.chain.get_block_by_height(genesis_height + 3).unwrap(); let num_shards = block.header().chunk_mask().len(); assert_eq!(block.header().chunk_mask(), vec![true; num_shards]); }; diff --git a/integration-tests/src/test_loop/tests/congestion_control.rs b/integration-tests/src/test_loop/tests/congestion_control.rs index 07c6e659221..ad2200e3649 100644 --- a/integration-tests/src/test_loop/tests/congestion_control.rs +++ b/integration-tests/src/test_loop/tests/congestion_control.rs @@ -1,23 +1,22 @@ use core::panic; -use assert_matches::assert_matches; use itertools::Itertools; use near_async::test_loop::data::{TestLoopData, TestLoopDataHandle}; use near_async::test_loop::TestLoopV2; use near_async::time::Duration; use near_chain_configs::test_genesis::TestGenesisBuilder; use near_client::client_actor::ClientActorInner; -use near_client::Client; use near_o11y::testonly::init_test_logger; -use near_primitives::hash::CryptoHash; use near_primitives::types::{AccountId, BlockHeight}; -use near_primitives::views::FinalExecutionStatus; use crate::test_loop::builder::TestLoopBuilder; use crate::test_loop::env::{TestData, TestLoopEnv}; -use crate::test_loop::utils::transactions::{call_contract, deploy_contract, get_node_data}; -use crate::test_loop::utils::ONE_NEAR; +use crate::test_loop::utils::transactions::{ + call_contract, check_txs, deploy_contract, make_accounts, +}; +use crate::test_loop::utils::{ONE_NEAR, TGAS}; +const NUM_ACCOUNTS: usize = 100; const NUM_PRODUCERS: usize = 2; const NUM_VALIDATORS: usize = 2; const NUM_RPC: usize = 1; @@ -33,7 +32,7 @@ fn test_congestion_control_simple() { // Test setup let contract_id: AccountId = "000".parse().unwrap(); - let mut accounts = 
(0..100).map(make_account).collect_vec(); + let mut accounts = make_accounts(NUM_ACCOUNTS); accounts.push(contract_id.clone()); let (env, rpc_id) = setup(&accounts); @@ -106,7 +105,8 @@ fn do_deploy_contract( contract_id: &AccountId, ) { tracing::info!(target: "test", ?rpc_id, ?contract_id, "Deploying contract."); - let tx = deploy_contract(test_loop, node_datas, rpc_id, contract_id); + let code = near_test_contracts::rs_contract().to_vec(); + let tx = deploy_contract(test_loop, node_datas, rpc_id, contract_id, code, 1); test_loop.run_for(Duration::seconds(5)); check_txs(&*test_loop, node_datas, rpc_id, &[tx]); } @@ -120,36 +120,27 @@ fn do_call_contract( accounts: &Vec, ) { tracing::info!(target: "test", ?rpc_id, ?contract_id, "Calling contract."); + let method_name = "burn_gas_raw".to_owned(); + let burn_gas: u64 = 250 * TGAS; + let args = burn_gas.to_le_bytes().to_vec(); let mut txs = vec![]; for sender_id in accounts { - let tx = call_contract(test_loop, node_datas, &sender_id, &contract_id); + let tx = call_contract( + test_loop, + node_datas, + rpc_id, + &sender_id, + &contract_id, + method_name.clone(), + args.clone(), + 2, + ); txs.push(tx); } test_loop.run_for(Duration::seconds(20)); check_txs(&*test_loop, node_datas, &rpc_id, &txs); } -/// Check the status of the transactions and assert that they are successful. -/// -/// Please note that it's important to use an rpc node that tracks all shards. -/// Otherwise, the transactions may not be found. -fn check_txs( - test_loop: &TestLoopV2, - node_datas: &Vec, - rpc: &AccountId, - txs: &[CryptoHash], -) { - let rpc = rpc_client(test_loop, node_datas, rpc); - - for &tx in txs { - let tx_outcome = rpc.chain.get_partial_transaction_result(&tx); - let status = tx_outcome.as_ref().map(|o| o.status.clone()); - let status = status.unwrap(); - tracing::info!(target: "test", ?tx, ?status, "transaction status"); - assert_matches!(status, FinalExecutionStatus::SuccessValue(_)); - } -} - /// The condition that can be used for the test loop to wait until the chain /// height is greater than the target height. fn height_condition( @@ -159,20 +150,3 @@ fn height_condition( ) -> bool { test_loop_data.get(&client_handle).client.chain.head().unwrap().height > target_height } - -/// Get the client for the provided rpd node account id. -fn rpc_client<'a>( - test_loop: &'a TestLoopV2, - node_datas: &'a Vec, - rpc_id: &AccountId, -) -> &'a Client { - let node_data = get_node_data(node_datas, rpc_id); - let client_actor_handle = node_data.client_sender.actor_handle(); - let client_actor = test_loop.data.get(&client_actor_handle); - &client_actor.client -} - -/// Make the account id for the provided index. 
-fn make_account(i: i32) -> AccountId {
-    format!("account{}", i).parse().unwrap()
-}
diff --git a/integration-tests/src/test_loop/tests/contract_distribution_cross_shard.rs b/integration-tests/src/test_loop/tests/contract_distribution_cross_shard.rs
new file mode 100644
index 00000000000..9f0288d8e4b
--- /dev/null
+++ b/integration-tests/src/test_loop/tests/contract_distribution_cross_shard.rs
@@ -0,0 +1,160 @@
+use itertools::Itertools;
+use near_async::test_loop::TestLoopV2;
+use near_async::time::Duration;
+use near_chain_configs::test_genesis::TestGenesisBuilder;
+use near_o11y::testonly::init_test_logger;
+use near_primitives::types::AccountId;
+
+use crate::test_loop::builder::TestLoopBuilder;
+use crate::test_loop::env::{TestData, TestLoopEnv};
+use crate::test_loop::utils::transactions::{
+    call_contract, check_txs, deploy_contract, make_accounts,
+};
+use crate::test_loop::utils::ONE_NEAR;
+
+const NUM_ACCOUNTS: usize = 9;
+const EPOCH_LENGTH: u64 = 10;
+const GENESIS_HEIGHT: u64 = 1000;
+
+const NUM_BLOCK_AND_CHUNK_PRODUCERS: usize = 4;
+const NUM_CHUNK_VALIDATORS_ONLY: usize = 4;
+const NUM_RPC: usize = 1;
+const NUM_VALIDATORS: usize = NUM_BLOCK_AND_CHUNK_PRODUCERS + NUM_CHUNK_VALIDATORS_ONLY;
+
+/// Tests a scenario where different contracts are deployed to a number of accounts and
+/// these contracts are called from a set of accounts.
+/// Test setup: 2 shards with 9 accounts, for 8 validators and 1 RPC node.
+/// Deploys a contract to one account in each shard.
+/// Then makes 2 accounts in each shard call these contracts.
+#[cfg_attr(not(feature = "test_features"), ignore)]
+#[test]
+fn test_contract_distribution_cross_shard() {
+    init_test_logger();
+    let accounts = make_accounts(NUM_ACCOUNTS);
+
+    let (env, rpc_id) = setup(&accounts);
+    let TestLoopEnv { mut test_loop, datas: node_datas, tempdir } = env;
+
+    let mut nonce = 2;
+
+    // Deploy a contract for each shard (account0 from the first one, and account4 from the second one).
+    // Then take two accounts from each shard (one with a contract deployed and one without) and
+    // make them call both the contracts, so we cover same-shard and cross-shard contract calls.
+    let contract_ids = [&accounts[0], &accounts[4]];
+    let sender_ids = [&accounts[0], &accounts[1], &accounts[4], &accounts[5]];
+
+    // First deploy and call the contracts as described above.
+    // Next, clear the compiled contract cache and repeat the same contract calls.
+    deploy_contracts(&mut test_loop, &node_datas, &rpc_id, &contract_ids, &mut nonce);
+
+    call_contracts(&mut test_loop, &node_datas, &rpc_id, &contract_ids, &sender_ids, &mut nonce);
+
+    #[cfg(feature = "test_features")]
+    clear_compiled_contract_caches(&mut test_loop, &node_datas);
+
+    call_contracts(&mut test_loop, &node_datas, &rpc_id, &contract_ids, &sender_ids, &mut nonce);
+
+    TestLoopEnv { test_loop, datas: node_datas, tempdir }
+        .shutdown_and_drain_remaining_events(Duration::seconds(20));
+}
+
+fn setup(accounts: &Vec<AccountId>) -> (TestLoopEnv, AccountId) {
+    let builder = TestLoopBuilder::new();
+
+    let initial_balance = 10000 * ONE_NEAR;
+    // All block_and_chunk_producers will be both block and chunk validators.
+    let block_and_chunk_producers =
+        (0..NUM_BLOCK_AND_CHUNK_PRODUCERS).map(|idx| accounts[idx].as_str()).collect_vec();
+    // These are the accounts that are only chunk validators, but not block/chunk producers.
+ let chunk_validators_only = (NUM_BLOCK_AND_CHUNK_PRODUCERS..NUM_VALIDATORS) + .map(|idx| accounts[idx].as_str()) + .collect_vec(); + + let clients = accounts.iter().take(NUM_VALIDATORS + NUM_RPC).cloned().collect_vec(); + let rpc_id = accounts[NUM_VALIDATORS].clone(); + + let mut genesis_builder = TestGenesisBuilder::new(); + genesis_builder + .genesis_time_from_clock(&builder.clock()) + .protocol_version_latest() + .genesis_height(GENESIS_HEIGHT) + .gas_prices_free() + .gas_limit_one_petagas() + .shard_layout_simple_v1(&["account4"]) + .transaction_validity_period(1000) + .epoch_length(EPOCH_LENGTH) + .validators_desired_roles(&block_and_chunk_producers, &chunk_validators_only) + .shuffle_shard_assignment_for_chunk_producers(true); + for account in accounts { + genesis_builder.add_user_account_simple(account.clone(), initial_balance); + } + let (genesis, epoch_config_store) = genesis_builder.build(); + + let env = + builder.genesis(genesis).epoch_config_store(epoch_config_store).clients(clients).build(); + (env, rpc_id) +} + +/// Deploys a contract for the given accounts (`contract_ids`) and waits until the transactions are executed. +/// Each account in `contract_ids` gets a fake contract with a different size (thus code-hashes are different) +fn deploy_contracts( + test_loop: &mut TestLoopV2, + node_datas: &Vec, + rpc_id: &AccountId, + contract_ids: &[&AccountId], + nonce: &mut u64, +) { + let mut txs = vec![]; + for (i, contract_id) in contract_ids.into_iter().enumerate() { + tracing::info!(target: "test", ?rpc_id, ?contract_id, "Deploying contract."); + let code = near_test_contracts::sized_contract((i + 1) * 100).to_vec(); + let tx = deploy_contract(test_loop, node_datas, rpc_id, contract_id, code, *nonce); + txs.push(tx); + *nonce += 1; + } + test_loop.run_for(Duration::seconds(2)); + check_txs(&*test_loop, node_datas, rpc_id, &txs); +} + +/// Makes calls to the contracts from sender_ids to the contract_ids (at which contracts are deployed). +fn call_contracts( + test_loop: &mut TestLoopV2, + node_datas: &Vec, + rpc_id: &AccountId, + contract_ids: &[&AccountId], + sender_ids: &[&AccountId], + nonce: &mut u64, +) { + let method_name = "main".to_owned(); + let mut txs = vec![]; + for sender_id in sender_ids.into_iter() { + for contract_id in contract_ids.into_iter() { + tracing::info!(target: "test", ?rpc_id, ?sender_id, ?contract_id, "Calling contract."); + let tx = call_contract( + test_loop, + node_datas, + rpc_id, + sender_id, + contract_id, + method_name.clone(), + vec![], + *nonce, + ); + txs.push(tx); + *nonce += 1; + } + } + test_loop.run_for(Duration::seconds(2)); + check_txs(&*test_loop, node_datas, &rpc_id, &txs); +} + +/// Clears the compiled contract caches for all the clients. 
+#[cfg(feature = "test_features")] +pub fn clear_compiled_contract_caches(test_loop: &mut TestLoopV2, node_datas: &Vec) { + for i in 0..node_datas.len() { + let client_handle = node_datas[i].client_sender.actor_handle(); + let contract_cache_handle = + test_loop.data.get(&client_handle).client.runtime_adapter.compiled_contract_cache(); + contract_cache_handle.test_only_clear().unwrap(); + } +} diff --git a/integration-tests/src/test_loop/tests/contract_distribution_simple.rs b/integration-tests/src/test_loop/tests/contract_distribution_simple.rs new file mode 100644 index 00000000000..ff07176ed1a --- /dev/null +++ b/integration-tests/src/test_loop/tests/contract_distribution_simple.rs @@ -0,0 +1,131 @@ +use itertools::Itertools; +use near_async::test_loop::TestLoopV2; +use near_async::time::Duration; +use near_chain_configs::test_genesis::TestGenesisBuilder; +use near_o11y::testonly::init_test_logger; +use near_primitives::types::AccountId; + +use crate::test_loop::builder::TestLoopBuilder; +use crate::test_loop::env::{TestData, TestLoopEnv}; +use crate::test_loop::utils::transactions::{ + call_contract, check_txs, deploy_contract, make_account, make_accounts, +}; +use crate::test_loop::utils::ONE_NEAR; + +const NUM_ACCOUNTS: usize = 2; +const EPOCH_LENGTH: u64 = 10; +const GENESIS_HEIGHT: u64 = 1000; + +const NUM_BLOCK_AND_CHUNK_PRODUCERS: usize = 1; +const NUM_CHUNK_VALIDATORS_ONLY: usize = 1; +const NUM_VALIDATORS: usize = NUM_BLOCK_AND_CHUNK_PRODUCERS + NUM_CHUNK_VALIDATORS_ONLY; + +fn test_contract_distribution(clear_cache: bool) { + init_test_logger(); + let accounts = make_accounts(NUM_ACCOUNTS); + + let TestLoopEnv { mut test_loop, datas: node_datas, tempdir } = setup(&accounts); + + do_deploy_contract(&mut test_loop, &node_datas, 1); + + if clear_cache { + #[cfg(feature = "test_features")] + clear_compiled_contract_caches(&mut test_loop, &node_datas); + } + + do_call_contract(&mut test_loop, &node_datas, 2); + + TestLoopEnv { test_loop, datas: node_datas, tempdir } + .shutdown_and_drain_remaining_events(Duration::seconds(20)); +} + +/// Tests a simple scenario where we deploy and call a contract. +#[test] +fn test_contract_distribution_deploy_and_call() { + test_contract_distribution(false); +} + +/// Tests a simple scenario where we deploy a contract, and then +/// we clear the compiled contract cache and call the deployed contract call. +#[cfg_attr(not(feature = "test_features"), ignore)] +#[test] +fn test_contract_distribution_call_after_clear() { + test_contract_distribution(true); +} + +fn setup(accounts: &Vec) -> TestLoopEnv { + let builder = TestLoopBuilder::new(); + + let initial_balance = 10000 * ONE_NEAR; + // All block_and_chunk_producers will be both block and chunk validators. + let block_and_chunk_producers = + (0..NUM_BLOCK_AND_CHUNK_PRODUCERS).map(|idx| accounts[idx].as_str()).collect_vec(); + // These are the accounts that are only chunk validators, but not block/chunk producers. 
+ let chunk_validators_only = (NUM_BLOCK_AND_CHUNK_PRODUCERS..NUM_VALIDATORS) + .map(|idx| accounts[idx].as_str()) + .collect_vec(); + + let clients = accounts.iter().take(NUM_VALIDATORS).cloned().collect_vec(); + + let mut genesis_builder = TestGenesisBuilder::new(); + genesis_builder + .genesis_time_from_clock(&builder.clock()) + .protocol_version_latest() + .genesis_height(GENESIS_HEIGHT) + .gas_prices_free() + .gas_limit_one_petagas() + .shard_layout_single() + .transaction_validity_period(1000) + .epoch_length(EPOCH_LENGTH) + .validators_desired_roles(&block_and_chunk_producers, &chunk_validators_only) + .shuffle_shard_assignment_for_chunk_producers(true); + for account in accounts { + genesis_builder.add_user_account_simple(account.clone(), initial_balance); + } + let (genesis, epoch_config_store) = genesis_builder.build(); + + let env = + builder.genesis(genesis).epoch_config_store(epoch_config_store).clients(clients).build(); + env +} + +fn do_deploy_contract(test_loop: &mut TestLoopV2, node_datas: &Vec, nonce: u64) { + // Make all of rpc, sender, and contract id the same: + let account = make_account(0); + + tracing::info!(target: "test", "Deploying contract."); + let code = near_test_contracts::sized_contract(100).to_vec(); + let tx = deploy_contract(test_loop, &node_datas, &account, &account, code, nonce); + test_loop.run_for(Duration::seconds(2)); + check_txs(test_loop, node_datas, &account, &[tx]); +} + +fn do_call_contract(test_loop: &mut TestLoopV2, node_datas: &Vec, nonce: u64) { + // Make all of rpc, sender, and contract id the same: + let account = make_account(0); + + tracing::info!(target: "test", "Calling contract."); + let tx = call_contract( + test_loop, + node_datas, + &account, + &account, + &account, + "main".to_owned(), + vec![], + nonce, + ); + test_loop.run_for(Duration::seconds(2)); + check_txs(test_loop, node_datas, &account, &[tx]); +} + +/// Clears the compiled contract caches for all the clients. +#[cfg(feature = "test_features")] +pub fn clear_compiled_contract_caches(test_loop: &mut TestLoopV2, node_datas: &Vec) { + for i in 0..node_datas.len() { + let client_handle = node_datas[i].client_sender.actor_handle(); + let contract_cache_handle = + test_loop.data.get(&client_handle).client.runtime_adapter.compiled_contract_cache(); + contract_cache_handle.test_only_clear().unwrap(); + } +} diff --git a/integration-tests/src/test_loop/tests/mod.rs b/integration-tests/src/test_loop/tests/mod.rs index a9ac6de8eee..0ab541c6007 100644 --- a/integration-tests/src/test_loop/tests/mod.rs +++ b/integration-tests/src/test_loop/tests/mod.rs @@ -2,6 +2,8 @@ pub mod bandwidth_scheduler_protocol_upgrade; mod chunk_validator_kickout; pub mod congestion_control; pub mod congestion_control_genesis_bootstrap; +pub mod contract_distribution_cross_shard; +pub mod contract_distribution_simple; pub mod epoch_sync; pub mod fix_min_stake_ratio; pub mod in_memory_tries; diff --git a/integration-tests/src/test_loop/tests/view_requests_to_archival_node.rs b/integration-tests/src/test_loop/tests/view_requests_to_archival_node.rs index 9a4667de0da..01691c67ef9 100644 --- a/integration-tests/src/test_loop/tests/view_requests_to_archival_node.rs +++ b/integration-tests/src/test_loop/tests/view_requests_to_archival_node.rs @@ -89,7 +89,7 @@ fn test_view_requests_to_archival_node() { // Run the chain until it garbage collects blocks from the first epoch. 
let client_handle = node_datas[ARCHIVAL_CLIENT].client_sender.actor_handle(); - let target_height: u64 = EPOCH_LENGTH * (GC_NUM_EPOCHS_TO_KEEP + 2) + 5; + let target_height: u64 = EPOCH_LENGTH * (GC_NUM_EPOCHS_TO_KEEP + 2) + 6; test_loop.run_until( |test_loop_data| { let chain = &test_loop_data.get(&client_handle).client.chain; @@ -180,7 +180,7 @@ impl<'a> ViewClientTester<'a> { block }; - let block_by_height = GetBlock(BlockReference::BlockId(BlockId::Height(5))); + let block_by_height = GetBlock(BlockReference::BlockId(BlockId::Height(6))); let block = get_and_check_block(block_by_height); let block_by_hash = @@ -207,7 +207,7 @@ impl<'a> ViewClientTester<'a> { /// Generates variations of the [`BlockHeadersRequest`] request and issues them to the view client of the archival node. fn check_get_block_headers(&mut self) { - let block = self.get_block_at_height(5); + let block = self.get_block_at_height(6); let headers_request = BlockHeadersRequest(vec![block.header.hash]); let headers_response = self.send(headers_request, ARCHIVAL_CLIENT).unwrap(); @@ -216,7 +216,7 @@ impl<'a> ViewClientTester<'a> { /// Generates variations of the [`GetChunk`] request and issues them to the view client of the archival node. fn check_get_chunk(&mut self) { - let block = self.get_block_at_height(5); + let block = self.get_block_at_height(6); let mut get_and_check_chunk = |request: GetChunk| { let chunk = self.send(request, ARCHIVAL_CLIENT).unwrap(); @@ -224,7 +224,7 @@ impl<'a> ViewClientTester<'a> { chunk }; - let chunk_by_height = GetChunk::Height(5, ShardId::new(0)); + let chunk_by_height = GetChunk::Height(6, ShardId::new(0)); get_and_check_chunk(chunk_by_height); let chunk_by_block_hash = GetChunk::BlockHash(block.header.hash, ShardId::new(0)); @@ -236,14 +236,14 @@ impl<'a> ViewClientTester<'a> { /// Generates variations of the [`GetShardChunk`] request and issues them to the view client of the archival node. fn check_get_shard_chunk(&mut self) { - let block = self.get_block_at_height(5); + let block = self.get_block_at_height(6); let mut get_and_check_shard_chunk = |request: GetShardChunk| { let shard_chunk = self.send(request, ARCHIVAL_CLIENT).unwrap(); assert_eq!(shard_chunk.take_header().gas_limit(), 1_000_000_000_000_000); }; - let chunk_by_height = GetShardChunk::Height(5, ShardId::new(0)); + let chunk_by_height = GetShardChunk::Height(6, ShardId::new(0)); get_and_check_shard_chunk(chunk_by_height); let chunk_by_block_hash = GetShardChunk::BlockHash(block.header.hash, ShardId::new(0)); @@ -255,7 +255,7 @@ impl<'a> ViewClientTester<'a> { /// Generates variations of the [`GetProtocolConfig`] request and issues them to the view client of the archival node. 
fn check_get_protocol_config(&mut self) { - let block = self.get_block_at_height(5); + let block = self.get_block_at_height(6); let mut get_and_check_protocol_config = |request: GetProtocolConfig| { let config = self.send(request, ARCHIVAL_CLIENT).unwrap(); @@ -264,7 +264,7 @@ impl<'a> ViewClientTester<'a> { }; let protocol_config_by_height = - GetProtocolConfig(BlockReference::BlockId(BlockId::Height(5))); + GetProtocolConfig(BlockReference::BlockId(BlockId::Height(6))); get_and_check_protocol_config(protocol_config_by_height); let protocol_config_by_hash = @@ -334,7 +334,7 @@ impl<'a> ViewClientTester<'a> { }; let ordered_validators_by_height = - GetValidatorOrdered { block_id: Some(BlockId::Height(5)) }; + GetValidatorOrdered { block_id: Some(BlockId::Height(6)) }; get_and_check_ordered_validators(ordered_validators_by_height); let ordered_validators_by_block_hash = @@ -347,7 +347,7 @@ impl<'a> ViewClientTester<'a> { /// Generates variations of the [`GetBlockStateChangesInBlock`] request and issues them to the view client of the archival node. fn check_get_state_changes_in_block(&mut self) { - let block = self.get_block_at_height(5); + let block = self.get_block_at_height(6); let state_changes_in_block = GetStateChangesInBlock { block_hash: block.header.hash }; let state_changes = self.send(state_changes_in_block, ARCHIVAL_CLIENT).unwrap(); @@ -372,7 +372,7 @@ impl<'a> ViewClientTester<'a> { /// Generates variations of the [`GetReceipt`] request and issues them to the view client of the archival node. fn check_get_execution_outcomes(&mut self) { - let block = self.get_block_at_height(5); + let block = self.get_block_at_height(6); let request = GetExecutionOutcomesForBlock { block_hash: block.header.hash }; let outcomes = self.send(request, ARCHIVAL_CLIENT).unwrap(); @@ -405,7 +405,7 @@ impl<'a> ViewClientTester<'a> { /// Generates variations of the [`GetStateChanges`] request and issues them to the view client of the archival node. 
fn check_get_state_changes(&mut self) { - let block = self.get_block_at_height(5); + let block = self.get_block_at_height(6); let accounts = (0..NUM_ACCOUNTS) .map(|i| format!("account{}", i).parse().unwrap()) diff --git a/integration-tests/src/test_loop/utils/transactions.rs b/integration-tests/src/test_loop/utils/transactions.rs index ff95ec6db8a..b890a5f0799 100644 --- a/integration-tests/src/test_loop/utils/transactions.rs +++ b/integration-tests/src/test_loop/utils/transactions.rs @@ -5,8 +5,7 @@ use near_async::messaging::{CanSend, SendAsync}; use near_async::test_loop::TestLoopV2; use near_async::time::Duration; use near_client::test_utils::test_loop::ClientQueries; -use near_client::Client; -use near_client::ProcessTxResponse; +use near_client::{Client, ProcessTxResponse}; use near_network::client::ProcessTxRequest; use near_primitives::errors::InvalidTxError; use near_primitives::hash::CryptoHash; @@ -136,16 +135,13 @@ pub fn deploy_contract( node_datas: &[TestData], rpc_id: &AccountId, contract_id: &AccountId, + code: Vec, + nonce: u64, ) -> CryptoHash { let block_hash = get_shared_block_hash(node_datas, test_loop); - // TOOD make nonce an argument - let nonce = 1; let signer = create_user_test_signer(&contract_id).into(); - let code = near_test_contracts::rs_contract(); - let code = code.to_vec(); - let tx = SignedTransaction::deploy_contract(nonce, contract_id, code, &signer, block_hash); let tx_hash = tx.get_hash(); let process_tx_request = @@ -167,24 +163,18 @@ pub fn deploy_contract( pub fn call_contract( test_loop: &mut TestLoopV2, node_datas: &[TestData], + rpc_id: &AccountId, sender_id: &AccountId, contract_id: &AccountId, + method_name: String, + args: Vec, + nonce: u64, ) -> CryptoHash { let block_hash = get_shared_block_hash(node_datas, test_loop); - - // TOOD make nonce an argument - let nonce = 2; let signer = create_user_test_signer(sender_id); - - let burn_gas = 250 * TGAS; let attach_gas = 300 * TGAS; - let deposit = 0; - // TODO make method and args arguments - let method_name = "burn_gas_raw".to_owned(); - let args = burn_gas.to_le_bytes().to_vec(); - let tx = SignedTransaction::call( nonce, sender_id.clone(), @@ -201,13 +191,50 @@ pub fn call_contract( let process_tx_request = ProcessTxRequest { transaction: tx, is_forwarded: false, check_only: false }; - let future = node_datas[0].client_sender.send_async(process_tx_request); + + let rpc_node_data = get_node_data(node_datas, rpc_id); + let rpc_node_data_sender = &rpc_node_data.client_sender; + + let future = rpc_node_data_sender.send_async(process_tx_request); drop(future); tracing::debug!(target: "test", ?sender_id, ?contract_id, ?tx_hash, "called contract"); tx_hash } +/// Check the status of the transactions and assert that they are successful. +/// +/// Please note that it's important to use an rpc node that tracks all shards. +/// Otherwise, the transactions may not be found. +pub fn check_txs( + test_loop: &TestLoopV2, + node_datas: &Vec, + rpc_id: &AccountId, + txs: &[CryptoHash], +) { + let rpc = rpc_client(test_loop, node_datas, rpc_id); + + for &tx in txs { + let tx_outcome = rpc.chain.get_partial_transaction_result(&tx); + let status = tx_outcome.as_ref().map(|o| o.status.clone()); + let status = status.unwrap(); + tracing::info!(target: "test", ?tx, ?status, "transaction status"); + assert_matches!(status, FinalExecutionStatus::SuccessValue(_)); + } +} + +/// Get the client for the provided rpd node account id. 
+fn rpc_client<'a>( + test_loop: &'a TestLoopV2, + node_datas: &'a Vec, + rpc_id: &AccountId, +) -> &'a Client { + let node_data = get_node_data(node_datas, rpc_id); + let client_actor_handle = node_data.client_sender.actor_handle(); + let client_actor = test_loop.data.get(&client_actor_handle); + &client_actor.client +} + /// Finds a block that all clients have on their chain and return its hash. pub fn get_shared_block_hash(node_datas: &[TestData], test_loop: &TestLoopV2) -> CryptoHash { let clients = node_datas @@ -324,3 +351,14 @@ pub fn execute_tx( .get_final_transaction_result(&tx_hash) .unwrap()) } + +/// Creates account ids for the given number of accounts. +pub fn make_accounts(num_accounts: usize) -> Vec { + let accounts = (0..num_accounts).map(|i| make_account(i)).collect_vec(); + accounts +} + +/// Creates an account id to be contained at the given index. +pub fn make_account(index: usize) -> AccountId { + format!("account{}", index).parse().unwrap() +} diff --git a/runtime/near-vm-runner/src/cache.rs b/runtime/near-vm-runner/src/cache.rs index 26023e451ae..2003ce35edf 100644 --- a/runtime/near-vm-runner/src/cache.rs +++ b/runtime/near-vm-runner/src/cache.rs @@ -99,6 +99,17 @@ pub trait ContractRuntimeCache: Send + Sync { fn has(&self, key: &CryptoHash) -> std::io::Result { self.get(key).map(|entry| entry.is_some()) } + /// TESTING ONLY: Clears the cache including in-memory and persistent data (if any). + /// + /// This should be used only for testing, since the implementations may not provide + /// a consistent view when the cache is both cleared and accessed as the same time. + /// + /// Default implementation panics; the implementations for which this method is called + /// should provide a proper implementation. + #[cfg(feature = "test_features")] + fn test_only_clear(&self) -> std::io::Result<()> { + unimplemented!("test_only_clear is not implemented for this cache"); + } } impl fmt::Debug for dyn ContractRuntimeCache { @@ -378,6 +389,36 @@ impl ContractRuntimeCache for FilesystemContractRuntimeCache { } }) } + + /// Clears the in-memory cache and files in the cache directory. + /// + /// The cache must be created using `test` method, otherwise this method will panic. + #[cfg(feature = "test_features")] + fn test_only_clear(&self) -> std::io::Result<()> { + let Some(temp_dir) = &self.state.test_temp_dir else { + panic!("must be called for testing only"); + }; + self.memory_cache().clear(); + let dir_path: std::path::PathBuf = + [temp_dir.path(), "data".as_ref(), "contracts".as_ref()].into_iter().collect(); + for entry in std::fs::read_dir(dir_path).unwrap() { + if let Ok(entry) = entry { + let path = entry.path(); + if path.is_dir() { + debug_assert!(false, "Contract code cache directory should only contain files but found directory: {}", path.display()); + } else { + if let Err(err) = std::fs::remove_file(&path) { + tracing::error!( + "Failed to remove contract cache file {}: {}", + path.display(), + err + ); + } + } + } + } + Ok(()) + } } type AnyCacheValue = dyn Any + Send; @@ -400,6 +441,12 @@ impl AnyCache { } } + pub fn clear(&self) { + if let Some(cache) = &self.cache { + cache.lock().unwrap().clear(); + } + } + /// Lookup the key in the cache, generating a new element if absent. 
     ///
     /// This function accepts two callbacks as an argument: first is a fallible generation
@@ -589,4 +636,44 @@ mod tests {
         );
         assert!(matches!(result, Err("mikan")));
     }
+
+    #[cfg(feature = "test_features")]
+    #[test]
+    fn test_clear_compiled_contract_cache() {
+        let cache = FilesystemContractRuntimeCache::test().unwrap();
+
+        let contract1 = ContractCode::new(near_test_contracts::sized_contract(100).to_vec(), None);
+        let contract2 = ContractCode::new(near_test_contracts::sized_contract(200).to_vec(), None);
+
+        let compiled_contract1 = CompiledContractInfo {
+            wasm_bytes: 100,
+            compiled: CompiledContract::Code(contract1.code().to_vec()),
+        };
+
+        let compiled_contract2 = CompiledContractInfo {
+            wasm_bytes: 200,
+            compiled: CompiledContract::Code(contract2.code().to_vec()),
+        };
+
+        let insert_and_assert_keys_exist = || {
+            cache.put(contract1.hash(), compiled_contract1.clone()).unwrap();
+            cache.put(contract2.hash(), compiled_contract2.clone()).unwrap();
+
+            assert_eq!(cache.get(contract1.hash()).unwrap().unwrap(), compiled_contract1);
+            assert_eq!(cache.get(contract2.hash()).unwrap().unwrap(), compiled_contract2);
+        };
+
+        let assert_keys_absent = || {
+            assert_eq!(cache.has(contract1.hash()).unwrap(), false);
+            assert_eq!(cache.has(contract2.hash()).unwrap(), false);
+        };
+
+        // Insert the keys, then clear the cache, and assert that the keys no longer exist after the clear.
+        insert_and_assert_keys_exist();
+        cache.test_only_clear().unwrap();
+        assert_keys_absent();
+
+        // Insert the keys again and assert that the cache can be updated after the clear.
+        insert_and_assert_keys_exist();
+    }
 }

From 0e971006ed22c98182b7822cc598b7471451d64e Mon Sep 17 00:00:00 2001
From: Tayfun Elmas
Date: Wed, 30 Oct 2024 15:33:06 +0300
Subject: [PATCH 07/13] feat(contract-distribution): Update Python serializer
 to support new contract distribution messages (#12342)

Messages are defined in this file:
https://github.com/near/nearcore/blob/master/core/primitives/src/stateless_validation/contract_distribution.rs

This PR updates the Python schema so that we can parse the new messages in
the relevant Nayduck tests.
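As a reading aid, here is a rough Rust-side mirror of the new message shapes,
reconstructed purely from the field lists added to `block.py` below; the
authoritative definitions live in `contract_distribution.rs`, so the derives
and field types here are illustrative only:

```rust
use borsh::{BorshDeserialize, BorshSerialize};

// Mirrors the `ChunkProductionKey` schema entry; field order matters for Borsh.
#[derive(BorshSerialize, BorshDeserialize)]
pub struct ChunkProductionKey {
    pub shard_id: u64,
    pub epoch_id: [u8; 32],
    pub height_created: u64,
}

// Mirrors `SignatureDifferentiator`: a single-field struct wrapping a string.
#[derive(BorshSerialize, BorshDeserialize)]
pub struct SignatureDifferentiator(pub String);

// Mirrors `ChunkContractAccessesInner`; `contracts` corresponds to the
// `[[32]]` schema entry, i.e. a vector of 32-byte code hashes.
#[derive(BorshSerialize, BorshDeserialize)]
pub struct ChunkContractAccessesInner {
    pub next_chunk: ChunkProductionKey,
    pub contracts: Vec<[u8; 32]>,
    pub signature_differentiator: SignatureDifferentiator,
}
```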
--- pytest/lib/messages/block.py | 178 ++++++++++++++++++++++++++++++++- pytest/lib/messages/network.py | 7 +- 2 files changed, 182 insertions(+), 3 deletions(-) diff --git a/pytest/lib/messages/block.py b/pytest/lib/messages/block.py index dd15fa27269..b04516cec3e 100644 --- a/pytest/lib/messages/block.py +++ b/pytest/lib/messages/block.py @@ -364,6 +364,58 @@ class SignatureDifferentiator: pass +class ChunkContractAccesses: + pass + + +class ChunkContractAccessesV1: + pass + + +class ChunkContractAccessesInner: + pass + + +class ChunkContractDeployments: + pass + + +class ChunkContractDeploymentsV1: + pass + + +class ChunkContractDeploymentsInner: + pass + + +class ContractCodeRequest: + pass + + +class ContractCodeRequestV1: + pass + + +class ContractCodeRequestInner: + pass + + +class ContractCodeResponse: + pass + + +class ContractCodeResponseV1: + pass + + +class ContractCodeResponseInner: + pass + + +class ChunkProductionKey: + pass + + block_schema = [ [ Block, { @@ -1132,5 +1184,129 @@ class SignatureDifferentiator: [SignatureDifferentiator, { 'kind': 'struct', 'fields': [['0', 'string']] - }] + }], + [ + ChunkContractAccesses, { + 'kind': 'enum', + 'field': 'enum', + 'values': [['V1', ChunkContractAccessesV1],] + } + ], + [ + ChunkContractAccessesV1, { + 'kind': + 'struct', + 'fields': [ + ['inner', ChunkContractAccessesInner], + ['signature', Signature], + ] + } + ], + [ + ChunkContractAccessesInner, { + 'kind': + 'struct', + 'fields': [ + ['next_chunk', ChunkProductionKey], + ['contracts', [[32]]], + ['signature_differentiator', SignatureDifferentiator], + ] + } + ], + [ + ChunkContractDeployments, { + 'kind': 'enum', + 'field': 'enum', + 'values': [['V1', ChunkContractDeploymentsV1],] + } + ], + [ + ChunkContractDeploymentsV1, { + 'kind': + 'struct', + 'fields': [ + ['inner', ChunkContractDeploymentsInner], + ['signature', Signature], + ] + } + ], + [ + ChunkContractDeploymentsInner, { + 'kind': + 'struct', + 'fields': [ + ['next_chunk', ChunkProductionKey], + ['contracts', [[32]]], + ['signature_differentiator', SignatureDifferentiator], + ] + } + ], + [ + ContractCodeRequest, { + 'kind': 'enum', + 'field': 'enum', + 'values': [['V1', ContractCodeRequestV1],] + } + ], + [ + ContractCodeRequestV1, { + 'kind': + 'struct', + 'fields': [ + ['inner', ContractCodeRequestInner], + ['signature', Signature], + ] + } + ], + [ + ContractCodeRequestInner, { + 'kind': + 'struct', + 'fields': [ + ['requester', 'string'], + ['next_chunk', ChunkProductionKey], + ['contracts', [[32]]], + ['signature_differentiator', SignatureDifferentiator], + ] + } + ], + [ + ContractCodeResponse, { + 'kind': 'enum', + 'field': 'enum', + 'values': [['V1', ContractCodeResponseV1],] + } + ], + [ + ContractCodeResponseV1, { + 'kind': + 'struct', + 'fields': [ + ['inner', ContractCodeResponseInner], + ['signature', Signature], + ] + } + ], + [ + ContractCodeResponseInner, { + 'kind': + 'struct', + 'fields': [ + ['next_chunk', ChunkProductionKey], + ['compressed_contracts', ['u8']], + ['signature_differentiator', SignatureDifferentiator], + ] + } + ], + [ + ChunkProductionKey, { + 'kind': + 'struct', + 'fields': [ + ['shard_id', 'u64'], + ['epoch_id', [32]], + ['height_created', 'u64'], + ] + } + ] ] diff --git a/pytest/lib/messages/network.py b/pytest/lib/messages/network.py index 3a009ecca53..5d17333d5e2 100644 --- a/pytest/lib/messages/network.py +++ b/pytest/lib/messages/network.py @@ -1,6 +1,6 @@ from messages.crypto import Signature, PublicKey, MerklePath, ShardProof from messages.tx import 
SignedTransaction, Receipt -from messages.block import Block, Approval, PartialEncodedChunk, PartialEncodedChunkRequestMsg, PartialEncodedChunkResponseMsg, PartialEncodedChunkForwardMsg, BlockHeader, ShardChunk, ShardChunkHeader, ShardChunkHeaderV1, ChunkEndorsement, ChunkEndorsementV1, ChunkStateWitnessAck, PartialEncodedStateWitness +from messages.block import Block, Approval, PartialEncodedChunk, PartialEncodedChunkRequestMsg, PartialEncodedChunkResponseMsg, PartialEncodedChunkForwardMsg, BlockHeader, ShardChunk, ShardChunkHeader, ShardChunkHeaderV1, ChunkEndorsement, ChunkEndorsementV1, ChunkStateWitnessAck, PartialEncodedStateWitness, ChunkContractAccesses, ChunkContractDeployments, ContractCodeRequest, ContractCodeResponse from messages.shard import StateRootNode @@ -397,7 +397,10 @@ class AdvertisedPeerDistance: PartialEncodedStateWitness ], ['VersionedChunkEndorsement', ChunkEndorsement], - # TODO(11099): Update the schema with the new messages for contract distribution. + ['ChunkContractAccesses', ChunkContractAccesses], + ['ChunkContractDeployments', ChunkContractDeployments], + ['ContractCodeRequest', ContractCodeRequest], + ['ContractCodeResponse', ContractCodeResponse], ] } ], From 4c23860efd015b395f8e9a217d024999a859465f Mon Sep 17 00:00:00 2001 From: Anton Puhach Date: Wed, 30 Oct 2024 13:56:23 +0100 Subject: [PATCH 08/13] refactor: make witness reed solomon parts reusable (#12341) This PR makes code related to witness parts tracking generic so it can be reused for deployed contracts distribution. We need to merge it ASAP, tests will be added in a separate PR. --- .../partial_witness/encoding.rs | 99 +-------- .../partial_witness/partial_witness_actor.rs | 9 +- .../partial_witness_tracker.rs | 143 +++++-------- core/primitives/src/reed_solomon.rs | 189 +++++++++++++++++- .../src/stateless_validation/state_witness.rs | 13 ++ 5 files changed, 266 insertions(+), 187 deletions(-) diff --git a/chain/client/src/stateless_validation/partial_witness/encoding.rs b/chain/client/src/stateless_validation/partial_witness/encoding.rs index 4e23453e419..d2a584800a2 100644 --- a/chain/client/src/stateless_validation/partial_witness/encoding.rs +++ b/chain/client/src/stateless_validation/partial_witness/encoding.rs @@ -1,100 +1,13 @@ -use std::collections::HashMap; -use std::sync::Arc; - -use near_primitives::reed_solomon::{ - reed_solomon_decode, reed_solomon_encode, reed_solomon_part_length, -}; -use near_primitives::stateless_validation::state_witness::EncodedChunkStateWitness; -use near_primitives::utils::compression::CompressedData; -use reed_solomon_erasure::galois_8::ReedSolomon; +use near_primitives::reed_solomon::{reed_solomon_num_data_parts, reed_solomon_part_length}; /// Ratio of the number of data parts to total parts in the Reed Solomon encoding. /// The tradeoff here is having a higher ratio is better for handling missing parts and network errors /// but increases the size of the encoded state witness and the total network bandwidth requirements. -const RATIO_DATA_PARTS: f32 = 0.6; - -/// Type alias around what ReedSolomon represents data part as. -/// This should help with making the code a bit more understandable. -pub type WitnessPart = Option>; - -/// Reed Solomon encoder wrapper for encoding and decoding state witness parts. -pub struct WitnessEncoder { - /// None corresponds to the case when we are the only validator for the chunk - /// since ReedSolomon does not support having exactly 1 total part count and - /// no parity parts. 
- rs: Option, -} - -impl WitnessEncoder { - fn new(total_parts: usize) -> WitnessEncoder { - let rs = if total_parts > 1 { - let data_parts = num_witness_data_parts(total_parts); - Some(ReedSolomon::new(data_parts, total_parts - data_parts).unwrap()) - } else { - None - }; - Self { rs } - } - - pub fn total_parts(&self) -> usize { - match self.rs { - Some(ref rs) => rs.total_shard_count(), - None => 1, - } - } - - pub fn data_parts(&self) -> usize { - match self.rs { - Some(ref rs) => rs.data_shard_count(), - None => 1, - } - } - - pub fn encode(&self, witness: &EncodedChunkStateWitness) -> (Vec, usize) { - match self.rs { - Some(ref rs) => reed_solomon_encode(rs, witness), - None => { - (vec![Some(witness.as_slice().to_vec().into_boxed_slice())], witness.size_bytes()) - } - } - } - - pub fn decode( - &self, - parts: &mut [WitnessPart], - encoded_length: usize, - ) -> Result { - match self.rs { - Some(ref rs) => reed_solomon_decode(rs, parts, encoded_length), - None => { - Ok(EncodedChunkStateWitness::from_boxed_slice(parts[0].as_ref().unwrap().clone())) - } - } - } -} - -/// We keep one encoder for each length of chunk_validators to avoid re-creating the encoder. -pub struct WitnessEncoderCache { - instances: HashMap>, -} - -impl WitnessEncoderCache { - pub fn new() -> Self { - Self { instances: HashMap::new() } - } - - pub fn entry(&mut self, total_parts: usize) -> Arc { - self.instances - .entry(total_parts) - .or_insert_with(|| Arc::new(WitnessEncoder::new(total_parts))) - .clone() - } -} +pub const WITNESS_RATIO_DATA_PARTS: f64 = 0.6; pub fn witness_part_length(encoded_witness_size: usize, total_parts: usize) -> usize { - reed_solomon_part_length(encoded_witness_size, num_witness_data_parts(total_parts)) -} - -fn num_witness_data_parts(total_parts: usize) -> usize { - std::cmp::max((total_parts as f32 * RATIO_DATA_PARTS) as usize, 1) + reed_solomon_part_length( + encoded_witness_size, + reed_solomon_num_data_parts(total_parts, WITNESS_RATIO_DATA_PARTS), + ) } diff --git a/chain/client/src/stateless_validation/partial_witness/partial_witness_actor.rs b/chain/client/src/stateless_validation/partial_witness/partial_witness_actor.rs index 98ce8fa047d..a7233bcddda 100644 --- a/chain/client/src/stateless_validation/partial_witness/partial_witness_actor.rs +++ b/chain/client/src/stateless_validation/partial_witness/partial_witness_actor.rs @@ -17,6 +17,7 @@ use near_network::state_witness::{ use near_network::types::{NetworkRequests, PeerManagerAdapter, PeerManagerMessageRequest}; use near_parameters::RuntimeConfig; use near_performance_metrics_macros::perf; +use near_primitives::reed_solomon::ReedSolomonEncoderCache; use near_primitives::sharding::ShardChunkHeader; use near_primitives::stateless_validation::contract_distribution::{ ChunkContractAccesses, ChunkContractDeployments, CodeBytes, CodeHash, ContractCodeRequest, @@ -40,7 +41,7 @@ use crate::stateless_validation::validate::{ validate_chunk_contract_accesses, validate_partial_encoded_state_witness, }; -use super::encoding::WitnessEncoderCache; +use super::encoding::WITNESS_RATIO_DATA_PARTS; use super::partial_witness_tracker::PartialEncodedStateWitnessTracker; use near_primitives::utils::compression::CompressedData; @@ -59,7 +60,7 @@ pub struct PartialWitnessActor { state_witness_tracker: ChunkStateWitnessTracker, /// Reed Solomon encoder for encoding state witness parts. /// We keep one wrapper for each length of chunk_validators to avoid re-creating the encoder. 
- encoders: WitnessEncoderCache, + witness_encoders: ReedSolomonEncoderCache, } impl Actor for PartialWitnessActor {} @@ -157,8 +158,8 @@ impl PartialWitnessActor { epoch_manager, partial_witness_tracker, state_witness_tracker: ChunkStateWitnessTracker::new(clock), - encoders: WitnessEncoderCache::new(), runtime, + witness_encoders: ReedSolomonEncoderCache::new(WITNESS_RATIO_DATA_PARTS), } } @@ -207,7 +208,7 @@ impl PartialWitnessActor { ); // Break the state witness into parts using Reed Solomon encoding. - let encoder = self.encoders.entry(chunk_validators.len()); + let encoder = self.witness_encoders.entry(chunk_validators.len()); let (parts, encoded_length) = encoder.encode(&witness_bytes); Ok(chunk_validators diff --git a/chain/client/src/stateless_validation/partial_witness/partial_witness_tracker.rs b/chain/client/src/stateless_validation/partial_witness/partial_witness_tracker.rs index 28588de9eec..4beb1b1ff3d 100644 --- a/chain/client/src/stateless_validation/partial_witness/partial_witness_tracker.rs +++ b/chain/client/src/stateless_validation/partial_witness/partial_witness_tracker.rs @@ -10,6 +10,9 @@ use near_chain::Error; use near_epoch_manager::EpochManagerAdapter; use near_primitives::challenge::PartialState; use near_primitives::hash::CryptoHash; +use near_primitives::reed_solomon::{ + InsertPartResult, ReedSolomonEncoder, ReedSolomonEncoderCache, ReedSolomonPartsTracker, +}; use near_primitives::stateless_validation::contract_distribution::{CodeBytes, CodeHash}; use near_primitives::stateless_validation::partial_witness::PartialEncodedStateWitness; use near_primitives::stateless_validation::state_witness::{ @@ -22,9 +25,10 @@ use time::ext::InstantExt as _; use crate::client_actor::ClientSenderForPartialWitness; use crate::metrics; -use super::encoding::{WitnessEncoder, WitnessEncoderCache, WitnessPart}; use near_primitives::utils::compression::CompressedData; +use super::encoding::WITNESS_RATIO_DATA_PARTS; + /// Max number of chunks to keep in the witness tracker cache. We reach here only after validation /// of the partial_witness so the LRU cache size need not be too large. /// This effectively limits memory usage to the size of the cache multiplied by @@ -58,84 +62,15 @@ impl AccessedContractsState { } } -struct WaitingPartsState { - parts: Vec, - encoded_length: usize, - data_parts_present: usize, - encoder: Arc, - total_parts_size: usize, -} - enum WitnessPartsState { /// Haven't received any parts yet. Empty, /// Received at least one part, but not enough to decode the witness. - WaitingParts(WaitingPartsState), + WaitingParts(ReedSolomonPartsTracker), /// Received enough parts and tried decoding the witness. Decoded { decode_result: DecodePartialWitnessResult, decoded_at: Instant }, } -impl WaitingPartsState { - fn new(encoder: Arc, encoded_length: usize) -> Self { - Self { - data_parts_present: 0, - parts: vec![None; encoder.total_parts()], - total_parts_size: 0, - encoded_length, - encoder, - } - } - - fn data_parts_required(&self) -> usize { - self.encoder.data_parts() - } - - fn has_enough_parts(&self) -> bool { - self.data_parts_present >= self.data_parts_required() - } - - fn insert_part( - &mut self, - partial_witness: PartialEncodedStateWitness, - ) -> Option { - let ChunkProductionKey { shard_id, height_created, .. } = - partial_witness.chunk_production_key(); - let part_ord = partial_witness.part_ord(); - let encoded_length = partial_witness.encoded_length(); - let part = partial_witness.into_part(); - - // Check if the part is already present. 
- if self.parts[part_ord].is_some() { - tracing::warn!( - target: "client", - ?shard_id, - height_created, - part_ord, - "Received duplicate or redundant state witness part." - ); - return None; - } - - if self.encoded_length != encoded_length { - tracing::warn!( - target: "client", - expected = self.encoded_length, - actual = encoded_length, - "Partial encoded witness length field doesn't match", - ); - return None; - } - - // Increment the count of data parts present even if the part has been decoded before. - // We use this in metrics to track the number of parts received. Insert the part into the cache entry. - self.data_parts_present += 1; - self.total_parts_size += part.len(); - self.parts[part_ord] = Some(part); - - self.has_enough_parts().then(|| self.encoder.decode(&mut self.parts, self.encoded_length)) - } -} - struct CacheEntry { created_at: Instant, shard_id: ShardId, @@ -144,7 +79,7 @@ struct CacheEntry { } enum CacheUpdate { - WitnessPart(PartialEncodedStateWitness, Arc), + WitnessPart(PartialEncodedStateWitness, Arc), AccessedContractHashes(HashSet), AccessedContractCodes(Vec), } @@ -168,7 +103,7 @@ impl CacheEntry { pub fn data_parts_present(&self) -> Option { match &self.witness_parts { - WitnessPartsState::WaitingParts(data) => Some(data.data_parts_present), + WitnessPartsState::WaitingParts(parts) => Some(parts.data_parts_present()), WitnessPartsState::Empty | WitnessPartsState::Decoded { .. } => None, } } @@ -176,7 +111,7 @@ impl CacheEntry { pub fn total_size(&self) -> usize { let parts_size = match &self.witness_parts { WitnessPartsState::Empty => 0, - WitnessPartsState::WaitingParts(parts) => parts.total_parts_size, + WitnessPartsState::WaitingParts(parts) => parts.total_parts_size(), WitnessPartsState::Decoded { decode_result, .. } => { decode_result.as_ref().map_or(0, |witness| witness.size_bytes()) } @@ -211,10 +146,10 @@ impl CacheEntry { fn process_witness_part( &mut self, partial_witness: PartialEncodedStateWitness, - encoder: Arc, + encoder: Arc, ) { if matches!(self.witness_parts, WitnessPartsState::Empty) { - let parts = WaitingPartsState::new(encoder, partial_witness.encoded_length()); + let parts = ReedSolomonPartsTracker::new(encoder, partial_witness.encoded_length()); self.witness_parts = WitnessPartsState::WaitingParts(parts); } let parts = match &mut self.witness_parts { @@ -222,15 +157,47 @@ impl CacheEntry { WitnessPartsState::WaitingParts(parts) => parts, WitnessPartsState::Decoded { .. 
} => return, }; - if let Some(decode_result) = parts.insert_part(partial_witness) { - self.witness_parts = - WitnessPartsState::Decoded { decode_result, decoded_at: Instant::now() }; - metrics::DECODE_PARTIAL_WITNESS_ACCESSED_CONTRACTS_STATE_COUNT - .with_label_values(&[ - &self.shard_id.to_string(), - self.accessed_contracts.metrics_label(), - ]) - .inc(); + let key = partial_witness.chunk_production_key(); + if parts.encoded_length() != partial_witness.encoded_length() { + tracing::warn!( + target: "client", + ?key, + expected = parts.encoded_length(), + actual = partial_witness.encoded_length(), + "Partial encoded witness encoded length field does not match", + ); + return; + } + let part_ord = partial_witness.part_ord(); + let part = partial_witness.into_part(); + match parts.insert_part(part_ord, part) { + InsertPartResult::Accepted => {} + InsertPartResult::PartAlreadyAvailable => { + tracing::warn!( + target: "client", + ?key, + part_ord, + "Received duplicate or redundant state witness part" + ); + } + InsertPartResult::InvalidPartOrd => { + tracing::warn!( + target: "client", + ?key, + part_ord, + "Received invalid partial witness part ord" + ); + } + InsertPartResult::Decoded(decode_result) => { + self.witness_parts = + WitnessPartsState::Decoded { decode_result, decoded_at: Instant::now() }; + metrics::DECODE_PARTIAL_WITNESS_ACCESSED_CONTRACTS_STATE_COUNT + .with_label_values(&[ + &self.shard_id.to_string(), + self.accessed_contracts.metrics_label(), + ]) + .inc(); + } } } @@ -347,7 +314,7 @@ pub struct PartialEncodedStateWitnessTracker { /// times. processed_witnesses: LruCache, /// Reed Solomon encoder for decoding state witness parts. - encoders: WitnessEncoderCache, + encoders: ReedSolomonEncoderCache, } impl PartialEncodedStateWitnessTracker { @@ -362,7 +329,7 @@ impl PartialEncodedStateWitnessTracker { processed_witnesses: LruCache::new( NonZeroUsize::new(PROCESSED_WITNESSES_CACHE_SIZE).unwrap(), ), - encoders: WitnessEncoderCache::new(), + encoders: ReedSolomonEncoderCache::new(WITNESS_RATIO_DATA_PARTS), } } @@ -466,7 +433,7 @@ impl PartialEncodedStateWitnessTracker { Ok(()) } - fn get_encoder(&mut self, key: &ChunkProductionKey) -> Result, Error> { + fn get_encoder(&mut self, key: &ChunkProductionKey) -> Result, Error> { // The expected number of parts for the Reed Solomon encoding is the number of chunk validators. let num_parts = self .epoch_manager diff --git a/core/primitives/src/reed_solomon.rs b/core/primitives/src/reed_solomon.rs index ea47e404002..2a2945406c9 100644 --- a/core/primitives/src/reed_solomon.rs +++ b/core/primitives/src/reed_solomon.rs @@ -1,13 +1,19 @@ use borsh::{BorshDeserialize, BorshSerialize}; use itertools::Itertools; use reed_solomon_erasure::galois_8::ReedSolomon; +use std::collections::HashMap; use std::io::Error; +use std::sync::Arc; + +/// Type alias around what ReedSolomon represents data part as. +/// This should help with making the code a bit more understandable. +pub type ReedSolomonPart = Option>; // Encode function takes a serializable object and returns a tuple of parts and length of encoded data pub fn reed_solomon_encode( rs: &ReedSolomon, data: T, -) -> (Vec>>, usize) { +) -> (Vec, usize) { let mut bytes = borsh::to_vec(&data).unwrap(); let encoded_length = bytes.len(); @@ -36,7 +42,7 @@ pub fn reed_solomon_encode( // Return an error if the reed solomon decoding fails or borsh deserialization fails. 
 pub fn reed_solomon_decode<T: BorshDeserialize>(
     rs: &ReedSolomon,
-    parts: &mut [Option<Box<[u8]>>],
+    parts: &mut [ReedSolomonPart],
     encoded_length: usize,
 ) -> Result<T, Error> {
     if let Err(err) = rs.reconstruct(parts) {
@@ -56,3 +62,182 @@ pub fn reed_solomon_decode<T: BorshDeserialize>(
 pub fn reed_solomon_part_length(encoded_length: usize, data_parts: usize) -> usize {
     (encoded_length + data_parts - 1) / data_parts
 }
+
+pub fn reed_solomon_num_data_parts(total_parts: usize, ratio_data_parts: f64) -> usize {
+    std::cmp::max((total_parts as f64 * ratio_data_parts) as usize, 1)
+}
+
+pub struct ReedSolomonEncoder {
+    /// ReedSolomon does not support having exactly 1 total part count and
+    /// no parity parts, so we use None for that
+    rs: Option<ReedSolomon>,
+}
+
+/// We need separate ReedSolomonEncoder[Serialize|Deserialize] traits instead
+/// of just using Borsh directly because of already existing code for handling
+/// chunk state witness where we do not use Borsh for single part encoding.
+/// This was accidental, but unfortunately we cannot change it because now
+/// it is a part of the networking protocol.
+pub trait ReedSolomonEncoderSerialize: BorshSerialize {
+    fn serialize_single_part(&self) -> std::io::Result<Vec<u8>> {
+        borsh::to_vec(self)
+    }
+}
+
+pub trait ReedSolomonEncoderDeserialize: BorshDeserialize {
+    fn deserialize_single_part(data: &[u8]) -> std::io::Result<Self> {
+        Self::try_from_slice(data)
+    }
+}
+
+impl ReedSolomonEncoder {
+    pub fn new(total_parts: usize, ratio_data_parts: f64) -> ReedSolomonEncoder {
+        let rs = if total_parts > 1 {
+            let data_parts = reed_solomon_num_data_parts(total_parts, ratio_data_parts);
+            Some(ReedSolomon::new(data_parts, total_parts - data_parts).unwrap())
+        } else {
+            None
+        };
+        Self { rs }
+    }
+
+    pub fn total_parts(&self) -> usize {
+        match self.rs {
+            Some(ref rs) => rs.total_shard_count(),
+            None => 1,
+        }
+    }
+
+    pub fn data_parts(&self) -> usize {
+        match self.rs {
+            Some(ref rs) => rs.data_shard_count(),
+            None => 1,
+        }
+    }
+
+    pub fn encode<T: ReedSolomonEncoderSerialize>(
+        &self,
+        data: &T,
+    ) -> (Vec<ReedSolomonPart>, usize) {
+        match self.rs {
+            Some(ref rs) => reed_solomon_encode(rs, data),
+            None => {
+                let bytes = T::serialize_single_part(&data).unwrap();
+                let size = bytes.len();
+                (vec![Some(bytes.into_boxed_slice())], size)
+            }
+        }
+    }
+
+    pub fn decode<T: ReedSolomonEncoderDeserialize>(
+        &self,
+        parts: &mut [ReedSolomonPart],
+        encoded_length: usize,
+    ) -> Result<T, Error> {
+        match self.rs {
+            Some(ref rs) => reed_solomon_decode(rs, parts, encoded_length),
+            None => {
+                if parts.len() != 1 {
+                    return Err(std::io::Error::other(format!(
+                        "Expected single part, received {}",
+                        parts.len()
+                    )));
+                }
+                let Some(part) = &parts[0] else {
+                    return Err(std::io::Error::other("Received part is not expected to be None"));
+                };
+                T::deserialize_single_part(part.as_ref())
+            }
+        }
+    }
+}
+
+pub struct ReedSolomonEncoderCache {
+    ratio_data_parts: f64,
+    instances: HashMap<usize, Arc<ReedSolomonEncoder>>,
+}
+
+impl ReedSolomonEncoderCache {
+    pub fn new(ratio_data_parts: f64) -> Self {
+        Self { ratio_data_parts, instances: HashMap::new() }
+    }
+
+    /// Gets an encoder (or adds a new one to the cache if not present) for a
+    /// given number of the total parts.
+ pub fn entry(&mut self, total_parts: usize) -> Arc { + self.instances + .entry(total_parts) + .or_insert_with(|| { + Arc::new(ReedSolomonEncoder::new(total_parts, self.ratio_data_parts)) + }) + .clone() + } +} + +pub struct ReedSolomonPartsTracker { + parts: Vec, + encoded_length: usize, + data_parts_present: usize, + encoder: Arc, + total_parts_size: usize, + phantom: std::marker::PhantomData, +} + +pub enum InsertPartResult { + Accepted, + PartAlreadyAvailable, + InvalidPartOrd, + Decoded(std::io::Result), +} + +impl ReedSolomonPartsTracker { + pub fn new(encoder: Arc, encoded_length: usize) -> Self { + Self { + data_parts_present: 0, + parts: vec![None; encoder.total_parts()], + total_parts_size: 0, + encoded_length, + encoder, + phantom: std::marker::PhantomData, + } + } + + pub fn data_parts_present(&self) -> usize { + self.data_parts_present + } + + pub fn total_parts_size(&self) -> usize { + self.total_parts_size + } + + pub fn data_parts_required(&self) -> usize { + self.encoder.data_parts() + } + + pub fn has_enough_parts(&self) -> bool { + self.data_parts_present >= self.data_parts_required() + } + + pub fn encoded_length(&self) -> usize { + self.encoded_length + } + + pub fn insert_part(&mut self, part_ord: usize, part: Box<[u8]>) -> InsertPartResult { + if part_ord >= self.parts.len() { + return InsertPartResult::InvalidPartOrd; + } + if self.parts[part_ord].is_some() { + return InsertPartResult::PartAlreadyAvailable; + } + + self.data_parts_present += 1; + self.total_parts_size += part.len(); + self.parts[part_ord] = Some(part); + + if self.has_enough_parts() { + InsertPartResult::Decoded(self.encoder.decode(&mut self.parts, self.encoded_length)) + } else { + InsertPartResult::Accepted + } + } +} diff --git a/core/primitives/src/stateless_validation/state_witness.rs b/core/primitives/src/stateless_validation/state_witness.rs index 004a4df3a1d..3f839736502 100644 --- a/core/primitives/src/stateless_validation/state_witness.rs +++ b/core/primitives/src/stateless_validation/state_witness.rs @@ -5,6 +5,7 @@ use super::{ChunkProductionKey, SignatureDifferentiator}; use crate::bandwidth_scheduler::BandwidthRequests; use crate::challenge::PartialState; use crate::congestion_info::CongestionInfo; +use crate::reed_solomon::{ReedSolomonEncoderDeserialize, ReedSolomonEncoderSerialize}; use crate::sharding::{ChunkHash, ReceiptProof, ShardChunkHeader, ShardChunkHeaderV3}; use crate::transaction::SignedTransaction; use crate::types::EpochId; @@ -47,6 +48,18 @@ impl { } +impl ReedSolomonEncoderSerialize for EncodedChunkStateWitness { + fn serialize_single_part(&self) -> std::io::Result> { + Ok(self.as_slice().to_vec()) + } +} + +impl ReedSolomonEncoderDeserialize for EncodedChunkStateWitness { + fn deserialize_single_part(data: &[u8]) -> std::io::Result { + Ok(EncodedChunkStateWitness::from_boxed_slice(data.to_vec().into_boxed_slice())) + } +} + pub type ChunkStateWitnessSize = usize; /// An acknowledgement sent from the chunk producer upon receiving the state witness to From 9c06b22e4d1f5c0952751cc9ec9a8a4fb3ab251a Mon Sep 17 00:00:00 2001 From: Anton Puhach Date: Wed, 30 Oct 2024 15:01:01 +0100 Subject: [PATCH 09/13] feat: distribution of partial encoded contract deploys (#12340) This does not including networking, validation and metrics. Those will be added in the followup PRs. 
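For context, a minimal sketch of how the `ReedSolomonEncoderCache` / `ReedSolomonPartsTracker` API from the previous patch composes end to end; this patch reuses the same machinery for contract deploys. This is not part of the diff: `Payload` is a hypothetical stand-in for a type such as `ChunkContractDeploys`, and the borsh `derive` feature is assumed.

```rust
use near_primitives::reed_solomon::{
    InsertPartResult, ReedSolomonEncoderCache, ReedSolomonEncoderDeserialize,
    ReedSolomonEncoderSerialize, ReedSolomonPartsTracker,
};

#[derive(borsh::BorshSerialize, borsh::BorshDeserialize)]
struct Payload(Vec<u8>); // hypothetical stand-in for e.g. ChunkContractDeploys
impl ReedSolomonEncoderSerialize for Payload {}
impl ReedSolomonEncoderDeserialize for Payload {}

fn roundtrip() {
    // Sender side: one encoder per distinct part count, cached so the
    // Reed-Solomon matrices are not rebuilt for every message (0.6 mirrors
    // WITNESS_RATIO_DATA_PARTS).
    let mut cache = ReedSolomonEncoderCache::new(0.6);
    let encoder = cache.entry(5);
    let (parts, encoded_length) = encoder.encode(&Payload(vec![42; 1024]));

    // Receiver side: feed parts in as they arrive; decoding fires from
    // insert_part once enough data parts are present.
    let mut tracker: ReedSolomonPartsTracker<Payload> =
        ReedSolomonPartsTracker::new(encoder, encoded_length);
    for (part_ord, part) in parts.into_iter().enumerate() {
        match tracker.insert_part(part_ord, part.unwrap()) {
            InsertPartResult::Accepted => continue,
            InsertPartResult::Decoded(result) => {
                let _payload = result.expect("all parts were valid");
                break;
            }
            InsertPartResult::PartAlreadyAvailable | InsertPartResult::InvalidPartOrd => {
                unreachable!("part ords are unique and in range here")
            }
        }
    }
}
```

Note that callers never invoke `decode` directly: `insert_part` triggers it automatically once `data_parts_present` reaches the data-part threshold.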
---
 .../partial_witness/encoding.rs               |   1 +
 .../partial_witness/mod.rs                    |   1 +
 .../partial_deploys_tracker.rs                | 114 +++++++++++++++
 .../partial_witness/partial_witness_actor.rs  | 137 +++++++++++++++++-
 .../src/stateless_validation/validate.rs      |  14 +-
 .../contract_distribution.rs                  | 111 ++++++++++++++
 core/primitives/src/validator_signer.rs       |  28 +++-
 .../res/protocol_schema.toml                  |   5 +
 8 files changed, 404 insertions(+), 7 deletions(-)
 create mode 100644 chain/client/src/stateless_validation/partial_witness/partial_deploys_tracker.rs

diff --git a/chain/client/src/stateless_validation/partial_witness/encoding.rs b/chain/client/src/stateless_validation/partial_witness/encoding.rs
index d2a584800a2..6ed19dc0708 100644
--- a/chain/client/src/stateless_validation/partial_witness/encoding.rs
+++ b/chain/client/src/stateless_validation/partial_witness/encoding.rs
@@ -4,6 +4,7 @@ use near_primitives::reed_solomon::{reed_solomon_num_data_parts, reed_solomon_part_length};
 /// The tradeoff here is that a higher ratio is better for handling missing parts and network errors
 /// but increases the size of the encoded state witness and the total network bandwidth requirements.
 pub const WITNESS_RATIO_DATA_PARTS: f64 = 0.6;
+pub const CONTRACT_DEPLOYS_RATIO_DATA_PARTS: f64 = 0.6;
 
 pub fn witness_part_length(encoded_witness_size: usize, total_parts: usize) -> usize {
     reed_solomon_part_length(
diff --git a/chain/client/src/stateless_validation/partial_witness/mod.rs b/chain/client/src/stateless_validation/partial_witness/mod.rs
index 780badcc4a2..9861d111eb8 100644
--- a/chain/client/src/stateless_validation/partial_witness/mod.rs
+++ b/chain/client/src/stateless_validation/partial_witness/mod.rs
@@ -1,4 +1,5 @@
 mod encoding;
+mod partial_deploys_tracker;
 pub mod partial_witness_actor;
 mod partial_witness_tracker;
 
diff --git a/chain/client/src/stateless_validation/partial_witness/partial_deploys_tracker.rs b/chain/client/src/stateless_validation/partial_witness/partial_deploys_tracker.rs
new file mode 100644
index 00000000000..229b557d353
--- /dev/null
+++ b/chain/client/src/stateless_validation/partial_witness/partial_deploys_tracker.rs
@@ -0,0 +1,114 @@
+use std::num::NonZeroUsize;
+use std::sync::Arc;
+
+use lru::LruCache;
+use near_chain::Error;
+use near_primitives::reed_solomon::{
+    InsertPartResult, ReedSolomonEncoder, ReedSolomonPartsTracker,
+};
+use near_primitives::stateless_validation::contract_distribution::{
+    ChunkContractDeploys, PartialEncodedContractDeploys, PartialEncodedContractDeploysPart,
+};
+use near_primitives::stateless_validation::ChunkProductionKey;
+
+const DEPLOY_PARTS_CACHE_SIZE: usize = 20;
+
+struct CacheEntry {
+    parts: ReedSolomonPartsTracker<ChunkContractDeploys>,
+}
+
+impl CacheEntry {
+    fn new(encoder: Arc<ReedSolomonEncoder>, encoded_length: usize) -> Self {
+        Self { parts: ReedSolomonPartsTracker::new(encoder, encoded_length) }
+    }
+
+    fn process_part(
+        &mut self,
+        key: &ChunkProductionKey,
+        part: PartialEncodedContractDeploysPart,
+    ) -> Option<std::io::Result<ChunkContractDeploys>> {
+        let part_ord = part.part_ord;
+        if self.parts.encoded_length() != part.encoded_length {
+            tracing::warn!(
+                target: "client",
+                expected = self.parts.encoded_length(),
+                actual = part.encoded_length,
+                part_ord,
+                "Partial encoded contract deploys encoded_length field doesn't match",
+            );
+            return None;
+        }
+        match self.parts.insert_part(part_ord, part.data) {
+            InsertPartResult::Accepted => None,
+            InsertPartResult::PartAlreadyAvailable => {
+                tracing::warn!(
+                    target: "client",
+                    ?key,
+                    part_ord,
+                    "Received duplicate or redundant contract deploy part"
+                );
+                None
+            }
+            InsertPartResult::InvalidPartOrd => {
+                tracing::warn!(
+                    target: "client",
+                    ?key,
+                    part_ord,
+                    "Received invalid contract deploys part ord"
+                );
+                None
+            }
+            InsertPartResult::Decoded(decode_result) => Some(decode_result),
+        }
+    }
+}
+
+pub struct PartialEncodedContractDeploysTracker {
+    parts_cache: LruCache<ChunkProductionKey, CacheEntry>,
+}
+
+impl PartialEncodedContractDeploysTracker {
+    pub fn new() -> Self {
+        Self { parts_cache: LruCache::new(NonZeroUsize::new(DEPLOY_PARTS_CACHE_SIZE).unwrap()) }
+    }
+
+    pub fn store_partial_encoded_contract_deploys(
+        &mut self,
+        partial_deploys: PartialEncodedContractDeploys,
+        encoder: Arc<ReedSolomonEncoder>,
+    ) -> Result<Option<ChunkContractDeploys>, Error> {
+        let (key, part) = partial_deploys.into();
+        if !self.parts_cache.contains(&key) {
+            let new_entry = CacheEntry::new(encoder, part.encoded_length);
+            if let Some((evicted_key, evicted_entry)) =
+                self.parts_cache.push(key.clone(), new_entry)
+            {
+                tracing::warn!(
+                    target: "client",
+                    ?evicted_key,
+                    data_parts_present = ?evicted_entry.parts.data_parts_present(),
+                    data_parts_required = ?evicted_entry.parts.data_parts_required(),
+                    "Evicted unprocessed contract deploys"
+                );
+            }
+        }
+        let entry = self.parts_cache.get_mut(&key).unwrap();
+        if let Some(decode_result) = entry.process_part(&key, part) {
+            self.parts_cache.pop(&key);
+            let deploys = match decode_result {
+                Ok(deploys) => deploys,
+                Err(err) => {
+                    tracing::warn!(
+                        target: "client",
+                        ?err,
+                        ?key,
+                        "Failed to reed solomon decode deployed contracts"
+                    );
+                    return Ok(None);
+                }
+            };
+            return Ok(Some(deploys));
+        }
+        Ok(None)
+    }
+}
diff --git a/chain/client/src/stateless_validation/partial_witness/partial_witness_actor.rs b/chain/client/src/stateless_validation/partial_witness/partial_witness_actor.rs
index a7233bcddda..d8148598a3c 100644
--- a/chain/client/src/stateless_validation/partial_witness/partial_witness_actor.rs
+++ b/chain/client/src/stateless_validation/partial_witness/partial_witness_actor.rs
@@ -17,11 +17,12 @@ use near_network::state_witness::{
 use near_network::types::{NetworkRequests, PeerManagerAdapter, PeerManagerMessageRequest};
 use near_parameters::RuntimeConfig;
 use near_performance_metrics_macros::perf;
-use near_primitives::reed_solomon::ReedSolomonEncoderCache;
+use near_primitives::reed_solomon::{ReedSolomonEncoder, ReedSolomonEncoderCache};
 use near_primitives::sharding::ShardChunkHeader;
 use near_primitives::stateless_validation::contract_distribution::{
-    ChunkContractAccesses, ChunkContractDeployments, CodeBytes, CodeHash, ContractCodeRequest,
-    ContractCodeResponse,
+    ChunkContractAccesses, ChunkContractDeployments, ChunkContractDeploys, CodeBytes, CodeHash,
+    ContractCodeRequest, ContractCodeResponse, PartialEncodedContractDeploys,
+    PartialEncodedContractDeploysPart,
 };
 use near_primitives::stateless_validation::partial_witness::PartialEncodedStateWitness;
 use near_primitives::stateless_validation::state_witness::{
@@ -38,10 +39,12 @@ use crate::client_actor::ClientSenderForPartialWitness;
 use crate::metrics;
 use crate::stateless_validation::state_witness_tracker::ChunkStateWitnessTracker;
 use crate::stateless_validation::validate::{
-    validate_chunk_contract_accesses, validate_partial_encoded_state_witness,
+    validate_chunk_contract_accesses, validate_partial_encoded_contract_deploys,
+    validate_partial_encoded_state_witness,
 };
 
-use super::encoding::WITNESS_RATIO_DATA_PARTS;
+use super::encoding::{CONTRACT_DEPLOYS_RATIO_DATA_PARTS, WITNESS_RATIO_DATA_PARTS};
+use super::partial_deploys_tracker::PartialEncodedContractDeploysTracker;
 use
super::partial_witness_tracker::PartialEncodedStateWitnessTracker; use near_primitives::utils::compression::CompressedData; @@ -56,11 +59,14 @@ pub struct PartialWitnessActor { runtime: Arc, /// Tracks the parts of the state witness sent from chunk producers to chunk validators. partial_witness_tracker: PartialEncodedStateWitnessTracker, + partial_deploys_tracker: PartialEncodedContractDeploysTracker, /// Tracks a collection of state witnesses sent from chunk producers to chunk validators. state_witness_tracker: ChunkStateWitnessTracker, /// Reed Solomon encoder for encoding state witness parts. /// We keep one wrapper for each length of chunk_validators to avoid re-creating the encoder. witness_encoders: ReedSolomonEncoderCache, + /// Same as above for contract deploys + contract_deploys_encoders: ReedSolomonEncoderCache, } impl Actor for PartialWitnessActor {} @@ -157,9 +163,13 @@ impl PartialWitnessActor { my_signer, epoch_manager, partial_witness_tracker, + partial_deploys_tracker: PartialEncodedContractDeploysTracker::new(), state_witness_tracker: ChunkStateWitnessTracker::new(clock), runtime, witness_encoders: ReedSolomonEncoderCache::new(WITNESS_RATIO_DATA_PARTS), + contract_deploys_encoders: ReedSolomonEncoderCache::new( + CONTRACT_DEPLOYS_RATIO_DATA_PARTS, + ), } } @@ -231,6 +241,35 @@ impl PartialWitnessActor { .collect_vec()) } + fn generate_contract_deploys_parts( + &mut self, + key: &ChunkProductionKey, + deploys: ChunkContractDeploys, + ) -> Result, Error> { + let validators = self.ordered_contract_deploys_validators(key)?; + let encoder = self.contract_deploys_encoder(validators.len()); + let (parts, encoded_length) = encoder.encode(&deploys); + let signer = self.my_validator_signer()?; + + Ok(validators + .into_iter() + .zip_eq(parts) + .enumerate() + .map(|(part_ord, (validator, part))| { + let partial_deploys = PartialEncodedContractDeploys::new( + key.clone(), + PartialEncodedContractDeploysPart { + part_ord, + data: part.unwrap().to_vec().into_boxed_slice(), + encoded_length, + }, + &signer, + ); + (validator, partial_deploys) + }) + .collect_vec()) + } + // Break the state witness into parts and send each part to the corresponding chunk validator owner. // The chunk validator owner will then forward the part to all other chunk validators. // Each chunk validator would collect the parts and reconstruct the state witness. @@ -339,6 +378,60 @@ impl PartialWitnessActor { Ok(()) } + #[allow(unused)] + fn handle_partial_encoded_contract_deploys( + &mut self, + partial_deploys: PartialEncodedContractDeploys, + ) -> Result<(), Error> { + tracing::debug!(target: "client", ?partial_deploys, "Receive PartialEncodedContractDeploys"); + + let signer = self.my_validator_signer()?; + if !validate_partial_encoded_contract_deploys( + self.epoch_manager.as_ref(), + &partial_deploys, + &signer, + self.runtime.store(), + )? 
{ + return Ok(()); + } + let key = partial_deploys.chunk_production_key().clone(); + let validators = self.ordered_contract_deploys_validators(&key)?; + // Forward if my part + let signer = self.my_validator_signer()?; + let my_account_id = signer.validator_id(); + let my_part_ord = validators + .iter() + .position(|validator| validator == my_account_id) + .expect("expected to be validated"); + if partial_deploys.part().part_ord == my_part_ord { + let _target_validators = + validators.iter().filter(|&validator| validator != my_account_id).collect_vec(); + // TODO(#11099): send message with this part to target validators + } + // Store part + let encoder = self.contract_deploys_encoder(validators.len()); + if let Some(deploys) = self + .partial_deploys_tracker + .store_partial_encoded_contract_deploys(partial_deploys, encoder)? + { + let _contracts = match deploys.decompress_contracts() { + Ok(contracts) => contracts, + Err(err) => { + tracing::warn!( + target: "client", + ?err, + ?key, + "Failed to decompress deployed contracts." + ); + return Ok(()); + } + }; + // TODO(#11099): precompile contracts using self.runtime + } + + Ok(()) + } + /// Handles the state witness ack message from the chunk validator. /// It computes the round-trip time between sending the state witness and receiving /// the ack message and updates the corresponding metric with it. @@ -403,6 +496,18 @@ impl PartialWitnessActor { unreachable!("code for sending message is not implemented yet") } + #[allow(unused)] + fn handle_distribute_chunk_contract_deploys_request( + &mut self, + key: ChunkProductionKey, + deploys: Vec, + ) -> Result<(), Error> { + let compressed_deploys = ChunkContractDeploys::compress_contracts(&deploys)?; + let _validator_parts = self.generate_contract_deploys_parts(&key, compressed_deploys)?; + // TODO(#11099): network send parts to validators + Ok(()) + } + /// Handles contract code requests message from chunk validators. /// As response to this message, sends the contract code requested to /// the requesting chunk validator for the given code hashes. @@ -451,6 +556,28 @@ impl PartialWitnessActor { fn my_validator_signer(&self) -> Result, Error> { self.my_signer.get().ok_or_else(|| Error::NotAValidator("not a validator".to_owned())) } + + fn contract_deploys_encoder(&mut self, validators_count: usize) -> Arc { + self.contract_deploys_encoders.entry(validators_count) + } + + fn ordered_contract_deploys_validators( + &mut self, + key: &ChunkProductionKey, + ) -> Result, Error> { + let chunk_producers = HashSet::::from_iter( + self.epoch_manager.get_epoch_chunk_producers_for_shard(&key.epoch_id, key.shard_id)?, + ); + let mut validators = self + .epoch_manager + .get_epoch_all_validators(&key.epoch_id)? 
+ .into_iter() + .filter(|stake| !chunk_producers.contains(stake.account_id())) + .map(|stake| stake.account_id().clone()) + .collect::>(); + validators.sort(); + Ok(validators) + } } fn compress_witness(witness: &ChunkStateWitness) -> Result { diff --git a/chain/client/src/stateless_validation/validate.rs b/chain/client/src/stateless_validation/validate.rs index 446dc7ebd22..33f0de1b650 100644 --- a/chain/client/src/stateless_validation/validate.rs +++ b/chain/client/src/stateless_validation/validate.rs @@ -4,7 +4,9 @@ use near_chain::types::Tip; use near_chain_primitives::Error; use near_epoch_manager::EpochManagerAdapter; use near_primitives::stateless_validation::chunk_endorsement::ChunkEndorsementV2; -use near_primitives::stateless_validation::contract_distribution::ChunkContractAccesses; +use near_primitives::stateless_validation::contract_distribution::{ + ChunkContractAccesses, PartialEncodedContractDeploys, +}; use near_primitives::stateless_validation::partial_witness::{ PartialEncodedStateWitness, MAX_COMPRESSED_STATE_WITNESS_SIZE, }; @@ -67,6 +69,16 @@ pub fn validate_partial_encoded_state_witness( Ok(true) } +pub fn validate_partial_encoded_contract_deploys( + _epoch_manager: &dyn EpochManagerAdapter, + _partial_deploys: &PartialEncodedContractDeploys, + _signer: &ValidatorSigner, + _store: &Store, +) -> Result { + // TODO(#11099): implement + Ok(true) +} + /// Function to validate the chunk endorsement. In addition of ChunkProductionKey, we check the following: /// - signature of endorsement and metadata is valid pub fn validate_chunk_endorsement( diff --git a/core/primitives/src/stateless_validation/contract_distribution.rs b/core/primitives/src/stateless_validation/contract_distribution.rs index 912fb867698..55bae7148ce 100644 --- a/core/primitives/src/stateless_validation/contract_distribution.rs +++ b/core/primitives/src/stateless_validation/contract_distribution.rs @@ -7,6 +7,7 @@ use near_primitives_core::hash::CryptoHash; use near_primitives_core::types::AccountId; use near_schema_checker_lib::ProtocolSchema; +use crate::reed_solomon::{ReedSolomonEncoderDeserialize, ReedSolomonEncoderSerialize}; use crate::{utils::compression::CompressedData, validator_signer::ValidatorSigner}; use super::{ChunkProductionKey, SignatureDifferentiator}; @@ -391,3 +392,113 @@ pub struct ContractUpdates { /// Code-hashes of the contracts deployed while applying the chunk. 
pub contract_deploys: HashSet, } + +#[derive(Clone, BorshSerialize, BorshDeserialize, ProtocolSchema)] +pub struct ChunkContractDeploys { + compressed_contracts: CompressedContractCode, +} + +impl ChunkContractDeploys { + pub fn compress_contracts(contracts: &Vec) -> std::io::Result { + CompressedContractCode::encode(&contracts) + .map(|(compressed_contracts, _size)| Self { compressed_contracts }) + } + + pub fn decompress_contracts(&self) -> std::io::Result> { + self.compressed_contracts.decode().map(|(data, _size)| data) + } +} + +impl ReedSolomonEncoderSerialize for ChunkContractDeploys {} +impl ReedSolomonEncoderDeserialize for ChunkContractDeploys {} + +#[derive(Clone, Debug, BorshSerialize, BorshDeserialize, ProtocolSchema)] +pub enum PartialEncodedContractDeploys { + V1(PartialEncodedContractDeploysV1), +} + +impl PartialEncodedContractDeploys { + pub fn new( + key: ChunkProductionKey, + part: PartialEncodedContractDeploysPart, + signer: &ValidatorSigner, + ) -> Self { + Self::V1(PartialEncodedContractDeploysV1::new(key, part, signer)) + } + + pub fn chunk_production_key(&self) -> &ChunkProductionKey { + match &self { + Self::V1(v1) => &v1.inner.next_chunk, + } + } + + pub fn part(&self) -> &PartialEncodedContractDeploysPart { + match &self { + Self::V1(v1) => &v1.inner.part, + } + } +} + +impl Into<(ChunkProductionKey, PartialEncodedContractDeploysPart)> + for PartialEncodedContractDeploys +{ + fn into(self) -> (ChunkProductionKey, PartialEncodedContractDeploysPart) { + match self { + Self::V1(PartialEncodedContractDeploysV1 { inner, .. }) => { + (inner.next_chunk, inner.part) + } + } + } +} + +#[derive(Clone, Debug, BorshSerialize, BorshDeserialize, ProtocolSchema)] +pub struct PartialEncodedContractDeploysV1 { + inner: PartialEncodedContractDeploysInner, + signature: Signature, +} + +impl PartialEncodedContractDeploysV1 { + pub fn new( + key: ChunkProductionKey, + part: PartialEncodedContractDeploysPart, + signer: &ValidatorSigner, + ) -> Self { + let inner = PartialEncodedContractDeploysInner::new(key, part); + let signature = signer.sign_partial_encoded_contract_deploys(&inner); + Self { inner, signature } + } +} + +#[derive(Clone, BorshSerialize, BorshDeserialize, ProtocolSchema)] +pub struct PartialEncodedContractDeploysPart { + pub part_ord: usize, + pub data: Box<[u8]>, + pub encoded_length: usize, +} + +impl std::fmt::Debug for PartialEncodedContractDeploysPart { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("PartialEncodedContractDeploysPart") + .field("part_ord", &self.part_ord) + .field("data_size", &self.data.len()) + .field("encoded_length", &self.encoded_length) + .finish() + } +} + +#[derive(Clone, Debug, BorshSerialize, BorshDeserialize, ProtocolSchema)] +pub struct PartialEncodedContractDeploysInner { + next_chunk: ChunkProductionKey, + part: PartialEncodedContractDeploysPart, + signature_differentiator: SignatureDifferentiator, +} + +impl PartialEncodedContractDeploysInner { + fn new(next_chunk: ChunkProductionKey, part: PartialEncodedContractDeploysPart) -> Self { + Self { + next_chunk, + part, + signature_differentiator: "PartialEncodedContractDeploysInner".to_owned(), + } + } +} diff --git a/core/primitives/src/validator_signer.rs b/core/primitives/src/validator_signer.rs index 71cba698acc..8aa53daa347 100644 --- a/core/primitives/src/validator_signer.rs +++ b/core/primitives/src/validator_signer.rs @@ -14,7 +14,7 @@ use crate::stateless_validation::chunk_endorsement::{ }; use 
crate::stateless_validation::contract_distribution::{
     ChunkContractAccessesInner, ChunkContractDeploymentsInner, ContractCodeRequestInner,
-    ContractCodeResponseInner,
+    ContractCodeResponseInner, PartialEncodedContractDeploysInner,
 };
 use crate::stateless_validation::partial_witness::PartialEncodedStateWitnessInner;
 use crate::stateless_validation::state_witness::EncodedChunkStateWitness;
@@ -169,6 +169,18 @@ impl ValidatorSigner {
         }
     }
 
+    pub fn sign_partial_encoded_contract_deploys(
+        &self,
+        inner: &PartialEncodedContractDeploysInner,
+    ) -> Signature {
+        match self {
+            ValidatorSigner::Empty(signer) => signer.sign_partial_encoded_contract_deploys(inner),
+            ValidatorSigner::InMemory(signer) => {
+                signer.sign_partial_encoded_contract_deploys(inner)
+            }
+        }
+    }
+
     /// Signs the inner contents of a ContractCodeRequest message.
     pub fn sign_contract_code_request(&self, inner: &ContractCodeRequestInner) -> Signature {
         match self {
@@ -321,6 +333,13 @@ impl EmptyValidatorSigner {
         Signature::default()
     }
 
+    fn sign_partial_encoded_contract_deploys(
+        &self,
+        _inner: &PartialEncodedContractDeploysInner,
+    ) -> Signature {
+        Signature::default()
+    }
+
     fn sign_contract_code_request(&self, _inner: &ContractCodeRequestInner) -> Signature {
         Signature::default()
     }
@@ -441,6 +460,13 @@ impl InMemoryValidatorSigner {
         self.signer.sign(&borsh::to_vec(inner).unwrap())
     }
 
+    fn sign_partial_encoded_contract_deploys(
+        &self,
+        inner: &PartialEncodedContractDeploysInner,
+    ) -> Signature {
+        self.signer.sign(&borsh::to_vec(inner).unwrap())
+    }
+
     fn sign_contract_code_request(&self, inner: &ContractCodeRequestInner) -> Signature {
         self.signer.sign(&borsh::to_vec(inner).unwrap())
     }
diff --git a/tools/protocol-schema-check/res/protocol_schema.toml b/tools/protocol-schema-check/res/protocol_schema.toml
index 98106127761..eed9c1d2489 100644
--- a/tools/protocol-schema-check/res/protocol_schema.toml
+++ b/tools/protocol-schema-check/res/protocol_schema.toml
@@ -59,6 +59,7 @@ ChunkContractAccessesV1 = 2405344532
 ChunkContractDeployments = 599007946
 ChunkContractDeploymentsInner = 308114150
 ChunkContractDeploymentsV1 = 3752738828
+ChunkContractDeploys = 414270153
 ChunkEndorsement = 1294072929
 ChunkEndorsementInner = 2425301775
 ChunkEndorsementMetadata = 1740861942
@@ -167,6 +168,10 @@ PartialEncodedChunkRequestMsg = 1470767646
 PartialEncodedChunkResponseMsg = 2957212759
 PartialEncodedChunkV1 = 3642706173
 PartialEncodedChunkV2 = 1552532170
+PartialEncodedContractDeploys = 3216562245
+PartialEncodedContractDeploysInner = 2549441552
+PartialEncodedContractDeploysPart = 1672852427
+PartialEncodedContractDeploysV1 = 2574493147
 PartialEncodedStateWitness = 2729738364
 PartialEncodedStateWitnessInner = 2590980035
 PartialState = 3772957669

From 7336e0a8c84c7c2d97b09a8a6d611e78df862156 Mon Sep 17 00:00:00 2001
From: Tayfun Elmas
Date: Wed, 30 Oct 2024 17:07:37 +0300
Subject: [PATCH 10/13] feat(contract-distribution): Add precompile_contracts to trait RuntimeAdapter (#12339)

This is a preparation step for calling `precompile_contracts` for newly deployed contracts. The code already exists but is not exposed via the `RuntimeAdapter` trait. This PR makes the function a member of the trait.

Also adds a unit test that checks that the compiled-contract cache is populated for `NightshadeRuntime` after calling `precompile_contracts`.
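For context, the thread-capping pattern that the moved `precompile_contracts` body relies on, distilled into a standalone sketch. `compile_one` is a hypothetical stand-in for the real `precompile_contract` call; everything else mirrors the code in the diff below.

```rust
use std::sync::mpsc;

/// Sketch of the throttling pattern used by `precompile_contracts`: a channel
/// pre-filled with N "slots" caps how many rayon tasks run at once, so that
/// compilation uses at most half of the available threads and the node keeps
/// functioning normally.
fn compile_all(contract_codes: Vec<Vec<u8>>) {
    rayon::scope(|scope| {
        let (slot_sender, slot_receiver) = mpsc::channel();
        // Use up to half of the threads for the compilation.
        let max_threads = std::cmp::max(rayon::current_num_threads() / 2, 1);
        for _ in 0..max_threads {
            slot_sender.send(()).expect("both ends are owned here");
        }
        for code in contract_codes {
            // Blocks until a running task returns its slot, bounding concurrency.
            slot_receiver.recv().expect("could not receive a slot");
            let slot_sender = slot_sender.clone();
            scope.spawn(move |_| {
                compile_one(&code);
                // Return the slot; a send error only means there is no more work.
                let _ = slot_sender.send(());
            });
        }
    });
}

// Hypothetical stand-in for precompile_contract(&code, wasm_config, cache).
fn compile_one(_code: &[u8]) {}
```

Pre-filling the channel and requiring a `recv` before each `spawn` bounds in-flight compilations without blocking rayon's global pool for other work.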
--- Cargo.lock | 1 + chain/chain/Cargo.toml | 1 + chain/chain/src/runtime/mod.rs | 86 ++++++++++++------------ chain/chain/src/runtime/tests.rs | 73 ++++++++++++++++++++ chain/chain/src/test_utils/kv_runtime.rs | 11 ++- chain/chain/src/types.rs | 8 +++ 6 files changed, 136 insertions(+), 44 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5ce82cfff32..dfaefb9c27a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4020,6 +4020,7 @@ dependencies = [ "near-primitives", "near-schema-checker-lib", "near-store", + "near-test-contracts", "near-vm-runner", "node-runtime", "num-rational 0.3.2", diff --git a/chain/chain/Cargo.toml b/chain/chain/Cargo.toml index f91eb519dae..88757f136e7 100644 --- a/chain/chain/Cargo.toml +++ b/chain/chain/Cargo.toml @@ -58,6 +58,7 @@ near-schema-checker-lib.workspace = true [dev-dependencies] near-primitives = { workspace = true, features = ["clock"] } +near-test-contracts.workspace = true serde_json.workspace = true primitive-types.workspace = true insta.workspace = true diff --git a/chain/chain/src/runtime/mod.rs b/chain/chain/src/runtime/mod.rs index 7bec806078e..344e0de9613 100644 --- a/chain/chain/src/runtime/mod.rs +++ b/chain/chain/src/runtime/mod.rs @@ -476,49 +476,6 @@ impl NightshadeRuntime { Ok(result) } - fn precompile_contracts( - &self, - epoch_id: &EpochId, - contract_codes: Vec, - ) -> Result<(), Error> { - let _span = tracing::debug_span!( - target: "runtime", - "precompile_contracts", - num_contracts = contract_codes.len()) - .entered(); - let protocol_version = self.epoch_manager.get_epoch_protocol_version(epoch_id)?; - let runtime_config = self.runtime_config_store.get_config(protocol_version); - let compiled_contract_cache: Option> = - Some(Box::new(self.compiled_contract_cache.handle())); - // Execute precompile_contract in parallel but prevent it from using more than half of all - // threads so that node will still function normally. - rayon::scope(|scope| { - let (slot_sender, slot_receiver) = std::sync::mpsc::channel(); - // Use up-to half of the threads for the compilation. - let max_threads = std::cmp::max(rayon::current_num_threads() / 2, 1); - for _ in 0..max_threads { - slot_sender.send(()).expect("both sender and receiver are owned here"); - } - for code in contract_codes { - slot_receiver.recv().expect("could not receive a slot to compile contract"); - let contract_cache = compiled_contract_cache.as_deref(); - let slot_sender = slot_sender.clone(); - scope.spawn(move |_| { - precompile_contract( - &code, - Arc::clone(&runtime_config.wasm_config), - contract_cache, - ) - .ok(); - // If this fails, it just means there won't be any more attempts to recv the - // slots - let _ = slot_sender.send(()); - }); - } - }); - Ok(()) - } - fn get_gc_stop_height_impl(&self, block_hash: &CryptoHash) -> Result { let epoch_manager = self.epoch_manager.read(); // an epoch must have a first block. 
@@ -1349,6 +1306,49 @@ impl RuntimeAdapter for NightshadeRuntime { fn compiled_contract_cache(&self) -> &dyn ContractRuntimeCache { self.compiled_contract_cache.as_ref() } + + fn precompile_contracts( + &self, + epoch_id: &EpochId, + contract_codes: Vec, + ) -> Result<(), Error> { + let _span = tracing::debug_span!( + target: "runtime", + "precompile_contracts", + num_contracts = contract_codes.len()) + .entered(); + let protocol_version = self.epoch_manager.get_epoch_protocol_version(epoch_id)?; + let runtime_config = self.runtime_config_store.get_config(protocol_version); + let compiled_contract_cache: Option> = + Some(Box::new(self.compiled_contract_cache.handle())); + // Execute precompile_contract in parallel but prevent it from using more than half of all + // threads so that node will still function normally. + rayon::scope(|scope| { + let (slot_sender, slot_receiver) = std::sync::mpsc::channel(); + // Use up-to half of the threads for the compilation. + let max_threads = std::cmp::max(rayon::current_num_threads() / 2, 1); + for _ in 0..max_threads { + slot_sender.send(()).expect("both sender and receiver are owned here"); + } + for code in contract_codes { + slot_receiver.recv().expect("could not receive a slot to compile contract"); + let contract_cache = compiled_contract_cache.as_deref(); + let slot_sender = slot_sender.clone(); + scope.spawn(move |_| { + precompile_contract( + &code, + Arc::clone(&runtime_config.wasm_config), + contract_cache, + ) + .ok(); + // If this fails, it just means there won't be any more attempts to recv the + // slots + let _ = slot_sender.send(()); + }); + } + }); + Ok(()) + } } /// Get the limit on the number of new receipts imposed by the local congestion control. diff --git a/chain/chain/src/runtime/tests.rs b/chain/chain/src/runtime/tests.rs index f4c1ad611be..eebc84de8b0 100644 --- a/chain/chain/src/runtime/tests.rs +++ b/chain/chain/src/runtime/tests.rs @@ -2,6 +2,7 @@ use std::collections::BTreeSet; use crate::types::{ChainConfig, RuntimeStorageConfig}; use crate::{Chain, ChainGenesis, ChainStoreAccess, DoomslugThresholdMode}; +use assert_matches::assert_matches; use near_chain_configs::test_utils::{TESTING_INIT_BALANCE, TESTING_INIT_STAKE}; use near_epoch_manager::shard_tracker::ShardTracker; use near_epoch_manager::{EpochManager, RngSeed}; @@ -16,8 +17,10 @@ use near_primitives::epoch_block_info::BlockInfo; use near_primitives::receipt::{ActionReceipt, ReceiptV1}; use near_primitives::test_utils::create_test_signer; use near_primitives::types::validator_stake::{ValidatorStake, ValidatorStakeIter}; +use near_primitives::version::PROTOCOL_VERSION; use near_store::flat::{FlatStateChanges, FlatStateDelta, FlatStateDeltaMetadata}; use near_store::genesis::initialize_genesis_state; +use near_vm_runner::{get_contract_cache_key, CompiledContract, CompiledContractInfo}; use num_rational::Ratio; use rand::{rngs::StdRng, seq::SliceRandom, SeedableRng}; @@ -1828,6 +1831,76 @@ fn test_storage_proof_garbage() { assert_eq!(total_size / 1000_000, garbage_size_mb); } +/// Tests that precompiling a set of contracts updates the compiled contract cache. +#[test] +fn test_precompile_contracts_updates_cache() { + struct FakeTestCompiledContractType; // For testing AnyCache. 
+ let genesis = Genesis::test(vec!["test0".parse().unwrap()], 1); + let store = near_store::test_utils::create_test_store(); + let tempdir = tempfile::tempdir().unwrap(); + initialize_genesis_state(store.clone(), &genesis, Some(tempdir.path())); + let epoch_manager = EpochManager::new_arc_handle(store.clone(), &genesis.config, None); + + let contract_cache = FilesystemContractRuntimeCache::new(tempdir.path(), None::<&str>) + .expect("filesystem contract cache"); + let runtime = NightshadeRuntime::test_with_runtime_config_store( + tempdir.path(), + store, + contract_cache.handle(), + &genesis.config, + epoch_manager, + RuntimeConfigStore::new(None), + StateSnapshotType::EveryEpoch, + ); + + let contracts = vec![ + ContractCode::new(near_test_contracts::sized_contract(100).to_vec(), None), + ContractCode::new(near_test_contracts::rs_contract().to_vec(), None), + ContractCode::new(near_test_contracts::trivial_contract().to_vec(), None), + ]; + let code_hashes: Vec = contracts.iter().map(|c| c.hash()).cloned().collect(); + + // First check that the cache does not have the contracts. + for code_hash in code_hashes.iter() { + let cache_key = get_contract_cache_key( + *code_hash, + &runtime.get_runtime_config(PROTOCOL_VERSION).unwrap().wasm_config, + ); + let contract = contract_cache.get(&cache_key).unwrap(); + assert!(contract.is_none()); + } + + runtime.precompile_contracts(&EpochId::default(), contracts).unwrap(); + + // Check that the persistent cache contains the compiled contract after precompilation, + // but it does not populate the in-memory cache (so that the value is generated by try_lookup call). + for code_hash in code_hashes.into_iter() { + let cache_key = get_contract_cache_key( + code_hash, + &runtime.get_runtime_config(PROTOCOL_VERSION).unwrap().wasm_config, + ); + + let contract = contract_cache.get(&cache_key).unwrap(); + assert_matches!( + contract, + Some(CompiledContractInfo { compiled: CompiledContract::Code(_), .. }) + ); + + let result = contract_cache + .memory_cache() + .try_lookup( + cache_key, + || Ok::<_, ()>(Box::new(FakeTestCompiledContractType)), + |v| { + assert!(v.is::()); + "compiled code" + }, + ) + .unwrap(); + assert_eq!(result, "compiled code"); + } +} + fn stake( nonce: Nonce, signer: &Signer, diff --git a/chain/chain/src/test_utils/kv_runtime.rs b/chain/chain/src/test_utils/kv_runtime.rs index cf8046adff4..923f4ed37ba 100644 --- a/chain/chain/src/test_utils/kv_runtime.rs +++ b/chain/chain/src/test_utils/kv_runtime.rs @@ -57,7 +57,7 @@ use near_store::{ set_genesis_hash, set_genesis_state_roots, DBCol, ShardTries, Store, StoreUpdate, Trie, TrieChanges, WrappedTrieChanges, }; -use near_vm_runner::{ContractRuntimeCache, NoContractRuntimeCache}; +use near_vm_runner::{ContractCode, ContractRuntimeCache, NoContractRuntimeCache}; use num_rational::Ratio; use rand::Rng; use std::cmp::Ordering; @@ -1580,4 +1580,13 @@ impl RuntimeAdapter for KeyValueRuntime { fn compiled_contract_cache(&self) -> &dyn ContractRuntimeCache { &self.contract_cache } + + fn precompile_contracts( + &self, + _epoch_id: &EpochId, + _contract_codes: Vec, + ) -> Result<(), Error> { + // Note that KeyValueRuntime does not use compiled contract cache, so this is no-op. 
+ Ok(()) + } } diff --git a/chain/chain/src/types.rs b/chain/chain/src/types.rs index 087b1b4b2b0..221d11f832e 100644 --- a/chain/chain/src/types.rs +++ b/chain/chain/src/types.rs @@ -40,6 +40,7 @@ use near_primitives::views::{QueryRequest, QueryResponse}; use near_schema_checker_lib::ProtocolSchema; use near_store::flat::FlatStorageManager; use near_store::{PartialStorage, ShardTries, Store, Trie, WrappedTrieChanges}; +use near_vm_runner::ContractCode; use near_vm_runner::ContractRuntimeCache; use num_rational::Rational32; use tracing::instrument; @@ -536,6 +537,13 @@ pub trait RuntimeAdapter: Send + Sync { -> Result; fn compiled_contract_cache(&self) -> &dyn ContractRuntimeCache; + + /// Precompiles the contracts and stores them in the compiled contract cache. + fn precompile_contracts( + &self, + epoch_id: &EpochId, + contract_codes: Vec, + ) -> Result<(), Error>; } /// The last known / checked height and time when we have processed it. From 7a7f3ea43da317c62db80e7ff769ea8c03f2cc7c Mon Sep 17 00:00:00 2001 From: Tayfun Elmas Date: Wed, 30 Oct 2024 18:20:56 +0300 Subject: [PATCH 11/13] feat(contract-distribution): Replace ChunkContractDeployments with new message for sending code partially (#12346) This is on top of https://github.com/near/nearcore/pull/12340, thus it is missing some code from the other PR. This implements the plumbing of the new message through network and testloop stacks. --- .../partial_witness/partial_witness_actor.rs | 28 ++----- chain/client/src/test_utils/setup.rs | 26 +++--- chain/network/src/network_protocol/mod.rs | 10 +-- .../src/peer_manager/network_state/mod.rs | 12 +-- .../src/peer_manager/peer_manager_actor.rs | 27 +++--- .../src/rate_limits/messages_limits.rs | 6 +- chain/network/src/state_witness.rs | 9 +- chain/network/src/test_loop.rs | 18 +++- chain/network/src/types.rs | 8 +- .../contract_distribution.rs | 83 ++----------------- core/primitives/src/validator_signer.rs | 23 +---- pytest/lib/messages/block.py | 33 ++++++-- pytest/lib/messages/network.py | 7 +- .../res/protocol_schema.toml | 9 +- 14 files changed, 115 insertions(+), 184 deletions(-) diff --git a/chain/client/src/stateless_validation/partial_witness/partial_witness_actor.rs b/chain/client/src/stateless_validation/partial_witness/partial_witness_actor.rs index d8148598a3c..152333ad1fe 100644 --- a/chain/client/src/stateless_validation/partial_witness/partial_witness_actor.rs +++ b/chain/client/src/stateless_validation/partial_witness/partial_witness_actor.rs @@ -10,8 +10,8 @@ use near_chain::Error; use near_chain_configs::MutableValidatorSigner; use near_epoch_manager::EpochManagerAdapter; use near_network::state_witness::{ - ChunkContractAccessesMessage, ChunkContractDeploymentsMessage, ChunkStateWitnessAckMessage, - ContractCodeRequestMessage, ContractCodeResponseMessage, + ChunkContractAccessesMessage, ChunkStateWitnessAckMessage, ContractCodeRequestMessage, + ContractCodeResponseMessage, PartialEncodedContractDeploysMessage, PartialEncodedStateWitnessForwardMessage, PartialEncodedStateWitnessMessage, }; use near_network::types::{NetworkRequests, PeerManagerAdapter, PeerManagerMessageRequest}; @@ -20,9 +20,8 @@ use near_performance_metrics_macros::perf; use near_primitives::reed_solomon::{ReedSolomonEncoder, ReedSolomonEncoderCache}; use near_primitives::sharding::ShardChunkHeader; use near_primitives::stateless_validation::contract_distribution::{ - ChunkContractAccesses, ChunkContractDeployments, ChunkContractDeploys, CodeBytes, CodeHash, - ContractCodeRequest, ContractCodeResponse, 
PartialEncodedContractDeploys, - PartialEncodedContractDeploysPart, + ChunkContractAccesses, ChunkContractDeploys, CodeBytes, CodeHash, ContractCodeRequest, + ContractCodeResponse, PartialEncodedContractDeploys, PartialEncodedContractDeploysPart, }; use near_primitives::stateless_validation::partial_witness::PartialEncodedStateWitness; use near_primitives::stateless_validation::state_witness::{ @@ -123,10 +122,10 @@ impl Handler for PartialWitnessActor { } } -impl Handler for PartialWitnessActor { - fn handle(&mut self, msg: ChunkContractDeploymentsMessage) { - if let Err(err) = self.handle_chunk_contract_deployments(msg.0) { - tracing::error!(target: "client", ?err, "Failed to handle ChunkContractDeploymentsMessage"); +impl Handler for PartialWitnessActor { + fn handle(&mut self, msg: PartialEncodedContractDeploysMessage) { + if let Err(err) = self.handle_partial_encoded_contract_deploys(msg.0) { + tracing::error!(target: "client", ?err, "Failed to handle PartialEncodedContractDeploysMessage"); } } } @@ -485,17 +484,6 @@ impl PartialWitnessActor { Ok(()) } - /// Handles new contract deployments message from chunk producer. - /// This is sent in parallel to a chunk state witness and contains the code-hashes - /// of the contracts deployed when applying the previous chunk of the witness. - fn handle_chunk_contract_deployments( - &mut self, - _deploys: ChunkContractDeployments, - ) -> Result<(), Error> { - // TODO(#11099): Implement the handling of this message. - unreachable!("code for sending message is not implemented yet") - } - #[allow(unused)] fn handle_distribute_chunk_contract_deploys_request( &mut self, diff --git a/chain/client/src/test_utils/setup.rs b/chain/client/src/test_utils/setup.rs index ac9762edee7..2e82a3ee34d 100644 --- a/chain/client/src/test_utils/setup.rs +++ b/chain/client/src/test_utils/setup.rs @@ -45,8 +45,8 @@ use near_network::client::{ }; use near_network::shards_manager::ShardsManagerRequestFromNetwork; use near_network::state_witness::{ - ChunkContractAccessesMessage, ChunkContractDeploymentsMessage, ContractCodeRequestMessage, - ContractCodeResponseMessage, PartialEncodedStateWitnessForwardMessage, + ChunkContractAccessesMessage, ContractCodeRequestMessage, ContractCodeResponseMessage, + PartialEncodedContractDeploysMessage, PartialEncodedStateWitnessForwardMessage, PartialEncodedStateWitnessMessage, PartialWitnessSenderForNetwork, }; use near_network::types::{BlockInfo, PeerChainInfo}; @@ -787,17 +787,6 @@ fn process_peer_manager_message_default( } } } - NetworkRequests::ChunkContractDeployments(accounts, deploys) => { - for account in accounts { - for (i, name) in validators.iter().enumerate() { - if name == account { - connectors[i] - .partial_witness_sender - .send(ChunkContractDeploymentsMessage(deploys.clone())); - } - } - } - } NetworkRequests::ContractCodeRequest(account, request) => { for (i, name) in validators.iter().enumerate() { if name == account { @@ -816,6 +805,17 @@ fn process_peer_manager_message_default( } } } + NetworkRequests::PartialEncodedContractDeploys(accounts, deploys) => { + for account in accounts { + for (i, name) in validators.iter().enumerate() { + if name == account { + connectors[i] + .partial_witness_sender + .send(PartialEncodedContractDeploysMessage(deploys.clone())); + } + } + } + } NetworkRequests::ForwardTx(_, _) | NetworkRequests::BanPeer { .. 
} | NetworkRequests::TxStatus(_, _, _) diff --git a/chain/network/src/network_protocol/mod.rs b/chain/network/src/network_protocol/mod.rs index 6c6e96d4752..a4efff4837d 100644 --- a/chain/network/src/network_protocol/mod.rs +++ b/chain/network/src/network_protocol/mod.rs @@ -10,9 +10,9 @@ pub use edge::*; use near_primitives::stateless_validation::chunk_endorsement::ChunkEndorsement; use near_primitives::stateless_validation::chunk_endorsement::ChunkEndorsementV1; use near_primitives::stateless_validation::contract_distribution::ChunkContractAccesses; -use near_primitives::stateless_validation::contract_distribution::ChunkContractDeployments; use near_primitives::stateless_validation::contract_distribution::ContractCodeRequest; use near_primitives::stateless_validation::contract_distribution::ContractCodeResponse; +use near_primitives::stateless_validation::contract_distribution::PartialEncodedContractDeploys; use near_primitives::stateless_validation::partial_witness::PartialEncodedStateWitness; use near_primitives::stateless_validation::state_witness::ChunkStateWitnessAck; pub use peer::*; @@ -563,9 +563,9 @@ pub enum RoutedMessageBody { _UnusedEpochSyncResponse(CompressedEpochSyncProof), StatePartRequest(StatePartRequest), ChunkContractAccesses(ChunkContractAccesses), - ChunkContractDeployments(ChunkContractDeployments), ContractCodeRequest(ContractCodeRequest), ContractCodeResponse(ContractCodeResponse), + PartialEncodedContractDeploys(PartialEncodedContractDeploys), } impl RoutedMessageBody { @@ -663,13 +663,13 @@ impl fmt::Debug for RoutedMessageBody { RoutedMessageBody::ChunkContractAccesses(accesses) => { write!(f, "ChunkContractAccesses(code_hashes={:?})", accesses.contracts()) } - RoutedMessageBody::ChunkContractDeployments(deploys) => { - write!(f, "ChunkContractDeployments(code_hashes={:?}", deploys.contracts()) - } RoutedMessageBody::ContractCodeRequest(request) => { write!(f, "ContractCodeRequest(code_hashes={:?})", request.contracts()) } RoutedMessageBody::ContractCodeResponse(_) => write!(f, "ContractCodeResponse",), + RoutedMessageBody::PartialEncodedContractDeploys(deploys) => { + write!(f, "PartialEncodedContractDeploys(part={:?}", deploys.part()) + } } } } diff --git a/chain/network/src/peer_manager/network_state/mod.rs b/chain/network/src/peer_manager/network_state/mod.rs index a781721ea90..e8be8b7565f 100644 --- a/chain/network/src/peer_manager/network_state/mod.rs +++ b/chain/network/src/peer_manager/network_state/mod.rs @@ -22,8 +22,8 @@ use crate::routing::NetworkTopologyChange; use crate::shards_manager::ShardsManagerRequestFromNetwork; use crate::snapshot_hosts::{SnapshotHostInfoError, SnapshotHostsCache}; use crate::state_witness::{ - ChunkContractAccessesMessage, ChunkContractDeploymentsMessage, ChunkStateWitnessAckMessage, - ContractCodeRequestMessage, ContractCodeResponseMessage, + ChunkContractAccessesMessage, ChunkStateWitnessAckMessage, ContractCodeRequestMessage, + ContractCodeResponseMessage, PartialEncodedContractDeploysMessage, PartialEncodedStateWitnessForwardMessage, PartialEncodedStateWitnessMessage, PartialWitnessSenderForNetwork, }; @@ -802,10 +802,6 @@ impl NetworkState { self.partial_witness_adapter.send(ChunkContractAccessesMessage(accesses)); None } - RoutedMessageBody::ChunkContractDeployments(deploys) => { - self.partial_witness_adapter.send(ChunkContractDeploymentsMessage(deploys)); - None - } RoutedMessageBody::ContractCodeRequest(request) => { self.partial_witness_adapter.send(ContractCodeRequestMessage(request)); None @@ -814,6 +810,10 
@@ impl NetworkState { self.partial_witness_adapter.send(ContractCodeResponseMessage(response)); None } + RoutedMessageBody::PartialEncodedContractDeploys(deploys) => { + self.partial_witness_adapter.send(PartialEncodedContractDeploysMessage(deploys)); + None + } body => { tracing::error!(target: "network", "Peer received unexpected message type: {:?}", body); None diff --git a/chain/network/src/peer_manager/peer_manager_actor.rs b/chain/network/src/peer_manager/peer_manager_actor.rs index 36378535927..27745a0b88a 100644 --- a/chain/network/src/peer_manager/peer_manager_actor.rs +++ b/chain/network/src/peer_manager/peer_manager_actor.rs @@ -1181,16 +1181,6 @@ impl PeerManagerActor { } NetworkResponses::NoResponse } - NetworkRequests::ChunkContractDeployments(validators, deploys) => { - for validator in validators { - self.state.send_message_to_account( - &self.clock, - &validator, - RoutedMessageBody::ChunkContractDeployments(deploys.clone()), - ); - } - NetworkResponses::NoResponse - } NetworkRequests::ContractCodeRequest(target, request) => { self.state.send_message_to_account( &self.clock, @@ -1207,6 +1197,23 @@ impl PeerManagerActor { ); NetworkResponses::NoResponse } + NetworkRequests::PartialEncodedContractDeploys(accounts, deploys) => { + // Send to last account separately to avoid clone when sending to a single target. + let (last_account, other_accounts) = accounts.split_last().unwrap(); + for account in other_accounts { + self.state.send_message_to_account( + &self.clock, + &account, + RoutedMessageBody::PartialEncodedContractDeploys(deploys.clone()), + ); + } + self.state.send_message_to_account( + &self.clock, + &last_account, + RoutedMessageBody::PartialEncodedContractDeploys(deploys), + ); + NetworkResponses::NoResponse + } } } diff --git a/chain/network/src/rate_limits/messages_limits.rs b/chain/network/src/rate_limits/messages_limits.rs index 0a3c797d44a..feef78b85cd 100644 --- a/chain/network/src/rate_limits/messages_limits.rs +++ b/chain/network/src/rate_limits/messages_limits.rs @@ -181,9 +181,9 @@ pub enum RateLimitedPeerMessageKey { PartialEncodedStateWitness, PartialEncodedStateWitnessForward, ChunkContractAccesses, - ChunkContractDeployments, ContractCodeRequest, ContractCodeResponse, + PartialEncodedContractDeploys, EpochSyncRequest, } @@ -233,9 +233,11 @@ fn get_key_and_token_cost(message: &PeerMessage) -> Option<(RateLimitedPeerMessa Some((PartialEncodedStateWitnessForward, 1)) } RoutedMessageBody::ChunkContractAccesses(_) => Some((ChunkContractAccesses, 1)), - RoutedMessageBody::ChunkContractDeployments(_) => Some((ChunkContractDeployments, 1)), RoutedMessageBody::ContractCodeRequest(_) => Some((ContractCodeRequest, 1)), RoutedMessageBody::ContractCodeResponse(_) => Some((ContractCodeResponse, 1)), + RoutedMessageBody::PartialEncodedContractDeploys(_) => { + Some((PartialEncodedContractDeploys, 1)) + } RoutedMessageBody::VersionedChunkEndorsement(_) => Some((ChunkEndorsement, 1)), RoutedMessageBody::_UnusedEpochSyncRequest => None, RoutedMessageBody::_UnusedEpochSyncResponse(_) => None, diff --git a/chain/network/src/state_witness.rs b/chain/network/src/state_witness.rs index e92c2e2cca0..227561944cf 100644 --- a/chain/network/src/state_witness.rs +++ b/chain/network/src/state_witness.rs @@ -1,7 +1,7 @@ use near_async::messaging::Sender; use near_async::{MultiSend, MultiSendMessage, MultiSenderFrom}; use near_primitives::stateless_validation::contract_distribution::{ - ChunkContractAccesses, ChunkContractDeployments, ContractCodeRequest, ContractCodeResponse, + 
ChunkContractAccesses, ContractCodeRequest, ContractCodeResponse, PartialEncodedContractDeploys, }; use near_primitives::stateless_validation::partial_witness::PartialEncodedStateWitness; use near_primitives::stateless_validation::state_witness::ChunkStateWitnessAck; @@ -24,11 +24,10 @@ pub struct PartialEncodedStateWitnessForwardMessage(pub PartialEncodedStateWitne #[rtype(result = "()")] pub struct ChunkContractAccessesMessage(pub ChunkContractAccesses); -/// Message to partial witness actor (on a chunk validator) that contains code-hashes of -/// the contracts that are deployed when applying the previous chunk. +/// Message to partial witness actor that contains part of code for newly-deployed contracts. #[derive(actix::Message, Clone, Debug, PartialEq, Eq)] #[rtype(result = "()")] -pub struct ChunkContractDeploymentsMessage(pub ChunkContractDeployments); +pub struct PartialEncodedContractDeploysMessage(pub PartialEncodedContractDeploys); /// Message to partial witness actor (on a chunk producer) that requests contract code /// by their code hashes. @@ -50,7 +49,7 @@ pub struct PartialWitnessSenderForNetwork { pub partial_encoded_state_witness: Sender, pub partial_encoded_state_witness_forward: Sender, pub chunk_contract_accesses: Sender, - pub chunk_contract_deploys: Sender, pub contract_code_request: Sender, pub contract_code_response: Sender, + pub partial_encoded_contract_deploys: Sender, } diff --git a/chain/network/src/test_loop.rs b/chain/network/src/test_loop.rs index ba23602db1a..8059fae5457 100644 --- a/chain/network/src/test_loop.rs +++ b/chain/network/src/test_loop.rs @@ -9,8 +9,9 @@ use crate::client::{ use crate::shards_manager::ShardsManagerRequestFromNetwork; use crate::state_witness::{ ChunkContractAccessesMessage, ChunkStateWitnessAckMessage, ContractCodeRequestMessage, - ContractCodeResponseMessage, PartialEncodedStateWitnessForwardMessage, - PartialEncodedStateWitnessMessage, PartialWitnessSenderForNetwork, + ContractCodeResponseMessage, PartialEncodedContractDeploysMessage, + PartialEncodedStateWitnessForwardMessage, PartialEncodedStateWitnessMessage, + PartialWitnessSenderForNetwork, }; use crate::types::{ NetworkRequests, NetworkResponses, PeerManagerMessageRequest, PeerManagerMessageResponse, @@ -361,12 +362,12 @@ fn network_message_to_partial_witness_handler( } None } - NetworkRequests::ChunkContractAccesses(chunk_validators, contract_accesses) => { + NetworkRequests::ChunkContractAccesses(chunk_validators, accesses) => { for target in chunk_validators { shared_state .senders_for_account(&target) .partial_witness_sender - .send(ChunkContractAccessesMessage(contract_accesses.clone())); + .send(ChunkContractAccessesMessage(accesses.clone())); } None } @@ -384,6 +385,15 @@ fn network_message_to_partial_witness_handler( .send(ContractCodeResponseMessage(response)); None } + NetworkRequests::PartialEncodedContractDeploys(accounts, deploys) => { + for account in accounts { + shared_state + .senders_for_account(&account) + .partial_witness_sender + .send(PartialEncodedContractDeploysMessage(deploys.clone())); + } + None + } _ => Some(request), }) } diff --git a/chain/network/src/types.rs b/chain/network/src/types.rs index 1f9df339e89..830b3e4625b 100644 --- a/chain/network/src/types.rs +++ b/chain/network/src/types.rs @@ -22,7 +22,7 @@ use near_primitives::network::{AnnounceAccount, PeerId}; use near_primitives::sharding::PartialEncodedChunkWithArcReceipts; use near_primitives::stateless_validation::chunk_endorsement::ChunkEndorsement; use 
near_primitives::stateless_validation::contract_distribution::{ - ChunkContractAccesses, ChunkContractDeployments, ContractCodeRequest, ContractCodeResponse, + ChunkContractAccesses, ContractCodeRequest, ContractCodeResponse, PartialEncodedContractDeploys, }; use near_primitives::stateless_validation::partial_witness::PartialEncodedStateWitness; use near_primitives::stateless_validation::state_witness::ChunkStateWitnessAck; @@ -298,15 +298,15 @@ pub enum NetworkRequests { /// Message from chunk producer to chunk validators containing the code-hashes of contracts /// accessed for the main state transition in the witness. ChunkContractAccesses(Vec, ChunkContractAccesses), - /// Message from chunk producer to other validators containing the code-hashes of contracts - /// deployed for the main state transition in the witness. - ChunkContractDeployments(Vec, ChunkContractDeployments), /// Message from chunk validator to chunk producer to request missing contract code. /// This message is currently sent as a result of receiving the ChunkContractAccesses message /// and failing to find the corresponding code for the hashes received. ContractCodeRequest(AccountId, ContractCodeRequest), /// Message from chunk producer to chunk validators to send the contract code as response to ContractCodeRequest. ContractCodeResponse(AccountId, ContractCodeResponse), + /// Message originates from the chunk producer and distributed among other validators, + /// containing the code of the newly-deployed contracts during the main state transition of the witness. + PartialEncodedContractDeploys(Vec, PartialEncodedContractDeploys), } #[derive(Debug, actix::Message, strum::IntoStaticStr)] diff --git a/core/primitives/src/stateless_validation/contract_distribution.rs b/core/primitives/src/stateless_validation/contract_distribution.rs index 55bae7148ce..ddaab644eae 100644 --- a/core/primitives/src/stateless_validation/contract_distribution.rs +++ b/core/primitives/src/stateless_validation/contract_distribution.rs @@ -95,79 +95,6 @@ impl ChunkContractAccessesInner { } } -// Data structures for chunk producers to send deployed contracts to chunk validators. - -/// Contains contracts (as code-hashes) deployed during the application of a chunk. -/// This is used by the chunk producer to let other validators know about which contracts -/// could be needed for validating a witness in the future, so that the validators can request missing code. -#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, ProtocolSchema)] -pub enum ChunkContractDeployments { - V1(ChunkContractDeploymentsV1), -} - -impl ChunkContractDeployments { - pub fn new( - next_chunk: ChunkProductionKey, - contracts: HashSet, - signer: &ValidatorSigner, - ) -> Self { - Self::V1(ChunkContractDeploymentsV1::new(next_chunk, contracts, signer)) - } - - pub fn contracts(&self) -> &[CodeHash] { - match self { - Self::V1(deploys) => &deploys.inner.contracts, - } - } - - pub fn chunk_production_key(&self) -> &ChunkProductionKey { - match self { - Self::V1(deploys) => &deploys.inner.next_chunk, - } - } -} - -#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, ProtocolSchema)] -pub struct ChunkContractDeploymentsV1 { - inner: ChunkContractDeploymentsInner, - /// Signature of the inner, signed by the chunk producer of the next chunk. 
-    signature: Signature,
-}
-
-impl ChunkContractDeploymentsV1 {
-    fn new(
-        next_chunk: ChunkProductionKey,
-        contracts: HashSet<CodeHash>,
-        signer: &ValidatorSigner,
-    ) -> Self {
-        let inner = ChunkContractDeploymentsInner::new(next_chunk, contracts);
-        let signature = signer.sign_chunk_contract_deployments(&inner);
-        Self { inner, signature }
-    }
-}
-
-#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, ProtocolSchema)]
-pub struct ChunkContractDeploymentsInner {
-    /// Production metadata of the chunk created after the chunk the deployments belong to.
-    /// We associate this message with the next-chunk info because this message is generated
-    /// and distributed while generating the state-witness of the next chunk
-    /// (by the chunk producer of the next chunk).
-    next_chunk: ChunkProductionKey,
-    /// List of code-hashes for the contracts accessed.
-    contracts: Vec<CodeHash>,
-    signature_differentiator: SignatureDifferentiator,
-}
-
-impl ChunkContractDeploymentsInner {
-    fn new(next_chunk: ChunkProductionKey, contracts: HashSet<CodeHash>) -> Self {
-        Self {
-            next_chunk,
-            contracts: contracts.into_iter().collect(),
-            signature_differentiator: "ChunkContractDeploymentsInner".to_owned(),
-        }
-    }
-}
-
 // Data structures for chunk validators to request contract code from chunk producers.

 /// Message to request missing code for a set of contracts.
@@ -393,6 +320,8 @@ pub struct ContractUpdates {
     pub contract_deploys: HashSet<CodeHash>,
 }

+// Data structures for chunk producers to send deployed contracts to chunk validators.
+
 #[derive(Clone, BorshSerialize, BorshDeserialize, ProtocolSchema)]
 pub struct ChunkContractDeploys {
     compressed_contracts: CompressedContractCode,
@@ -412,7 +341,7 @@ impl ChunkContractDeploys {
 impl ReedSolomonEncoderSerialize for ChunkContractDeploys {}
 impl ReedSolomonEncoderDeserialize for ChunkContractDeploys {}

-#[derive(Clone, Debug, BorshSerialize, BorshDeserialize, ProtocolSchema)]
+#[derive(Clone, Debug, PartialEq, Eq, BorshSerialize, BorshDeserialize, ProtocolSchema)]
 pub enum PartialEncodedContractDeploys {
     V1(PartialEncodedContractDeploysV1),
 }
@@ -451,7 +380,7 @@ impl Into<(ChunkProductionKey, PartialEncodedContractDeploysPart)>
     }
 }

-#[derive(Clone, Debug, BorshSerialize, BorshDeserialize, ProtocolSchema)]
+#[derive(Clone, Debug, PartialEq, Eq, BorshSerialize, BorshDeserialize, ProtocolSchema)]
 pub struct PartialEncodedContractDeploysV1 {
     inner: PartialEncodedContractDeploysInner,
     signature: Signature,
@@ -469,7 +398,7 @@ impl PartialEncodedContractDeploysV1 {
     }
 }

-#[derive(Clone, BorshSerialize, BorshDeserialize, ProtocolSchema)]
+#[derive(Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, ProtocolSchema)]
 pub struct PartialEncodedContractDeploysPart {
     pub part_ord: usize,
     pub data: Box<[u8]>,
@@ -486,7 +415,7 @@ impl std::fmt::Debug for PartialEncodedContractDeploysPart {
     }
 }

-#[derive(Clone, Debug, BorshSerialize, BorshDeserialize, ProtocolSchema)]
+#[derive(Clone, Debug, PartialEq, Eq, BorshSerialize, BorshDeserialize, ProtocolSchema)]
 pub struct PartialEncodedContractDeploysInner {
     next_chunk: ChunkProductionKey,
     part: PartialEncodedContractDeploysPart,
diff --git a/core/primitives/src/validator_signer.rs b/core/primitives/src/validator_signer.rs
index 8aa53daa347..2857c450ded 100644
--- a/core/primitives/src/validator_signer.rs
+++ b/core/primitives/src/validator_signer.rs
@@ -13,8 +13,8 @@ use crate::stateless_validation::chunk_endorsement::{
     ChunkEndorsementInner, ChunkEndorsementMetadata,
 };
 use crate::stateless_validation::contract_distribution::{
-    ChunkContractAccessesInner, ChunkContractDeploymentsInner, ContractCodeRequestInner,
-    ContractCodeResponseInner, PartialEncodedContractDeploysInner,
+    ChunkContractAccessesInner, ContractCodeRequestInner, ContractCodeResponseInner,
+    PartialEncodedContractDeploysInner,
 };
 use crate::stateless_validation::partial_witness::PartialEncodedStateWitnessInner;
 use crate::stateless_validation::state_witness::EncodedChunkStateWitness;
@@ -158,17 +158,6 @@ impl ValidatorSigner {
         }
     }

-    /// Signs the inner contents of a ChunkContractDeployments message.
-    pub fn sign_chunk_contract_deployments(
-        &self,
-        inner: &ChunkContractDeploymentsInner,
-    ) -> Signature {
-        match self {
-            ValidatorSigner::Empty(signer) => signer.sign_chunk_contract_deployments(inner),
-            ValidatorSigner::InMemory(signer) => signer.sign_chunk_contract_deployments(inner),
-        }
-    }
-
     pub fn sign_partial_encoded_contract_deploys(
         &self,
         inner: &PartialEncodedContractDeploysInner,
@@ -329,10 +318,6 @@ impl EmptyValidatorSigner {
         Signature::default()
     }

-    fn sign_chunk_contract_deployments(&self, _inner: &ChunkContractDeploymentsInner) -> Signature {
-        Signature::default()
-    }
-
     fn sign_partial_encoded_contract_deploys(
         &self,
         _inner: &PartialEncodedContractDeploysInner,
@@ -456,10 +441,6 @@ impl InMemoryValidatorSigner {
         self.signer.sign(&borsh::to_vec(inner).unwrap())
     }

-    fn sign_chunk_contract_deployments(&self, inner: &ChunkContractDeploymentsInner) -> Signature {
-        self.signer.sign(&borsh::to_vec(inner).unwrap())
-    }
-
     fn sign_partial_encoded_contract_deploys(
         &self,
         inner: &PartialEncodedContractDeploysInner,
diff --git a/pytest/lib/messages/block.py b/pytest/lib/messages/block.py
index b04516cec3e..6ebe82c8b81 100644
--- a/pytest/lib/messages/block.py
+++ b/pytest/lib/messages/block.py
@@ -376,15 +376,19 @@ class ChunkContractAccessesInner:
     pass


-class ChunkContractDeployments:
+class PartialEncodedContractDeploys:
     pass


-class ChunkContractDeploymentsV1:
+class PartialEncodedContractDeploysV1:
     pass


-class ChunkContractDeploymentsInner:
+class PartialEncodedContractDeploysInner:
+    pass
+
+
+class PartialEncodedContractDeploysPart:
     pass


@@ -1214,33 +1218,44 @@ class ChunkProductionKey:
         }
     ],
     [
-        ChunkContractDeployments, {
+        PartialEncodedContractDeploys, {
             'kind': 'enum',
             'field': 'enum',
-            'values': [['V1', ChunkContractDeploymentsV1],]
+            'values': [['V1', PartialEncodedContractDeploysV1],]
         }
     ],
     [
-        ChunkContractDeploymentsV1, {
+        PartialEncodedContractDeploysV1, {
             'kind': 'struct',
             'fields': [
-                ['inner', ChunkContractDeploymentsInner],
+                ['inner', PartialEncodedContractDeploysInner],
                 ['signature', Signature],
             ]
         }
     ],
     [
-        ChunkContractDeploymentsInner, {
+        PartialEncodedContractDeploysInner, {
             'kind': 'struct',
             'fields': [
                 ['next_chunk', ChunkProductionKey],
-                ['contracts', [[32]]],
+                ['part', PartialEncodedContractDeploysPart],
                 ['signature_differentiator', SignatureDifferentiator],
             ]
         }
     ],
+    [
+        PartialEncodedContractDeploysPart, {
+            'kind':
+                'struct',
+            'fields': [
+                ['part_ord', 'u64'],
+                ['data', ['u8']],
+                ['encoded_length', 'u64'],
+            ]
+        }
+    ],
     [
         ContractCodeRequest, {
             'kind': 'enum',
diff --git a/pytest/lib/messages/network.py b/pytest/lib/messages/network.py
index 5d17333d5e2..75e00e387cd 100644
--- a/pytest/lib/messages/network.py
+++ b/pytest/lib/messages/network.py
@@ -1,6 +1,6 @@
 from messages.crypto import Signature, PublicKey, MerklePath, ShardProof
 from messages.tx import SignedTransaction, Receipt
-from messages.block import Block, Approval, PartialEncodedChunk, PartialEncodedChunkRequestMsg, PartialEncodedChunkResponseMsg, PartialEncodedChunkForwardMsg, BlockHeader, ShardChunk, ShardChunkHeader, ShardChunkHeaderV1, ChunkEndorsement, ChunkEndorsementV1, ChunkStateWitnessAck, PartialEncodedStateWitness, ChunkContractAccesses, ChunkContractDeployments, ContractCodeRequest, ContractCodeResponse
+from messages.block import Block, Approval, PartialEncodedChunk, PartialEncodedChunkRequestMsg, PartialEncodedChunkResponseMsg, PartialEncodedChunkForwardMsg, BlockHeader, ShardChunk, ShardChunkHeader, ShardChunkHeaderV1, ChunkEndorsement, ChunkEndorsementV1, ChunkStateWitnessAck, PartialEncodedStateWitness, ChunkContractAccesses, PartialEncodedContractDeploys, ContractCodeRequest, ContractCodeResponse
 from messages.shard import StateRootNode

@@ -398,9 +398,12 @@ class AdvertisedPeerDistance:
         ],
         ['VersionedChunkEndorsement', ChunkEndorsement],
         ['ChunkContractAccesses', ChunkContractAccesses],
-        ['ChunkContractDeployments', ChunkContractDeployments],
         ['ContractCodeRequest', ContractCodeRequest],
         ['ContractCodeResponse', ContractCodeResponse],
+        [
+            'PartialEncodedContractDeploys',
+            PartialEncodedContractDeploys
+        ],
     ]
 }
 ],
diff --git a/tools/protocol-schema-check/res/protocol_schema.toml b/tools/protocol-schema-check/res/protocol_schema.toml
index eed9c1d2489..fe50b5ac8af 100644
--- a/tools/protocol-schema-check/res/protocol_schema.toml
+++ b/tools/protocol-schema-check/res/protocol_schema.toml
@@ -56,9 +56,6 @@ ChallengeBody = 2988613810
 ChunkContractAccesses = 4097831706
 ChunkContractAccessesInner = 2563086819
 ChunkContractAccessesV1 = 2405344532
-ChunkContractDeployments = 599007946
-ChunkContractDeploymentsInner = 308114150
-ChunkContractDeploymentsV1 = 3752738828
 ChunkContractDeploys = 414270153
 ChunkEndorsement = 1294072929
 ChunkEndorsementInner = 2425301775
@@ -179,7 +176,7 @@ PeerChainInfoV2 = 1260985250
 PeerId = 2447445523
 PeerIdOrHash = 4080492546
 PeerInfo = 3831734408
-PeerMessage = 4064649473
+PeerMessage = 1307004637
 Ping = 2783493472
 Pong = 3159638327
 PrepareError = 4009037507
@@ -204,8 +201,8 @@ ReceiptV1 = 2994842769
 ReceiptValidationError = 551721215
 ReceivedData = 3601438283
 RootProof = 3135729669
-RoutedMessage = 2449624716
-RoutedMessageBody = 22919225
+RoutedMessage = 1746588970
+RoutedMessageBody = 248291540
 RoutingTableUpdate = 2987752645
 Secp256K1PublicKey = 4117078281
 Secp256K1Signature = 3687154735

From ebb8b8b81c7c229b196131a2500eb505d163d843 Mon Sep 17 00:00:00 2001
From: Jan Malinowski <149345204+jancionear@users.noreply.github.com>
Date: Wed, 30 Oct 2024 15:27:03 +0000
Subject: [PATCH 12/13] Add information about 2.2.1 to CHANGELOG.md (#12097)

---
 CHANGELOG.md | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7a3b17da560..a1000f77614 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -20,6 +20,23 @@
 * **Archival nodes only:** Stop saving partial chunks to `PartialChunks` column in the Cold DB. Instead, archival nodes will reconstruct partial chunks from the `Chunks` column.
 * Decentralized state sync: Before, nodes that needed to download state (either because they're several epochs behind the chain or because they're going to start producing chunks for a shard they don't currently track) would download them from a centralized GCS bucket. Now, nodes will attempt to download pieces of the state from peers in the network, and only fallback to downloading from GCS if that fails. Please note that in order to participate in providing state parts to peers, your node may generate snapshots of the state. These snapshots should not take too much space, since they're hard links to database files that get cleaned up on every epoch.
+### 2.2.1
+
+This release patches a bug found in the 2.2.0 release.
+
+### Non-protocol changes
+There was a bug in the integration between Ethereum implicit accounts and the compiled contract cache which sometimes caused nodes to get stuck. This would most often happen during state sync, but could also happen by itself. Please update your nodes to avoid getting stuck.
+
+A node that hits this bug will print an error about an `InvalidStateRoot` in the logs and then it'll be unable to sync.
+It's possible to recover a stalled node by clearing the compiled contract cache and rolling back one block:
+1. Stop the neard process
+2. Download the new version of neard
+3. Clear the compiled contract cache: `rm -rf ~/.near/data/contracts`
+4. Undo the last block: `./neard undo-block`
+5. Start neard
+
+After that the node should be able to recover and sync with the rest of the network.
+
 ### 2.2.0

 ### Protocol Changes

From 8e30ccdd425ecbbeeec8d96bfc9a7e02bc35c2d3 Mon Sep 17 00:00:00 2001
From: Marcelo Diop-Gonzalez
Date: Wed, 30 Oct 2024 12:24:46 -0400
Subject: [PATCH 13/13] docs(state-sync): document the fields of the state
 sync header (#12063)

This part is a little light on comments, so it'll be helpful to add
comments explaining what these fields are, so that when we make changes
to state sync, we can be on the same page about what we're changing.
---
 core/primitives/src/state_sync.rs | 31 +++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)

diff --git a/core/primitives/src/state_sync.rs b/core/primitives/src/state_sync.rs
index 58f5d24bf94..5156559c9d7 100644
--- a/core/primitives/src/state_sync.rs
+++ b/core/primitives/src/state_sync.rs
@@ -32,14 +32,45 @@ pub struct ShardStateSyncResponseHeaderV1 {
     pub state_root_node: StateRootNode,
 }

+/// Let B[h] be the block with hash h.
+/// Let shard_id be the shard ID of the shard this header is meant for.
+/// As a shorthand, let B_sync = B[sync_hash], B_prev = B[B_sync.prev_hash]
+///
+/// Also let B_chunk be the block with height B_prev.chunks[shard_id].height_included
+/// that is an ancestor of B_sync. So, the last block with a new chunk before B_sync.
+/// And let B_prev_chunk = B[B_chunk.prev_hash]. So, the block before the last block with a new chunk before B_sync.
+///
+/// Given these definitions, the meanings of the fields are explained below.
 #[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, ProtocolSchema)]
 pub struct ShardStateSyncResponseHeaderV2 {
+    /// The chunk whose header is included as B_prev.chunks[shard_id].
+    /// This chunk will be applied after downloading state.
     pub chunk: ShardChunk,
+    /// A merkle path for (Self::chunk.hash, Self::chunk.height_included), verifiable
+    /// against B_prev.chunk_headers_root.
     pub chunk_proof: MerklePath,
+    /// This is None if sync_hash is the genesis hash. Otherwise, it's B_prev_chunk.chunks[shard_id].
     pub prev_chunk_header: Option<ShardChunkHeader>,
+    /// A merkle path for (Self::prev_chunk_header.hash, Self::prev_chunk_header.height_included), verifiable
+    /// against B_prev_chunk.chunk_headers_root.
     pub prev_chunk_proof: Option<MerklePath>,
+    /// This field contains the incoming receipts for shard_id for B_sync and B_prev_chunk.
+    /// So, this field has at most two elements.
+    /// These receipts are used to apply `chunk` after downloading state.
     pub incoming_receipts_proofs: Vec<ReceiptProofResponse>,
+    /// This field contains the info necessary to verify that the receipt proofs in Self::incoming_receipts_proofs
+    /// are actually the ones referenced on chain.
+    ///
+    /// The length of this field is the same as the length of Self::incoming_receipts_proofs, and elements
+    /// of the two at a given index are taken together for verification. For a given index i,
+    /// root_proofs[i] is a vector of the same length as incoming_receipts_proofs[i].1, which itself is a
+    /// vector of receipt proofs for all "from_shard_ids" that sent receipts to shard_id. root_proofs[i][j]
+    /// contains a merkle root equal to the prev_outgoing_receipts_root field of the corresponding chunk
+    /// included in the block with hash incoming_receipts_proofs[i].0, and a merkle path to verify it against
+    /// that block's prev_chunk_outgoing_receipts_root field.
     pub root_proofs: Vec<Vec<RootProof>>,
+    /// The state root with hash equal to B_prev.chunks[shard_id].prev_state_root.
+    /// That is, the state root node of the trie before applying the chunks in B_prev.
     pub state_root_node: StateRootNode,
 }
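
For illustration, the kind of check these fields enable can be sketched as follows. This is a minimal, hypothetical example rather than part of the patches above: the free-standing helper and the `prev_header` argument are assumptions for the sketch, and the real validation lives in the chain crate. It uses `near_primitives::merkle::verify_path` together with `ChunkHashHeight`, which pairs a chunk hash with its height_included the same way chunk_headers_root is computed from a block's chunk headers.

use near_primitives::block_header::BlockHeader;
use near_primitives::merkle::verify_path;
use near_primitives::sharding::ChunkHashHeight;
use near_primitives::state_sync::ShardStateSyncResponseHeaderV2;

/// Illustrative sketch only: check that `header.chunk` really is
/// B_prev.chunks[shard_id] by verifying the merkle path in `chunk_proof`
/// for the leaf (chunk hash, height_included) against
/// B_prev.chunk_headers_root.
fn chunk_proof_is_valid(
    header: &ShardStateSyncResponseHeaderV2,
    prev_header: &BlockHeader, // header of B_prev = B[B_sync.prev_hash]
) -> bool {
    let leaf = ChunkHashHeight(header.chunk.chunk_hash(), header.chunk.height_included());
    verify_path(*prev_header.chunk_headers_root(), &header.chunk_proof, &leaf)
}

The same shape of check applies to prev_chunk_header/prev_chunk_proof (against B_prev_chunk.chunk_headers_root), and root_proofs[i][j] plays the analogous role for each receipt proof in incoming_receipts_proofs.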