From 61c32617cd02156d920c7c89b7b6fa8a79ddc799 Mon Sep 17 00:00:00 2001 From: aldenhu Date: Mon, 6 Feb 2023 06:46:04 +0000 Subject: [PATCH 01/14] trivial: StateView::get_state_value_bytes in preparation for exposing StateValue --- api/src/context.rs | 2 +- api/src/state.rs | 6 +++--- aptos-move/aptos-aggregator/src/aggregator_extension.rs | 4 ++-- aptos-move/aptos-aggregator/src/delta_change_set.rs | 4 ++-- .../src/aptos_test_harness.rs | 6 +++--- aptos-move/aptos-validator-interface/src/lib.rs | 2 +- aptos-move/aptos-vm/src/aptos_vm.rs | 2 +- aptos-move/aptos-vm/src/data_cache.rs | 4 ++-- aptos-move/aptos-vm/src/delta_state_view.rs | 4 ++-- aptos-move/block-executor/src/output_delta_resolver.rs | 2 +- aptos-move/block-executor/src/proptest_types/types.rs | 6 +++--- aptos-move/block-executor/src/view.rs | 8 ++++---- aptos-move/e2e-tests/src/data_store.rs | 2 +- aptos-move/e2e-tests/src/executor.rs | 9 +++++---- aptos-move/vm-genesis/src/genesis_context.rs | 2 +- execution/executor-benchmark/src/fake_executor.rs | 2 +- execution/executor/src/db_bootstrapper.rs | 4 ++-- execution/executor/src/mock_vm/mock_vm_test.rs | 2 +- execution/executor/src/mock_vm/mod.rs | 4 ++-- storage/state-view/src/account_with_state_view.rs | 2 +- storage/state-view/src/lib.rs | 6 +++--- storage/storage-interface/src/cached_state_view.rs | 8 ++++---- storage/storage-interface/src/state_view.rs | 2 +- 23 files changed, 47 insertions(+), 46 deletions(-) diff --git a/api/src/context.rs b/api/src/context.rs index e98f3bb040573..ee169e5a6657a 100644 --- a/api/src/context.rs +++ b/api/src/context.rs @@ -238,7 +238,7 @@ impl Context { pub fn get_state_value(&self, state_key: &StateKey, version: u64) -> Result>> { self.db .state_view_at_version(Some(version))? 
- .get_state_value(state_key) + .get_state_value_bytes(state_key) } pub fn get_state_value_poem( diff --git a/api/src/state.rs b/api/src/state.rs index 6f33b23974221..86efc33eb7230 100644 --- a/api/src/state.rs +++ b/api/src/state.rs @@ -305,7 +305,7 @@ impl StateApi { let (ledger_info, ledger_version, state_view) = self.preprocess_request(ledger_version.map(|inner| inner.0))?; let bytes = state_view - .get_state_value(&state_key) + .get_state_value_bytes(&state_key) .context(format!("Failed to query DB to check for {:?}", state_key)) .map_err(|err| { BasicErrorWith404::internal_with_code( @@ -392,7 +392,7 @@ impl StateApi { // Retrieve value from the state key let state_key = StateKey::table_item(TableHandle(table_handle.into()), raw_key); let bytes = state_view - .get_state_value(&state_key) + .get_state_value_bytes(&state_key) .context(format!( "Failed when trying to retrieve table item from the DB with key: {}", key @@ -446,7 +446,7 @@ impl StateApi { table_item_request.key.0.clone(), ); let bytes = state_view - .get_state_value(&state_key) + .get_state_value_bytes(&state_key) .context(format!( "Failed when trying to retrieve table item from the DB with key: {}", table_item_request.key, diff --git a/aptos-move/aptos-aggregator/src/aggregator_extension.rs b/aptos-move/aptos-aggregator/src/aggregator_extension.rs index e4e738f19379b..5d9d7123a0ef7 100644 --- a/aptos-move/aptos-aggregator/src/aggregator_extension.rs +++ b/aptos-move/aptos-aggregator/src/aggregator_extension.rs @@ -394,7 +394,7 @@ mod test { impl TStateView for FakeTestStorage { type Key = StateKey; - fn get_state_value(&self, state_key: &StateKey) -> anyhow::Result>> { + fn get_state_value_bytes(&self, state_key: &StateKey) -> anyhow::Result>> { Ok(self.data.get(state_key).cloned()) } @@ -414,7 +414,7 @@ mod test { key: &[u8], ) -> Result>, anyhow::Error> { let state_key = StateKey::table_item(AptosTableHandle::from(*handle), key.to_vec()); - self.get_state_value(&state_key) + 
self.get_state_value_bytes(&state_key) } } diff --git a/aptos-move/aptos-aggregator/src/delta_change_set.rs b/aptos-move/aptos-aggregator/src/delta_change_set.rs index c7327bc72d1fd..bf0b749101f1f 100644 --- a/aptos-move/aptos-aggregator/src/delta_change_set.rs +++ b/aptos-move/aptos-aggregator/src/delta_change_set.rs @@ -170,7 +170,7 @@ impl DeltaOp { state_key: &StateKey, ) -> anyhow::Result { state_view - .get_state_value(state_key) + .get_state_value_bytes(state_key) .map_err(|_| VMStatus::Error(StatusCode::STORAGE_ERROR)) .and_then(|maybe_bytes| { match maybe_bytes { @@ -540,7 +540,7 @@ mod tests { impl TStateView for FakeView { type Key = StateKey; - fn get_state_value(&self, state_key: &StateKey) -> anyhow::Result>> { + fn get_state_value_bytes(&self, state_key: &StateKey) -> anyhow::Result>> { Ok(self.data.get(state_key).cloned()) } diff --git a/aptos-move/aptos-transactional-test-harness/src/aptos_test_harness.rs b/aptos-move/aptos-transactional-test-harness/src/aptos_test_harness.rs index ea28127e9da86..f55d6327a07c1 100644 --- a/aptos-move/aptos-transactional-test-harness/src/aptos_test_harness.rs +++ b/aptos-move/aptos-transactional-test-harness/src/aptos_test_harness.rs @@ -368,7 +368,7 @@ impl<'a> AptosTestAdapter<'a> { AccessPath::resource_access_path(*signer_addr, AccountResource::struct_tag()); let account_blob = self .storage - .get_state_value(&StateKey::access_path(account_access_path)) + .get_state_value_bytes(&StateKey::access_path(account_access_path)) .unwrap() .ok_or_else(|| { format_err!( @@ -388,7 +388,7 @@ impl<'a> AptosTestAdapter<'a> { let balance_blob = self .storage - .get_state_value(&StateKey::access_path(coin_access_path)) + .get_state_value_bytes(&StateKey::access_path(coin_access_path)) .unwrap() .ok_or_else(|| { format_err!( @@ -895,7 +895,7 @@ impl<'a> MoveTestAdapter<'a> for AptosTestAdapter<'a> { let bytes = self .storage - .get_state_value(&state_key) + .get_state_value_bytes(&state_key) .unwrap() .ok_or_else(|| 
format_err!("Failed to fetch table item.",))?; diff --git a/aptos-move/aptos-validator-interface/src/lib.rs b/aptos-move/aptos-validator-interface/src/lib.rs index 8e905d821e8c4..62aceaadd363f 100644 --- a/aptos-move/aptos-validator-interface/src/lib.rs +++ b/aptos-move/aptos-validator-interface/src/lib.rs @@ -186,7 +186,7 @@ impl DebuggerStateView { impl TStateView for DebuggerStateView { type Key = StateKey; - fn get_state_value(&self, state_key: &StateKey) -> Result>> { + fn get_state_value_bytes(&self, state_key: &StateKey) -> Result>> { self.get_state_value_internal(state_key, self.version) } diff --git a/aptos-move/aptos-vm/src/aptos_vm.rs b/aptos-move/aptos-vm/src/aptos_vm.rs index 770e188c6b714..ebc6dd13273e0 100644 --- a/aptos-move/aptos-vm/src/aptos_vm.rs +++ b/aptos-move/aptos-vm/src/aptos_vm.rs @@ -867,7 +867,7 @@ impl AptosVM { // access path that the write set is going to update. for (state_key, _) in write_set.iter() { state_view - .get_state_value(state_key) + .get_state_value_bytes(state_key) .map_err(|_| VMStatus::Error(StatusCode::STORAGE_ERROR))?; } Ok(()) diff --git a/aptos-move/aptos-vm/src/data_cache.rs b/aptos-move/aptos-vm/src/data_cache.rs index 60a1a6005edff..68c7fd1d0977b 100644 --- a/aptos-move/aptos-vm/src/data_cache.rs +++ b/aptos-move/aptos-vm/src/data_cache.rs @@ -109,7 +109,7 @@ impl<'a, S: StateView> StorageAdapter<'a, S> { pub fn get(&self, access_path: AccessPath) -> PartialVMResult>> { self.0 - .get_state_value(&StateKey::access_path(access_path)) + .get_state_value_bytes(&StateKey::access_path(access_path)) .map_err(|_| PartialVMError::new(StatusCode::STORAGE_ERROR)) } } @@ -168,7 +168,7 @@ impl<'a, S: StateView> TableResolver for StorageAdapter<'a, S> { handle: &TableHandle, key: &[u8], ) -> Result>, Error> { - self.get_state_value(&StateKey::table_item((*handle).into(), key.to_vec())) + self.get_state_value_bytes(&StateKey::table_item((*handle).into(), key.to_vec())) } } diff --git 
a/aptos-move/aptos-vm/src/delta_state_view.rs b/aptos-move/aptos-vm/src/delta_state_view.rs index 2b0ca0df6f12b..9b726ab7c8b34 100644 --- a/aptos-move/aptos-vm/src/delta_state_view.rs +++ b/aptos-move/aptos-vm/src/delta_state_view.rs @@ -29,11 +29,11 @@ where self.base.id() } - fn get_state_value(&self, state_key: &StateKey) -> Result>> { + fn get_state_value_bytes(&self, state_key: &StateKey) -> Result>> { match self.write_set.get(state_key) { Some(WriteOp::Creation(data) | WriteOp::Modification(data)) => Ok(Some(data.clone())), Some(WriteOp::Deletion) => Ok(None), - None => self.base.get_state_value(state_key), + None => self.base.get_state_value_bytes(state_key), } } diff --git a/aptos-move/block-executor/src/output_delta_resolver.rs b/aptos-move/block-executor/src/output_delta_resolver.rs index 3f3b5e057b607..dc284a879d36f 100644 --- a/aptos-move/block-executor/src/output_delta_resolver.rs +++ b/aptos-move/block-executor/src/output_delta_resolver.rs @@ -29,7 +29,7 @@ impl OutputDeltaResolver { // TODO: with more deltas, re-use executor threads and process in parallel. for key in self.versioned_outputs.aggregator_keys() { let mut latest_value: Option = base_view - .get_state_value(&key) + .get_state_value_bytes(&key) .ok() // Was anything found in storage .and_then(|value| value.map(|bytes| deserialize(&bytes))); diff --git a/aptos-move/block-executor/src/proptest_types/types.rs b/aptos-move/block-executor/src/proptest_types/types.rs index 351a329e9bba5..88af6221c6250 100644 --- a/aptos-move/block-executor/src/proptest_types/types.rs +++ b/aptos-move/block-executor/src/proptest_types/types.rs @@ -51,7 +51,7 @@ where type Key = K; /// Gets the state value for a given state key. - fn get_state_value(&self, _: &K) -> anyhow::Result>> { + fn get_state_value_bytes(&self, _: &K) -> anyhow::Result>> { // When aggregator value has to be resolved from storage, pretend it is 100. 
Ok(Some(serialize(&STORAGE_AGGREGATOR_VALUE))) } @@ -81,7 +81,7 @@ where type Key = K; /// Gets the state value for a given state key. - fn get_state_value(&self, _: &K) -> anyhow::Result>> { + fn get_state_value_bytes(&self, _: &K) -> anyhow::Result>> { Ok(None) } @@ -441,7 +441,7 @@ where let mut reads_result = vec![]; for k in reads[read_idx].iter() { // TODO: later test errors as well? (by fixing state_view behavior). - reads_result.push(view.get_state_value(k).unwrap()); + reads_result.push(view.get_state_value_bytes(k).unwrap()); } ExecutionStatus::Success(Output( writes_and_deltas[write_idx].0.clone(), diff --git a/aptos-move/block-executor/src/view.rs b/aptos-move/block-executor/src/view.rs index 575126d761199..e9103f2710dc3 100644 --- a/aptos-move/block-executor/src/view.rs +++ b/aptos-move/block-executor/src/view.rs @@ -182,7 +182,7 @@ impl<'a, T: Transaction, S: TStateView> LatestView<'a, T, S> { impl<'a, T: Transaction, S: TStateView> TStateView for LatestView<'a, T, S> { type Key = T::Key; - fn get_state_value(&self, state_key: &T::Key) -> anyhow::Result>> { + fn get_state_value_bytes(&self, state_key: &T::Key) -> anyhow::Result>> { match self.latest_view { ViewMapKind::MultiVersion(map) => match map.read(state_key, self.txn_idx) { ReadResult::Value(v) => Ok(v.extract_raw_bytes()), @@ -190,7 +190,7 @@ impl<'a, T: Transaction, S: TStateView> TStateView for LatestView< ReadResult::Unresolved(delta) => { let from_storage = self .base_view - .get_state_value(state_key)? + .get_state_value_bytes(state_key)? 
.map_or(Err(VMStatus::Error(StatusCode::STORAGE_ERROR)), |bytes| { Ok(deserialize(&bytes)) })?; @@ -199,12 +199,12 @@ impl<'a, T: Transaction, S: TStateView> TStateView for LatestView< .map_err(|pe| pe.finish(Location::Undefined).into_vm_status())?; Ok(Some(serialize(&result))) }, - ReadResult::None => self.base_view.get_state_value(state_key), + ReadResult::None => self.base_view.get_state_value_bytes(state_key), }, ViewMapKind::BTree(map) => map.get(state_key).map_or_else( || { // let ret = - self.base_view.get_state_value(state_key) + self.base_view.get_state_value_bytes(state_key) // TODO: common treatment with the above case. // TODO: enable below when logging isn't a circular dependency. diff --git a/aptos-move/e2e-tests/src/data_store.rs b/aptos-move/e2e-tests/src/data_store.rs index 9c6527aff7714..6639c9fa1b466 100644 --- a/aptos-move/e2e-tests/src/data_store.rs +++ b/aptos-move/e2e-tests/src/data_store.rs @@ -106,7 +106,7 @@ impl FakeDataStore { impl TStateView for FakeDataStore { type Key = StateKey; - fn get_state_value(&self, state_key: &StateKey) -> Result>> { + fn get_state_value_bytes(&self, state_key: &StateKey) -> Result>> { Ok(self.state_data.get(state_key).cloned()) } diff --git a/aptos-move/e2e-tests/src/executor.rs b/aptos-move/e2e-tests/src/executor.rs index 2cc65a31950a8..9b232d57f2b33 100644 --- a/aptos-move/e2e-tests/src/executor.rs +++ b/aptos-move/e2e-tests/src/executor.rs @@ -289,9 +289,10 @@ impl FakeExecutor { pub fn read_resource(&self, addr: &AccountAddress) -> Option { let ap = AccessPath::resource_access_path(*addr, T::struct_tag()); - let data_blob = TStateView::get_state_value(&self.data_store, &StateKey::access_path(ap)) - .expect("account must exist in data store") - .unwrap_or_else(|| panic!("Can't fetch {} resource for {}", T::STRUCT_NAME, addr)); + let data_blob = + TStateView::get_state_value_bytes(&self.data_store, &StateKey::access_path(ap)) + .expect("account must exist in data store") + .unwrap_or_else(|| panic!("Can't 
fetch {} resource for {}", T::STRUCT_NAME, addr)); bcs::from_bytes(data_blob.as_slice()).ok() } @@ -465,7 +466,7 @@ impl FakeExecutor { /// Get the blob for the associated AccessPath pub fn read_state_value(&self, state_key: &StateKey) -> Option> { - TStateView::get_state_value(&self.data_store, state_key).unwrap() + TStateView::get_state_value_bytes(&self.data_store, state_key).unwrap() } /// Set the blob for the associated AccessPath diff --git a/aptos-move/vm-genesis/src/genesis_context.rs b/aptos-move/vm-genesis/src/genesis_context.rs index 42d395f3a5903..440bc3e9f276c 100644 --- a/aptos-move/vm-genesis/src/genesis_context.rs +++ b/aptos-move/vm-genesis/src/genesis_context.rs @@ -35,7 +35,7 @@ impl GenesisStateView { impl TStateView for GenesisStateView { type Key = StateKey; - fn get_state_value(&self, state_key: &StateKey) -> Result>> { + fn get_state_value_bytes(&self, state_key: &StateKey) -> Result>> { Ok(self.state_data.get(state_key).cloned()) } diff --git a/execution/executor-benchmark/src/fake_executor.rs b/execution/executor-benchmark/src/fake_executor.rs index 5eaf7a22dca08..176d311a96af6 100644 --- a/execution/executor-benchmark/src/fake_executor.rs +++ b/execution/executor-benchmark/src/fake_executor.rs @@ -172,7 +172,7 @@ impl FakeExecutor { state_view: &CachedStateView, ) -> Result> { let value = state_view - .get_state_value(state_key)? + .get_state_value_bytes(state_key)? 
.map(move |value| bcs::from_bytes(value.as_slice())); value.transpose().map_err(anyhow::Error::msg) } diff --git a/execution/executor/src/db_bootstrapper.rs b/execution/executor/src/db_bootstrapper.rs index 0a9cdbdb4bb08..13dffbd81fa3d 100644 --- a/execution/executor/src/db_bootstrapper.rs +++ b/execution/executor/src/db_bootstrapper.rs @@ -197,7 +197,7 @@ pub fn calculate_genesis( fn get_state_timestamp(state_view: &CachedStateView) -> Result { let rsrc_bytes = &state_view - .get_state_value(&StateKey::access_path(AccessPath::new( + .get_state_value_bytes(&StateKey::access_path(AccessPath::new( CORE_CODE_ADDRESS, TimestampResource::resource_path(), )))? @@ -208,7 +208,7 @@ fn get_state_timestamp(state_view: &CachedStateView) -> Result { fn get_state_epoch(state_view: &CachedStateView) -> Result { let rsrc_bytes = &state_view - .get_state_value(&StateKey::access_path(AccessPath::new( + .get_state_value_bytes(&StateKey::access_path(AccessPath::new( CORE_CODE_ADDRESS, ConfigurationResource::resource_path(), )))? 
diff --git a/execution/executor/src/mock_vm/mock_vm_test.rs b/execution/executor/src/mock_vm/mock_vm_test.rs index 598070ed0654f..c0851f6ee0d1e 100644 --- a/execution/executor/src/mock_vm/mock_vm_test.rs +++ b/execution/executor/src/mock_vm/mock_vm_test.rs @@ -21,7 +21,7 @@ struct MockStateView; impl TStateView for MockStateView { type Key = StateKey; - fn get_state_value(&self, _state_key: &StateKey) -> Result>> { + fn get_state_value_bytes(&self, _state_key: &StateKey) -> Result>> { Ok(None) } diff --git a/execution/executor/src/mock_vm/mod.rs b/execution/executor/src/mock_vm/mod.rs index ee02d3f643ebe..30ecce9703510 100644 --- a/execution/executor/src/mock_vm/mod.rs +++ b/execution/executor/src/mock_vm/mod.rs @@ -233,7 +233,7 @@ fn read_seqnum_from_storage(state_view: &impl StateView, seqnum_access_path: &Ac fn read_u64_from_storage(state_view: &impl StateView, access_path: &AccessPath) -> u64 { state_view - .get_state_value(&StateKey::access_path(access_path.clone())) + .get_state_value_bytes(&StateKey::access_path(access_path.clone())) .expect("Failed to query storage.") .map_or(0, |bytes| decode_bytes(&bytes)) } @@ -243,7 +243,7 @@ fn read_state_value_from_storage( access_path: &AccessPath, ) -> Option> { state_view - .get_state_value(&StateKey::access_path(access_path.clone())) + .get_state_value_bytes(&StateKey::access_path(access_path.clone())) .expect("Failed to query storage.") } diff --git a/storage/state-view/src/account_with_state_view.rs b/storage/state-view/src/account_with_state_view.rs index 31ad7e4651e3b..15fe6ae40ecba 100644 --- a/storage/state-view/src/account_with_state_view.rs +++ b/storage/state-view/src/account_with_state_view.rs @@ -21,7 +21,7 @@ impl<'a> AccountWithStateView<'a> { impl<'a> AccountView for AccountWithStateView<'a> { fn get_state_value(&self, state_key: &StateKey) -> anyhow::Result>> { - self.state_view.get_state_value(state_key) + self.state_view.get_state_value_bytes(state_key) } fn get_account_address(&self) -> 
anyhow::Result> { diff --git a/storage/state-view/src/lib.rs b/storage/state-view/src/lib.rs index 99856b4bc7d89..152a3f4a34d6f 100644 --- a/storage/state-view/src/lib.rs +++ b/storage/state-view/src/lib.rs @@ -30,7 +30,7 @@ pub trait TStateView { } /// Gets the state value for a given state key. - fn get_state_value(&self, state_key: &Self::Key) -> Result>>; + fn get_state_value_bytes(&self, state_key: &Self::Key) -> Result>>; /// VM needs this method to know whether the current state view is for genesis state creation. /// Currently TransactionPayload::WriteSet is only valid for genesis state creation. @@ -67,8 +67,8 @@ where self.deref().id() } - fn get_state_value(&self, state_key: &K) -> Result>> { - self.deref().get_state_value(state_key) + fn get_state_value_bytes(&self, state_key: &K) -> Result>> { + self.deref().get_state_value_bytes(state_key) } fn is_genesis(&self) -> bool { diff --git a/storage/storage-interface/src/cached_state_view.rs b/storage/storage-interface/src/cached_state_view.rs index 223ef2ef0cf4d..2452701b3fda1 100644 --- a/storage/storage-interface/src/cached_state_view.rs +++ b/storage/storage-interface/src/cached_state_view.rs @@ -126,7 +126,7 @@ impl CachedStateView { .into_iter() .for_each(|key| { s.spawn(move |_| { - self.get_state_value(key).expect("Must succeed."); + self.get_state_value_bytes(key).expect("Must succeed."); }) }); }); @@ -194,7 +194,7 @@ impl TStateView for CachedStateView { self.id } - fn get_state_value(&self, state_key: &StateKey) -> Result>> { + fn get_state_value_bytes(&self, state_key: &StateKey) -> Result>> { let _timer = TIMER.with_label_values(&["get_state_value"]).start_timer(); // First check if the cache has the state value. 
if let Some(contents) = self.state_cache.get(state_key) { @@ -240,13 +240,13 @@ impl TStateView for CachedDbStateView { self.db_state_view.id() } - fn get_state_value(&self, state_key: &StateKey) -> Result>> { + fn get_state_value_bytes(&self, state_key: &StateKey) -> Result>> { // First check if the cache has the state value. if let Some(contents) = self.state_cache.read().get(state_key) { // This can return None, which means the value has been deleted from the DB. return Ok(contents.clone()); } - let state_value_option = self.db_state_view.get_state_value(state_key)?; + let state_value_option = self.db_state_view.get_state_value_bytes(state_key)?; // Update the cache if still empty let mut cache = self.state_cache.write(); let new_value = cache diff --git a/storage/storage-interface/src/state_view.rs b/storage/storage-interface/src/state_view.rs index c1aac03b52b2b..8b0dac6a2ce61 100644 --- a/storage/storage-interface/src/state_view.rs +++ b/storage/storage-interface/src/state_view.rs @@ -30,7 +30,7 @@ impl DbStateView { impl TStateView for DbStateView { type Key = StateKey; - fn get_state_value(&self, state_key: &StateKey) -> Result>> { + fn get_state_value_bytes(&self, state_key: &StateKey) -> Result>> { self.get(state_key) } From 0e28d3ad197f97758de8995a3f70b85efedd38a6 Mon Sep 17 00:00:00 2001 From: aldenhu Date: Mon, 6 Feb 2023 07:28:06 +0000 Subject: [PATCH 02/14] expose StateValue via StateView::get_state_value() --- .../src/aggregator_extension.rs | 6 +++--- .../aptos-aggregator/src/delta_change_set.rs | 8 +++++--- .../aptos-validator-interface/src/lib.rs | 7 ++++--- aptos-move/aptos-vm/src/delta_state_view.rs | 12 ++++++++---- .../block-executor/src/proptest_types/types.rs | 8 ++++---- aptos-move/block-executor/src/view.rs | 16 ++++++++-------- aptos-move/e2e-tests/src/data_store.rs | 8 +++++--- aptos-move/vm-genesis/src/genesis_context.rs | 8 +++++--- execution/executor/src/mock_vm/mock_vm_test.rs | 6 ++++-- storage/state-view/src/lib.rs | 16 
++++++++++++---- .../storage-interface/src/cached_state_view.rs | 18 +++++++++--------- storage/storage-interface/src/state_view.rs | 12 ++++++------ 12 files changed, 73 insertions(+), 52 deletions(-) diff --git a/aptos-move/aptos-aggregator/src/aggregator_extension.rs b/aptos-move/aptos-aggregator/src/aggregator_extension.rs index 5d9d7123a0ef7..eeb3341f09e85 100644 --- a/aptos-move/aptos-aggregator/src/aggregator_extension.rs +++ b/aptos-move/aptos-aggregator/src/aggregator_extension.rs @@ -368,7 +368,7 @@ mod test { use aptos_types::{ account_address::AccountAddress, state_store::{ - state_key::StateKey, state_storage_usage::StateStorageUsage, + state_key::StateKey, state_storage_usage::StateStorageUsage, state_value::StateValue, table::TableHandle as AptosTableHandle, }, }; @@ -394,8 +394,8 @@ mod test { impl TStateView for FakeTestStorage { type Key = StateKey; - fn get_state_value_bytes(&self, state_key: &StateKey) -> anyhow::Result>> { - Ok(self.data.get(state_key).cloned()) + fn get_state_value(&self, state_key: &StateKey) -> anyhow::Result> { + Ok(self.data.get(state_key).cloned().map(StateValue::new)) } fn is_genesis(&self) -> bool { diff --git a/aptos-move/aptos-aggregator/src/delta_change_set.rs b/aptos-move/aptos-aggregator/src/delta_change_set.rs index bf0b749101f1f..41d087ca6bfa0 100644 --- a/aptos-move/aptos-aggregator/src/delta_change_set.rs +++ b/aptos-move/aptos-aggregator/src/delta_change_set.rs @@ -352,7 +352,9 @@ impl ::std::iter::IntoIterator for DeltaChangeSet { mod tests { use super::*; use aptos_state_view::TStateView; - use aptos_types::state_store::state_storage_usage::StateStorageUsage; + use aptos_types::state_store::{ + state_storage_usage::StateStorageUsage, state_value::StateValue, + }; use claims::{assert_err, assert_matches, assert_ok, assert_ok_eq}; use once_cell::sync::Lazy; use std::collections::HashMap; @@ -540,8 +542,8 @@ mod tests { impl TStateView for FakeView { type Key = StateKey; - fn get_state_value_bytes(&self, 
state_key: &StateKey) -> anyhow::Result>> { - Ok(self.data.get(state_key).cloned()) + fn get_state_value(&self, state_key: &StateKey) -> anyhow::Result> { + Ok(self.data.get(state_key).cloned().map(StateValue::new)) } fn is_genesis(&self) -> bool { diff --git a/aptos-move/aptos-validator-interface/src/lib.rs b/aptos-move/aptos-validator-interface/src/lib.rs index 62aceaadd363f..2012d59c87c2c 100644 --- a/aptos-move/aptos-validator-interface/src/lib.rs +++ b/aptos-move/aptos-validator-interface/src/lib.rs @@ -173,20 +173,21 @@ impl DebuggerStateView { &self, state_key: &StateKey, version: Version, - ) -> Result>> { + ) -> Result> { let (tx, rx) = std::sync::mpsc::channel(); let query_handler_locked = self.query_sender.lock().unwrap(); query_handler_locked .send((state_key.clone(), version, tx)) .unwrap(); - Ok(rx.recv()?) + let bytes_opt = rx.recv()?; + Ok(bytes_opt.map(StateValue::new)) } } impl TStateView for DebuggerStateView { type Key = StateKey; - fn get_state_value_bytes(&self, state_key: &StateKey) -> Result>> { + fn get_state_value(&self, state_key: &StateKey) -> Result> { self.get_state_value_internal(state_key, self.version) } diff --git a/aptos-move/aptos-vm/src/delta_state_view.rs b/aptos-move/aptos-vm/src/delta_state_view.rs index 9b726ab7c8b34..05fc1305b0917 100644 --- a/aptos-move/aptos-vm/src/delta_state_view.rs +++ b/aptos-move/aptos-vm/src/delta_state_view.rs @@ -4,7 +4,9 @@ use anyhow::Result; use aptos_state_view::{StateViewId, TStateView}; use aptos_types::{ - state_store::{state_key::StateKey, state_storage_usage::StateStorageUsage}, + state_store::{ + state_key::StateKey, state_storage_usage::StateStorageUsage, state_value::StateValue, + }, write_set::{WriteOp, WriteSet}, }; @@ -29,11 +31,13 @@ where self.base.id() } - fn get_state_value_bytes(&self, state_key: &StateKey) -> Result>> { + fn get_state_value(&self, state_key: &StateKey) -> Result> { match self.write_set.get(state_key) { - Some(WriteOp::Creation(data) | 
WriteOp::Modification(data)) => Ok(Some(data.clone())), + Some(WriteOp::Creation(data) | WriteOp::Modification(data)) => { + Ok(Some(StateValue::new(data.clone()))) + }, Some(WriteOp::Deletion) => Ok(None), - None => self.base.get_state_value_bytes(state_key), + None => self.base.get_state_value(state_key), } } diff --git a/aptos-move/block-executor/src/proptest_types/types.rs b/aptos-move/block-executor/src/proptest_types/types.rs index 88af6221c6250..ac52eeb301c4d 100644 --- a/aptos-move/block-executor/src/proptest_types/types.rs +++ b/aptos-move/block-executor/src/proptest_types/types.rs @@ -17,7 +17,7 @@ use aptos_state_view::{StateViewId, TStateView}; use aptos_types::{ access_path::AccessPath, account_address::AccountAddress, - state_store::state_storage_usage::StateStorageUsage, + state_store::{state_storage_usage::StateStorageUsage, state_value::StateValue}, write_set::{TransactionWrite, WriteOp}, }; use claims::assert_none; @@ -51,9 +51,9 @@ where type Key = K; /// Gets the state value for a given state key. - fn get_state_value_bytes(&self, _: &K) -> anyhow::Result>> { + fn get_state_value(&self, _: &K) -> anyhow::Result> { // When aggregator value has to be resolved from storage, pretend it is 100. - Ok(Some(serialize(&STORAGE_AGGREGATOR_VALUE))) + Ok(Some(StateValue::new(serialize(&STORAGE_AGGREGATOR_VALUE)))) } fn id(&self) -> StateViewId { @@ -81,7 +81,7 @@ where type Key = K; /// Gets the state value for a given state key. 
- fn get_state_value_bytes(&self, _: &K) -> anyhow::Result>> { + fn get_state_value(&self, _: &K) -> anyhow::Result> { Ok(None) } diff --git a/aptos-move/block-executor/src/view.rs b/aptos-move/block-executor/src/view.rs index e9103f2710dc3..4ac4932eff9ce 100644 --- a/aptos-move/block-executor/src/view.rs +++ b/aptos-move/block-executor/src/view.rs @@ -12,7 +12,7 @@ use aptos_aggregator::delta_change_set::{deserialize, serialize, DeltaOp}; use aptos_mvhashmap::{MVHashMap, MVHashMapError, MVHashMapOutput}; use aptos_state_view::{StateViewId, TStateView}; use aptos_types::{ - state_store::state_storage_usage::StateStorageUsage, + state_store::{state_storage_usage::StateStorageUsage, state_value::StateValue}, vm_status::{StatusCode, VMStatus}, write_set::TransactionWrite, }; @@ -182,11 +182,11 @@ impl<'a, T: Transaction, S: TStateView> LatestView<'a, T, S> { impl<'a, T: Transaction, S: TStateView> TStateView for LatestView<'a, T, S> { type Key = T::Key; - fn get_state_value_bytes(&self, state_key: &T::Key) -> anyhow::Result>> { + fn get_state_value(&self, state_key: &T::Key) -> anyhow::Result> { match self.latest_view { ViewMapKind::MultiVersion(map) => match map.read(state_key, self.txn_idx) { - ReadResult::Value(v) => Ok(v.extract_raw_bytes()), - ReadResult::U128(v) => Ok(Some(serialize(&v))), + ReadResult::Value(v) => Ok(v.extract_raw_bytes().map(StateValue::new)), + ReadResult::U128(v) => Ok(Some(StateValue::new(serialize(&v)))), ReadResult::Unresolved(delta) => { let from_storage = self .base_view @@ -197,14 +197,14 @@ impl<'a, T: Transaction, S: TStateView> TStateView for LatestView< let result = delta .apply_to(from_storage) .map_err(|pe| pe.finish(Location::Undefined).into_vm_status())?; - Ok(Some(serialize(&result))) + Ok(Some(StateValue::new(serialize(&result)))) }, - ReadResult::None => self.base_view.get_state_value_bytes(state_key), + ReadResult::None => self.base_view.get_state_value(state_key), }, ViewMapKind::BTree(map) => 
map.get(state_key).map_or_else( || { // let ret = - self.base_view.get_state_value_bytes(state_key) + self.base_view.get_state_value(state_key) // TODO: common treatment with the above case. // TODO: enable below when logging isn't a circular dependency. @@ -218,7 +218,7 @@ impl<'a, T: Transaction, S: TStateView> TStateView for LatestView< // log_context.alert(); // ret }, - |v| Ok(v.extract_raw_bytes()), + |v| Ok(v.extract_raw_bytes().map(StateValue::new)), ), } } diff --git a/aptos-move/e2e-tests/src/data_store.rs b/aptos-move/e2e-tests/src/data_store.rs index 6639c9fa1b466..2a6954097543a 100644 --- a/aptos-move/e2e-tests/src/data_store.rs +++ b/aptos-move/e2e-tests/src/data_store.rs @@ -9,7 +9,9 @@ use aptos_state_view::TStateView; use aptos_types::{ access_path::AccessPath, account_config::CoinInfoResource, - state_store::{state_key::StateKey, state_storage_usage::StateStorageUsage}, + state_store::{ + state_key::StateKey, state_storage_usage::StateStorageUsage, state_value::StateValue, + }, transaction::ChangeSet, write_set::{WriteOp, WriteSet}, }; @@ -106,8 +108,8 @@ impl FakeDataStore { impl TStateView for FakeDataStore { type Key = StateKey; - fn get_state_value_bytes(&self, state_key: &StateKey) -> Result>> { - Ok(self.state_data.get(state_key).cloned()) + fn get_state_value(&self, state_key: &StateKey) -> Result> { + Ok(self.state_data.get(state_key).cloned().map(StateValue::new)) } fn is_genesis(&self) -> bool { diff --git a/aptos-move/vm-genesis/src/genesis_context.rs b/aptos-move/vm-genesis/src/genesis_context.rs index 440bc3e9f276c..889466bd4a50f 100644 --- a/aptos-move/vm-genesis/src/genesis_context.rs +++ b/aptos-move/vm-genesis/src/genesis_context.rs @@ -7,7 +7,9 @@ use anyhow::Result; use aptos_state_view::TStateView; use aptos_types::{ access_path::AccessPath, - state_store::{state_key::StateKey, state_storage_usage::StateStorageUsage}, + state_store::{ + state_key::StateKey, state_storage_usage::StateStorageUsage, state_value::StateValue, + }, 
}; use move_core_types::language_storage::ModuleId; use std::collections::HashMap; @@ -35,8 +37,8 @@ impl GenesisStateView { impl TStateView for GenesisStateView { type Key = StateKey; - fn get_state_value_bytes(&self, state_key: &StateKey) -> Result>> { - Ok(self.state_data.get(state_key).cloned()) + fn get_state_value(&self, state_key: &StateKey) -> Result> { + Ok(self.state_data.get(state_key).cloned().map(StateValue::new)) } fn is_genesis(&self) -> bool { diff --git a/execution/executor/src/mock_vm/mock_vm_test.rs b/execution/executor/src/mock_vm/mock_vm_test.rs index c0851f6ee0d1e..fd3b34df4b12a 100644 --- a/execution/executor/src/mock_vm/mock_vm_test.rs +++ b/execution/executor/src/mock_vm/mock_vm_test.rs @@ -6,7 +6,9 @@ use anyhow::Result; use aptos_state_view::TStateView; use aptos_types::{ account_address::AccountAddress, - state_store::{state_key::StateKey, state_storage_usage::StateStorageUsage}, + state_store::{ + state_key::StateKey, state_storage_usage::StateStorageUsage, state_value::StateValue, + }, write_set::WriteOp, }; use aptos_vm::VMExecutor; @@ -21,7 +23,7 @@ struct MockStateView; impl TStateView for MockStateView { type Key = StateKey; - fn get_state_value_bytes(&self, _state_key: &StateKey) -> Result>> { + fn get_state_value(&self, _state_key: &StateKey) -> Result> { Ok(None) } diff --git a/storage/state-view/src/lib.rs b/storage/state-view/src/lib.rs index 152a3f4a34d6f..7d17898d6c0a9 100644 --- a/storage/state-view/src/lib.rs +++ b/storage/state-view/src/lib.rs @@ -10,7 +10,9 @@ use anyhow::Result; use aptos_crypto::HashValue; use aptos_types::{ account_address::AccountAddress, - state_store::{state_key::StateKey, state_storage_usage::StateStorageUsage}, + state_store::{ + state_key::StateKey, state_storage_usage::StateStorageUsage, state_value::StateValue, + }, transaction::Version, }; use std::ops::Deref; @@ -29,8 +31,14 @@ pub trait TStateView { StateViewId::Miscellaneous } + /// Gets the state value bytes for a given state key. 
+ fn get_state_value_bytes(&self, state_key: &Self::Key) -> Result>> { + let val_opt = self.get_state_value(state_key)?; + Ok(val_opt.map(|val| val.into_bytes())) + } + /// Gets the state value for a given state key. - fn get_state_value_bytes(&self, state_key: &Self::Key) -> Result>>; + fn get_state_value(&self, state_key: &Self::Key) -> Result>; /// VM needs this method to know whether the current state view is for genesis state creation. /// Currently TransactionPayload::WriteSet is only valid for genesis state creation. @@ -67,8 +75,8 @@ where self.deref().id() } - fn get_state_value_bytes(&self, state_key: &K) -> Result>> { - self.deref().get_state_value_bytes(state_key) + fn get_state_value(&self, state_key: &K) -> Result> { + self.deref().get_state_value(state_key) } fn is_genesis(&self) -> bool { diff --git a/storage/storage-interface/src/cached_state_view.rs b/storage/storage-interface/src/cached_state_view.rs index 2452701b3fda1..7eb58d42ce197 100644 --- a/storage/storage-interface/src/cached_state_view.rs +++ b/storage/storage-interface/src/cached_state_view.rs @@ -194,12 +194,12 @@ impl TStateView for CachedStateView { self.id } - fn get_state_value_bytes(&self, state_key: &StateKey) -> Result>> { + fn get_state_value(&self, state_key: &StateKey) -> Result> { let _timer = TIMER.with_label_values(&["get_state_value"]).start_timer(); // First check if the cache has the state value. - if let Some(contents) = self.state_cache.get(state_key) { + if let Some(val_opt) = self.state_cache.get(state_key) { // This can return None, which means the value has been deleted from the DB. 
- return Ok(contents.as_ref().map(|v| v.bytes().to_vec())); + return Ok(val_opt.clone()); } let state_value_option = self.get_state_value_internal(state_key)?; // Update the cache if still empty @@ -207,7 +207,7 @@ impl TStateView for CachedStateView { .state_cache .entry(state_key.clone()) .or_insert(state_value_option); - Ok(new_value.as_ref().map(|v| v.bytes().to_vec())) + Ok(new_value.clone()) } fn is_genesis(&self) -> bool { @@ -221,7 +221,7 @@ impl TStateView for CachedStateView { pub struct CachedDbStateView { db_state_view: DbStateView, - state_cache: RwLock>>>, + state_cache: RwLock>>, } impl From for CachedDbStateView { @@ -240,13 +240,13 @@ impl TStateView for CachedDbStateView { self.db_state_view.id() } - fn get_state_value_bytes(&self, state_key: &StateKey) -> Result>> { + fn get_state_value(&self, state_key: &StateKey) -> Result> { // First check if the cache has the state value. - if let Some(contents) = self.state_cache.read().get(state_key) { + if let Some(val_opt) = self.state_cache.read().get(state_key) { // This can return None, which means the value has been deleted from the DB. 
- return Ok(contents.clone()); + return Ok(val_opt.clone()); } - let state_value_option = self.db_state_view.get_state_value_bytes(state_key)?; + let state_value_option = self.db_state_view.get_state_value(state_key)?; // Update the cache if still empty let mut cache = self.state_cache.write(); let new_value = cache diff --git a/storage/storage-interface/src/state_view.rs b/storage/storage-interface/src/state_view.rs index 8b0dac6a2ce61..9eaac8ddcd9dd 100644 --- a/storage/storage-interface/src/state_view.rs +++ b/storage/storage-interface/src/state_view.rs @@ -5,7 +5,9 @@ use crate::DbReader; use anyhow::Result; use aptos_state_view::TStateView; use aptos_types::{ - state_store::{state_key::StateKey, state_storage_usage::StateStorageUsage}, + state_store::{ + state_key::StateKey, state_storage_usage::StateStorageUsage, state_value::StateValue, + }, transaction::Version, }; use std::sync::Arc; @@ -16,11 +18,9 @@ pub struct DbStateView { } impl DbStateView { - fn get(&self, key: &StateKey) -> Result>> { + fn get(&self, key: &StateKey) -> Result> { Ok(if let Some(version) = self.version { - self.db - .get_state_value_by_version(key, version)? - .map(|value| value.into_bytes()) + self.db.get_state_value_by_version(key, version)? 
} else { None }) @@ -30,7 +30,7 @@ impl DbStateView { impl TStateView for DbStateView { type Key = StateKey; - fn get_state_value_bytes(&self, state_key: &StateKey) -> Result>> { + fn get_state_value(&self, state_key: &StateKey) -> Result> { self.get(state_key) } From 73f71d04bfc3de498d0a5e1842e36de333d83d92 Mon Sep 17 00:00:00 2001 From: aldenhu Date: Mon, 6 Feb 2023 17:12:16 +0000 Subject: [PATCH 03/14] trivial: define feature flag to control stoarge slot metadata --- .../src/components/feature_flags.rs | 3 + .../framework/move-stdlib/doc/features.md | 58 +++++++++++++++++++ .../move-stdlib/sources/configs/features.move | 9 +++ types/src/on_chain_config/aptos_features.rs | 1 + 4 files changed, 71 insertions(+) diff --git a/aptos-move/aptos-release-builder/src/components/feature_flags.rs b/aptos-move/aptos-release-builder/src/components/feature_flags.rs index 5d11bf6213c27..a947899043dc3 100644 --- a/aptos-move/aptos-release-builder/src/components/feature_flags.rs +++ b/aptos-move/aptos-release-builder/src/components/feature_flags.rs @@ -26,6 +26,7 @@ pub enum FeatureFlag { MultiEd25519PkValidateV2Natives, Blake2b256Native, ResourceGroups, + StorageSlotMetadata, } fn generate_features_blob(writer: &CodeWriter, data: &[u64]) { @@ -124,6 +125,7 @@ impl From for AptosFeatureFlag { }, FeatureFlag::Blake2b256Native => AptosFeatureFlag::BLAKE2B_256_NATIVE, FeatureFlag::ResourceGroups => AptosFeatureFlag::RESOURCE_GROUPS, + FeatureFlag::StorageSlotMetadata => AptosFeatureFlag::STORAGE_SLOT_METADATA, } } } @@ -147,6 +149,7 @@ impl From for FeatureFlag { }, AptosFeatureFlag::BLAKE2B_256_NATIVE => FeatureFlag::Blake2b256Native, AptosFeatureFlag::RESOURCE_GROUPS => FeatureFlag::ResourceGroups, + AptosFeatureFlag::STORAGE_SLOT_METADATA => FeatureFlag::StorageSlotMetadata, } } } diff --git a/aptos-move/framework/move-stdlib/doc/features.md b/aptos-move/framework/move-stdlib/doc/features.md index 9a363805038f6..14e7281ff6930 100644 --- 
a/aptos-move/framework/move-stdlib/doc/features.md +++ b/aptos-move/framework/move-stdlib/doc/features.md @@ -25,6 +25,8 @@ the Move stdlib, the Aptos stdlib, and the Aptos framework. - [Function `blake2b_256_enabled`](#0x1_features_blake2b_256_enabled) - [Function `get_resource_groups_feature`](#0x1_features_get_resource_groups_feature) - [Function `resource_groups_enabled`](#0x1_features_resource_groups_enabled) +- [Function `get_storage_slot_metadata_feature`](#0x1_features_get_storage_slot_metadata_feature) +- [Function `storage_slot_metadata_enabled`](#0x1_features_storage_slot_metadata_enabled) - [Function `change_feature_flags`](#0x1_features_change_feature_flags) - [Function `is_enabled`](#0x1_features_is_enabled) - [Function `set`](#0x1_features_set) @@ -168,6 +170,16 @@ Lifetime: transient + + +Whether metadata is tracked for newly allocated storage slots. + + +
const STORAGE_SLOT_METADATA: u64 = 10;
+
+ + + Whether during upgrade compatibility checking, friend functions should be treated similar like @@ -559,6 +571,52 @@ Lifetime: transient + + + + +## Function `get_storage_slot_metadata_feature` + + + +
public fun get_storage_slot_metadata_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_storage_slot_metadata_feature(): u64 { STORAGE_SLOT_METADATA }
+
+ + + +
+ + + +## Function `storage_slot_metadata_enabled` + + + +
public fun storage_slot_metadata_enabled(): bool
+
+ + + +
+Implementation + + +
public fun storage_slot_metadata_enabled(): bool acquires Features {
+    is_enabled(STORAGE_SLOT_METADATA)
+}
+
+ + +
diff --git a/aptos-move/framework/move-stdlib/sources/configs/features.move b/aptos-move/framework/move-stdlib/sources/configs/features.move index 7c768012ebb8d..bfa4462d33fe9 100644 --- a/aptos-move/framework/move-stdlib/sources/configs/features.move +++ b/aptos-move/framework/move-stdlib/sources/configs/features.move @@ -121,6 +121,15 @@ module std::features { is_enabled(RESOURCE_GROUPS) } + /// Whether metadata is tracked for newly allocated storage slots. + const STORAGE_SLOT_METADATA: u64 = 10; + + public fun get_storage_slot_metadata_feature(): u64 { STORAGE_SLOT_METADATA } + + public fun storage_slot_metadata_enabled(): bool acquires Features { + is_enabled(STORAGE_SLOT_METADATA) + } + // ============================================================================================ // Feature Flag Implementation diff --git a/types/src/on_chain_config/aptos_features.rs b/types/src/on_chain_config/aptos_features.rs index 2bf5f71c67214..0c6e7c5074380 100644 --- a/types/src/on_chain_config/aptos_features.rs +++ b/types/src/on_chain_config/aptos_features.rs @@ -17,6 +17,7 @@ pub enum FeatureFlag { MULTI_ED25519_PK_VALIDATE_V2_NATIVES = 7, BLAKE2B_256_NATIVE = 8, RESOURCE_GROUPS = 9, + STORAGE_SLOT_METADATA = 10, } /// Representation of features on chain as a bitset. From 0fbd4489df420481c8ef74a4373bdbbcd9e151b2 Mon Sep 17 00:00:00 2001 From: aldenhu Date: Tue, 7 Feb 2023 00:07:07 +0000 Subject: [PATCH 04/14] refactor and fix resource group write op convertion We only had a "New mistakenly converted to Modification" issue for normal resources. Modules and resource groups were never affected. 
--- .../aptos-gas/src/transaction/storage.rs | 7 ++- .../aptos-vm/src/move_vm_ext/session.rs | 61 ++++++++----------- 2 files changed, 32 insertions(+), 36 deletions(-) diff --git a/aptos-move/aptos-gas/src/transaction/storage.rs b/aptos-move/aptos-gas/src/transaction/storage.rs index 4cb2cccb5eeb8..c445709640a69 100644 --- a/aptos-move/aptos-gas/src/transaction/storage.rs +++ b/aptos-move/aptos-gas/src/transaction/storage.rs @@ -271,7 +271,12 @@ impl ChangeSetConfigs { } } - pub fn creation_as_modification(&self) -> bool { + pub fn legacy_resource_creation_as_modification(&self) -> bool { + // Bug fixed at gas_feature_version 3 where (non-group) resource creation was converted to + // modification. + // Modules and table items were not affected (https://github.com/aptos-labs/aptos-core/pull/4722/commits/7c5e52297e8d1a6eac67a68a804ab1ca2a0b0f37). + // Resource groups were not affected because they were + // introduced later than feature_version 3 on all networks. self.gas_feature_version < 3 } diff --git a/aptos-move/aptos-vm/src/move_vm_ext/session.rs b/aptos-move/aptos-vm/src/move_vm_ext/session.rs index 1d3d070d1e444..f704f3da7be1d 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/session.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/session.rs @@ -298,7 +298,6 @@ impl SessionOutput { ap_cache: &mut C, configs: &ChangeSetConfigs, ) -> Result { - use MoveStorageOp::*; let Self { change_set, resource_group_change_set, @@ -314,27 +313,14 @@ impl SessionOutput { let (modules, resources) = account_changeset.into_inner(); for (struct_tag, blob_op) in resources { let ap = ap_cache.get_resource_path(addr, struct_tag); - let op = match blob_op { - Delete => WriteOp::Deletion, - New(blob) => { - if configs.creation_as_modification() { - WriteOp::Modification(blob) - } else { - WriteOp::Creation(blob) - } - }, - Modify(blob) => WriteOp::Modification(blob), - }; + let op = + convert_write_op(blob_op, configs.legacy_resource_creation_as_modification()); 
write_set_mut.insert((StateKey::access_path(ap), op)) } for (name, blob_op) in modules { let ap = ap_cache.get_module_path(ModuleId::new(addr, name)); - let op = match blob_op { - Delete => WriteOp::Deletion, - New(blob) => WriteOp::Creation(blob), - Modify(blob) => WriteOp::Modification(blob), - }; + let op = convert_write_op(blob_op, false); write_set_mut.insert((StateKey::access_path(ap), op)) } @@ -344,17 +330,7 @@ impl SessionOutput { let (_, resources) = account_changeset.into_inner(); for (struct_tag, blob_op) in resources { let ap = ap_cache.get_resource_group_path(addr, struct_tag); - let op = match blob_op { - Delete => WriteOp::Deletion, - New(blob) => { - if configs.creation_as_modification() { - WriteOp::Modification(blob) - } else { - WriteOp::Creation(blob) - } - }, - Modify(blob) => WriteOp::Modification(blob), - }; + let op = convert_write_op(blob_op, false); write_set_mut.insert((StateKey::access_path(ap), op)) } } @@ -362,13 +338,8 @@ impl SessionOutput { for (handle, change) in table_change_set.changes { for (key, value_op) in change.entries { let state_key = StateKey::table_item(handle.into(), key); - match value_op { - Delete => write_set_mut.insert((state_key, WriteOp::Deletion)), - New(bytes) => write_set_mut.insert((state_key, WriteOp::Creation(bytes))), - Modify(bytes) => { - write_set_mut.insert((state_key, WriteOp::Modification(bytes))) - }, - } + let op = convert_write_op(value_op, false); + write_set_mut.insert((state_key, op)) } } @@ -431,3 +402,23 @@ impl SessionOutput { Ok(()) } } + +fn convert_write_op( + move_storage_op: MoveStorageOp>, + creation_as_modification: bool, +) -> WriteOp { + use MoveStorageOp::*; + use WriteOp::*; + + match move_storage_op { + Delete => Deletion, + New(blob) => { + if creation_as_modification { + Modification(blob) + } else { + Creation(blob) + } + }, + Modify(blob) => Modification(blob), + } +} From b928a9f26435d987b8ac05058b8b44e0149d80af Mon Sep 17 00:00:00 2001 From: aldenhu Date: Mon, 6 Feb 2023 
14:58:01 +0000 Subject: [PATCH 05/14] Add update StateValue and WriteOp to support state slot metadata tracking * StateValue now has a new flavor which carries StateValueMetadata. * similarly there is now WriteOp::CreationWithMetadata and friends --- api/types/src/convert.rs | 30 +-- .../src/aggregator_extension.rs | 6 +- .../aptos-aggregator/src/delta_change_set.rs | 6 +- .../aptos-aggregator/src/transaction.rs | 29 +-- .../aptos-gas/src/transaction/storage.rs | 27 ++- .../aptos-validator-interface/src/lib.rs | 2 +- .../src/rest_interface.rs | 6 +- aptos-move/aptos-vm/src/delta_state_view.rs | 7 +- .../src/proptest_types/types.rs | 8 +- aptos-move/block-executor/src/view.rs | 8 +- aptos-move/e2e-tests/src/data_store.rs | 11 +- aptos-move/mvhashmap/src/unit_tests/mod.rs | 5 + .../src/unit_tests/proptest_types.rs | 6 +- aptos-move/vm-genesis/src/genesis_context.rs | 6 +- crates/aptos-rosetta/src/types/objects.rs | 13 +- .../src/in_memory_state_calculator.rs | 11 +- .../storage-service/server/src/tests.rs | 2 +- storage/aptosdb/src/fake_aptosdb.rs | 2 +- storage/indexer/src/lib.rs | 7 +- storage/scratchpad/benches/sparse_merkle.rs | 2 +- types/src/proptest_types.rs | 5 +- types/src/state_store/state_value.rs | 51 ++++- types/src/write_set.rs | 179 +++++++++++++++--- 23 files changed, 298 insertions(+), 131 deletions(-) diff --git a/api/types/src/convert.rs b/api/types/src/convert.rs index 66643b088e963..0b181d0323519 100644 --- a/api/types/src/convert.rs +++ b/api/types/src/convert.rs @@ -255,8 +255,8 @@ impl<'a, R: MoveResolverExt + ?Sized> MoveConverter<'a, R> { access_path: AccessPath, op: WriteOp, ) -> Result { - let ret = match op { - WriteOp::Deletion => match access_path.get_path() { + let ret = match op.into_bytes() { + None => match access_path.get_path() { Path::Code(module_id) => WriteSetChange::DeleteModule(DeleteModule { address: access_path.address.into(), state_key_hash, @@ -273,21 +273,21 @@ impl<'a, R: MoveResolverExt + ?Sized> MoveConverter<'a, R> { 
resource: typ.into(), }), }, - WriteOp::Modification(val) | WriteOp::Creation(val) => match access_path.get_path() { + Some(bytes) => match access_path.get_path() { Path::Code(_) => WriteSetChange::WriteModule(WriteModule { address: access_path.address.into(), state_key_hash, - data: MoveModuleBytecode::new(val).try_parse_abi()?, + data: MoveModuleBytecode::new(bytes).try_parse_abi()?, }), Path::Resource(typ) => WriteSetChange::WriteResource(WriteResource { address: access_path.address.into(), state_key_hash, - data: self.try_into_resource(&typ, &val)?, + data: self.try_into_resource(&typ, &bytes)?, }), Path::ResourceGroup(typ) => WriteSetChange::WriteResource(WriteResource { address: access_path.address.into(), state_key_hash, - data: self.try_into_resource(&typ, &val)?, + data: self.try_into_resource(&typ, &bytes)?, }), }, }; @@ -303,26 +303,26 @@ impl<'a, R: MoveResolverExt + ?Sized> MoveConverter<'a, R> { ) -> Result { let hex_handle = handle.0.to_vec().into(); let key: HexEncodedBytes = key.into(); - let ret = match op { - WriteOp::Deletion => { - let data = self.try_delete_table_item_into_deleted_table_data(handle, &key.0)?; + let ret = match op.into_bytes() { + Some(bytes) => { + let data = + self.try_write_table_item_into_decoded_table_data(handle, &key.0, &bytes)?; - WriteSetChange::DeleteTableItem(DeleteTableItem { + WriteSetChange::WriteTableItem(WriteTableItem { state_key_hash, handle: hex_handle, key, + value: bytes.into(), data, }) }, - WriteOp::Modification(value) | WriteOp::Creation(value) => { - let data = - self.try_write_table_item_into_decoded_table_data(handle, &key.0, &value)?; + None => { + let data = self.try_delete_table_item_into_deleted_table_data(handle, &key.0)?; - WriteSetChange::WriteTableItem(WriteTableItem { + WriteSetChange::DeleteTableItem(DeleteTableItem { state_key_hash, handle: hex_handle, key, - value: value.into(), data, }) }, diff --git a/aptos-move/aptos-aggregator/src/aggregator_extension.rs 
b/aptos-move/aptos-aggregator/src/aggregator_extension.rs index eeb3341f09e85..5de6215118f64 100644 --- a/aptos-move/aptos-aggregator/src/aggregator_extension.rs +++ b/aptos-move/aptos-aggregator/src/aggregator_extension.rs @@ -395,7 +395,11 @@ mod test { type Key = StateKey; fn get_state_value(&self, state_key: &StateKey) -> anyhow::Result> { - Ok(self.data.get(state_key).cloned().map(StateValue::new)) + Ok(self + .data + .get(state_key) + .cloned() + .map(StateValue::new_legacy)) } fn is_genesis(&self) -> bool { diff --git a/aptos-move/aptos-aggregator/src/delta_change_set.rs b/aptos-move/aptos-aggregator/src/delta_change_set.rs index 41d087ca6bfa0..6451850b1217e 100644 --- a/aptos-move/aptos-aggregator/src/delta_change_set.rs +++ b/aptos-move/aptos-aggregator/src/delta_change_set.rs @@ -543,7 +543,11 @@ mod tests { type Key = StateKey; fn get_state_value(&self, state_key: &StateKey) -> anyhow::Result> { - Ok(self.data.get(state_key).cloned().map(StateValue::new)) + Ok(self + .data + .get(state_key) + .cloned() + .map(StateValue::new_legacy)) } fn is_genesis(&self) -> bool { diff --git a/aptos-move/aptos-aggregator/src/transaction.rs b/aptos-move/aptos-aggregator/src/transaction.rs index 0c9fc569977b0..3d0c446e08f74 100644 --- a/aptos-move/aptos-aggregator/src/transaction.rs +++ b/aptos-move/aptos-aggregator/src/transaction.rs @@ -74,15 +74,14 @@ impl ChangeSetExt { for (key, mut op) in other.into_iter() { if let Some(r) = write_ops.get_mut(&key) { match r { - Creation(data) => { + Creation(data) + | Modification(data) + | CreationWithMetadata { data, .. } + | ModificationWithMetadata { data, .. } => { let val: u128 = bcs::from_bytes(data)?; - *r = Creation(bcs::to_bytes(&op.apply_to(val)?)?); + *data = bcs::to_bytes(&op.apply_to(val)?)?; }, - Modification(data) => { - let val: u128 = bcs::from_bytes(data)?; - *r = Modification(bcs::to_bytes(&op.apply_to(val)?)?); - }, - Deletion => { + Deletion | DeletionWithMetadata { .. 
} => { bail!("Failed to apply Aggregator delta -- value already deleted"); }, } @@ -110,7 +109,6 @@ impl ChangeSetExt { pub fn squash_change_set(self, other: ChangeSet) -> anyhow::Result { use btree_map::Entry::*; - use WriteOp::*; let checker = self.checker.clone(); let (mut delta, change_set) = self.into_inner(); @@ -123,19 +121,8 @@ impl ChangeSetExt { for (key, op) in other_write_set.into_iter() { match write_ops.entry(key) { Occupied(mut entry) => { - let r = entry.get_mut(); - match (&r, op) { - (Modification(_) | Creation(_), Creation(_)) - | (Deletion, Deletion | Modification(_)) => { - bail!("The given change sets cannot be squashed") - }, - (Modification(_), Modification(data)) => *r = Modification(data), - (Creation(_), Modification(data)) => *r = Creation(data), - (Modification(_), Deletion) => *r = Deletion, - (Deletion, Creation(data)) => *r = Modification(data), - (Creation(_), Deletion) => { - entry.remove(); - }, + if !WriteOp::squash(entry.get_mut(), op)? { + entry.remove(); } }, Vacant(entry) => { diff --git a/aptos-move/aptos-gas/src/transaction/storage.rs b/aptos-move/aptos-gas/src/transaction/storage.rs index c445709640a69..e98a2c3b82eb3 100644 --- a/aptos-move/aptos-gas/src/transaction/storage.rs +++ b/aptos-move/aptos-gas/src/transaction/storage.rs @@ -73,14 +73,14 @@ impl StoragePricingV1 { } match op { - Creation(data) => { + Creation(data) | CreationWithMetadata { data, .. } => { num_new_items += 1.into(); num_bytes_val += NumBytes::new(data.len() as u64); }, - Modification(data) => { + Modification(data) | ModificationWithMetadata { data, .. } => { num_bytes_val += NumBytes::new(data.len() as u64); }, - Deletion => (), + Deletion | DeletionWithMetadata { .. } => (), } } @@ -183,15 +183,15 @@ impl StoragePricingV2 { for (key, op) in ops { match &op { - Creation(data) => { + Creation(data) | CreationWithMetadata { data, .. 
} => { num_items_create += 1.into(); num_bytes_create += self.write_op_size(key, data); }, - Modification(data) => { + Modification(data) | ModificationWithMetadata { data, .. } => { num_items_write += 1.into(); num_bytes_write += self.write_op_size(key, data); }, - Deletion => (), + Deletion | DeletionWithMetadata { .. } => (), } } @@ -306,15 +306,12 @@ impl CheckChangeSet for ChangeSetConfigs { let mut write_set_size = 0; for (key, op) in change_set.write_set() { - match op { - WriteOp::Creation(data) | WriteOp::Modification(data) => { - let write_op_size = (data.len() + key.size()) as u64; - if write_op_size > self.max_bytes_per_write_op { - return Err(VMStatus::Error(ERR)); - } - write_set_size += write_op_size; - }, - WriteOp::Deletion => (), + if let Some(bytes) = op.bytes() { + let write_op_size = (bytes.len() + key.size()) as u64; + if write_op_size > self.max_bytes_per_write_op { + return Err(VMStatus::Error(ERR)); + } + write_set_size += write_op_size; } if write_set_size > self.max_bytes_all_write_ops_per_transaction { return Err(VMStatus::Error(ERR)); diff --git a/aptos-move/aptos-validator-interface/src/lib.rs b/aptos-move/aptos-validator-interface/src/lib.rs index 2012d59c87c2c..00985f510b7ea 100644 --- a/aptos-move/aptos-validator-interface/src/lib.rs +++ b/aptos-move/aptos-validator-interface/src/lib.rs @@ -180,7 +180,7 @@ impl DebuggerStateView { .send((state_key.clone(), version, tx)) .unwrap(); let bytes_opt = rx.recv()?; - Ok(bytes_opt.map(StateValue::new)) + Ok(bytes_opt.map(StateValue::new_legacy)) } } diff --git a/aptos-move/aptos-validator-interface/src/rest_interface.rs b/aptos-move/aptos-validator-interface/src/rest_interface.rs index 084e9f5761ae6..56a4c917e66a5 100644 --- a/aptos-move/aptos-validator-interface/src/rest_interface.rs +++ b/aptos-move/aptos-validator-interface/src/rest_interface.rs @@ -52,7 +52,7 @@ impl AptosValidatorInterface for RestDebuggerInterface { ) -> Result> { match state_key.inner() { 
StateKeyInner::AccessPath(path) => match path.get_path() { - Path::Code(module_id) => Ok(Some(StateValue::new( + Path::Code(module_id) => Ok(Some(StateValue::new_legacy( self.0 .get_account_module_bcs_at_version( *module_id.address(), @@ -73,9 +73,9 @@ impl AptosValidatorInterface for RestDebuggerInterface { ) .await .ok() - .map(|inner| StateValue::new(inner.into_inner()))), + .map(|inner| StateValue::new_legacy(inner.into_inner()))), }, - StateKeyInner::TableItem { handle, key } => Ok(Some(StateValue::new( + StateKeyInner::TableItem { handle, key } => Ok(Some(StateValue::new_legacy( self.0 .get_raw_table_item(handle.0, key, version) .await diff --git a/aptos-move/aptos-vm/src/delta_state_view.rs b/aptos-move/aptos-vm/src/delta_state_view.rs index 05fc1305b0917..96b5086fdff12 100644 --- a/aptos-move/aptos-vm/src/delta_state_view.rs +++ b/aptos-move/aptos-vm/src/delta_state_view.rs @@ -7,7 +7,7 @@ use aptos_types::{ state_store::{ state_key::StateKey, state_storage_usage::StateStorageUsage, state_value::StateValue, }, - write_set::{WriteOp, WriteSet}, + write_set::{TransactionWrite, WriteSet}, }; pub struct DeltaStateView<'a, 'b, S> { @@ -33,10 +33,7 @@ where fn get_state_value(&self, state_key: &StateKey) -> Result> { match self.write_set.get(state_key) { - Some(WriteOp::Creation(data) | WriteOp::Modification(data)) => { - Ok(Some(StateValue::new(data.clone()))) - }, - Some(WriteOp::Deletion) => Ok(None), + Some(write_op) => Ok(write_op.as_state_value()), None => self.base.get_state_value(state_key), } } diff --git a/aptos-move/block-executor/src/proptest_types/types.rs b/aptos-move/block-executor/src/proptest_types/types.rs index ac52eeb301c4d..5c71b4dec8c00 100644 --- a/aptos-move/block-executor/src/proptest_types/types.rs +++ b/aptos-move/block-executor/src/proptest_types/types.rs @@ -53,7 +53,9 @@ where /// Gets the state value for a given state key. 
fn get_state_value(&self, _: &K) -> anyhow::Result> { // When aggregator value has to be resolved from storage, pretend it is 100. - Ok(Some(StateValue::new(serialize(&STORAGE_AGGREGATOR_VALUE)))) + Ok(Some(StateValue::new_legacy(serialize( + &STORAGE_AGGREGATOR_VALUE, + )))) } fn id(&self) -> StateViewId { @@ -154,6 +156,10 @@ impl> + Debug + Clone + Eq + Send + Sync + Arbitrary> Transactio None } } + + fn as_state_value(&self) -> Option { + unimplemented!() + } } #[derive(Clone, Copy)] diff --git a/aptos-move/block-executor/src/view.rs b/aptos-move/block-executor/src/view.rs index 4ac4932eff9ce..65830b9dd2d43 100644 --- a/aptos-move/block-executor/src/view.rs +++ b/aptos-move/block-executor/src/view.rs @@ -185,8 +185,8 @@ impl<'a, T: Transaction, S: TStateView> TStateView for LatestView< fn get_state_value(&self, state_key: &T::Key) -> anyhow::Result> { match self.latest_view { ViewMapKind::MultiVersion(map) => match map.read(state_key, self.txn_idx) { - ReadResult::Value(v) => Ok(v.extract_raw_bytes().map(StateValue::new)), - ReadResult::U128(v) => Ok(Some(StateValue::new(serialize(&v)))), + ReadResult::Value(v) => Ok(v.as_state_value()), + ReadResult::U128(v) => Ok(Some(StateValue::new_legacy(serialize(&v)))), ReadResult::Unresolved(delta) => { let from_storage = self .base_view @@ -197,7 +197,7 @@ impl<'a, T: Transaction, S: TStateView> TStateView for LatestView< let result = delta .apply_to(from_storage) .map_err(|pe| pe.finish(Location::Undefined).into_vm_status())?; - Ok(Some(StateValue::new(serialize(&result)))) + Ok(Some(StateValue::new_legacy(serialize(&result)))) }, ReadResult::None => self.base_view.get_state_value(state_key), }, @@ -218,7 +218,7 @@ impl<'a, T: Transaction, S: TStateView> TStateView for LatestView< // log_context.alert(); // ret }, - |v| Ok(v.extract_raw_bytes().map(StateValue::new)), + |v| Ok(v.as_state_value()), ), } } diff --git a/aptos-move/e2e-tests/src/data_store.rs b/aptos-move/e2e-tests/src/data_store.rs index 
2a6954097543a..68739ecdfc4d6 100644 --- a/aptos-move/e2e-tests/src/data_store.rs +++ b/aptos-move/e2e-tests/src/data_store.rs @@ -56,7 +56,10 @@ impl FakeDataStore { WriteOp::Modification(blob) | WriteOp::Creation(blob) => { self.set(state_key.clone(), blob.clone()); }, - WriteOp::Deletion => { + WriteOp::ModificationWithMetadata { .. } | WriteOp::CreationWithMetadata { .. } => { + unimplemented!() + }, + WriteOp::Deletion | WriteOp::DeletionWithMetadata { .. } => { self.remove(state_key); }, } @@ -109,7 +112,11 @@ impl TStateView for FakeDataStore { type Key = StateKey; fn get_state_value(&self, state_key: &StateKey) -> Result> { - Ok(self.state_data.get(state_key).cloned().map(StateValue::new)) + Ok(self + .state_data + .get(state_key) + .cloned() + .map(StateValue::new_legacy)) } fn is_genesis(&self) -> bool { diff --git a/aptos-move/mvhashmap/src/unit_tests/mod.rs b/aptos-move/mvhashmap/src/unit_tests/mod.rs index dc692b1eb48f5..ed7eb66e6c7fa 100644 --- a/aptos-move/mvhashmap/src/unit_tests/mod.rs +++ b/aptos-move/mvhashmap/src/unit_tests/mod.rs @@ -6,6 +6,7 @@ use aptos_aggregator::{ delta_change_set::{delta_add, delta_sub, DeltaOp, DeltaUpdate}, transaction::AggregatorValue, }; +use aptos_types::state_store::state_value::StateValue; mod proptest_types; @@ -23,6 +24,10 @@ impl TransactionWrite for Value { v.resize(16, 0); Some(v) } + + fn as_state_value(&self) -> Option { + unimplemented!() + } } // Generate a Vec deterministically based on txn_idx and incarnation. 
diff --git a/aptos-move/mvhashmap/src/unit_tests/proptest_types.rs b/aptos-move/mvhashmap/src/unit_tests/proptest_types.rs index e9c389120e761..c9d4d321d64c5 100644 --- a/aptos-move/mvhashmap/src/unit_tests/proptest_types.rs +++ b/aptos-move/mvhashmap/src/unit_tests/proptest_types.rs @@ -6,7 +6,7 @@ use aptos_aggregator::{ delta_change_set::{delta_add, delta_sub, DeltaOp}, transaction::AggregatorValue, }; -use aptos_types::write_set::TransactionWrite; +use aptos_types::{state_store::state_value::StateValue, write_set::TransactionWrite}; use proptest::{collection::vec, prelude::*, sample::Index, strategy::Strategy}; use std::{ collections::{BTreeMap, HashMap}, @@ -51,6 +51,10 @@ impl> + Clone> TransactionWrite for Value { Some(bytes) } } + + fn as_state_value(&self) -> Option { + unimplemented!() + } } enum Data { diff --git a/aptos-move/vm-genesis/src/genesis_context.rs b/aptos-move/vm-genesis/src/genesis_context.rs index 889466bd4a50f..1392d0a16b602 100644 --- a/aptos-move/vm-genesis/src/genesis_context.rs +++ b/aptos-move/vm-genesis/src/genesis_context.rs @@ -38,7 +38,11 @@ impl TStateView for GenesisStateView { type Key = StateKey; fn get_state_value(&self, state_key: &StateKey) -> Result> { - Ok(self.state_data.get(state_key).cloned().map(StateValue::new)) + Ok(self + .state_data + .get(state_key) + .cloned() + .map(StateValue::new_legacy)) } fn is_genesis(&self) -> bool { diff --git a/crates/aptos-rosetta/src/types/objects.rs b/crates/aptos-rosetta/src/types/objects.rs index 5dcb0a8132325..eb6e0dc39cbe1 100644 --- a/crates/aptos-rosetta/src/types/objects.rs +++ b/crates/aptos-rosetta/src/types/objects.rs @@ -845,10 +845,9 @@ async fn parse_operations_from_write_set( }, }; - let data = match write_op { - WriteOp::Creation(inner) => inner, - WriteOp::Modification(inner) => inner, - WriteOp::Deletion => return Ok(vec![]), + let data = match write_op.bytes() { + Some(bytes) => bytes, + None => return Ok(vec![]), }; // Determine operation @@ -1158,11 +1157,7 @@ 
async fn parse_staking_contract_resource_changes( let stake_pools: BTreeMap = changes .iter() .filter_map(|(state_key, write_op)| { - let data = match write_op { - WriteOp::Creation(data) => Some(data), - WriteOp::Modification(data) => Some(data), - WriteOp::Deletion => None, - }; + let data = write_op.bytes(); let mut ret = None; if let (StateKeyInner::AccessPath(path), Some(data)) = (state_key.inner(), data) { diff --git a/execution/executor-types/src/in_memory_state_calculator.rs b/execution/executor-types/src/in_memory_state_calculator.rs index 3abf031a275fa..d897e87c2b7c7 100644 --- a/execution/executor-types/src/in_memory_state_calculator.rs +++ b/execution/executor-types/src/in_memory_state_calculator.rs @@ -17,7 +17,7 @@ use aptos_types::{ state_key::StateKey, state_storage_usage::StateStorageUsage, state_value::StateValue, }, transaction::{Transaction, Version}, - write_set::{WriteOp, WriteSet}, + write_set::{TransactionWrite, WriteOp, WriteSet}, }; use dashmap::DashMap; use once_cell::sync::Lazy; @@ -287,14 +287,7 @@ fn process_state_key_write_op( write_op: WriteOp, ) -> Result<(StateKey, Option)> { let key_size = state_key.size(); - let state_value = match write_op { - WriteOp::Modification(new_value) | WriteOp::Creation(new_value) => { - let value = StateValue::from(new_value); - usage.add_item(key_size + value.size()); - Some(value) - }, - WriteOp::Deletion => None, - }; + let state_value = write_op.as_state_value(); let cached = state_cache.insert(state_key.clone(), state_value.clone()); if let Some(old_value_opt) = cached { if let Some(old_value) = old_value_opt { diff --git a/state-sync/storage-service/server/src/tests.rs b/state-sync/storage-service/server/src/tests.rs index 8353af550fe1c..b37e629f26928 100644 --- a/state-sync/storage-service/server/src/tests.rs +++ b/state-sync/storage-service/server/src/tests.rs @@ -3296,7 +3296,7 @@ fn create_state_keys_and_values( // Create the requested keys and values (0..num_keys_and_values) .map(|_| { - let 
state_value = StateValue::new(random_bytes.clone()); + let state_value = StateValue::new_legacy(random_bytes.clone()); (StateKey::raw(vec![]), state_value) }) .collect() diff --git a/storage/aptosdb/src/fake_aptosdb.rs b/storage/aptosdb/src/fake_aptosdb.rs index d79b48aa97199..7333ddd74086f 100644 --- a/storage/aptosdb/src/fake_aptosdb.rs +++ b/storage/aptosdb/src/fake_aptosdb.rs @@ -711,7 +711,7 @@ impl DbReader for FakeAptosDB { EventHandle::new(EventKey::new(1, account_address), 0), ); let bytes = bcs::to_bytes(&account)?; - Ok(Some(StateValue::new(bytes))) + Ok(Some(StateValue::new_legacy(bytes))) } else { self.inner.get_state_value_by_version(state_key, version) } diff --git a/storage/indexer/src/lib.rs b/storage/indexer/src/lib.rs index 9d5f074a5ea5f..ca2b21e2af12b 100644 --- a/storage/indexer/src/lib.rs +++ b/storage/indexer/src/lib.rs @@ -159,8 +159,8 @@ impl<'a> TableInfoParser<'a> { } pub fn parse_write_op(&mut self, state_key: &'a StateKey, write_op: &'a WriteOp) -> Result<()> { - match write_op { - WriteOp::Modification(bytes) | WriteOp::Creation(bytes) => match state_key.inner() { + if let Some(bytes) = write_op.bytes() { + match state_key.inner() { StateKeyInner::AccessPath(access_path) => { let path: Path = (&access_path.path).try_into()?; match path { @@ -171,8 +171,7 @@ impl<'a> TableInfoParser<'a> { }, StateKeyInner::TableItem { handle, .. 
} => self.parse_table_item(*handle, bytes)?, StateKeyInner::Raw(_) => (), - }, - WriteOp::Deletion => (), + } } Ok(()) } diff --git a/storage/scratchpad/benches/sparse_merkle.rs b/storage/scratchpad/benches/sparse_merkle.rs index fc6cd2c2be34b..a20559a400cbd 100644 --- a/storage/scratchpad/benches/sparse_merkle.rs +++ b/storage/scratchpad/benches/sparse_merkle.rs @@ -181,7 +181,7 @@ impl Benches { None } else { let bytes: Vec = rng.sample_iter::(Standard).take(100).collect(); - Some(StateValue::new(bytes)) + Some(StateValue::new_legacy(bytes)) } } diff --git a/types/src/proptest_types.rs b/types/src/proptest_types.rs index a15fb73e2ff2d..bc90f4e591659 100644 --- a/types/src/proptest_types.rs +++ b/types/src/proptest_types.rs @@ -788,7 +788,10 @@ impl TransactionToCommitGen { .map(move |(key, value)| { let state_key = StateKey::access_path(AccessPath::new(address, key)); ( - (state_key.clone(), Some(StateValue::from(value.clone()))), + ( + state_key.clone(), + Some(StateValue::new_legacy(value.clone())), + ), (state_key, WriteOp::Modification(value)), ) }) diff --git a/types/src/state_store/state_value.rs b/types/src/state_store/state_value.rs index ddb7be819b325..d2f4e48f593eb 100644 --- a/types/src/state_store/state_value.rs +++ b/types/src/state_store/state_value.rs @@ -9,10 +9,32 @@ use aptos_crypto::{ HashValue, }; use aptos_crypto_derive::{BCSCryptoHash, CryptoHasher}; +use move_core_types::account_address::AccountAddress; #[cfg(any(test, feature = "fuzzing"))] use proptest::{arbitrary::Arbitrary, prelude::*}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; +#[derive( + BCSCryptoHash, + Clone, + CryptoHasher, + Debug, + Deserialize, + Eq, + PartialEq, + Serialize, + Ord, + PartialOrd, + Hash, +)] +pub enum StateValueMetadata { + V0 { + payer: AccountAddress, + deposit: u128, + creation_time_usecs: u64, + }, +} + #[derive(Clone, Debug, CryptoHasher, Eq, PartialEq, Ord, PartialOrd, Hash)] pub struct StateValue { inner: StateValueInner, @@ -35,6 
+57,11 @@ pub struct StateValue { #[serde(rename = "StateValue")] pub enum StateValueInner { V0(#[serde(with = "serde_bytes")] Vec), + WithMetadata { + #[serde(with = "serde_bytes")] + data: Vec, + metadata: StateValueMetadata, + }, } #[cfg(any(test, feature = "fuzzing"))] @@ -43,7 +70,7 @@ impl Arbitrary for StateValue { type Strategy = BoxedStrategy; fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { - any::>().prop_map(StateValue::new).boxed() + any::>().prop_map(StateValue::new_legacy).boxed() } } @@ -68,34 +95,40 @@ impl Serialize for StateValue { } impl StateValue { - pub fn new(bytes: Vec) -> Self { - let inner = StateValueInner::V0(bytes); + pub fn new_legacy(bytes: Vec) -> Self { + Self::new_impl(StateValueInner::V0(bytes)) + } + + pub fn new_with_metadata(data: Vec, metadata: StateValueMetadata) -> Self { + Self::new_impl(StateValueInner::WithMetadata { data, metadata }) + } + + fn new_impl(inner: StateValueInner) -> Self { let hash = CryptoHash::hash(&inner); Self { inner, hash } } pub fn size(&self) -> usize { - match &self.inner { - StateValueInner::V0(bytes) => bytes.len(), - } + self.bytes().len() } pub fn bytes(&self) -> &[u8] { match &self.inner { - StateValueInner::V0(bytes) => bytes, + StateValueInner::V0(data) | StateValueInner::WithMetadata { data, .. } => data, } } pub fn into_bytes(self) -> Vec { match self.inner { - StateValueInner::V0(bytes) => bytes, + StateValueInner::V0(data) | StateValueInner::WithMetadata { data, .. } => data, } } } +#[cfg(any(test, feature = "fuzzing"))] impl From> for StateValue { fn from(bytes: Vec) -> Self { - StateValue::new(bytes) + StateValue::new_legacy(bytes) } } diff --git a/types/src/write_set.rs b/types/src/write_set.rs index 937998f7d74a7..9678fee17dd46 100644 --- a/types/src/write_set.rs +++ b/types/src/write_set.rs @@ -4,7 +4,10 @@ //! For each transaction the VM executes, the VM will output a `WriteSet` that contains each access //! path it updates. 
For each access path, the VM can either give its new value or delete it. -use crate::state_store::state_key::StateKey; +use crate::state_store::{ + state_key::StateKey, + state_value::{StateValue, StateValueMetadata}, +}; use anyhow::{bail, Result}; use aptos_crypto_derive::{BCSCryptoHash, CryptoHasher}; use serde::{Deserialize, Serialize}; @@ -18,42 +21,161 @@ pub enum WriteOp { Creation(#[serde(with = "serde_bytes")] Vec), Modification(#[serde(with = "serde_bytes")] Vec), Deletion, + CreationWithMetadata { + #[serde(with = "serde_bytes")] + data: Vec, + metadata: StateValueMetadata, + }, + ModificationWithMetadata { + #[serde(with = "serde_bytes")] + data: Vec, + metadata: StateValueMetadata, + }, + DeletionWithMetadata { + metadata: StateValueMetadata, + }, } impl WriteOp { #[inline] pub fn is_deletion(&self) -> bool { match self { - WriteOp::Deletion => true, - WriteOp::Modification(_) | WriteOp::Creation(_) => false, + WriteOp::Deletion | WriteOp::DeletionWithMetadata { .. } => true, + WriteOp::Modification(_) + | WriteOp::ModificationWithMetadata { .. } + | WriteOp::Creation(_) + | WriteOp::CreationWithMetadata { .. } => false, } } pub fn is_creation(&self) -> bool { match self { - WriteOp::Creation(_) => true, - WriteOp::Modification(_) | WriteOp::Deletion => false, + WriteOp::Creation(_) | WriteOp::CreationWithMetadata { .. } => true, + WriteOp::Modification(_) + | WriteOp::ModificationWithMetadata { .. } + | WriteOp::Deletion + | WriteOp::DeletionWithMetadata { .. } => false, } } pub fn is_modification(&self) -> bool { match self { - WriteOp::Modification(_) => true, - WriteOp::Creation(_) | WriteOp::Deletion => false, + WriteOp::Modification(_) | WriteOp::ModificationWithMetadata { .. } => true, + WriteOp::Creation(_) + | WriteOp::CreationWithMetadata { .. } + | WriteOp::Deletion + | WriteOp::DeletionWithMetadata { .. } => false, + } + } + + pub fn squash(op: &mut Self, other: Self) -> Result { + use WriteOp::*; + + // n.b. 
With write sets from multiple sessions being squashed together, it's possible + // to see two ops carrying different metadata (or one with it the other without) + // due to deleting in one session and recreating in another. The original metadata + // shouldn't change due to the squash. + // And because the deposit or refund happens after all squashing is finished, it's + // not a concern of fairness. + + match (&op, other) { + ( + Modification(_) + | ModificationWithMetadata { .. } + | Creation(_) + | CreationWithMetadata { .. }, + Creation(_) | CreationWithMetadata {..}, + ) // create existing + | ( + Deletion | DeletionWithMetadata {..}, + Deletion | DeletionWithMetadata {..} | Modification(_) | ModificationWithMetadata { .. }, + ) // delete or modify already deleted + => { + bail!( + "The given change sets cannot be squashed", + ) + }, + (Modification(_), Modification(data) | ModificationWithMetadata {data, ..}) => *op = Modification(data), + (ModificationWithMetadata{metadata, ..}, Modification(data) | ModificationWithMetadata{data, ..}) => { + *op = ModificationWithMetadata{data, metadata: metadata.clone()} + }, + (Creation(_), Modification(data) | ModificationWithMetadata {data, ..} ) => { + *op = Creation(data) + }, + (CreationWithMetadata{metadata , ..}, Modification(data) | ModificationWithMetadata{data, ..}) => { + *op = CreationWithMetadata{data, metadata: metadata.clone()} + }, + (Modification(_) , Deletion | DeletionWithMetadata {..}) => { + *op = Deletion + }, + (ModificationWithMetadata{metadata, ..} , Deletion | DeletionWithMetadata {..}) => { + *op = DeletionWithMetadata {metadata: metadata.clone()} + }, + (Deletion, Creation(data) | CreationWithMetadata {data, ..}) => { + *op = Modification(data) + }, + (DeletionWithMetadata {metadata, ..}, Creation(data)| CreationWithMetadata {data, ..}) => { + *op = ModificationWithMetadata{data, metadata: metadata.clone()} + }, + (Creation(_) | CreationWithMetadata {..}, Deletion | DeletionWithMetadata {..}) => 
{ + return Ok(false) + }, + } + Ok(true) + } + + pub fn into_bytes(self) -> Option> { + use WriteOp::*; + + match self { + Creation(data) + | CreationWithMetadata { data, .. } + | Modification(data) + | ModificationWithMetadata { data, .. } => Some(data), + Deletion | DeletionWithMetadata { .. } => None, + } + } + + pub fn bytes(&self) -> Option<&[u8]> { + use WriteOp::*; + + match self { + Creation(data) + | CreationWithMetadata { data, .. } + | Modification(data) + | ModificationWithMetadata { data, .. } => Some(data), + Deletion | DeletionWithMetadata { .. } => None, + } + } + + pub fn metadata(&self) -> Option<&StateValueMetadata> { + use WriteOp::*; + + match self { + Creation(_) | Modification(_) | Deletion => None, + CreationWithMetadata { metadata, .. } + | ModificationWithMetadata { metadata, .. } + | DeletionWithMetadata { metadata, .. } => Some(metadata), } } } pub trait TransactionWrite { fn extract_raw_bytes(&self) -> Option>; + + fn as_state_value(&self) -> Option; } impl TransactionWrite for WriteOp { fn extract_raw_bytes(&self) -> Option> { - match self { - WriteOp::Creation(v) | WriteOp::Modification(v) => Some(v.clone()), - WriteOp::Deletion => None, - } + self.clone().into_bytes() + } + + fn as_state_value(&self) -> Option { + self.bytes().map(|bytes| match self.metadata() { + None => StateValue::new_legacy(bytes.to_vec()), + Some(metadata) => StateValue::new_with_metadata(bytes.to_vec(), metadata.clone()), + }) } } @@ -77,6 +199,25 @@ impl std::fmt::Debug for WriteOp { .collect::() ), WriteOp::Deletion => write!(f, "Deletion"), + WriteOp::CreationWithMetadata { data, metadata } => write!( + f, + "CreationWithMetadata({}, metadata:{:?})", + data.iter() + .map(|byte| format!("{:02x}", byte)) + .collect::(), + metadata, + ), + WriteOp::ModificationWithMetadata { data, metadata } => write!( + f, + "ModificationWithMetadata({}, metadata:{:?})", + data.iter() + .map(|byte| format!("{:02x}", byte)) + .collect::(), + metadata, + ), + 
WriteOp::DeletionWithMetadata { metadata } => { + write!(f, "DeletionWithMetadata(metadata:{:?})", metadata,) + }, } } } @@ -179,24 +320,12 @@ impl WriteSetMut { pub fn squash(mut self, other: Self) -> Result { use btree_map::Entry::*; - use WriteOp::*; for (key, op) in other.write_set.into_iter() { match self.write_set.entry(key) { Occupied(mut entry) => { - let r = entry.get_mut(); - match (&r, op) { - (Modification(_) | Creation(_), Creation(_)) - | (Deletion, Deletion | Modification(_)) => { - bail!("The given change sets cannot be squashed") - }, - (Modification(_), Modification(data)) => *r = Modification(data), - (Creation(_), Modification(data)) => *r = Creation(data), - (Modification(_), Deletion) => *r = Deletion, - (Deletion, Creation(data)) => *r = Modification(data), - (Creation(_), Deletion) => { - entry.remove(); - }, + if !WriteOp::squash(entry.get_mut(), op)? { + entry.remove(); } }, Vacant(entry) => { From 4bf3ee404d951f940957c6c5cf2c2ae7d0c59681 Mon Sep 17 00:00:00 2001 From: aldenhu Date: Mon, 6 Feb 2023 21:28:01 +0000 Subject: [PATCH 06/14] refactor: squash SessionOutput::into_change_set() into SessionExt::finish() SessionOutput is removed entirely, SessionExt::finish() directly returns ChangeSetExt --- aptos-move/aptos-debugger/src/lib.rs | 11 +- aptos-move/aptos-vm/src/aptos_vm.rs | 32 ++-- aptos-move/aptos-vm/src/aptos_vm_impl.rs | 5 +- aptos-move/aptos-vm/src/move_vm_ext/mod.rs | 2 +- .../aptos-vm/src/move_vm_ext/session.rs | 167 +++++++----------- aptos-move/e2e-tests/src/executor.rs | 22 ++- aptos-move/vm-genesis/src/lib.rs | 35 ++-- .../src/writeset_builder.rs | 16 +- 8 files changed, 117 insertions(+), 173 deletions(-) diff --git a/aptos-move/aptos-debugger/src/lib.rs b/aptos-move/aptos-debugger/src/lib.rs index 28d9b8ee7007f..fce641eb4be6e 100644 --- a/aptos-move/aptos-debugger/src/lib.rs +++ b/aptos-move/aptos-debugger/src/lib.rs @@ -187,15 +187,14 @@ impl AptosDebugger { .unwrap(); let mut session = 
move_vm.new_session(&state_view_storage, SessionId::Void); f(&mut session).map_err(|err| format_err!("Unexpected VM Error: {:?}", err))?; - session - .finish() - .map_err(|err| format_err!("Unexpected VM Error: {:?}", err))? - .into_change_set( + let change_set_ext = session + .finish( &mut (), &ChangeSetConfigs::unlimited_at_gas_feature_version(LATEST_GAS_FEATURE_VERSION), ) - .map_err(|err| format_err!("Unexpected VM Error: {:?}", err)) - .map(|res| res.into_inner().1) + .map_err(|err| format_err!("Unexpected VM Error: {:?}", err))?; + let (_delta_change_set, change_set) = change_set_ext.into_inner(); + Ok(change_set) } } diff --git a/aptos-move/aptos-vm/src/aptos_vm.rs b/aptos-move/aptos-vm/src/aptos_vm.rs index ebc6dd13273e0..1a3adcd5cf666 100644 --- a/aptos-move/aptos-vm/src/aptos_vm.rs +++ b/aptos-move/aptos-vm/src/aptos_vm.rs @@ -289,9 +289,8 @@ impl AptosVM { .run_success_epilogue(&mut session, gas_meter.balance(), txn_data, log_context)?; let epilogue_change_set_ext = session - .finish() - .map_err(|e| e.into_vm_status())? 
- .into_change_set(&mut (), gas_meter.change_set_configs())?; + .finish(&mut (), gas_meter.change_set_configs()) + .map_err(|e| e.into_vm_status())?; let change_set_ext = user_txn_change_set_ext .squash(epilogue_change_set_ext) .map_err(|_err| VMStatus::Error(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR))?; @@ -396,9 +395,9 @@ impl AptosVM { new_published_modules_loaded, )?; - let session_output = session.finish().map_err(|e| e.into_vm_status())?; - let change_set_ext = - session_output.into_change_set(&mut (), gas_meter.change_set_configs())?; + let change_set_ext = session + .finish(&mut (), gas_meter.change_set_configs()) + .map_err(|e| e.into_vm_status())?; // Charge gas for write set gas_meter.charge_write_set_gas(change_set_ext.write_set().iter())?; @@ -565,9 +564,9 @@ impl AptosVM { new_published_modules_loaded, )?; - let session_output = session.finish().map_err(|e| e.into_vm_status())?; - let change_set_ext = - session_output.into_change_set(&mut (), gas_meter.change_set_configs())?; + let change_set_ext = session + .finish(&mut (), gas_meter.change_set_configs()) + .map_err(|e| e.into_vm_status())?; // Charge gas for write set gas_meter.charge_write_set_gas(change_set_ext.write_set().iter())?; @@ -836,24 +835,15 @@ impl AptosVM { ) .map_err(Err)?; - let execution_result = tmp_session + tmp_session .execute_script( script.code(), script.ty_args().to_vec(), args, &mut gas_meter, ) - .and_then(|_| tmp_session.finish()) - .map_err(|e| e.into_vm_status()); - - match execution_result { - Ok(session_out) => session_out - .into_change_set(&mut (), &change_set_configs) - .map_err(Err)?, - Err(e) => { - return Err(Ok((e, discard_error_output(StatusCode::INVALID_WRITE_SET)))); - }, - } + .and_then(|_| tmp_session.finish(&mut (), &change_set_configs)) + .map_err(|e| Err(e.into_vm_status()))? 
}, }) } diff --git a/aptos-move/aptos-vm/src/aptos_vm_impl.rs b/aptos-move/aptos-vm/src/aptos_vm_impl.rs index 87d1422b065c0..94de429c19998 100644 --- a/aptos-move/aptos-vm/src/aptos_vm_impl.rs +++ b/aptos-move/aptos-vm/src/aptos_vm_impl.rs @@ -588,8 +588,9 @@ pub(crate) fn get_transaction_output( .checked_sub(gas_left) .expect("Balance should always be less than or equal to max gas amount"); - let session_out = session.finish().map_err(|e| e.into_vm_status())?; - let change_set_ext = session_out.into_change_set(ap_cache, change_set_configs)?; + let change_set_ext = session + .finish(ap_cache, change_set_configs) + .map_err(|e| e.into_vm_status())?; let (delta_change_set, change_set) = change_set_ext.into_inner(); let (write_set, events) = change_set.into_inner(); diff --git a/aptos-move/aptos-vm/src/move_vm_ext/mod.rs b/aptos-move/aptos-vm/src/move_vm_ext/mod.rs index 0b17da2363117..53ec00b6980bc 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/mod.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/mod.rs @@ -9,6 +9,6 @@ mod vm; pub use crate::move_vm_ext::{ resolver::MoveResolverExt, - session::{SessionExt, SessionId, SessionOutput}, + session::{SessionExt, SessionId}, vm::{verifier_config, MoveVmExt}, }; diff --git a/aptos-move/aptos-vm/src/move_vm_ext/session.rs b/aptos-move/aptos-vm/src/move_vm_ext/session.rs index f704f3da7be1d..bcbfd7fdeefe4 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/session.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/session.rs @@ -33,7 +33,7 @@ use move_core_types::{ language_storage::{ModuleId, StructTag}, vm_status::{StatusCode, VMStatus}, }; -use move_table_extension::{NativeTableContext, TableChange, TableChangeSet}; +use move_table_extension::{NativeTableContext, TableChangeSet}; use move_vm_runtime::{move_vm::MoveVM, session::Session}; use serde::{Deserialize, Serialize}; use std::{ @@ -100,7 +100,7 @@ pub struct SessionExt<'r, 'l, S> { impl<'r, 'l, S> SessionExt<'r, 'l, S> where - S: MoveResolverExt, + S: MoveResolverExt + 'r, { 
pub fn new(inner: Session<'r, 'l, S>, move_vm: &'l MoveVM, remote: &'r S) -> Self { Self { @@ -109,7 +109,11 @@ where } } - pub fn finish(self) -> VMResult { + pub fn finish( + self, + ap_cache: &mut C, + configs: &ChangeSetConfigs, + ) -> VMResult { let (change_set, events, mut extensions) = self.inner.finish_with_extensions()?; let (change_set, resource_group_change_set) = Self::split_and_merge_resource_groups(&self.remote, change_set)?; @@ -122,13 +126,16 @@ where let aggregator_context: NativeAggregatorContext = extensions.remove(); let aggregator_change_set = aggregator_context.into_change_set(); - Ok(SessionOutput { + Self::convert_change_set( change_set, resource_group_change_set, events, table_change_set, aggregator_change_set, - }) + ap_cache, + configs, + ) + .map_err(|status| PartialVMError::new(status.status_code()).finish(Location::Undefined)) } pub fn extract_publish_request(&mut self) -> Option { @@ -245,100 +252,52 @@ where Ok((change_set_filtered, resource_group_change_set)) } -} - -impl<'r, 'l, S> Deref for SessionExt<'r, 'l, S> { - type Target = Session<'r, 'l, S>; - - fn deref(&self) -> &Self::Target { - &self.inner - } -} -impl<'r, 'l, S> DerefMut for SessionExt<'r, 'l, S> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.inner - } -} - -pub struct SessionOutput { - pub change_set: MoveChangeSet, - pub resource_group_change_set: MoveChangeSet, - pub events: Vec, - pub table_change_set: TableChangeSet, - pub aggregator_change_set: AggregatorChangeSet, -} - -// TODO: Move this into the Move repo. 
-fn squash_table_change_sets( - base: &mut TableChangeSet, - other: TableChangeSet, -) -> Result<(), VMStatus> { - base.new_tables.extend(other.new_tables); - for removed_table in &base.removed_tables { - base.new_tables.remove(removed_table); - } - // There's chance that a table is added in `self`, and an item is added to that table in - // `self`, and later the item is deleted in `other`, netting to a NOOP for that item, - // but this is an tricky edge case that we don't expect to happen too much, it doesn't hurt - // too much to just keep the deletion. It's safe as long as we do it that way consistently. - base.removed_tables.extend(other.removed_tables.into_iter()); - for (handle, changes) in other.changes.into_iter() { - let my_changes = base.changes.entry(handle).or_insert(TableChange { - entries: Default::default(), - }); - my_changes.entries.extend(changes.entries.into_iter()); - } - Ok(()) -} - -impl SessionOutput { - pub fn into_change_set( - self, + pub fn convert_change_set( + change_set: MoveChangeSet, + resource_group_change_set: MoveChangeSet, + events: Vec, + table_change_set: TableChangeSet, + aggregator_change_set: AggregatorChangeSet, ap_cache: &mut C, configs: &ChangeSetConfigs, ) -> Result { - let Self { - change_set, - resource_group_change_set, - events, - table_change_set, - aggregator_change_set, - } = self; - let mut write_set_mut = WriteSetMut::new(Vec::new()); let mut delta_change_set = DeltaChangeSet::empty(); for (addr, account_changeset) in change_set.into_inner() { let (modules, resources) = account_changeset.into_inner(); for (struct_tag, blob_op) in resources { - let ap = ap_cache.get_resource_path(addr, struct_tag); - let op = - convert_write_op(blob_op, configs.legacy_resource_creation_as_modification()); - write_set_mut.insert((StateKey::access_path(ap), op)) + let state_key = StateKey::access_path(ap_cache.get_resource_path(addr, struct_tag)); + let op = Self::convert_write_op( + blob_op, + 
configs.legacy_resource_creation_as_modification(), + ); + write_set_mut.insert((state_key, op)) } for (name, blob_op) in modules { - let ap = ap_cache.get_module_path(ModuleId::new(addr, name)); - let op = convert_write_op(blob_op, false); - - write_set_mut.insert((StateKey::access_path(ap), op)) + let state_key = + StateKey::access_path(ap_cache.get_module_path(ModuleId::new(addr, name))); + let op = Self::convert_write_op(blob_op, false); + write_set_mut.insert((state_key, op)) } } for (addr, account_changeset) in resource_group_change_set.into_inner() { let (_, resources) = account_changeset.into_inner(); for (struct_tag, blob_op) in resources { - let ap = ap_cache.get_resource_group_path(addr, struct_tag); - let op = convert_write_op(blob_op, false); - write_set_mut.insert((StateKey::access_path(ap), op)) + let state_key = + StateKey::access_path(ap_cache.get_resource_group_path(addr, struct_tag)); + let op = Self::convert_write_op(blob_op, false); + write_set_mut.insert((state_key, op)) } } for (handle, change) in table_change_set.changes { for (key, value_op) in change.entries { let state_key = StateKey::table_item(handle.into(), key); - let op = convert_write_op(value_op, false); + let op = Self::convert_write_op(value_op, false); write_set_mut.insert((state_key, op)) } } @@ -382,43 +341,37 @@ impl SessionOutput { )) } - pub fn squash(&mut self, other: Self) -> Result<(), VMStatus> { - self.change_set - .squash(other.change_set) - .map_err(|_| VMStatus::Error(StatusCode::DATA_FORMAT_ERROR))?; - self.events.extend(other.events.into_iter()); - - // Squash the table changes. - squash_table_change_sets(&mut self.table_change_set, other.table_change_set)?; - - // Squash aggregator changes. 
- self.aggregator_change_set - .squash(other.aggregator_change_set)?; + fn convert_write_op( + move_storage_op: MoveStorageOp>, + creation_as_modification: bool, + ) -> WriteOp { + use MoveStorageOp::*; + use WriteOp::*; + + match move_storage_op { + Delete => Deletion, + New(blob) => { + if creation_as_modification { + Modification(blob) + } else { + Creation(blob) + } + }, + Modify(blob) => Modification(blob), + } + } +} - self.resource_group_change_set - .squash(other.resource_group_change_set) - .map_err(|_| VMStatus::Error(StatusCode::DATA_FORMAT_ERROR))?; +impl<'r, 'l, S> Deref for SessionExt<'r, 'l, S> { + type Target = Session<'r, 'l, S>; - Ok(()) + fn deref(&self) -> &Self::Target { + &self.inner } } -fn convert_write_op( - move_storage_op: MoveStorageOp>, - creation_as_modification: bool, -) -> WriteOp { - use MoveStorageOp::*; - use WriteOp::*; - - match move_storage_op { - Delete => Deletion, - New(blob) => { - if creation_as_modification { - Modification(blob) - } else { - Creation(blob) - } - }, - Modify(blob) => Modification(blob), +impl<'r, 'l, S> DerefMut for SessionExt<'r, 'l, S> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner } } diff --git a/aptos-move/e2e-tests/src/executor.rs b/aptos-move/e2e-tests/src/executor.rs index 9b232d57f2b33..be094f977f057 100644 --- a/aptos-move/e2e-tests/src/executor.rs +++ b/aptos-move/e2e-tests/src/executor.rs @@ -601,15 +601,13 @@ impl FakeExecutor { e.into_vm_status() ) }); - let session_out = session.finish().expect("Failed to generate txn effects"); - // TODO: Support deltas in fake executor. 
- let (_, change_set) = session_out - .into_change_set( + let change_set_ext = session + .finish( &mut (), &ChangeSetConfigs::unlimited_at_gas_feature_version(LATEST_GAS_FEATURE_VERSION), ) - .expect("Failed to generate writeset") - .into_inner(); + .expect("Failed to generate txn effects"); + let (_delta_change_set, change_set) = change_set_ext.into_inner(); let (write_set, _events) = change_set.into_inner(); write_set }; @@ -643,15 +641,15 @@ impl FakeExecutor { &mut UnmeteredGasMeter, ) .map_err(|e| e.into_vm_status())?; - let session_out = session.finish().expect("Failed to generate txn effects"); - // TODO: Support deltas in fake executor. - let (_, change_set) = session_out - .into_change_set( + + let change_set_ext = session + .finish( &mut (), &ChangeSetConfigs::unlimited_at_gas_feature_version(LATEST_GAS_FEATURE_VERSION), ) - .expect("Failed to generate writeset") - .into_inner(); + .expect("Failed to generate txn effects"); + // TODO: Support deltas in fake executor. + let (_delta_change_set, change_set) = change_set_ext.into_inner(); let (writeset, _events) = change_set.into_inner(); Ok(writeset) } diff --git a/aptos-move/vm-genesis/src/lib.rs b/aptos-move/vm-genesis/src/lib.rs index 1d33380e20e1f..30d6c684d6f05 100644 --- a/aptos-move/vm-genesis/src/lib.rs +++ b/aptos-move/vm-genesis/src/lib.rs @@ -134,7 +134,12 @@ pub fn encode_aptos_mainnet_genesis_transaction( // Reconfiguration should happen after all on-chain invocations. 
emit_new_block_and_epoch_event(&mut session); - let mut session1_out = session.finish().unwrap(); + let cs1 = session + .finish( + &mut (), + &ChangeSetConfigs::unlimited_at_gas_feature_version(LATEST_GAS_FEATURE_VERSION), + ) + .unwrap(); // Publish the framework, using a different session id, in case both scripts creates tables let state_view = GenesisStateView::new(); @@ -145,16 +150,14 @@ pub fn encode_aptos_mainnet_genesis_transaction( let id2 = HashValue::new(id2_arr); let mut session = move_vm.new_session(&data_cache, SessionId::genesis(id2)); publish_framework(&mut session, framework); - let session2_out = session.finish().unwrap(); - - session1_out.squash(session2_out).unwrap(); - - let change_set_ext = session1_out - .into_change_set( + let cs2 = session + .finish( &mut (), &ChangeSetConfigs::unlimited_at_gas_feature_version(LATEST_GAS_FEATURE_VERSION), ) .unwrap(); + let change_set_ext = cs1.squash(cs2).unwrap(); + let (delta_change_set, change_set) = change_set_ext.into_inner(); // Publishing stdlib should not produce any deltas around aggregators and map to write ops and @@ -245,7 +248,12 @@ pub fn encode_genesis_change_set( // Reconfiguration should happen after all on-chain invocations. 
emit_new_block_and_epoch_event(&mut session); - let mut session1_out = session.finish().unwrap(); + let cs1 = session + .finish( + &mut (), + &ChangeSetConfigs::unlimited_at_gas_feature_version(LATEST_GAS_FEATURE_VERSION), + ) + .unwrap(); let state_view = GenesisStateView::new(); let data_cache = state_view.as_move_resolver(); @@ -256,16 +264,15 @@ pub fn encode_genesis_change_set( let id2 = HashValue::new(id2_arr); let mut session = move_vm.new_session(&data_cache, SessionId::genesis(id2)); publish_framework(&mut session, framework); - let session2_out = session.finish().unwrap(); - - session1_out.squash(session2_out).unwrap(); - - let change_set_ext = session1_out - .into_change_set( + let cs2 = session + .finish( &mut (), &ChangeSetConfigs::unlimited_at_gas_feature_version(LATEST_GAS_FEATURE_VERSION), ) .unwrap(); + + let change_set_ext = cs1.squash(cs2).unwrap(); + let (delta_change_set, change_set) = change_set_ext.into_inner(); // Publishing stdlib should not produce any deltas around aggregators and map to write ops and diff --git a/aptos-move/writeset-transaction-generator/src/writeset_builder.rs b/aptos-move/writeset-transaction-generator/src/writeset_builder.rs index 29aec0eb5b906..225e93f7ebfe4 100644 --- a/aptos-move/writeset-transaction-generator/src/writeset_builder.rs +++ b/aptos-move/writeset-transaction-generator/src/writeset_builder.rs @@ -119,7 +119,7 @@ where ) .unwrap(); let state_view_storage = StorageAdapter::new(state_view); - let session_out = { + let change_set_ext = { // TODO: specify an id by human and pass that in. let genesis_id = HashValue::zero(); let mut session = GenesisSession( @@ -130,19 +130,15 @@ where session.enable_reconfiguration(); session .0 - .finish() + .finish( + &mut (), + &ChangeSetConfigs::unlimited_at_gas_feature_version(LATEST_GAS_FEATURE_VERSION), + ) .map_err(|err| format_err!("Unexpected VM Error: {:?}", err)) .unwrap() }; // Genesis never produces the delta change set. 
- let (_, change_set) = session_out - .into_change_set( - &mut (), - &ChangeSetConfigs::unlimited_at_gas_feature_version(LATEST_GAS_FEATURE_VERSION), - ) - .map_err(|err| format_err!("Unexpected VM Error: {:?}", err)) - .unwrap() - .into_inner(); + let (_delta_change_set, change_set) = change_set_ext.into_inner(); change_set } From 2fd4dfab3e09479a895af9d0044650c01014ff80 Mon Sep 17 00:00:00 2001 From: aldenhu Date: Wed, 8 Feb 2023 20:05:13 +0000 Subject: [PATCH 07/14] Make MoveResolverExt a StateView and ConfigStorage So that in the VM adapter and session it's possible to access on-chain configs and state slot metadata --- aptos-move/aptos-vm/src/data_cache.rs | 17 ++++++++++++++++- aptos-move/aptos-vm/src/move_vm_ext/resolver.rs | 4 +++- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/aptos-move/aptos-vm/src/data_cache.rs b/aptos-move/aptos-vm/src/data_cache.rs index 68c7fd1d0977b..29be3b0332eff 100644 --- a/aptos-move/aptos-vm/src/data_cache.rs +++ b/aptos-move/aptos-vm/src/data_cache.rs @@ -91,6 +91,12 @@ impl<'a, 'm, S: MoveResolverExt> TableResolver for MoveResolverWithVMMetadata<'a } } +impl<'a, 'm, S: MoveResolverExt> ConfigStorage for MoveResolverWithVMMetadata<'a, 'm, S> { + fn fetch_config(&self, access_path: AccessPath) -> Option> { + self.move_resolver.fetch_config(access_path) + } +} + impl<'a, 'm, S: MoveResolverExt> StateStorageUsageResolver for MoveResolverWithVMMetadata<'a, 'm, S> { @@ -99,7 +105,15 @@ impl<'a, 'm, S: MoveResolverExt> StateStorageUsageResolver } } -// Adapter to convert a `StateView` into a `RemoteCache`. +impl<'a, 'm, S: MoveResolverExt> Deref for MoveResolverWithVMMetadata<'a, 'm, S> { + type Target = S; + + fn deref(&self) -> &Self::Target { + self.move_resolver + } +} + +/// Adapter to convert a `StateView` into a `MoveResolverExt`. pub struct StorageAdapter<'a, S>(&'a S); impl<'a, S: StateView> StorageAdapter<'a, S> { @@ -202,6 +216,7 @@ impl AsMoveResolver for S { } } +/// Owned version of `StorageAdapter`. 
pub struct StorageAdapterOwned { state_view: S, } diff --git a/aptos-move/aptos-vm/src/move_vm_ext/resolver.rs b/aptos-move/aptos-vm/src/move_vm_ext/resolver.rs index 12b4081be86bd..746a2cf535b5c 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/resolver.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/resolver.rs @@ -2,6 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 use aptos_framework::{natives::state_storage::StateStorageUsageResolver, RuntimeModuleMetadataV1}; +use aptos_state_view::StateView; +use aptos_types::on_chain_config::ConfigStorage; use move_binary_format::errors::{Location, PartialVMError, VMError}; use move_core_types::{ account_address::AccountAddress, @@ -13,7 +15,7 @@ use move_table_extension::TableResolver; use std::collections::BTreeMap; pub trait MoveResolverExt: - MoveResolver + TableResolver + StateStorageUsageResolver + MoveResolver + TableResolver + StateStorageUsageResolver + ConfigStorage + StateView { fn get_module_metadata(&self, module_id: ModuleId) -> Option; From ddcd20756d4e3343b6b7000eb7f68c30dc77e242 Mon Sep 17 00:00:00 2001 From: aldenhu Date: Wed, 8 Feb 2023 03:01:07 +0000 Subject: [PATCH 08/14] track storage slot payer --- .../aptos-gas/src/transaction/storage.rs | 2 +- aptos-move/aptos-vm/src/data_cache.rs | 11 +- .../aptos-vm/src/move_vm_ext/session.rs | 220 +++++++++++++++--- aptos-move/aptos-vm/src/move_vm_ext/vm.rs | 8 + types/src/on_chain_config/aptos_features.rs | 4 + types/src/on_chain_config/mod.rs | 2 + types/src/on_chain_config/timestamp.rs | 15 ++ types/src/state_store/state_value.rs | 24 +- 8 files changed, 242 insertions(+), 44 deletions(-) create mode 100644 types/src/on_chain_config/timestamp.rs diff --git a/aptos-move/aptos-gas/src/transaction/storage.rs b/aptos-move/aptos-gas/src/transaction/storage.rs index e98a2c3b82eb3..c8b1bd29660ac 100644 --- a/aptos-move/aptos-gas/src/transaction/storage.rs +++ b/aptos-move/aptos-gas/src/transaction/storage.rs @@ -275,7 +275,7 @@ impl ChangeSetConfigs { // Bug fixed at 
gas_feature_version 3 where (non-group) resource creation was converted to // modification. // Modules and table items were not affected (https://github.com/aptos-labs/aptos-core/pull/4722/commits/7c5e52297e8d1a6eac67a68a804ab1ca2a0b0f37). - // Resource groups were not affected because they were + // Resource groups and state values with metadata were not affected because they were // introduced later than feature_version 3 on all networks. self.gas_feature_version < 3 } diff --git a/aptos-move/aptos-vm/src/data_cache.rs b/aptos-move/aptos-vm/src/data_cache.rs index 29be3b0332eff..f2bf7b444d073 100644 --- a/aptos-move/aptos-vm/src/data_cache.rs +++ b/aptos-move/aptos-vm/src/data_cache.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 //! Scratchpad for on chain values during the execution. -use crate::move_vm_ext::MoveResolverExt; +use crate::move_vm_ext::{MoveResolverExt, MoveVmExt}; #[allow(unused_imports)] use anyhow::Error; use aptos_framework::{natives::state_storage::StateStorageUsageResolver, RuntimeModuleMetadataV1}; @@ -20,21 +20,24 @@ use move_core_types::{ vm_status::StatusCode, }; use move_table_extension::{TableHandle, TableResolver}; -use move_vm_runtime::move_vm::MoveVM; use std::ops::{Deref, DerefMut}; pub struct MoveResolverWithVMMetadata<'a, 'm, S> { move_resolver: &'a S, - move_vm: &'m MoveVM, + move_vm: &'m MoveVmExt, } impl<'a, 'm, S: MoveResolverExt> MoveResolverWithVMMetadata<'a, 'm, S> { - pub fn new(move_resolver: &'a S, move_vm: &'m MoveVM) -> Self { + pub fn new(move_resolver: &'a S, move_vm: &'m MoveVmExt) -> Self { Self { move_resolver, move_vm, } } + + pub fn vm(&self) -> &MoveVmExt { + self.move_vm + } } impl<'a, 'm, S: MoveResolverExt> MoveResolverExt for MoveResolverWithVMMetadata<'a, 'm, S> { diff --git a/aptos-move/aptos-vm/src/move_vm_ext/session.rs b/aptos-move/aptos-vm/src/move_vm_ext/session.rs index bcbfd7fdeefe4..9ba13bbdcf65d 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/session.rs +++ 
b/aptos-move/aptos-vm/src/move_vm_ext/session.rs @@ -2,8 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - access_path_cache::AccessPathCache, data_cache::MoveResolverWithVMMetadata, - move_vm_ext::MoveResolverExt, transaction_metadata::TransactionMetadata, + access_path_cache::AccessPathCache, + data_cache::MoveResolverWithVMMetadata, + move_vm_ext::{MoveResolverExt, MoveVmExt}, + transaction_metadata::TransactionMetadata, }; use aptos_aggregator::{ aggregator_extension::AggregatorID, @@ -17,10 +19,12 @@ use aptos_framework::natives::{ code::{NativeCodeContext, PublishRequest}, }; use aptos_gas::ChangeSetConfigs; +use aptos_logger::error; use aptos_types::{ block_metadata::BlockMetadata, contract_event::ContractEvent, - state_store::{state_key::StateKey, table::TableHandle}, + on_chain_config::{CurrentTimeMicroseconds, OnChainConfig}, + state_store::{state_key::StateKey, state_value::StateValueMetadata, table::TableHandle}, transaction::{ChangeSet, SignatureCheckedTransaction}, write_set::{WriteOp, WriteSetMut}, }; @@ -30,11 +34,11 @@ use move_core_types::{ effects::{ AccountChangeSet, ChangeSet as MoveChangeSet, Event as MoveEvent, Op as MoveStorageOp, }, - language_storage::{ModuleId, StructTag}, + language_storage::{ModuleId, StructTag, CORE_CODE_ADDRESS}, vm_status::{StatusCode, VMStatus}, }; use move_table_extension::{NativeTableContext, TableChangeSet}; -use move_vm_runtime::{move_vm::MoveVM, session::Session}; +use move_vm_runtime::session::Session; use serde::{Deserialize, Serialize}; use std::{ collections::BTreeMap, @@ -91,21 +95,35 @@ impl SessionId { pub fn as_uuid(&self) -> HashValue { self.hash() } + + pub fn sender(&self) -> Option { + match self { + SessionId::Txn { sender, .. } => Some(*sender), + SessionId::BlockMeta { .. } | SessionId::Genesis { .. 
} | SessionId::Void => None, + } + } } pub struct SessionExt<'r, 'l, S> { inner: Session<'r, 'l, S>, remote: MoveResolverWithVMMetadata<'r, 'l, S>, + new_slot_payer: Option, } impl<'r, 'l, S> SessionExt<'r, 'l, S> where S: MoveResolverExt + 'r, { - pub fn new(inner: Session<'r, 'l, S>, move_vm: &'l MoveVM, remote: &'r S) -> Self { + pub fn new( + inner: Session<'r, 'l, S>, + move_vm: &'l MoveVmExt, + remote: &'r S, + new_slot_payer: Option, + ) -> Self { Self { inner, remote: MoveResolverWithVMMetadata::new(remote, move_vm), + new_slot_payer, } } @@ -114,9 +132,21 @@ where ap_cache: &mut C, configs: &ChangeSetConfigs, ) -> VMResult { + let (change_set_ext, _current_timestamp) = + self.finish_with_current_timestamp(ap_cache, configs)?; + + Ok(change_set_ext) + } + + pub fn finish_with_current_timestamp( + self, + ap_cache: &mut C, + configs: &ChangeSetConfigs, + ) -> VMResult<(ChangeSetExt, Option)> { let (change_set, events, mut extensions) = self.inner.finish_with_extensions()?; - let (change_set, resource_group_change_set) = + let (change_set, resource_group_change_set, updated_timestamp) = Self::split_and_merge_resource_groups(&self.remote, change_set)?; + let current_time = Self::get_current_timestamp(updated_timestamp, &self.remote); let table_context: NativeTableContext = extensions.remove(); let table_change_set = table_context @@ -126,7 +156,10 @@ where let aggregator_context: NativeAggregatorContext = extensions.remove(); let aggregator_change_set = aggregator_context.into_change_set(); - Self::convert_change_set( + let change_set_ext = Self::convert_change_set( + &self.remote, + self.new_slot_payer, + current_time.as_ref(), change_set, resource_group_change_set, events, @@ -135,7 +168,9 @@ where ap_cache, configs, ) - .map_err(|status| PartialVMError::new(status.status_code()).finish(Location::Undefined)) + .map_err(|status| PartialVMError::new(status.status_code()).finish(Location::Undefined))?; + + Ok((change_set_ext, current_time)) } pub fn 
extract_publish_request(&mut self) -> Option { @@ -164,13 +199,21 @@ where fn split_and_merge_resource_groups( remote: &MoveResolverWithVMMetadata, change_set: MoveChangeSet, - ) -> VMResult<(MoveChangeSet, MoveChangeSet)> { + ) -> VMResult<( + MoveChangeSet, + MoveChangeSet, + Result, ()>, + )> { // The use of this implies that we could theoretically call unwrap with no consequences, // but using unwrap means the code panics if someone can come up with an attack. let common_error = PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR) .finish(Location::Undefined); let mut change_set_filtered = MoveChangeSet::new(); let mut resource_group_change_set = MoveChangeSet::new(); + // Assuming the timestamp has not been updated in this session by returning Ok(None), which + // results in the call site to assume the timestamp to be the same as in the base state + // view + let mut updated_timestamp: Result, ()> = Ok(None); for (addr, account_changeset) in change_set.into_inner() { let mut resource_groups: BTreeMap = BTreeMap::new(); @@ -187,6 +230,12 @@ where .add_resource_op(struct_tag, blob_op) .map_err(|_| common_error.clone())?; } else { + if addr == CORE_CODE_ADDRESS + && CurrentTimeMicroseconds::struct_tag() == struct_tag + { + updated_timestamp = Self::parse_updated_timestamp(&blob_op) + } + change_set_filtered .add_resource_op(addr, struct_tag, blob_op) .map_err(|_| common_error.clone())?; @@ -250,10 +299,52 @@ where } } - Ok((change_set_filtered, resource_group_change_set)) + Ok(( + change_set_filtered, + resource_group_change_set, + updated_timestamp, + )) + } + + // Seen the global timestamp resource being updated. 
+ // returns Ok(Some(timestamp)) if successfully parsed it + // returns Err(()) if failed to parse or seeing a delete, which will result in the call site + // deem the timestamp to be None + fn parse_updated_timestamp( + op: &MoveStorageOp>, + ) -> Result, ()> { + match op { + MoveStorageOp::New(bytes) | MoveStorageOp::Modify(bytes) => { + match bcs::from_bytes(bytes) { + Ok(current_time) => Ok(Some(current_time)), + Err(err) => { + error!("Failed to deserialize CurrentTimeMicroseconds. {}", err); + Err(()) + }, + } + }, + MoveStorageOp::Delete => { + error!("CurrentTimeMicroseconds got deleted."); + Err(()) + }, + } + } + + fn get_current_timestamp( + updated_timestamp: Result, ()>, + remote: &MoveResolverWithVMMetadata, + ) -> Option { + match updated_timestamp { + Ok(Some(timestamp)) => Some(timestamp), + Ok(None) => CurrentTimeMicroseconds::fetch_config(remote), + Err(()) => None, + } } pub fn convert_change_set( + remote: &MoveResolverWithVMMetadata, + new_slot_payer: Option, + current_time: Option<&CurrentTimeMicroseconds>, change_set: MoveChangeSet, resource_group_change_set: MoveChangeSet, events: Vec, @@ -264,22 +355,36 @@ where ) -> Result { let mut write_set_mut = WriteSetMut::new(Vec::new()); let mut delta_change_set = DeltaChangeSet::empty(); + let mut new_slot_metadata: Option = None; + if remote.vm().features().is_storage_slot_metadata_enabled() { + if let Some(payer) = new_slot_payer { + if let Some(current_time) = current_time { + new_slot_metadata = Some(StateValueMetadata::new(payer, 0, current_time)); + } + } + } + let woc = WriteOpConverter { + remote, + new_slot_metadata, + }; for (addr, account_changeset) in change_set.into_inner() { let (modules, resources) = account_changeset.into_inner(); for (struct_tag, blob_op) in resources { let state_key = StateKey::access_path(ap_cache.get_resource_path(addr, struct_tag)); - let op = Self::convert_write_op( + let op = woc.convert( + &state_key, blob_op, 
configs.legacy_resource_creation_as_modification(), - ); + )?; + write_set_mut.insert((state_key, op)) } for (name, blob_op) in modules { let state_key = StateKey::access_path(ap_cache.get_module_path(ModuleId::new(addr, name))); - let op = Self::convert_write_op(blob_op, false); + let op = woc.convert(&state_key, blob_op, false)?; write_set_mut.insert((state_key, op)) } } @@ -289,7 +394,7 @@ where for (struct_tag, blob_op) in resources { let state_key = StateKey::access_path(ap_cache.get_resource_group_path(addr, struct_tag)); - let op = Self::convert_write_op(blob_op, false); + let op = woc.convert(&state_key, blob_op, false)?; write_set_mut.insert((state_key, op)) } } @@ -297,7 +402,7 @@ where for (handle, change) in table_change_set.changes { for (key, value_op) in change.entries { let state_key = StateKey::table_item(handle.into(), key); - let op = Self::convert_write_op(value_op, false); + let op = woc.convert(&state_key, value_op, false)?; write_set_mut.insert((state_key, op)) } } @@ -309,12 +414,13 @@ where match change { AggregatorChange::Write(value) => { - let write_op = WriteOp::Modification(serialize(&value)); + let write_op = + woc.convert(&state_key, MoveStorageOp::Modify(serialize(&value)), false)?; write_set_mut.insert((state_key, write_op)); }, AggregatorChange::Merge(delta_op) => delta_change_set.insert((state_key, delta_op)), AggregatorChange::Delete => { - let write_op = WriteOp::Deletion; + let write_op = woc.convert(&state_key, MoveStorageOp::Delete, false)?; write_set_mut.insert((state_key, write_op)); }, } @@ -340,26 +446,6 @@ where Arc::new(configs.clone()), )) } - - fn convert_write_op( - move_storage_op: MoveStorageOp>, - creation_as_modification: bool, - ) -> WriteOp { - use MoveStorageOp::*; - use WriteOp::*; - - match move_storage_op { - Delete => Deletion, - New(blob) => { - if creation_as_modification { - Modification(blob) - } else { - Creation(blob) - } - }, - Modify(blob) => Modification(blob), - } - } } impl<'r, 'l, S> Deref for 
SessionExt<'r, 'l, S> { @@ -375,3 +461,61 @@ impl<'r, 'l, S> DerefMut for SessionExt<'r, 'l, S> { &mut self.inner } } + +struct WriteOpConverter<'r, 'l, S> { + remote: &'r MoveResolverWithVMMetadata<'r, 'l, S>, + new_slot_metadata: Option, +} + +impl<'r, 'l, S: MoveResolverExt> WriteOpConverter<'r, 'l, S> { + fn convert( + &self, + state_key: &StateKey, + move_storage_op: MoveStorageOp>, + legacy_creation_as_modification: bool, + ) -> Result { + use MoveStorageOp::*; + use WriteOp::*; + + let existing_value_opt = self + .remote + .get_state_value(state_key) + .map_err(|_| VMStatus::Error(StatusCode::STORAGE_ERROR))?; + + let write_op = match (existing_value_opt, move_storage_op) { + (None, Modify(_) | Delete) | (Some(_), New(_)) => { + return Err(VMStatus::Error( + StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR, + )) + }, + (None, New(data)) => match &self.new_slot_metadata { + None => { + if legacy_creation_as_modification { + Modification(data) + } else { + Creation(data) + } + }, + Some(metadata) => CreationWithMetadata { + data, + metadata: metadata.clone(), + }, + }, + (Some(existing_value), Modify(data)) => { + // Inherit metadata even if the feature flags is turned off, for compatibility. + match existing_value.into_metadata() { + None => Modification(data), + Some(metadata) => ModificationWithMetadata { data, metadata }, + } + }, + (Some(existing_value), Delete) => { + // Inherit metadata even if the feature flags is turned off, for compatibility. 
+ match existing_value.into_metadata() { + None => Deletion, + Some(metadata) => DeletionWithMetadata { metadata }, + } + }, + }; + Ok(write_op) + } +} diff --git a/aptos-move/aptos-vm/src/move_vm_ext/vm.rs b/aptos-move/aptos-vm/src/move_vm_ext/vm.rs index a8c5483369ba2..cd5fcb28c1453 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/vm.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/vm.rs @@ -23,6 +23,7 @@ use std::ops::Deref; pub struct MoveVmExt { inner: MoveVM, chain_id: u8, + features: Features, } impl MoveVmExt { @@ -59,6 +60,7 @@ impl MoveVmExt { }, )?, chain_id, + features, }) } @@ -78,6 +80,7 @@ impl MoveVmExt { extensions.add(NativeRistrettoPointContext::new()); extensions.add(NativeAggregatorContext::new(txn_hash, remote)); + let sender_opt = session_id.sender(); let script_hash = match session_id { SessionId::Txn { sender: _, @@ -99,8 +102,13 @@ impl MoveVmExt { self.inner.new_session_with_extensions(remote, extensions), self, remote, + sender_opt, ) } + + pub fn features(&self) -> &Features { + &self.features + } } impl Deref for MoveVmExt { diff --git a/types/src/on_chain_config/aptos_features.rs b/types/src/on_chain_config/aptos_features.rs index 0c6e7c5074380..988b0ba520288 100644 --- a/types/src/on_chain_config/aptos_features.rs +++ b/types/src/on_chain_config/aptos_features.rs @@ -51,6 +51,10 @@ impl Features { pub fn are_resource_groups_enabled(&self) -> bool { self.is_enabled(FeatureFlag::RESOURCE_GROUPS) } + + pub fn is_storage_slot_metadata_enabled(&self) -> bool { + self.is_enabled(FeatureFlag::STORAGE_SLOT_METADATA) + } } // -------------------------------------------------------------------------------------------- diff --git a/types/src/on_chain_config/mod.rs b/types/src/on_chain_config/mod.rs index 77178497fff7c..fcfc03485c2a0 100644 --- a/types/src/on_chain_config/mod.rs +++ b/types/src/on_chain_config/mod.rs @@ -23,6 +23,7 @@ mod aptos_version; mod chain_id; mod consensus_config; mod gas_schedule; +mod timestamp; mod validator_set; pub use 
self::{ @@ -36,6 +37,7 @@ pub use self::{ ProposerElectionType, }, gas_schedule::{GasSchedule, GasScheduleV2, StorageGasSchedule}, + timestamp::CurrentTimeMicroseconds, validator_set::{ConsensusScheme, ValidatorSet}, }; diff --git a/types/src/on_chain_config/timestamp.rs b/types/src/on_chain_config/timestamp.rs new file mode 100644 index 0000000000000..a2619bfd937cf --- /dev/null +++ b/types/src/on_chain_config/timestamp.rs @@ -0,0 +1,15 @@ +// Copyright (c) Aptos +// SPDX-License-Identifier: Apache-2.0 + +use crate::on_chain_config::OnChainConfig; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] +pub struct CurrentTimeMicroseconds { + pub microseconds: u64, +} + +impl OnChainConfig for CurrentTimeMicroseconds { + const MODULE_IDENTIFIER: &'static str = "timestamp"; + const TYPE_IDENTIFIER: &'static str = "CurrentTimeMicroseconds"; +} diff --git a/types/src/state_store/state_value.rs b/types/src/state_store/state_value.rs index d2f4e48f593eb..77772c1b12cb2 100644 --- a/types/src/state_store/state_value.rs +++ b/types/src/state_store/state_value.rs @@ -2,7 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - proof::SparseMerkleRangeProof, state_store::state_key::StateKey, transaction::Version, + on_chain_config::CurrentTimeMicroseconds, proof::SparseMerkleRangeProof, + state_store::state_key::StateKey, transaction::Version, }; use aptos_crypto::{ hash::{CryptoHash, SPARSE_MERKLE_PLACEHOLDER_HASH}, @@ -35,6 +36,20 @@ pub enum StateValueMetadata { }, } +impl StateValueMetadata { + pub fn new( + payer: AccountAddress, + deposit: u128, + creation_time_usecs: &CurrentTimeMicroseconds, + ) -> Self { + Self::V0 { + payer, + deposit, + creation_time_usecs: creation_time_usecs.microseconds, + } + } +} + #[derive(Clone, Debug, CryptoHasher, Eq, PartialEq, Ord, PartialOrd, Hash)] pub struct StateValue { inner: StateValueInner, @@ -123,6 +138,13 @@ impl StateValue { StateValueInner::V0(data) | 
StateValueInner::WithMetadata { data, .. } => data, } } + + pub fn into_metadata(self) -> Option { + match self.inner { + StateValueInner::V0(_) => None, + StateValueInner::WithMetadata { metadata, .. } => Some(metadata), + } + } } #[cfg(any(test, feature = "fuzzing"))] From d613f5fe5d9f8735ff1999257529225fa4c50020 Mon Sep 17 00:00:00 2001 From: aldenhu Date: Thu, 9 Feb 2023 03:19:21 +0000 Subject: [PATCH 09/14] storage_deposit contract --- .../framework/aptos-framework/doc/coin.md | 33 +- .../framework/aptos-framework/doc/overview.md | 1 + .../aptos-framework/doc/storage_deposit.md | 367 ++++++++++++++++++ .../aptos-framework/sources/coin.move | 13 +- .../sources/storage_deposit.move | 105 +++++ .../src/aptos_framework_sdk_builder.rs | 30 ++ 6 files changed, 541 insertions(+), 8 deletions(-) create mode 100644 aptos-move/framework/aptos-framework/doc/storage_deposit.md create mode 100644 aptos-move/framework/aptos-framework/sources/storage_deposit.move diff --git a/aptos-move/framework/aptos-framework/doc/coin.md b/aptos-move/framework/aptos-framework/doc/coin.md index 02c3f45e87378..04f5a85bdef83 100644 --- a/aptos-move/framework/aptos-framework/doc/coin.md +++ b/aptos-move/framework/aptos-framework/doc/coin.md @@ -21,6 +21,7 @@ This module provides the foundation for typesafe Coins. - [Function `allow_supply_upgrades`](#0x1_coin_allow_supply_upgrades) - [Function `initialize_aggregatable_coin`](#0x1_coin_initialize_aggregatable_coin) - [Function `is_aggregatable_coin_zero`](#0x1_coin_is_aggregatable_coin_zero) +- [Function `extract_from_aggregatable_coin`](#0x1_coin_extract_from_aggregatable_coin) - [Function `drain_aggregatable_coin`](#0x1_coin_drain_aggregatable_coin) - [Function `merge_aggregatable_coin`](#0x1_coin_merge_aggregatable_coin) - [Function `collect_into_aggregatable_coin`](#0x1_coin_collect_into_aggregatable_coin) @@ -707,6 +708,33 @@ Returns true if the value of aggregatable coin is zero. 
+ + + + +## Function `extract_from_aggregatable_coin` + + + +
public(friend) fun extract_from_aggregatable_coin<CoinType>(coin: &mut coin::AggregatableCoin<CoinType>, amount: u64): coin::Coin<CoinType>
+
+ + + +
+Implementation + + +
public(friend) fun extract_from_aggregatable_coin<CoinType>(coin: &mut AggregatableCoin<CoinType>, amount: u64): Coin<CoinType> {
+    aggregator::sub(&mut coin.value, (amount as u128));
+    Coin<CoinType> {
+        value: amount
+    }
+}
+
+ + +
@@ -733,10 +761,7 @@ Drains the aggregatable coin, setting it to zero and returning a standard coin. let amount = aggregator::read(&coin.value); assert!(amount <= MAX_U64, error::out_of_range(EAGGREGATABLE_COIN_VALUE_TOO_LARGE)); - aggregator::sub(&mut coin.value, amount); - Coin<CoinType> { - value: (amount as u64), - } + extract_from_aggregatable_coin(coin, (amount as u64)) } diff --git a/aptos-move/framework/aptos-framework/doc/overview.md b/aptos-move/framework/aptos-framework/doc/overview.md index a67d7af478b89..f54af8b1b327b 100644 --- a/aptos-move/framework/aptos-framework/doc/overview.md +++ b/aptos-move/framework/aptos-framework/doc/overview.md @@ -40,6 +40,7 @@ This is the reference documentation of the Aptos framework. - [`0x1::staking_contract`](staking_contract.md#0x1_staking_contract) - [`0x1::staking_proxy`](staking_proxy.md#0x1_staking_proxy) - [`0x1::state_storage`](state_storage.md#0x1_state_storage) +- [`0x1::storage_deposit`](storage_deposit.md#0x1_storage_deposit) - [`0x1::storage_gas`](storage_gas.md#0x1_storage_gas) - [`0x1::system_addresses`](system_addresses.md#0x1_system_addresses) - [`0x1::timestamp`](timestamp.md#0x1_timestamp) diff --git a/aptos-move/framework/aptos-framework/doc/storage_deposit.md b/aptos-move/framework/aptos-framework/doc/storage_deposit.md new file mode 100644 index 0000000000000..9c9fff3d81f3e --- /dev/null +++ b/aptos-move/framework/aptos-framework/doc/storage_deposit.md @@ -0,0 +1,367 @@ + + + +# Module `0x1::storage_deposit` + + + +- [Resource `GlobalStorageDeposit`](#0x1_storage_deposit_GlobalStorageDeposit) +- [Struct `SlotDepositEvent`](#0x1_storage_deposit_SlotDepositEvent) +- [Struct `ExcessBytesPenaltyEvent`](#0x1_storage_deposit_ExcessBytesPenaltyEvent) +- [Struct `SlotRefundEvent`](#0x1_storage_deposit_SlotRefundEvent) +- [Struct `DepositEntry`](#0x1_storage_deposit_DepositEntry) +- [Struct `ChargeSchedule`](#0x1_storage_deposit_ChargeSchedule) +- [Constants](#@Constants_0) +- [Function 
`initialize`](#0x1_storage_deposit_initialize) +- [Function `charge_and_refund`](#0x1_storage_deposit_charge_and_refund) + + +
use 0x1::account;
+use 0x1::aptos_coin;
+use 0x1::coin;
+use 0x1::error;
+use 0x1::event;
+use 0x1::system_addresses;
+
+ + + + + +## Resource `GlobalStorageDeposit` + + + +
struct GlobalStorageDeposit has store, key
+
+ + + +
+Fields + + +
+
+deposit: coin::AggregatableCoin<aptos_coin::AptosCoin> +
+
+ +
+
+slot_deposit_event: event::EventHandle<storage_deposit::SlotDepositEvent> +
+
+ +
+
+excess_bytes_penalty_event: event::EventHandle<storage_deposit::ExcessBytesPenaltyEvent> +
+
+ +
+
+slot_refund_event: event::EventHandle<storage_deposit::SlotRefundEvent> +
+
+ +
+
+ + +
+ + + +## Struct `SlotDepositEvent` + + + +
struct SlotDepositEvent has drop, store
+
+ + + +
+Fields + + +
+
+payer: address +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `ExcessBytesPenaltyEvent` + + + +
struct ExcessBytesPenaltyEvent has drop, store
+
+ + + +
+Fields + + +
+
+payer: address +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `SlotRefundEvent` + + + +
struct SlotRefundEvent has drop, store
+
+ + + +
+Fields + + +
+
+payee: address +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `DepositEntry` + + + +
struct DepositEntry has drop
+
+ + + +
+Fields + + +
+
+account: address +
+
+ +
+
+amount: u64 +
+
+ +
+
+ + +
+ + + +## Struct `ChargeSchedule` + + + +
struct ChargeSchedule has drop
+
+ + + +
+Fields + + +
+
+slot_charges: vector<storage_deposit::DepositEntry> +
+
+ +
+
+slot_refunds: vector<storage_deposit::DepositEntry> +
+
+ +
+
+excess_bytes_penalties: vector<storage_deposit::DepositEntry> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Maximum possible coin supply. + + +
const MAX_U128: u128 = 340282366920938463463374607431768211455;
+
+ + + + + + + +
const EGLOBAL_STORAGE_DEPOSIT: u64 = 0;
+
+ + + + + +## Function `initialize` + + + +
public entry fun initialize(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public entry fun initialize(aptos_framework: &signer) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    assert!(
+        !exists<GlobalStorageDeposit>(@aptos_framework),
+        error::already_exists(EGLOBAL_STORAGE_DEPOSIT)
+    );
+
+    let global_storage_deposit = GlobalStorageDeposit {
+        // FIXME(aldenhu): needs the limit to be u128 (u64 implied)
+        deposit: coin::initialize_aggregatable_coin<AptosCoin>(aptos_framework),
+        slot_deposit_event: new_event_handle<SlotDepositEvent>(aptos_framework),
+        excess_bytes_penalty_event: new_event_handle<ExcessBytesPenaltyEvent>(aptos_framework),
+        slot_refund_event: new_event_handle<SlotRefundEvent>(aptos_framework),
+    };
+
+    move_to<GlobalStorageDeposit>(aptos_framework, global_storage_deposit);
+}
+
+ + + +
+ + + +## Function `charge_and_refund` + + + +
public fun charge_and_refund(schedule: storage_deposit::ChargeSchedule)
+
+ + + +
+Implementation + + +
public fun charge_and_refund(schedule: ChargeSchedule) acquires GlobalStorageDeposit {
+    assert!(
+        exists<GlobalStorageDeposit>(@aptos_framework),
+        error::not_found(EGLOBAL_STORAGE_DEPOSIT)
+    );
+    let global_storage_deposit = borrow_global_mut<GlobalStorageDeposit>(@aptos_framework);
+
+    let i = 0;
+    let len = vector::length(&schedule.slot_charges);
+    while (i < len) {
+        let entry = vector::borrow(&schedule.slot_charges, i);
+        coin::collect_into_aggregatable_coin<AptosCoin>(entry.account, entry.amount, &mut global_storage_deposit.deposit);
+        // FIXME(aldenhu): central events kills concurrency, probably need to augment Account with these events
+        // TODO: emit event
+        i = i + 1;
+    };
+
+    let i = 0;
+    let len = vector::length(&schedule.slot_refunds);
+    while (i < len) {
+        let entry = vector::borrow(&schedule.slot_refunds, i);
+        let coin = coin::extract_from_aggregatable_coin(&mut global_storage_deposit.deposit, entry.amount);
+        coin::deposit(entry.account, coin);
+        // FIXME(aldenhu): central events kills concurrency, probably need to augment Account with these events
+        // TODO: emit event
+        i = i + 1;
+    };
+
+    let i = 0;
+    let len = vector::length(&schedule.excess_bytes_penalties);
+    while (i < len) {
+        let entry = vector::borrow(&schedule.excess_bytes_penalties, i);
+        coin::collect_into_aggregatable_coin<AptosCoin>(entry.account, entry.amount, &mut global_storage_deposit.deposit);
+        // FIXME(aldenhu): central events kills concurrency, probably need to augment Account with these events
+        // TODO: emit event
+        i = i + 1;
+    };
+}
+
+ + + +
+ + +[move-book]: https://move-language.github.io/move/introduction.html diff --git a/aptos-move/framework/aptos-framework/sources/coin.move b/aptos-move/framework/aptos-framework/sources/coin.move index da0dc43aa2bef..8186a4a5d8d17 100644 --- a/aptos-move/framework/aptos-framework/sources/coin.move +++ b/aptos-move/framework/aptos-framework/sources/coin.move @@ -16,6 +16,7 @@ module aptos_framework::coin { friend aptos_framework::aptos_coin; friend aptos_framework::genesis; + friend aptos_framework::storage_deposit; friend aptos_framework::transaction_fee; // @@ -175,6 +176,13 @@ module aptos_framework::coin { amount == 0 } + public(friend) fun extract_from_aggregatable_coin(coin: &mut AggregatableCoin, amount: u64): Coin { + aggregator::sub(&mut coin.value, (amount as u128)); + Coin { + value: amount + } + } + /// Drains the aggregatable coin, setting it to zero and returning a standard coin. public(friend) fun drain_aggregatable_coin(coin: &mut AggregatableCoin): Coin { spec { @@ -184,10 +192,7 @@ module aptos_framework::coin { let amount = aggregator::read(&coin.value); assert!(amount <= MAX_U64, error::out_of_range(EAGGREGATABLE_COIN_VALUE_TOO_LARGE)); - aggregator::sub(&mut coin.value, amount); - Coin { - value: (amount as u64), - } + extract_from_aggregatable_coin(coin, (amount as u64)) } /// Merges `coin` into aggregatable coin (`dst_coin`). 
diff --git a/aptos-move/framework/aptos-framework/sources/storage_deposit.move b/aptos-move/framework/aptos-framework/sources/storage_deposit.move new file mode 100644 index 0000000000000..994a33526cfea --- /dev/null +++ b/aptos-move/framework/aptos-framework/sources/storage_deposit.move @@ -0,0 +1,105 @@ +module aptos_framework::storage_deposit { + use aptos_framework::event::{EventHandle}; + use aptos_framework::system_addresses; + use std::error; + use aptos_framework::account::new_event_handle; + use std::vector; + use aptos_framework::coin; + use aptos_framework::aptos_coin::AptosCoin; + use aptos_framework::coin::AggregatableCoin; + + /// Maximum possible coin supply. + const MAX_U128: u128 = 340282366920938463463374607431768211455; + + const EGLOBAL_STORAGE_DEPOSIT: u64 = 0; + + struct GlobalStorageDeposit has key, store { + deposit: AggregatableCoin, + slot_deposit_event: EventHandle, + excess_bytes_penalty_event: EventHandle, + slot_refund_event: EventHandle, + } + + struct SlotDepositEvent has drop, store { + payer: address, + amount: u64, + } + + struct ExcessBytesPenaltyEvent has drop, store { + payer: address, + amount: u64, + } + + struct SlotRefundEvent has drop, store { + payee: address, + amount: u64, + } + + struct DepositEntry has drop { + account: address, + amount: u64, + } + + struct ChargeSchedule has drop { + slot_charges: vector, + slot_refunds: vector, + excess_bytes_penalties: vector, + } + + public entry fun initialize(aptos_framework: &signer) { + system_addresses::assert_aptos_framework(aptos_framework); + assert!( + !exists(@aptos_framework), + error::already_exists(EGLOBAL_STORAGE_DEPOSIT) + ); + + let global_storage_deposit = GlobalStorageDeposit { + // FIXME(aldenhu): needs the limit to be u128 (u64 implied) + deposit: coin::initialize_aggregatable_coin(aptos_framework), + slot_deposit_event: new_event_handle(aptos_framework), + excess_bytes_penalty_event: new_event_handle(aptos_framework), + slot_refund_event: 
new_event_handle(aptos_framework), + }; + + move_to(aptos_framework, global_storage_deposit); + } + + public fun charge_and_refund(schedule: ChargeSchedule) acquires GlobalStorageDeposit { + assert!( + exists(@aptos_framework), + error::not_found(EGLOBAL_STORAGE_DEPOSIT) + ); + let global_storage_deposit = borrow_global_mut(@aptos_framework); + + let i = 0; + let len = vector::length(&schedule.slot_charges); + while (i <= len) { + let entry = vector::borrow(&schedule.slot_charges, i); + coin::collect_into_aggregatable_coin(entry.account, entry.amount, &mut global_storage_deposit.deposit); + // FIXME(aldenhu): central events kills concurrency, probably need to augment Account with these events + // TODO: emit event + i = i + 1; + }; + + let i = 0; + let len = vector::length(&schedule.slot_refunds); + while (i <= len) { + let entry = vector::borrow(&schedule.slot_charges, i); + let coin = coin::extract_from_aggregatable_coin(&mut global_storage_deposit.deposit, entry.amount); + coin::deposit(entry.account, coin); + // FIXME(aldenhu): central events kills concurrency, probably need to augment Account with these events + // TODO: emit event + i = i + 1; + }; + + let i = 0; + let len = vector::length(&schedule.excess_bytes_penalties); + while (i <= len) { + let entry = vector::borrow(&schedule.slot_charges, i); + coin::collect_into_aggregatable_coin(entry.account, entry.amount, &mut global_storage_deposit.deposit); + // FIXME(aldenhu): central events kills concurrency, probably need to augment Account with these events + // TODO: emit event + i = i + 1; + }; + } +} diff --git a/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs b/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs index ebda36cf92ef2..099b0cd217402 100644 --- a/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs +++ b/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs @@ -489,6 +489,8 @@ pub enum EntryFunctionCall { 
new_voter: AccountAddress, }, + StorageDepositInitialize {}, + /// Updates the major version to a larger version. /// This can be called by on chain governance. VersionSetVersion { @@ -858,6 +860,7 @@ impl EntryFunctionCall { operator, new_voter, } => staking_proxy_set_voter(operator, new_voter), + StorageDepositInitialize {} => storage_deposit_initialize(), VersionSetVersion { major } => version_set_version(major), VestingAdminWithdraw { contract_address } => vesting_admin_withdraw(contract_address), VestingDistribute { contract_address } => vesting_distribute(contract_address), @@ -2272,6 +2275,21 @@ pub fn staking_proxy_set_voter( )) } +pub fn storage_deposit_initialize() -> TransactionPayload { + TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::new([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, + ]), + ident_str!("storage_deposit").to_owned(), + ), + ident_str!("initialize").to_owned(), + vec![], + vec![], + )) +} + /// Updates the major version to a larger version. /// This can be called by on chain governance. 
pub fn version_set_version(major: u64) -> TransactionPayload { @@ -3360,6 +3378,14 @@ mod decoder { } } + pub fn storage_deposit_initialize(payload: &TransactionPayload) -> Option { + if let TransactionPayload::EntryFunction(_script) = payload { + Some(EntryFunctionCall::StorageDepositInitialize {}) + } else { + None + } + } + pub fn version_set_version(payload: &TransactionPayload) -> Option { if let TransactionPayload::EntryFunction(script) = payload { Some(EntryFunctionCall::VersionSetVersion { @@ -3808,6 +3834,10 @@ static SCRIPT_FUNCTION_DECODER_MAP: once_cell::sync::Lazy Date: Thu, 9 Feb 2023 17:43:46 +0000 Subject: [PATCH 10/14] storage deposit related (non-)gas parameters --- aptos-move/aptos-gas/src/algebra.rs | 21 +++++- aptos-move/aptos-gas/src/gas_meter.rs | 1 + aptos-move/aptos-gas/src/lib.rs | 2 + aptos-move/aptos-gas/src/transaction/mod.rs | 41 ++++++++++- .../aptos-gas/src/transaction/storage.rs | 73 +++++++++++++------ 5 files changed, 113 insertions(+), 25 deletions(-) diff --git a/aptos-move/aptos-gas/src/algebra.rs b/aptos-move/aptos-gas/src/algebra.rs index 1d9eebd0e9901..681608d79f413 100644 --- a/aptos-move/aptos-gas/src/algebra.rs +++ b/aptos-move/aptos-gas/src/algebra.rs @@ -4,7 +4,7 @@ pub use aptos_gas_algebra_ext::{ AbstractValueSize, AbstractValueSizePerArg, AbstractValueUnit, InternalGasPerAbstractValueUnit, }; -use move_core_types::gas_algebra::{GasQuantity, InternalGasUnit, UnitDiv}; +use move_core_types::gas_algebra::{Byte, GasQuantity, InternalGasUnit, UnitDiv}; /// Unit of (external) gas. pub enum GasUnit {} @@ -19,3 +19,22 @@ pub type GasScalingFactor = GasQuantity>; pub type Fee = GasQuantity; pub type FeePerGasUnit = GasQuantity>; + +/// Unit of storage slot +pub enum Slot {} + +pub type NumSlots = GasQuantity; + +pub type FeePerSlot = GasQuantity>; + +pub type FeePerByte = GasQuantity>; + +/// Unit of time. +pub enum Microsecond {} + +pub type NumMicroseconds = GasQuantity; + +/// Unit of proportion. 
+pub enum BasePoint {} + +pub type NumBasePoints = GasQuantity; diff --git a/aptos-move/aptos-gas/src/gas_meter.rs b/aptos-move/aptos-gas/src/gas_meter.rs index 6f4d541e1dc20..92fc6c7713fcb 100644 --- a/aptos-move/aptos-gas/src/gas_meter.rs +++ b/aptos-move/aptos-gas/src/gas_meter.rs @@ -29,6 +29,7 @@ use std::collections::BTreeMap; // Change log: // - V7 // - Native support for exists +// - storage deposit // - V6 // - Added a new native function - blake2b_256. // - V5 diff --git a/aptos-move/aptos-gas/src/lib.rs b/aptos-move/aptos-gas/src/lib.rs index 39828f4096dc8..04c1b69e1a95c 100644 --- a/aptos-move/aptos-gas/src/lib.rs +++ b/aptos-move/aptos-gas/src/lib.rs @@ -18,6 +18,8 @@ //! - The on-chain gas schedule needs to be extensible and unordered so we can upgrate it easily //! in the future. +extern crate core; + #[macro_use] mod natives; diff --git a/aptos-move/aptos-gas/src/transaction/mod.rs b/aptos-move/aptos-gas/src/transaction/mod.rs index ed84816b19b16..7723b631f8add 100644 --- a/aptos-move/aptos-gas/src/transaction/mod.rs +++ b/aptos-move/aptos-gas/src/transaction/mod.rs @@ -4,7 +4,10 @@ //! This module defines all the gas parameters for transactions, along with their initial values //! in the genesis and a mapping between the Rust representation and the on-chain gas schedule. -use crate::algebra::{AbstractValueSize, FeePerGasUnit, Gas, GasScalingFactor, GasUnit}; +use crate::algebra::{ + AbstractValueSize, FeePerByte, FeePerGasUnit, FeePerSlot, Gas, GasScalingFactor, GasUnit, + NumBasePoints, NumMicroseconds, +}; use move_core_types::gas_algebra::{ InternalGas, InternalGasPerArg, InternalGasPerByte, InternalGasUnit, NumBytes, ToUnitFractionalWithParams, ToUnitWithParams, @@ -125,6 +128,42 @@ crate::params::define_gas_parameters!( { 5.. => "max_bytes_all_events_per_transaction"}, 10 << 20, // all events from a single transaction are 10MB max ], + [ + per_storage_slot_deposit: FeePerSlot, + { 7.. 
=> "per_storage_slot_deposit"},
+        50_000, // 50k Octas each slot allocation, that's 500k APT for 1 billion slots
+    ],
+    [
+        per_storage_excess_byte_penalty: FeePerByte,
+        { 7.. => "per_storage_excess_byte_penalty"},
+        // If a storage write is larger than `free_write_bytes_quota`, each excess byte is
+        // 50 Octas. It's charged for both creation and modification.
+        // As a result, to allocate 1TB of state space by creating large slots, the cost is
+        // at least 500k APT.
+        50,
+    ],
+    [
+        max_storage_slot_refund_ratio: NumBasePoints,
+        { 7.. => "max_storage_slot_refund_ratio"},
+        90_00, // if deleted quickly, refund 90% of the deposit
+    ],
+    [
+        min_storage_slot_refund_ratio: NumBasePoints,
+        // NOTE: on-chain keys must be unique per parameter; this one previously
+        // duplicated "max_storage_slot_refund_ratio", colliding with the entry above.
+        { 7.. => "min_storage_slot_refund_ratio"},
+        50_00, // deleting a fairly old item still yields 50% refund
+    ],
+    [
+        storage_slot_refund_degrade_start: NumMicroseconds,
+        { 7.. => "storage_slot_refund_degrade_start"},
+        86_400_000_000, // maximum refund if slot is freed within 24 hours
+    ],
+    [
+        storage_slot_refund_degrade_period: NumMicroseconds,
+        { 7.. => "storage_slot_refund_degrade_period"},
+        // Minimal refund if a slot is freed later than 31 days. That is "1 day" from the
+        // previous parameter plus "30 days", the value of this parameter.
+ 2_592_000_000_000, + ], ] ); diff --git a/aptos-move/aptos-gas/src/transaction/storage.rs b/aptos-move/aptos-gas/src/transaction/storage.rs index c8b1bd29660ac..ef4150e0c8b87 100644 --- a/aptos-move/aptos-gas/src/transaction/storage.rs +++ b/aptos-move/aptos-gas/src/transaction/storage.rs @@ -1,7 +1,10 @@ // Copyright (c) Aptos // SPDX-License-Identifier: Apache-2.0 -use crate::{AptosGasParameters, LATEST_GAS_FEATURE_VERSION}; +use crate::{ + AptosGasParameters, FeePerByte, FeePerSlot, NumBasePoints, NumMicroseconds, + LATEST_GAS_FEATURE_VERSION, +}; use aptos_types::{ on_chain_config::StorageGasSchedule, state_store::state_key::StateKey, @@ -98,12 +101,22 @@ impl StoragePricingV1 { pub struct StoragePricingV2 { pub feature_version: u64, pub free_write_bytes_quota: NumBytes, + + // These are for gas pub per_item_read: InternalGasPerArg, pub per_item_create: InternalGasPerArg, pub per_item_write: InternalGasPerArg, pub per_byte_read: InternalGasPerByte, pub per_byte_create: InternalGasPerByte, pub per_byte_write: InternalGasPerByte, + + // These are for deposit + pub per_slot_deposit: FeePerSlot, + pub per_excess_byte_penalty: FeePerByte, + pub max_slot_refund_ratio: NumBasePoints, + pub min_slot_refund_ratio: NumBasePoints, + pub refund_degrade_start: NumMicroseconds, + pub refund_degrade_period: NumMicroseconds, } impl StoragePricingV2 { @@ -141,6 +154,12 @@ impl StoragePricingV2 { per_byte_read: storage_gas_schedule.per_byte_read.into(), per_byte_create: storage_gas_schedule.per_byte_create.into(), per_byte_write: storage_gas_schedule.per_byte_write.into(), + per_slot_deposit: gas_params.txn.per_storage_slot_deposit, + per_excess_byte_penalty: gas_params.txn.per_storage_excess_byte_penalty, + max_slot_refund_ratio: gas_params.txn.max_storage_slot_refund_ratio, + min_slot_refund_ratio: gas_params.txn.min_storage_slot_refund_ratio, + refund_degrade_start: gas_params.txn.storage_slot_refund_degrade_start, + refund_degrade_period: 
gas_params.txn.storage_slot_refund_degrade_period, } } @@ -234,15 +253,25 @@ impl StoragePricing { #[derive(Clone)] pub struct ChangeSetConfigs { gas_feature_version: u64, - max_bytes_per_write_op: u64, - max_bytes_all_write_ops_per_transaction: u64, - max_bytes_per_event: u64, - max_bytes_all_events_per_transaction: u64, + max_bytes_per_write_op: NumBytes, + max_bytes_all_write_ops_per_transaction: NumBytes, + max_bytes_per_event: NumBytes, + max_bytes_all_events_per_transaction: NumBytes, +} + +fn max_bytes() -> NumBytes { + NumBytes::new(u64::MAX) } impl ChangeSetConfigs { pub fn unlimited_at_gas_feature_version(gas_feature_version: u64) -> Self { - Self::new_impl(gas_feature_version, u64::MAX, u64::MAX, u64::MAX, u64::MAX) + Self::new_impl( + gas_feature_version, + max_bytes(), + max_bytes(), + max_bytes(), + max_bytes(), + ) } pub fn new(feature_version: u64, gas_params: &AptosGasParameters) -> Self { @@ -257,10 +286,10 @@ impl ChangeSetConfigs { fn new_impl( gas_feature_version: u64, - max_bytes_per_write_op: u64, - max_bytes_all_write_ops_per_transaction: u64, - max_bytes_per_event: u64, - max_bytes_all_events_per_transaction: u64, + max_bytes_per_write_op: NumBytes, + max_bytes_all_write_ops_per_transaction: NumBytes, + max_bytes_per_event: NumBytes, + max_bytes_all_events_per_transaction: NumBytes, ) -> Self { Self { gas_feature_version, @@ -281,21 +310,19 @@ impl ChangeSetConfigs { } fn for_feature_version_3() -> Self { - const MB: u64 = 1 << 20; + let mb = NumBytes::new(1 << 20); + let gb = NumBytes::new(1 << 30); - Self::new_impl(3, MB, u64::MAX, MB, MB << 10) + Self::new_impl(3, mb, max_bytes(), mb, gb) } fn from_gas_params(gas_feature_version: u64, gas_params: &AptosGasParameters) -> Self { Self::new_impl( gas_feature_version, - gas_params.txn.max_bytes_per_write_op.into(), - gas_params - .txn - .max_bytes_all_write_ops_per_transaction - .into(), - gas_params.txn.max_bytes_per_event.into(), - gas_params.txn.max_bytes_all_events_per_transaction.into(), 
+ gas_params.txn.max_bytes_per_write_op, + gas_params.txn.max_bytes_all_write_ops_per_transaction, + gas_params.txn.max_bytes_per_event, + gas_params.txn.max_bytes_all_events_per_transaction, ) } } @@ -308,12 +335,12 @@ impl CheckChangeSet for ChangeSetConfigs { for (key, op) in change_set.write_set() { if let Some(bytes) = op.bytes() { let write_op_size = (bytes.len() + key.size()) as u64; - if write_op_size > self.max_bytes_per_write_op { + if write_op_size > self.max_bytes_per_write_op.into() { return Err(VMStatus::Error(ERR)); } write_set_size += write_op_size; } - if write_set_size > self.max_bytes_all_write_ops_per_transaction { + if write_set_size > self.max_bytes_all_write_ops_per_transaction.into() { return Err(VMStatus::Error(ERR)); } } @@ -321,11 +348,11 @@ impl CheckChangeSet for ChangeSetConfigs { let mut total_event_size = 0; for event in change_set.events() { let size = event.event_data().len() as u64; - if size > self.max_bytes_per_event { + if size > self.max_bytes_per_event.into() { return Err(VMStatus::Error(ERR)); } total_event_size += size; - if total_event_size > self.max_bytes_all_events_per_transaction { + if total_event_size > self.max_bytes_all_events_per_transaction.into() { return Err(VMStatus::Error(ERR)); } } From c4bc65f8ba6d0c80733e0bbb105971011bc6c9f3 Mon Sep 17 00:00:00 2001 From: aldenhu Date: Mon, 13 Feb 2023 21:25:15 +0000 Subject: [PATCH 11/14] trivial refactor: remove duplication --- aptos-move/aptos-vm/src/aptos_vm.rs | 37 +++++++++-------------------- 1 file changed, 11 insertions(+), 26 deletions(-) diff --git a/aptos-move/aptos-vm/src/aptos_vm.rs b/aptos-move/aptos-vm/src/aptos_vm.rs index 1a3adcd5cf666..fa3c7a27df953 100644 --- a/aptos-move/aptos-vm/src/aptos_vm.rs +++ b/aptos-move/aptos-vm/src/aptos_vm.rs @@ -253,14 +253,21 @@ impl AptosVM { } } - fn success_transaction_cleanup( + fn success_transaction_cleanup( &self, storage: &S, - user_txn_change_set_ext: ChangeSetExt, + user_txn_session: SessionExt, gas_meter: &mut 
AptosGasMeter, txn_data: &TransactionMetadata, log_context: &AdapterLogSchema, ) -> Result<(VMStatus, TransactionOutputExt), VMStatus> { + let user_txn_change_set_ext = user_txn_session + .finish(&mut (), gas_meter.change_set_configs()) + .map_err(|e| e.into_vm_status())?; + // Charge gas for write set + gas_meter.charge_write_set_gas(user_txn_change_set_ext.write_set().iter())?; + // TODO(Gas): Charge for aggregator writes + let storage_with_changes = DeltaStateView::new(storage, user_txn_change_set_ext.write_set()); // TODO: at this point we know that delta application failed @@ -395,21 +402,7 @@ impl AptosVM { new_published_modules_loaded, )?; - let change_set_ext = session - .finish(&mut (), gas_meter.change_set_configs()) - .map_err(|e| e.into_vm_status())?; - - // Charge gas for write set - gas_meter.charge_write_set_gas(change_set_ext.write_set().iter())?; - // TODO(Gas): Charge for aggregator writes - - self.success_transaction_cleanup( - storage, - change_set_ext, - gas_meter, - txn_data, - log_context, - ) + self.success_transaction_cleanup(storage, session, gas_meter, txn_data, log_context) } } @@ -564,15 +557,7 @@ impl AptosVM { new_published_modules_loaded, )?; - let change_set_ext = session - .finish(&mut (), gas_meter.change_set_configs()) - .map_err(|e| e.into_vm_status())?; - - // Charge gas for write set - gas_meter.charge_write_set_gas(change_set_ext.write_set().iter())?; - // TODO(Gas): Charge for aggregator writes - - self.success_transaction_cleanup(storage, change_set_ext, gas_meter, txn_data, log_context) + self.success_transaction_cleanup(storage, session, gas_meter, txn_data, log_context) } /// Resolve a pending code publish request registered via the NativeCodeContext. 
From 798ea0b571ea2c0eb99b6ba086582f5a4cf1b81b Mon Sep 17 00:00:00 2001 From: aldenhu Date: Mon, 13 Feb 2023 23:05:22 +0000 Subject: [PATCH 12/14] storage deposit: connecting the dots --- Cargo.lock | 2 + .../aptos-aggregator/src/transaction.rs | 4 + aptos-move/aptos-gas/Cargo.toml | 2 + aptos-move/aptos-gas/src/gas_meter.rs | 39 +++- aptos-move/aptos-gas/src/lib.rs | 4 +- aptos-move/aptos-gas/src/transaction/mod.rs | 4 +- .../aptos-gas/src/transaction/storage.rs | 181 +++++++++++++++++- aptos-move/aptos-vm/src/aptos_vm.rs | 44 ++++- aptos-move/aptos-vm/src/aptos_vm_impl.rs | 40 +++- types/src/state_store/state_value.rs | 31 ++- types/src/transaction/change_set.rs | 9 +- types/src/write_set.rs | 8 + 12 files changed, 344 insertions(+), 24 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e8304111c44a4..b23dcce507967 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1300,12 +1300,14 @@ dependencies = [ "aptos-types", "bcs 0.1.4 (git+https://github.com/aptos-labs/bcs.git?rev=d31fab9d81748e2594be5cd5cdf845786a30562d)", "clap 3.2.23", + "itertools", "move-binary-format", "move-core-types", "move-model", "move-stdlib", "move-table-extension", "move-vm-types", + "serde 1.0.149", "tempfile", ] diff --git a/aptos-move/aptos-aggregator/src/transaction.rs b/aptos-move/aptos-aggregator/src/transaction.rs index 3d0c446e08f74..7f263abc87842 100644 --- a/aptos-move/aptos-aggregator/src/transaction.rs +++ b/aptos-move/aptos-aggregator/src/transaction.rs @@ -55,6 +55,10 @@ impl ChangeSetExt { self.change_set.write_set() } + pub fn write_set_mut(&mut self) -> &mut WriteSetMut { + self.change_set.write_set_mut() + } + pub fn into_inner(self) -> (DeltaChangeSet, ChangeSet) { (self.delta_change_set, self.change_set) } diff --git a/aptos-move/aptos-gas/Cargo.toml b/aptos-move/aptos-gas/Cargo.toml index c25407773d21e..673942d8af3d8 100644 --- a/aptos-move/aptos-gas/Cargo.toml +++ b/aptos-move/aptos-gas/Cargo.toml @@ -21,12 +21,14 @@ aptos-package-builder = { workspace = true } 
aptos-types = { workspace = true } bcs = { workspace = true } clap = { workspace = true } +itertools = { workspace = true } move-binary-format = { workspace = true } move-core-types = { workspace = true } move-model = { workspace = true } move-stdlib = { workspace = true } move-table-extension = { workspace = true } move-vm-types = { workspace = true } +serde = { workspace = true } [dev-dependencies] tempfile = { workspace = true } diff --git a/aptos-move/aptos-gas/src/gas_meter.rs b/aptos-move/aptos-gas/src/gas_meter.rs index 92fc6c7713fcb..f244644659dd9 100644 --- a/aptos-move/aptos-gas/src/gas_meter.rs +++ b/aptos-move/aptos-gas/src/gas_meter.rs @@ -8,14 +8,18 @@ use crate::{ algebra::{AbstractValueSize, Gas}, instr::InstructionGasParameters, misc::MiscGasParameters, - transaction::{ChangeSetConfigs, TransactionGasParameters}, + transaction::{ChangeSetConfigs, StorageDepositChargeSchedule, TransactionGasParameters}, StorageGasParameters, }; use aptos_types::{ - account_config::CORE_CODE_ADDRESS, state_store::state_key::StateKey, write_set::WriteOp, + account_config::CORE_CODE_ADDRESS, + on_chain_config::CurrentTimeMicroseconds, + state_store::state_key::StateKey, + write_set::{WriteOp, WriteSet, WriteSetMut}, }; use move_binary_format::errors::{Location, PartialVMError, PartialVMResult, VMResult}; use move_core_types::{ + account_address::AccountAddress, gas_algebra::{InternalGas, NumArgs, NumBytes}, language_storage::ModuleId, vm_status::StatusCode, @@ -781,4 +785,35 @@ impl AptosGasMeter { let cost = self.storage_gas_params.pricing.calculate_write_set_gas(ops); self.charge(cost).map_err(|e| e.finish(Location::Undefined)) } + + pub fn apply_storage_deposit_charges( + &mut self, + write_set: &mut WriteSetMut, + txn_sender: AccountAddress, + current_timestamp: Option<&CurrentTimeMicroseconds>, + ) -> VMResult { + self.storage_gas_params + .pricing + .apply_storage_deposit_charges(write_set, txn_sender, current_timestamp) + } + + pub fn 
should_affect_storage_deposit(&self, write_set: &WriteSet) -> bool {
+        use WriteOp::*;
+
+        for (_key, op) in write_set {
+            match op {
+                // Plain creations/deletions carry no metadata, hence no deposit movement.
+                Creation(_) | Deletion => {},
+                // A creation with metadata incurs a slot deposit (see
+                // `apply_storage_deposit_charges`), and a deletion with metadata
+                // triggers a refund -- both affect the storage deposit. The original
+                // grouped `CreationWithMetadata` with the no-op arms, so epilogue
+                // writes creating metadata-bearing slots went undetected.
+                CreationWithMetadata { .. } | DeletionWithMetadata { .. } => return true,
+                Modification(data) | ModificationWithMetadata { data, .. } => {
+                    if NumBytes::new(data.len() as u64)
+                        > self.storage_gas_params.pricing.free_write_bytes_quota()
+                    {
+                        return true;
+                    }
+                },
+            }
+        }
+
+        false
+    }
 }
diff --git a/aptos-move/aptos-gas/src/lib.rs b/aptos-move/aptos-gas/src/lib.rs
index 04c1b69e1a95c..fe1e682541d0a 100644
--- a/aptos-move/aptos-gas/src/lib.rs
+++ b/aptos-move/aptos-gas/src/lib.rs
@@ -47,4 +47,6 @@ pub use move_core_types::gas_algebra::{
     Arg, Byte, GasQuantity, InternalGas, InternalGasPerArg, InternalGasPerByte, InternalGasUnit,
     NumArgs, NumBytes, UnitDiv,
 };
-pub use transaction::{ChangeSetConfigs, StorageGasParameters, TransactionGasParameters};
+pub use transaction::{
+    ChangeSetConfigs, StorageDepositChargeSchedule, StorageGasParameters, TransactionGasParameters,
+};
diff --git a/aptos-move/aptos-gas/src/transaction/mod.rs b/aptos-move/aptos-gas/src/transaction/mod.rs
index 7723b631f8add..51b7cb84b6b33 100644
--- a/aptos-move/aptos-gas/src/transaction/mod.rs
+++ b/aptos-move/aptos-gas/src/transaction/mod.rs
@@ -15,7 +15,9 @@ use move_core_types::gas_algebra::{
 
 mod storage;
 
-pub use storage::{ChangeSetConfigs, StorageGasParameters};
+pub use storage::{
+    ChangeSetConfigs, StorageDepositChargeSchedule, StorageDepositEntry, StorageGasParameters,
+};
 
 crate::params::define_gas_parameters!(
     TransactionGasParameters,
diff --git a/aptos-move/aptos-gas/src/transaction/storage.rs b/aptos-move/aptos-gas/src/transaction/storage.rs
index ef4150e0c8b87..d90de6a87fadf 100644
--- a/aptos-move/aptos-gas/src/transaction/storage.rs
+++ b/aptos-move/aptos-gas/src/transaction/storage.rs
@@ -6,16 +6,20 @@ use crate::{
     LATEST_GAS_FEATURE_VERSION,
 };
 use aptos_types::{
-    on_chain_config::StorageGasSchedule,
-
state_store::state_key::StateKey, + on_chain_config::{CurrentTimeMicroseconds, StorageGasSchedule}, + state_store::{state_key::StateKey, state_value::StateValueMetadata}, transaction::{ChangeSet, CheckChangeSet}, - write_set::WriteOp, + write_set::{WriteOp, WriteSetMut}, }; +use itertools::Itertools; +use move_binary_format::errors::VMResult; use move_core_types::{ + account_address::AccountAddress, gas_algebra::{InternalGas, InternalGasPerArg, InternalGasPerByte, NumArgs, NumBytes}, vm_status::{StatusCode, VMStatus}, }; -use std::fmt::Debug; +use serde::{Deserialize, Serialize}; +use std::{cmp::Ordering, collections::BTreeMap, fmt::Debug}; #[derive(Clone, Debug)] pub struct StoragePricingV1 { @@ -97,6 +101,35 @@ impl StoragePricingV1 { } } +#[derive(Debug, Eq, PartialEq, Serialize, Deserialize)] +pub struct StorageDepositEntry { + pub account: AccountAddress, + pub amount: u64, +} + +#[derive(Debug, Eq, PartialEq, Serialize, Deserialize)] +pub struct StorageDepositChargeSchedule { + pub slot_charges: Vec, + pub slot_refunds: Vec, + pub excess_bytes_penalties: Vec, +} + +impl StorageDepositChargeSchedule { + pub fn empty() -> Self { + Self { + slot_charges: Vec::new(), + slot_refunds: Vec::new(), + excess_bytes_penalties: Vec::new(), + } + } + + pub fn is_empty(&self) -> bool { + self.slot_charges.is_empty() + && self.slot_refunds.is_empty() + && self.excess_bytes_penalties.is_empty() + } +} + #[derive(Clone, Debug)] pub struct StoragePricingV2 { pub feature_version: u64, @@ -219,6 +252,125 @@ impl StoragePricingV2 { + num_bytes_create * self.per_byte_create + num_bytes_write * self.per_byte_write } + + fn storage_deposit_enabled(&self) -> bool { + self.feature_version >= 7 + } + + fn calculate_refund( + &self, + metadata: &StateValueMetadata, + current_timestamp: Option<&CurrentTimeMicroseconds>, + ) -> u64 { + let now = match current_timestamp { + None => { + // current timestamp unknown, doing full refund. 
+                return metadata.deposit();
+            },
+            Some(t) => t.microseconds,
+        };
+        let created = metadata.creation_time_usecs();
+        // Creation in the same block yields `now == created`; use `>=` so a slot
+        // freed immediately does not abort the assert.
+        assert!(now >= created);
+
+        let elapsed = now - created;
+        let refund_degrade_start: u64 = self.refund_degrade_start.into();
+        let refund_degrade_period: u64 = self.refund_degrade_period.into();
+        // Time elapsed past the full-refund window, clamped to [0, period].
+        // (The original used raw `elapsed`, never offset by `refund_degrade_start`.)
+        let since_degrade_start = if elapsed <= refund_degrade_start {
+            0
+        } else if elapsed - refund_degrade_start >= refund_degrade_period {
+            refund_degrade_period
+        } else {
+            elapsed - refund_degrade_start
+        };
+
+        const HUNDRED_PCT: u64 = 10000;
+        assert!(u64::MAX / HUNDRED_PCT > refund_degrade_period);
+
+        // Linearly interpolate the refund ratio from `max` (right after the degrade
+        // start) down to `min` (after the degrade period). The `min` base term was
+        // missing, so the best-case refund used to evaluate to `max - min` instead
+        // of `max`, contradicting the parameter comments ("refund 90%").
+        let min_refund_ratio: u64 = self.min_slot_refund_ratio.into();
+        let refund_ratio_range: u64 =
+            u64::from(self.max_slot_refund_ratio) - min_refund_ratio;
+        let refund_ratio: u64 = if refund_degrade_period == 0 {
+            // Degenerate configuration: no degrade window -- avoid dividing by zero.
+            min_refund_ratio
+        } else {
+            min_refund_ratio
+                + refund_ratio_range * (refund_degrade_period - since_degrade_start)
+                    / refund_degrade_period
+        };
+
+        assert!(u64::MAX / HUNDRED_PCT > metadata.deposit());
+        metadata.deposit() * refund_ratio / HUNDRED_PCT
+    }
+
+    pub fn apply_storage_deposit_charges(
+        &self,
+        write_set: &mut WriteSetMut,
+        txn_sender: AccountAddress,
+        current_timestamp: Option<&CurrentTimeMicroseconds>,
+    ) -> VMResult<StorageDepositChargeSchedule> {
+        use WriteOp::*;
+
+        if !self.storage_deposit_enabled() {
+            return Ok(StorageDepositChargeSchedule::empty());
+        }
+
+        let per_slot_deposit: u64 = self.per_slot_deposit.into();
+
+        let mut slot_deposit = BTreeMap::<AccountAddress, u64>::new();
+        let mut slot_refund = BTreeMap::<AccountAddress, u64>::new();
+        let mut excess_bytes: u64 = 0;
+
+        for (state_key, write_op) in write_set.as_inner_mut().iter_mut() {
+            excess_bytes += write_op
+                .bytes()
+                .map(|bytes| self.write_op_size(state_key, bytes).into())
+                .unwrap_or(0);
+
+            match write_op {
+                Deletion | Creation(_) | Modification(_) | ModificationWithMetadata { .. } => {},
+                CreationWithMetadata { metadata, .. } => {
+                    metadata.set_deposit(self.per_slot_deposit.into());
+                    *slot_deposit.entry(metadata.payer()).or_default() += per_slot_deposit;
+                },
+                DeletionWithMetadata { metadata, ..
} => {
+                    *slot_refund.entry(metadata.payer()).or_default() +=
+                        self.calculate_refund(metadata, current_timestamp);
+                },
+            }
+        }
+
+        let mut slot_charges = Vec::new();
+        let mut slot_refunds = Vec::new();
+
+        // Net each account's deposit against its refund so only the difference
+        // moves through the Move-side pool.
+        let deposit_iter = slot_deposit.into_iter();
+        let refund_iter = slot_refund.into_iter();
+        use itertools::EitherOrBoth::*;
+        deposit_iter
+            .merge_join_by(refund_iter, |(l, _), (r, _)| l.cmp(r))
+            .for_each(|res| match res {
+                Both((account, deposit), (_, refund)) => match deposit.cmp(&refund) {
+                    Ordering::Greater => slot_charges.push(StorageDepositEntry {
+                        account,
+                        amount: deposit - refund,
+                    }),
+                    Ordering::Less => slot_refunds.push(StorageDepositEntry {
+                        account,
+                        amount: refund - deposit,
+                    }),
+                    Ordering::Equal => {},
+                },
+                Left((account, amount)) => {
+                    slot_charges.push(StorageDepositEntry { account, amount })
+                },
+                Right((account, amount)) => {
+                    slot_refunds.push(StorageDepositEntry { account, amount })
+                },
+            });
+        let per_excess_byte_penalty: u64 = self.per_excess_byte_penalty.into();
+        // `checked_mul` guards overflow without the divide-by-zero the previous
+        // `assert!(u64::MAX / per_excess_byte_penalty > excess_bytes)` hit whenever
+        // the penalty parameter is configured as 0.
+        let penalty = excess_bytes
+            .checked_mul(per_excess_byte_penalty)
+            .expect("excess byte penalty overflows u64");
+        let excess_bytes_penalties = vec![StorageDepositEntry {
+            account: txn_sender,
+            amount: penalty,
+        }];
+
+        Ok(StorageDepositChargeSchedule {
+            slot_charges,
+            slot_refunds,
+            excess_bytes_penalties,
+        })
+    }
 }
 
 #[derive(Clone, Debug)]
@@ -248,6 +400,27 @@ impl StoragePricing {
             V2(v2) => v2.calculate_write_set_gas(&mut ops.into_iter()),
         }
     }
+
+    pub fn apply_storage_deposit_charges(
+        &self,
+        write_set: &mut WriteSetMut,
+        txn_sender: AccountAddress,
+        current_timestamp: Option<&CurrentTimeMicroseconds>,
+    ) -> VMResult<StorageDepositChargeSchedule> {
+        use StoragePricing::*;
+
+        match self {
+            V1(_v1) => Ok(StorageDepositChargeSchedule::empty()),
+            V2(v2) => v2.apply_storage_deposit_charges(write_set, txn_sender, current_timestamp),
+        }
+    }
+
+    pub fn free_write_bytes_quota(&self) -> NumBytes {
+        match self {
+            StoragePricing::V1(_) => max_bytes(),
+            StoragePricing::V2(v2) =>
v2.free_write_bytes_quota, + } + } } #[derive(Clone)] diff --git a/aptos-move/aptos-vm/src/aptos_vm.rs b/aptos-move/aptos-vm/src/aptos_vm.rs index fa3c7a27df953..fa07bddff98ad 100644 --- a/aptos-move/aptos-vm/src/aptos_vm.rs +++ b/aptos-move/aptos-vm/src/aptos_vm.rs @@ -5,7 +5,9 @@ use crate::{ adapter_common::{ discard_error_output, discard_error_vm_status, PreprocessedTransaction, VMAdapter, }, - aptos_vm_impl::{get_transaction_output, AptosVMImpl, AptosVMInternals}, + aptos_vm_impl::{ + get_transaction_output_no_storage_deposit_charge, AptosVMImpl, AptosVMInternals, + }, block_executor::BlockAptosVM, counters::*, data_cache::{AsMoveResolver, IntoMoveResolver}, @@ -235,7 +237,7 @@ impl AptosVM { ) { return discard_error_vm_status(e); } - let txn_output = get_transaction_output( + let txn_output = get_transaction_output_no_storage_deposit_charge( &mut (), session, gas_meter.balance(), @@ -244,6 +246,12 @@ impl AptosVM { gas_meter.change_set_configs(), ) .unwrap_or_else(|e| discard_error_vm_status(e).1); + if gas_meter.should_affect_storage_deposit(txn_output.txn_output().write_set()) { + error!( + *log_context, + "Failure epilogue should not yield storage deposit changes, but ignored.", + ) + } (error_code, txn_output) }, TransactionStatus::Discard(status) => { @@ -261,12 +269,15 @@ impl AptosVM { txn_data: &TransactionMetadata, log_context: &AdapterLogSchema, ) -> Result<(VMStatus, TransactionOutputExt), VMStatus> { - let user_txn_change_set_ext = user_txn_session - .finish(&mut (), gas_meter.change_set_configs()) + let (mut user_txn_change_set_ext, current_timestamp) = user_txn_session + .finish_with_current_timestamp(&mut (), gas_meter.change_set_configs()) .map_err(|e| e.into_vm_status())?; - // Charge gas for write set - gas_meter.charge_write_set_gas(user_txn_change_set_ext.write_set().iter())?; - // TODO(Gas): Charge for aggregator writes + + let storage_deposit_charge_schedule = gas_meter.apply_storage_deposit_charges( + 
user_txn_change_set_ext.write_set_mut(), + txn_data.sender, + current_timestamp.as_ref(), + )?; let storage_with_changes = DeltaStateView::new(storage, user_txn_change_set_ext.write_set()); @@ -291,6 +302,16 @@ impl AptosVM { let resolver = self.0.new_move_resolver(&storage_with_changes); let mut session = self.0.new_session(&resolver, SessionId::txn_meta(txn_data)); + // transfer storage deposit charges + self.0.run_storage_deposit_charges( + &mut session, + gas_meter, + &storage_deposit_charge_schedule, + )?; + + // Charge gas for write set + gas_meter.charge_write_set_gas(user_txn_change_set_ext.write_set().iter())?; + // TODO(Gas): Charge for aggregator writes self.0 .run_success_epilogue(&mut session, gas_meter.balance(), txn_data, log_context)?; @@ -298,12 +319,19 @@ impl AptosVM { let epilogue_change_set_ext = session .finish(&mut (), gas_meter.change_set_configs()) .map_err(|e| e.into_vm_status())?; + let change_set_ext = user_txn_change_set_ext .squash(epilogue_change_set_ext) .map_err(|_err| VMStatus::Error(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR))?; let (delta_change_set, change_set) = change_set_ext.into_inner(); let (write_set, events) = change_set.into_inner(); + if gas_meter.should_affect_storage_deposit(&write_set) { + error!( + *log_context, + "Success epilogue should not yield storage deposit changes, but ignored.", + ) + } let gas_used = txn_data .max_gas_amount() @@ -940,7 +968,7 @@ impl AptosVM { })?; SYSTEM_TRANSACTIONS_EXECUTED.inc(); - let output = get_transaction_output( + let output = get_transaction_output_no_storage_deposit_charge( &mut (), session, 0.into(), diff --git a/aptos-move/aptos-vm/src/aptos_vm_impl.rs b/aptos-move/aptos-vm/src/aptos_vm_impl.rs index 94de429c19998..643a15a306ccf 100644 --- a/aptos-move/aptos-vm/src/aptos_vm_impl.rs +++ b/aptos-move/aptos-vm/src/aptos_vm_impl.rs @@ -13,8 +13,9 @@ use crate::{ use aptos_aggregator::transaction::TransactionOutputExt; use aptos_framework::RuntimeModuleMetadataV1; use 
aptos_gas::{
-    AbstractValueSizeGasParameters, AptosGasParameters, ChangeSetConfigs, FromOnChainGasSchedule,
-    Gas, NativeGasParameters, StorageGasParameters,
+    AbstractValueSizeGasParameters, AptosGasMeter, AptosGasParameters, ChangeSetConfigs,
+    FromOnChainGasSchedule, Gas, NativeGasParameters, StorageDepositChargeSchedule,
+    StorageGasParameters,
 };
 use aptos_logger::prelude::*;
 use aptos_state_view::StateView;
@@ -31,6 +32,7 @@ use aptos_types::{
 use fail::fail_point;
 use move_binary_format::{errors::VMResult, CompiledModule};
 use move_core_types::{
+    identifier::Identifier,
     language_storage::ModuleId,
     move_resource::MoveStructType,
     resolver::ResourceResolver,
@@ -422,6 +424,33 @@ impl AptosVMImpl {
             .or_else(|err| convert_prologue_error(transaction_validation, err, log_context))
     }
 
+    /// Transfers storage-deposit charges/refunds by invoking the framework's
+    /// `0x1::storage_deposit::charge_and_refund` with the BCS-encoded schedule.
+    pub(crate) fn run_storage_deposit_charges(
+        &self,
+        session: &mut SessionExt,
+        gas_meter: &mut AptosGasMeter,
+        schedule: &StorageDepositChargeSchedule,
+    ) -> Result<(), VMStatus> {
+        fail_point!("move_adapter::run_storage_deposit_charges", |_| {
+            Err(VMStatus::Error(
+                StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR,
+            ))
+        });
+
+        session
+            .execute_function_bypass_visibility(
+                &ModuleId::new(
+                    CORE_CODE_ADDRESS,
+                    Identifier::new("storage_deposit").unwrap(),
+                ),
+                // Must match the Move function name exactly: the module defines
+                // `charge_and_refund`; the previous "charges_and_refunds" would
+                // fail function resolution at runtime.
+                &Identifier::new("charge_and_refund").unwrap(),
+                vec![],
+                vec![bcs::to_bytes(schedule).expect("Failed to serialize charge schedule.")],
+                gas_meter,
+            )
+            .map(|_return_vals| ())
+            .map_err(|err| VMStatus::Error(err.major_status()))
+    }
+
     /// Run the epilogue of a transaction by calling into `EPILOGUE_NAME` function stored
     /// in the `ACCOUNT_MODULE` on chain.
     pub(crate) fn run_success_epilogue(
@@ -445,10 +474,8 @@ impl AptosVMImpl {
             .execute_function_bypass_visibility(
                 &transaction_validation.module_id(),
                 &transaction_validation.user_epilogue_name,
-                // TODO: Deprecate this once we remove gas currency on the Move side.
vec![], serialize_values(&vec![ - MoveValue::Signer(txn_data.sender), MoveValue::U64(txn_sequence_number), MoveValue::U64(txn_gas_price.into()), MoveValue::U64(txn_max_gas_units.into()), @@ -575,7 +602,10 @@ impl<'a> AptosVMInternals<'a> { } } -pub(crate) fn get_transaction_output( +pub(crate) fn get_transaction_output_no_storage_deposit_charge< + A: AccessPathCache, + S: MoveResolverExt, +>( ap_cache: &mut A, session: SessionExt, gas_left: Gas, diff --git a/types/src/state_store/state_value.rs b/types/src/state_store/state_value.rs index 77772c1b12cb2..ba9cc72b09eb3 100644 --- a/types/src/state_store/state_value.rs +++ b/types/src/state_store/state_value.rs @@ -31,7 +31,7 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer}; pub enum StateValueMetadata { V0 { payer: AccountAddress, - deposit: u128, + deposit: u64, creation_time_usecs: u64, }, } @@ -39,7 +39,7 @@ pub enum StateValueMetadata { impl StateValueMetadata { pub fn new( payer: AccountAddress, - deposit: u128, + deposit: u64, creation_time_usecs: &CurrentTimeMicroseconds, ) -> Self { Self::V0 { @@ -48,6 +48,33 @@ impl StateValueMetadata { creation_time_usecs: creation_time_usecs.microseconds, } } + + pub fn payer(&self) -> AccountAddress { + match self { + StateValueMetadata::V0 { payer, .. } => *payer, + } + } + + pub fn deposit(&self) -> u64 { + match self { + StateValueMetadata::V0 { deposit, .. } => *deposit, + } + } + + pub fn set_deposit(&mut self, amount: u64) { + match self { + StateValueMetadata::V0 { deposit, .. } => *deposit = amount, + } + } + + pub fn creation_time_usecs(&self) -> u64 { + match self { + StateValueMetadata::V0 { + creation_time_usecs, + .. 
+ } => *creation_time_usecs, + } + } } #[derive(Clone, Debug, CryptoHasher, Eq, PartialEq, Ord, PartialOrd, Hash)] diff --git a/types/src/transaction/change_set.rs b/types/src/transaction/change_set.rs index b3203143e4667..6c98b2554f650 100644 --- a/types/src/transaction/change_set.rs +++ b/types/src/transaction/change_set.rs @@ -1,7 +1,10 @@ // Copyright (c) Aptos // SPDX-License-Identifier: Apache-2.0 -use crate::{contract_event::ContractEvent, write_set::WriteSet}; +use crate::{ + contract_event::ContractEvent, + write_set::{WriteSet, WriteSetMut}, +}; use move_core_types::vm_status::VMStatus; use serde::{Deserialize, Serialize}; @@ -44,6 +47,10 @@ impl ChangeSet { &self.write_set } + pub fn write_set_mut(&mut self) -> &mut WriteSetMut { + self.write_set.as_mut() + } + pub fn events(&self) -> &[ContractEvent] { &self.events } diff --git a/types/src/write_set.rs b/types/src/write_set.rs index 9678fee17dd46..913c78527d16b 100644 --- a/types/src/write_set.rs +++ b/types/src/write_set.rs @@ -243,6 +243,14 @@ impl WriteSet { } } +impl AsMut for WriteSet { + fn as_mut(&mut self) -> &mut WriteSetMut { + match self { + Self::V0(write_set) => &mut write_set.0, + } + } +} + impl Deref for WriteSet { type Target = WriteSetV0; From 4a0b7e3a37492cb9bb102b7128511b0478e217a7 Mon Sep 17 00:00:00 2001 From: Stelian Ionescu Date: Mon, 22 May 2023 15:38:41 -0400 Subject: [PATCH 13/14] Require permission check before running determine-docker-build-metadata --- .github/workflows/docker-build-test.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker-build-test.yaml b/.github/workflows/docker-build-test.yaml index 98d90672fce3b..ced3853512835 100644 --- a/.github/workflows/docker-build-test.yaml +++ b/.github/workflows/docker-build-test.yaml @@ -96,12 +96,13 @@ jobs: # Because the docker build happens in a reusable workflow, have a separate job that collects the right metadata # for the subsequent docker builds. 
Reusable workflows do not currently have the "env" context: https://github.com/orgs/community/discussions/26671 determine-docker-build-metadata: + needs: [permission-check] runs-on: ubuntu-latest steps: - name: collect metadata run: | - echo "GIT_SHA: ${{ env.GIT_SHA }}" - echo "TARGET_CACHE_ID: ${{ env.TARGET_CACHE_ID }}" + echo "GIT_SHA: ${GIT_SHA}" + echo "TARGET_CACHE_ID: ${TARGET_CACHE_ID}" outputs: gitSha: ${{ env.GIT_SHA }} targetCacheId: ${{ env.TARGET_CACHE_ID }} From 5747a3268f0ce4052fc154e68aa49ac348c8d068 Mon Sep 17 00:00:00 2001 From: Stelian Ionescu Date: Sun, 4 Jun 2023 22:05:40 -0400 Subject: [PATCH 14/14] fix trigger condition for build jobs --- .github/workflows/docker-build-test.yaml | 147 +++++++++++++++++------ 1 file changed, 113 insertions(+), 34 deletions(-) diff --git a/.github/workflows/docker-build-test.yaml b/.github/workflows/docker-build-test.yaml index ced3853512835..f6c50e8c0f417 100644 --- a/.github/workflows/docker-build-test.yaml +++ b/.github/workflows/docker-build-test.yaml @@ -25,24 +25,19 @@ on: # build on main branch OR when a PR is labeled with `CICD:build-images` workflow_dispatch: pull_request_target: types: [labeled, opened, synchronize, reopened, auto_merge_enabled] - # For PR that modify .github, run from that PR - # This will fail to get secrets if you are not from aptos - pull_request: - types: [labeled, opened, synchronize, reopened, auto_merge_enabled] - paths: - - ".github/workflows/docker-build-test.yaml" - - ".github/workflows/workflow-run-forge.yaml" - - ".github/workflows/docker-rust-build.yaml" - - ".github/workflows/sdk-release.yaml" - - ".github/workflows/lint-test.yaml" push: branches: - main + # release branches - devnet - testnet - mainnet - aptos-node-v* + - aptos-release-v* + # experimental branches - performance_benchmark + - quorum-store + - preview # cancel redundant builds concurrency: @@ -53,6 +48,7 @@ concurrency: env: GCP_DOCKER_ARTIFACT_REPO: ${{ secrets.GCP_DOCKER_ARTIFACT_REPO }} + 
GCP_DOCKER_ARTIFACT_REPO_US: ${{ secrets.GCP_DOCKER_ARTIFACT_REPO_US }} AWS_ECR_ACCOUNT_NUM: ${{ secrets.ENV_ECR_AWS_ACCOUNT_NUM }} # In case of pull_request events by default github actions merges main into the PR branch and then runs the tests etc # on the prospective merge result instead of only on the tip of the PR. @@ -74,15 +70,15 @@ permissions: # Note on the job-level `if` conditions: # This workflow is designed such that: # 1. Run ALL jobs when a 'push', 'workflow_dispatch' triggered the workflow or on 'pull_request's which have set auto_merge=true or have the label "CICD:run-e2e-tests". -# 2. Run ONLY the docker image building jobs on PRs with the "CICD:build-images" label. +# 2. Run ONLY the docker image building jobs on PRs with the "CICD:build[-]-images" label. # 3. Run NOTHING when neither 1. or 2.'s conditions are satisfied. jobs: permission-check: if: | github.event_name == 'push' || github.event_name == 'workflow_dispatch' || - contains(github.event.pull_request.labels.*.name, 'CICD:build-images') || - contains(github.event.pull_request.labels.*.name, 'CICD:run-e2e-tests') || + contains(join(github.event.pull_request.labels.*.name, ','), 'CICD:build-') || + contains(join(github.event.pull_request.labels.*.name, ','), 'CICD:run-') || github.event.pull_request.auto_merge != null || contains(github.event.pull_request.body, '#e2e') runs-on: ubuntu-latest @@ -109,7 +105,7 @@ jobs: rust-images: needs: [permission-check, determine-docker-build-metadata] - uses: ./.github/workflows/docker-rust-build.yaml + uses: aptos-labs/aptos-core/.github/workflows/workflow-run-docker-rust-build.yaml@main secrets: inherit with: GIT_SHA: ${{ needs.determine-docker-build-metadata.outputs.gitSha }} @@ -119,7 +115,11 @@ jobs: rust-images-indexer: needs: [permission-check, determine-docker-build-metadata] - uses: ./.github/workflows/docker-rust-build.yaml + uses: aptos-labs/aptos-core/.github/workflows/workflow-run-docker-rust-build.yaml@main + if: | + github.event_name == 
'push' || + github.event_name == 'workflow_dispatch' || + contains(github.event.pull_request.labels.*.name, 'CICD:build-indexer-images') secrets: inherit with: GIT_SHA: ${{ needs.determine-docker-build-metadata.outputs.gitSha }} @@ -130,7 +130,11 @@ jobs: rust-images-failpoints: needs: [permission-check, determine-docker-build-metadata] - uses: ./.github/workflows/docker-rust-build.yaml + uses: aptos-labs/aptos-core/.github/workflows/workflow-run-docker-rust-build.yaml@main + if: | + github.event_name == 'push' || + github.event_name == 'workflow_dispatch' || + contains(github.event.pull_request.labels.*.name, 'CICD:build-failpoints-images') secrets: inherit with: GIT_SHA: ${{ needs.determine-docker-build-metadata.outputs.gitSha }} @@ -141,7 +145,11 @@ jobs: rust-images-performance: needs: [permission-check, determine-docker-build-metadata] - uses: ./.github/workflows/docker-rust-build.yaml + uses: aptos-labs/aptos-core/.github/workflows/workflow-run-docker-rust-build.yaml@main + if: | + github.event_name == 'push' || + github.event_name == 'workflow_dispatch' || + contains(github.event.pull_request.labels.*.name, 'CICD:build-performance-images') secrets: inherit with: GIT_SHA: ${{ needs.determine-docker-build-metadata.outputs.gitSha }} @@ -154,7 +162,7 @@ jobs: if: | contains(github.event.pull_request.labels.*.name, 'CICD:build-consensus-only-image') || contains(github.event.pull_request.labels.*.name, 'CICD:run-consensus-only-perf-test') - uses: ./.github/workflows/docker-rust-build.yaml + uses: aptos-labs/aptos-core/.github/workflows/workflow-run-docker-rust-build.yaml@main secrets: inherit with: GIT_SHA: ${{ needs.determine-docker-build-metadata.outputs.gitSha }} @@ -164,24 +172,57 @@ jobs: BUILD_ADDL_TESTING_IMAGES: true sdk-release: - needs: [rust-images, determine-docker-build-metadata] + needs: [permission-check, rust-images, determine-docker-build-metadata] # runs with the default release docker build variant "rust-images" + if: | + (github.event_name == 
'push' && github.ref_name != 'main') || + github.event_name == 'workflow_dispatch' || + contains(github.event.pull_request.labels.*.name, 'CICD:run-e2e-tests') || + github.event.pull_request.auto_merge != null || + contains(github.event.pull_request.body, '#e2e') + uses: aptos-labs/aptos-core/.github/workflows/sdk-release.yaml@main + secrets: inherit + with: + GIT_SHA: ${{ needs.determine-docker-build-metadata.outputs.gitSha }} + + cli-e2e-tests: + needs: [permission-check, rust-images, determine-docker-build-metadata] # runs with the default release docker build variant "rust-images" if: | !contains(github.event.pull_request.labels.*.name, 'CICD:skip-sdk-integration-test') && ( - github.event_name == 'push' || + github.event_name == 'push' || + github.event_name == 'workflow_dispatch' || + contains(github.event.pull_request.labels.*.name, 'CICD:run-e2e-tests') || + github.event.pull_request.auto_merge != null) || + contains(github.event.pull_request.body, '#e2e' + ) + uses: ./.github/workflows/cli-e2e-tests.yaml + secrets: inherit + with: + GIT_SHA: ${{ needs.determine-docker-build-metadata.outputs.gitSha }} + + indexer-grpc-e2e-tests: + needs: [permission-check, rust-images, determine-docker-build-metadata] # runs with the default release docker build variant "rust-images" + if: | + (github.event_name == 'push' && github.ref_name != 'main') || github.event_name == 'workflow_dispatch' || contains(github.event.pull_request.labels.*.name, 'CICD:run-e2e-tests') || - github.event.pull_request.auto_merge != null) || + github.event.pull_request.auto_merge != null || contains(github.event.pull_request.body, '#e2e') - uses: ./.github/workflows/sdk-release.yaml + uses: ./.github/workflows/docker-indexer-grpc-test.yaml secrets: inherit with: GIT_SHA: ${{ needs.determine-docker-build-metadata.outputs.gitSha }} forge-e2e-test: needs: - [rust-images, rust-images-failpoints, determine-docker-build-metadata] + - permission-check + - determine-docker-build-metadata + - 
rust-images + - rust-images-indexer + - rust-images-failpoints + - rust-images-performance + - rust-images-consensus-only-perf-test if: | - !contains(github.event.pull_request.labels.*.name, 'CICD:skip-forge-e2e-test') && ( + !failure() && !cancelled() && needs.permission-check.result == 'success' && ( (github.event_name == 'push' && github.ref_name != 'main') || github.event_name == 'workflow_dispatch' || contains(github.event.pull_request.labels.*.name, 'CICD:run-e2e-tests') || @@ -192,6 +233,9 @@ jobs: secrets: inherit with: GIT_SHA: ${{ needs.determine-docker-build-metadata.outputs.gitSha }} + FORGE_TEST_SUITE: land_blocking + IMAGE_TAG: ${{ needs.determine-docker-build-metadata.outputs.gitSha }} + FORGE_RUNNER_DURATION_SECS: 480 COMMENT_HEADER: forge-e2e # Use the cache ID as the Forge namespace so we can limit Forge test concurrency on k8s, since Forge # test lifecycle is separate from that of GHA. This protects us from the case where many Forge tests are triggered @@ -201,9 +245,15 @@ jobs: # Run e2e compat test against testnet branch forge-compat-test: needs: - [rust-images, rust-images-failpoints, determine-docker-build-metadata] + - permission-check + - determine-docker-build-metadata + - rust-images + - rust-images-indexer + - rust-images-failpoints + - rust-images-performance + - rust-images-consensus-only-perf-test if: | - !contains(github.event.pull_request.labels.*.name, 'CICD:skip-forge-e2e-test') && ( + !failure() && !cancelled() && needs.permission-check.result == 'success' && ( (github.event_name == 'push' && github.ref_name != 'main') || github.event_name == 'workflow_dispatch' || contains(github.event.pull_request.labels.*.name, 'CICD:run-e2e-tests') || @@ -215,18 +265,50 @@ jobs: with: GIT_SHA: ${{ needs.determine-docker-build-metadata.outputs.gitSha }} FORGE_TEST_SUITE: compat - IMAGE_TAG: testnet_2d8b1b57553d869190f61df1aaf7f31a8fc19a7b # test against the latest build on testnet branch + IMAGE_TAG: 
testnet_2d8b1b57553d869190f61df1aaf7f31a8fc19a7b # test against a previous testnet release FORGE_RUNNER_DURATION_SECS: 300 COMMENT_HEADER: forge-compat - # Use the cache ID as the Forge namespace so we can limit Forge test concurrency on k8s, since Forge - # test lifecycle is separate from that of GHA. This protects us from the case where many Forge tests are triggered - # by this GHA. If there is a Forge namespace collision, Forge will pre-empt the existing test running in the namespace. FORGE_NAMESPACE: forge-compat-${{ needs.determine-docker-build-metadata.outputs.targetCacheId }} + # Run forge framework upgradability test + forge-framework-upgrade-test: + needs: + - permission-check + - determine-docker-build-metadata + - rust-images + - rust-images-indexer + - rust-images-failpoints + - rust-images-performance + - rust-images-consensus-only-perf-test + if: | + !failure() && !cancelled() && needs.permission-check.result == 'success' && ( + (github.event_name == 'push' && github.ref_name != 'main') || + github.event_name == 'workflow_dispatch' || + contains(github.event.pull_request.labels.*.name, 'CICD:run-e2e-tests') || + github.event.pull_request.auto_merge != null || + contains(github.event.pull_request.body, '#e2e') + ) + uses: aptos-labs/aptos-core/.github/workflows/workflow-run-forge.yaml@main + secrets: inherit + with: + GIT_SHA: ${{ needs.determine-docker-build-metadata.outputs.gitSha }} + FORGE_TEST_SUITE: framework_upgrade + IMAGE_TAG: aptos-node-v1.3.0_3fc3d42b6cfe27460004f9a0326451bcda840a60 # This workflow will test the upgradability from the current tip of the release branch to the current main branch. 
+ FORGE_RUNNER_DURATION_SECS: 300 + COMMENT_HEADER: forge-framework-upgrade + FORGE_NAMESPACE: forge-framework-upgrade-${{ needs.determine-docker-build-metadata.outputs.targetCacheId }} + forge-consensus-only-perf-test: needs: - [rust-images-consensus-only-perf-test, determine-docker-build-metadata] + - permission-check + - determine-docker-build-metadata + - rust-images + - rust-images-indexer + - rust-images-failpoints + - rust-images-performance + - rust-images-consensus-only-perf-test if: | + !failure() && !cancelled() && needs.permission-check.result == 'success' && contains(github.event.pull_request.labels.*.name, 'CICD:run-consensus-only-perf-test') uses: aptos-labs/aptos-core/.github/workflows/workflow-run-forge.yaml@main secrets: inherit @@ -236,7 +318,4 @@ jobs: IMAGE_TAG: consensus_only_perf_test_${{ needs.determine-docker-build-metadata.outputs.gitSha }} FORGE_RUNNER_DURATION_SECS: 300 COMMENT_HEADER: forge-consensus-only-perf-test - # Use the cache ID as the Forge namespace so we can limit Forge test concurrency on k8s, since Forge - # test lifecycle is separate from that of GHA. This protects us from the case where many Forge tests are triggered - # by this GHA. If there is a Forge namespace collision, Forge will pre-empt the existing test running in the namespace. FORGE_NAMESPACE: forge-consensus-only-perf-test-${{ needs.determine-docker-build-metadata.outputs.targetCacheId }}