From ce72eeaf5c9490f87aa3a39ae7f92087908e7d79 Mon Sep 17 00:00:00 2001 From: Green Baneling Date: Wed, 13 Mar 2024 20:36:13 +0100 Subject: [PATCH] Moved `StorageTransaction` to the `fuel-core-storage` crate (#1694) Closes https://github.com/FuelLabs/fuel-core/issues/1589 ## Overview The change moves the database transaction logic from the `fuel-core` to the `fuel-core-storage` level. The corresponding issue described the reason behind it. ## Technical details of implementation The change splits the `KeyValueStore` into `KeyValueInspect` and `KeyValueMutate`, as well as the `Blueprint` into `BlueprintInspect` and `BlueprintMutate`. It allows requiring less restricted constraints for any read-related operations. One of the main ideas of the change is to allow the actual storage to implement only `KeyValueInspect` and `Modifiable` without the `KeyValueMutate`. It simplifies work with the databases and provides a safe way of interacting with them (modifications to the database can only go through the `Modifiable::commit_changes`). This feature is used to [track the height](https://github.com/FuelLabs/fuel-core/pull/1694/files#diff-c95a3d57a39feac7c8c2f3b193a24eec39e794413adc741df36450f9a4539898) of each database during commits and even limit how commits are done, providing additional safety. This part of the change was done as a [separate commit](https://github.com/FuelLabs/fuel-core/pull/1694/commits/7b1141ac838568e3590f09dd420cb24a6946bd32). The `StorageTransaction` is a `StructuredStorage` that uses `InMemoryTransaction` inside to accumulate modifications. Only `InMemoryTransaction` has a real implementation of the `KeyValueMutate` (other types only implement it in tests). The implementation of the `Modifiable` for the `Database` contains business logic that provides additional safety but limits the usage of the database. The `Database` now tracks its height and is responsible for its updates. 
In the `commit_changes` function, it analyzes the changes that were done and tries to find a new height (for example, in the case of the `OnChain` database, we are looking for a new `Block` in the `FuelBlocks` table). As was planned in the issue, now the executor has full control over how commits to the storage are done. All mutation methods now require `&mut self` - exclusive ownership over the object to be able to write into it. It almost negates the chance of concurrent modification of the storage, but it is still possible since the `Database` implements the `Clone` trait. To be sure that we don't corrupt the state of the database, the `commit_changes` function implements additional safety checks to be sure that we commit updates for each height only once. Side changes: - The `drop` function was moved from `Database` to `RocksDB` as a preparation for the state rewind since the read view should also keep the drop function until it is destroyed. - The `StatisticTable` table lives in the off-chain worker. - Removed duplication of the `Database` from the `dap::ConcreteStorage` since it is already available from the VM. - The executor returns only the produced `Changes` instead of the storage transaction, which simplifies the interaction between modules and the port definition. - The logic related to the iteration over the storage is moved to the `fuel-core-storage` crate and is now reusable. It provides an `iterator` method that duplicates the logic from `MemoryStore` on iterating over the `BTreeMap` and methods like `iter_all`, `iter_all_by_prefix`, etc. It was done in a separate reviewable [commit](https://github.com/FuelLabs/fuel-core/pull/1694/commits/5b9bd78320e6f36d0650ec05698f12f7d1b3c7c9). - The `MemoryTransactionView` is fully replaced by the `InMemoryTransaction`. - Removed `flush` method from the `Database` since it is not needed after https://github.com/FuelLabs/fuel-core/pull/1664. 
--------- Co-authored-by: Voxelot --- CHANGELOG.md | 26 + Cargo.lock | 59 +- benches/benches/block_target_gas.rs | 6 +- benches/benches/state.rs | 60 +- benches/benches/vm.rs | 25 +- benches/benches/vm_set/blockchain.rs | 21 +- benches/src/lib.rs | 13 +- bin/fuel-core/src/cli/snapshot.rs | 28 +- crates/database/src/lib.rs | 33 +- crates/fuel-core/Cargo.toml | 3 +- crates/fuel-core/src/combined_database.rs | 43 +- crates/fuel-core/src/database.rs | 1047 ++++++++++++----- crates/fuel-core/src/database/balances.rs | 120 +- crates/fuel-core/src/database/block.rs | 79 +- crates/fuel-core/src/database/coin.rs | 5 +- crates/fuel-core/src/database/contracts.rs | 5 +- .../src/database/database_description.rs | 2 +- .../src/database/genesis_progress.rs | 33 +- crates/fuel-core/src/database/message.rs | 5 +- crates/fuel-core/src/database/metadata.rs | 70 +- crates/fuel-core/src/database/sealed_block.rs | 5 +- crates/fuel-core/src/database/state.rs | 127 +- crates/fuel-core/src/database/statistic.rs | 77 -- crates/fuel-core/src/database/storage.rs | 246 ++-- crates/fuel-core/src/database/transaction.rs | 104 -- crates/fuel-core/src/database/transactions.rs | 34 +- crates/fuel-core/src/executor.rs | 247 ++-- .../src/graphql_api/database/arc_wrapper.rs | 4 +- crates/fuel-core/src/graphql_api/ports.rs | 31 +- crates/fuel-core/src/graphql_api/storage.rs | 89 +- .../src/graphql_api/storage/statistic.rs | 32 + .../src/graphql_api/worker_service.rs | 89 +- crates/fuel-core/src/schema/dap.rs | 50 +- crates/fuel-core/src/service.rs | 11 +- crates/fuel-core/src/service/adapters.rs | 2 + .../src/service/adapters/block_importer.rs | 58 +- .../service/adapters/consensus_module/poa.rs | 11 +- .../src/service/adapters/executor.rs | 16 +- .../service/adapters/graphql_api/off_chain.rs | 31 +- .../service/adapters/graphql_api/on_chain.rs | 1 + .../src/service/adapters/producer.rs | 10 +- .../fuel-core/src/service/adapters/relayer.rs | 17 + crates/fuel-core/src/service/genesis.rs | 59 +- 
.../src/service/genesis/off_chain.rs | 22 +- .../fuel-core/src/service/genesis/runner.rs | 165 +-- .../fuel-core/src/service/genesis/workers.rs | 13 +- crates/fuel-core/src/state.rs | 128 +- crates/fuel-core/src/state/in_memory.rs | 2 +- .../src/state/in_memory/memory_store.rs | 196 ++- .../src/state/in_memory/transaction.rs | 725 ------------ crates/fuel-core/src/state/rocks_db.rs | 290 ++--- .../consensus_module/poa/src/ports.rs | 17 +- .../consensus_module/poa/src/service.rs | 26 +- .../consensus_module/poa/src/service_test.rs | 8 +- .../service_test/manually_produce_tests.rs | 2 +- crates/services/executor/src/executor.rs | 321 ++--- crates/services/executor/src/ports.rs | 48 - crates/services/executor/src/refs/contract.rs | 14 +- crates/services/importer/src/importer.rs | 87 +- crates/services/importer/src/importer/test.rs | 147 ++- crates/services/importer/src/ports.rs | 114 +- crates/services/p2p/src/service.rs | 8 +- .../services/producer/src/block_producer.rs | 29 +- crates/services/producer/src/mocks.rs | 31 +- crates/services/producer/src/ports.rs | 7 +- crates/services/relayer/src/ports.rs | 18 + crates/services/relayer/src/ports/tests.rs | 149 ++- .../services/relayer/src/service/get_logs.rs | 2 + crates/services/relayer/src/storage.rs | 44 +- crates/services/src/service.rs | 9 + crates/services/txpool/src/mock_db.rs | 4 +- crates/storage/src/blueprint.rs | 74 +- crates/storage/src/blueprint/merklized.rs | 105 +- crates/storage/src/blueprint/plain.rs | 111 +- crates/storage/src/blueprint/sparse.rs | 171 +-- crates/storage/src/column.rs | 8 - crates/storage/src/iter.rs | 202 +++- crates/storage/src/kv_store.rs | 113 +- crates/storage/src/lib.rs | 1 + crates/storage/src/structured_storage.rs | 309 +++-- .../storage/src/structured_storage/blocks.rs | 12 +- .../storage/src/structured_storage/state.rs | 79 +- crates/storage/src/test_helpers.rs | 103 +- crates/storage/src/transactional.rs | 830 ++++++++++++- crates/types/src/services.rs | 34 +- 
tests/tests/blocks.rs | 11 +- tests/tests/health.rs | 4 +- 87 files changed, 4291 insertions(+), 3536 deletions(-) delete mode 100644 crates/fuel-core/src/database/statistic.rs delete mode 100644 crates/fuel-core/src/database/transaction.rs create mode 100644 crates/fuel-core/src/graphql_api/storage/statistic.rs create mode 100644 crates/fuel-core/src/service/adapters/relayer.rs delete mode 100644 crates/fuel-core/src/state/in_memory/transaction.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index 667f69d5e4b..fd9152bd03a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,8 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ## [Unreleased] +Description of the upcoming release here. + ### Added - [#1747](https://github.com/FuelLabs/fuel-core/pull/1747): The DA block height is now included in the genesis state. @@ -16,6 +18,30 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ### Changed #### Breaking +- [#1694](https://github.com/FuelLabs/fuel-core/pull/1694): The change moves the database transaction logic from the `fuel-core` to the `fuel-core-storage` level. The corresponding [issue](https://github.com/FuelLabs/fuel-core/issues/1589) described the reason behind it. + + ## Technical details of implementation + + - The change splits the `KeyValueStore` into `KeyValueInspect` and `KeyValueMutate`, as well the `Blueprint` into `BlueprintInspect` and `BlueprintMutate`. It allows requiring less restricted constraints for any read-related operations. + + - One of the main ideas of the change is to allow for the actual storage only to implement `KeyValueInspect` and `Modifiable` without the `KeyValueMutate`. It simplifies work with the databases and provides a safe way of interacting with them (Modification into the database can only go through the `Modifiable::commit_changes`). 
This feature is used to [track the height](https://github.com/FuelLabs/fuel-core/pull/1694/files#diff-c95a3d57a39feac7c8c2f3b193a24eec39e794413adc741df36450f9a4539898) of each database during commits and even limit how commits are done, providing additional safety. This part of the change was done as a [separate commit](https://github.com/FuelLabs/fuel-core/pull/1694/commits/7b1141ac838568e3590f09dd420cb24a6946bd32). + + - The `StorageTransaction` is a `StructuredStorage` that uses `InMemoryTransaction` inside to accumulate modifications. Only `InMemoryTransaction` has a real implementation of the `KeyValueMutate` (other types only implement it in tests). + + - The implementation of the `Modifiable` for the `Database` contains business logic that provides additional safety but limits the usage of the database. The `Database` now tracks its height and is responsible for its updates. In the `commit_changes` function, it analyzes the changes that were done and tries to find a new height (for example, in the case of the `OnChain` database, we are looking for a new `Block` in the `FuelBlocks` table). + + - As was planned in the issue, now the executor has full control over how commits to the storage are done. + + - All mutation methods now require `&mut self` - exclusive ownership over the object to be able to write into it. It almost negates the chance of concurrent modification of the storage, but it is still possible since the `Database` implements the `Clone` trait. To be sure that we don't corrupt the state of the database, the `commit_changes` function implements additional safety checks to be sure that we commit updates for each height only once. + + - Side changes: + - The `drop` function was moved from `Database` to `RocksDB` as a preparation for the state rewind since the read view should also keep the drop function until it is destroyed. + - The `StatisticTable` table lives in the off-chain worker. 
+ - Removed duplication of the `Database` from the `dap::ConcreteStorage` since it is already available from the VM. + - The executor returns only the produced `Changes` instead of the storage transaction, which simplifies the interaction between modules and the port definition. + - The logic related to the iteration over the storage is moved to the `fuel-core-storage` crate and is now reusable. It provides an `iterator` method that duplicates the logic from `MemoryStore` on iterating over the `BTreeMap` and methods like `iter_all`, `iter_all_by_prefix`, etc. It was done in a separate reviewable [commit](https://github.com/FuelLabs/fuel-core/pull/1694/commits/5b9bd78320e6f36d0650ec05698f12f7d1b3c7c9). + - The `MemoryTransactionView` is fully replaced by the `InMemoryTransaction`. + - Removed `flush` method from the `Database` since it is not needed after https://github.com/FuelLabs/fuel-core/pull/1664. - [#1693](https://github.com/FuelLabs/fuel-core/pull/1693): The change separates the initial chain state from the chain config and stores them in separate files when generating a snapshot. The state snapshot can be generated in a new format where parquet is used for compression and indexing while postcard is used for encoding. This enables importing in a stream like fashion which reduces memory requirements. Json encoding is still supported to enable easy manual setup. However, parquet is prefered for large state files. 
diff --git a/Cargo.lock b/Cargo.lock index c5273babe47..6370d406577 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -97,6 +97,16 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" +[[package]] +name = "alloy-rlp" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d58d9f5da7b40e9bfff0b7e7816700be4019db97d4b6359fe7f94a9e22e42ac" +dependencies = [ + "arrayvec", + "bytes", +] + [[package]] name = "android-tzdata" version = "0.1.1" @@ -323,7 +333,7 @@ checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ "async-channel 2.2.0", "async-executor", - "async-io 2.3.1", + "async-io 2.3.2", "async-lock 3.3.0", "blocking", "futures-lite 2.2.0", @@ -425,9 +435,9 @@ dependencies = [ [[package]] name = "async-io" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f97ab0c5b00a7cdbe5a371b9a782ee7be1316095885c8a4ea1daf490eb0ef65" +checksum = "dcccb0f599cfa2f8ace422d3555572f47424da5648a4382a9dd0310ff8210884" dependencies = [ "async-lock 3.3.0", "cfg-if", @@ -496,7 +506,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e47d90f65a225c4527103a8d747001fc56e375203592b25ad103e1ca13124c5" dependencies = [ - "async-io 2.3.1", + "async-io 2.3.2", "async-lock 2.8.0", "atomic-waker", "cfg-if", @@ -1602,9 +1612,9 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.4.3" +version = "3.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66ba08eff649795412705351c11358001781278102196f49623d9c8d240ed8b9" +checksum = "672465ae37dc1bc6380a6547a8883d5dd397b0f1faaad4f265726cc7042a5345" dependencies = [ "nix 0.28.0", "windows-sys 0.52.0", @@ -2107,17 +2117,17 @@ dependencies = [ [[package]] name = "enr" -version = "0.10.0" +version = "0.10.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a3d8dc56e02f954cac8eb489772c552c473346fc34f67412bb6244fd647f7e4" +checksum = "2dc3eabaca59dc39ea5ed15062e4abc5bba9723b1cff7a4fea3faae0647f04c0" dependencies = [ + "alloy-rlp", "base64 0.21.7", "bytes", "hex", "k256", "log", "rand", - "rlp", "serde", "sha3", "zeroize", @@ -2308,7 +2318,7 @@ dependencies = [ "serde", "serde_json", "syn 2.0.52", - "toml 0.8.10", + "toml 0.8.11", "walkdir", ] @@ -2350,7 +2360,7 @@ dependencies = [ "rlp", "serde", "serde_json", - "strum 0.26.1", + "strum 0.26.2", "syn 2.0.52", "tempfile", "thiserror", @@ -2715,6 +2725,7 @@ dependencies = [ "clap 4.5.2", "derive_more", "enum-iterator", + "fuel-core", "fuel-core-chain-config", "fuel-core-consensus-module", "fuel-core-database", @@ -4055,7 +4066,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6b0422c86d7ce0e97169cc42e04ae643caf278874a7a3c87b8150a220dc7e1e" dependencies = [ - "async-io 2.3.1", + "async-io 2.3.2", "core-foundation", "fnv", "futures", @@ -7627,11 +7638,11 @@ dependencies = [ [[package]] name = "strum" -version = "0.26.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "723b93e8addf9aa965ebe2d11da6d7540fa2283fcea14b3371ff055f7ba13f5f" +checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" dependencies = [ - "strum_macros 0.26.1", + "strum_macros 0.26.2", ] [[package]] @@ -7662,9 +7673,9 @@ dependencies = [ [[package]] name = "strum_macros" -version = "0.26.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a3417fc93d76740d974a01654a09777cb500428cc874ca9f45edfe0c4d4cd18" +checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946" dependencies = [ "heck", "proc-macro2", @@ -8206,14 +8217,14 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.10" +version = "0.8.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a9aad4a3066010876e8dcf5a8a06e70a558751117a145c6ce2b82c2e2054290" +checksum = "af06656561d28735e9c1cd63dfd57132c8155426aa6af24f36a00a351f88c48e" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.6", + "toml_edit 0.22.7", ] [[package]] @@ -8260,9 +8271,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.6" +version = "0.22.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c1b5fd4128cc8d3e0cb74d4ed9a9cc7c7284becd4df68f5f940e1ad123606f6" +checksum = "18769cd1cec395d70860ceb4d932812a0b4d06b1a4bb336745a4d21b9496e992" dependencies = [ "indexmap 2.2.5", "serde", @@ -8615,9 +8626,9 @@ checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "value-bag" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "126e423afe2dd9ac52142e7e9d5ce4135d7e13776c529d27fd6bc49f19e3280b" +checksum = "8fec26a25bd6fca441cdd0f769fd7f891bae119f996de31f86a5eddccef54c1d" [[package]] name = "vcpkg" diff --git a/benches/benches/block_target_gas.rs b/benches/benches/block_target_gas.rs index a46094ab6ef..362a6220043 100644 --- a/benches/benches/block_target_gas.rs +++ b/benches/benches/block_target_gas.rs @@ -17,6 +17,10 @@ use ed25519_dalek::Signer; use ethnum::U256; use fuel_core::{ combined_database::CombinedDatabase, + database::{ + balances::BalancesInitializer, + state::StateInitializer, + }, service::{ config::Trigger, Config, @@ -256,7 +260,7 @@ fn service_with_many_contracts( .build() .unwrap(); let _drop = rt.enter(); - let mut database = Database::rocksdb(); + let mut database = Database::rocksdb_temp(); let mut config = Config::local_node(); config .chain_config diff --git a/benches/benches/state.rs b/benches/benches/state.rs index fb3027c6540..b20edc7df0f 100644 --- a/benches/benches/state.rs +++ b/benches/benches/state.rs @@ -5,8 +5,19 @@ 
use criterion::{ BenchmarkGroup, Criterion, }; -use fuel_core::database::Database; -use fuel_core_storage::vm_storage::VmStorage; +use fuel_core::database::{ + database_description::on_chain::OnChain, + state::StateInitializer, + Database, +}; +use fuel_core_storage::{ + transactional::{ + IntoTransaction, + ReadTransaction, + WriteTransaction, + }, + vm_storage::VmStorage, +}; use fuel_core_types::{ blockchain::header::GeneratedConsensusFields, fuel_tx::Bytes32, @@ -28,7 +39,10 @@ use std::{ #[global_allocator] static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; -fn setup(db: &mut Database, contract: &ContractId, n: usize) { +fn setup(db: &mut D, contract: &ContractId, n: usize) +where + D: StateInitializer, +{ let mut rng_keys = thread_rng(); let gen_keys = || -> Bytes32 { rng_keys.gen() }; let state_keys = iter::repeat_with(gen_keys).take(n); @@ -51,16 +65,16 @@ fn insert_state_single_contract_database(c: &mut Criterion) { let mut bench_state = |group: &mut BenchmarkGroup, name: &str, n: usize| { group.bench_function(name, |b| { - let mut db = VmStorage::default(); + let mut db = Database::::default(); let contract: ContractId = rng.gen(); - setup(db.database_mut(), &contract, n); - let outer = db.database_mut().transaction(); + setup(&mut db, &contract, n); + let outer = db.write_transaction(); b.iter_custom(|iters| { let mut elapsed_time = Duration::default(); for _ in 0..iters { - let mut inner = outer.transaction(); + let inner = outer.read_transaction(); let mut inner_db = VmStorage::new::( - inner.as_mut().clone(), + inner, &Default::default(), Default::default(), ); @@ -112,16 +126,16 @@ fn insert_state_single_contract_transaction(c: &mut Criterion) { let mut bench_state = |group: &mut BenchmarkGroup, name: &str, n: usize| { group.bench_function(name, |b| { - let mut db = VmStorage::::default(); + let db = Database::::default(); let contract: ContractId = rng.gen(); - let mut outer = db.database_mut().transaction(); - 
setup(outer.as_mut(), &contract, n); + let mut outer = db.into_transaction(); + setup(&mut outer, &contract, n); b.iter_custom(|iters| { let mut elapsed_time = Duration::default(); for _ in 0..iters { - let mut inner = outer.transaction(); + let inner = outer.read_transaction(); let mut inner_db = VmStorage::new::( - inner.as_mut().clone(), + inner, &Default::default(), Default::default(), ); @@ -173,19 +187,19 @@ fn insert_state_multiple_contracts_database(c: &mut Criterion) { let mut bench_state = |group: &mut BenchmarkGroup, name: &str, n: usize| { group.bench_function(name, |b| { - let mut db = VmStorage::::default(); + let mut db = Database::::default(); for _ in 0..n { let contract: ContractId = rng.gen(); - setup(db.database_mut(), &contract, 1); + setup(&mut db, &contract, 1); } - let outer = db.database_mut().transaction(); + let outer = db.into_transaction(); b.iter_custom(|iters| { let mut elapsed_time = Duration::default(); let contract: ContractId = rng.gen(); for _ in 0..iters { - let mut inner = outer.transaction(); + let inner = outer.read_transaction(); let mut inner_db = VmStorage::new::( - inner.as_mut().clone(), + inner, &Default::default(), Default::default(), ); @@ -237,19 +251,19 @@ fn insert_state_multiple_contracts_transaction(c: &mut Criterion) { let mut bench_state = |group: &mut BenchmarkGroup, name: &str, n: usize| { group.bench_function(name, |b| { - let mut db = VmStorage::::default(); - let mut outer = db.database_mut().transaction(); + let db = Database::::default(); + let mut outer = db.into_transaction(); for _ in 0..n { let contract: ContractId = rng.gen(); - setup(outer.as_mut(), &contract, 1); + setup(&mut outer, &contract, 1); } b.iter_custom(|iters| { let mut elapsed_time = Duration::default(); let contract: ContractId = rng.gen(); for _ in 0..iters { - let mut inner = outer.transaction(); + let inner = outer.read_transaction(); let mut inner_db = VmStorage::new::( - inner.as_mut().clone(), + inner, &Default::default(), 
Default::default(), ); diff --git a/benches/benches/vm.rs b/benches/benches/vm.rs index 3175eafcf1a..16e7cf2461e 100644 --- a/benches/benches/vm.rs +++ b/benches/benches/vm.rs @@ -11,10 +11,12 @@ use criterion::{ BenchmarkGroup, Criterion, }; +use std::sync::Arc; use crate::vm_initialization::vm_initialization; use contract::*; use fuel_core_benches::*; +use fuel_core_storage::transactional::IntoTransaction; use fuel_core_types::fuel_asm::Instruction; use vm_set::*; @@ -37,15 +39,19 @@ where let clock = quanta::Clock::new(); + let original_db = vm.as_mut().database_mut().clone(); + // During block production/validation for each state, which may affect the state of the database, + // we create a new storage transaction. The code here simulates the same behavior to have + // the same nesting level and the same performance. + let block_database_tx = original_db.clone().into_transaction(); + let relayer_database_tx = block_database_tx.into_transaction(); + let thread_database_tx = relayer_database_tx.into_transaction(); + let tx_database_tx = thread_database_tx.into_transaction(); + let database = Database::new(Arc::new(tx_database_tx)); + *vm.as_mut().database_mut() = database.into_transaction(); + let mut total = core::time::Duration::ZERO; for _ in 0..iters { - let original_db = vm.as_mut().database_mut().clone(); - // Simulates the block production/validation with three levels of database transaction. - let block_database_tx = original_db.transaction().as_ref().clone(); - let tx_database_tx = block_database_tx.transaction().as_ref().clone(); - let vm_tx_database_tx = tx_database_tx.transaction().as_ref().clone(); - *vm.as_mut().database_mut() = vm_tx_database_tx; - let start = black_box(clock.raw()); match instruction { Instruction::CALL(call) => { @@ -60,9 +66,10 @@ where let end = black_box(clock.raw()); total += clock.delta(start, end); vm.reset_vm_state(diff); - // restore original db - *vm.as_mut().database_mut() = original_db; + // Reset database changes. 
+ vm.as_mut().database_mut().reset_changes(); } + *vm.as_mut().database_mut() = original_db; total }) }); diff --git a/benches/benches/vm_set/blockchain.rs b/benches/benches/vm_set/blockchain.rs index 651ef3bc748..d886b0e0b32 100644 --- a/benches/benches/vm_set/blockchain.rs +++ b/benches/benches/vm_set/blockchain.rs @@ -12,6 +12,11 @@ use criterion::{ Throughput, }; use fuel_core::{ + database::{ + balances::BalancesInitializer, + database_description::on_chain::OnChain, + state::StateInitializer, + }, service::Config, state::rocks_db::{ RocksDb, @@ -21,6 +26,10 @@ use fuel_core::{ use fuel_core_benches::*; use fuel_core_storage::{ tables::FuelBlocks, + transactional::{ + IntoTransaction, + StorageTransaction, + }, vm_storage::{ IncreaseStorageKey, VmStorage, @@ -60,7 +69,8 @@ impl BenchDb { fn new(contract_id: &ContractId) -> anyhow::Result { let tmp_dir = ShallowTempDir::new(); - let db = Arc::new(RocksDb::default_open(tmp_dir.path(), None).unwrap()); + let db = + Arc::new(RocksDb::::default_open(tmp_dir.path(), None).unwrap()); let mut storage_key = primitive_types::U256::zero(); let mut key_bytes = Bytes32::zeroed(); @@ -103,7 +113,6 @@ impl BenchDb { &block.compress(&config.chain_config.consensus_parameters.chain_id), ) .unwrap(); - database.clone().flush()?; Ok(Self { _tmp_dir: tmp_dir, @@ -112,14 +121,18 @@ impl BenchDb { } /// Creates a `VmDatabase` instance. 
- fn to_vm_database(&self) -> VmStorage { + fn to_vm_database(&self) -> VmStorage> { let header = ConsensusHeader { prev_root: Default::default(), height: 1.into(), time: Tai64::UNIX_EPOCH, generated: (), }; - VmStorage::new(self.db.clone(), &header, ContractId::zeroed()) + VmStorage::new( + self.db.clone().into_transaction(), + &header, + ContractId::zeroed(), + ) } } diff --git a/benches/src/lib.rs b/benches/src/lib.rs index 4ccf413c1a8..11bd39c6001 100644 --- a/benches/src/lib.rs +++ b/benches/src/lib.rs @@ -31,12 +31,13 @@ use fuel_core_types::{ }, }; +use fuel_core_storage::transactional::StorageTransaction; pub use rand::Rng; use std::iter; const LARGE_GAS_LIMIT: u64 = u64::MAX - 1001; -fn new_db() -> VmStorage { +fn new_db() -> VmStorage> { // when rocksdb is enabled, this creates a new db instance with a temporary path VmStorage::default() } @@ -89,7 +90,7 @@ pub struct VmBench { pub inputs: Vec, pub outputs: Vec, pub witnesses: Vec, - pub db: Option>, + pub db: Option>>, pub instruction: Instruction, pub prepare_call: Option, pub dummy_contract: Option, @@ -100,7 +101,7 @@ pub struct VmBench { #[derive(Debug, Clone)] pub struct VmBenchPrepared { - pub vm: Interpreter, Script>, + pub vm: Interpreter>, Script>, pub instruction: Instruction, pub diff: diff::Diff, } @@ -148,7 +149,7 @@ impl VmBench { pub fn contract_using_db( rng: &mut R, - mut db: VmStorage, + mut db: VmStorage>, instruction: Instruction, ) -> anyhow::Result where @@ -207,7 +208,7 @@ impl VmBench { .with_prepare_call(prepare_call)) } - pub fn with_db(mut self, db: VmStorage) -> Self { + pub fn with_db(mut self, db: VmStorage>) -> Self { self.db.replace(db); self } @@ -468,8 +469,6 @@ impl TryFrom for VmBenchPrepared { let start_vm = vm.clone(); let original_db = vm.as_mut().database_mut().clone(); - let database_tx = original_db.transaction().as_ref().clone(); - *vm.as_mut().database_mut() = database_tx; let mut vm = vm.add_recording(); match instruction { Instruction::CALL(call) => { diff 
--git a/bin/fuel-core/src/cli/snapshot.rs b/bin/fuel-core/src/cli/snapshot.rs index 025cb463e8a..9562c8262ed 100644 --- a/bin/fuel-core/src/cli/snapshot.rs +++ b/bin/fuel-core/src/cli/snapshot.rs @@ -241,7 +241,7 @@ fn load_chain_config( } fn open_db(path: &Path) -> anyhow::Result { - Database::::open(path, None) + Database::::open_rocksdb(path, None) .map_err(Into::::into) .context(format!("failed to open database at path {path:?}",)) } @@ -270,6 +270,10 @@ mod tests { FuelBlocks, Messages, }, + transactional::{ + IntoTransaction, + StorageTransaction, + }, ContractsAssetKey, ContractsStateKey, StorageAsMut, @@ -310,13 +314,20 @@ mod tests { use super::*; struct DbPopulator { - db: Database, + db: StorageTransaction, rng: StdRng, } impl DbPopulator { fn new(db: Database, rng: StdRng) -> Self { - Self { db, rng } + Self { + db: db.into_transaction(), + rng, + } + } + + fn commit(self) { + self.db.commit().expect("failed to commit transaction"); } fn given_persisted_state( @@ -371,7 +382,10 @@ mod tests { let height = 10u32.into(); block.header_mut().application_mut().da_height = 14u64.into(); block.header_mut().set_block_height(height); - let _ = self.db.storage::().insert(&height, &block); + let _ = self + .db + .storage_as_mut::() + .insert(&height, &block); block } @@ -521,7 +535,7 @@ mod tests { let block = db.given_block(); let state = db.given_persisted_state(10, 10, 10, 10, 10); - drop(db); + db.commit(); // when exec(Command { @@ -565,7 +579,7 @@ mod tests { db.given_block(); let state = db.given_persisted_state(10, 10, 10, 10, 10); - drop(db); + db.commit(); // when exec(Command { @@ -622,7 +636,7 @@ mod tests { let state = sorted_state(db.given_persisted_state(10, 10, 10, 10, 10)); let random_contract = state.contracts.choose(&mut db.rng).unwrap().clone(); let contract_id = random_contract.contract_id; - drop(db); + db.commit(); // when exec(Command { diff --git a/crates/database/src/lib.rs b/crates/database/src/lib.rs index 578f8c48010..c5273fa6667 
100644 --- a/crates/database/src/lib.rs +++ b/crates/database/src/lib.rs @@ -21,12 +21,6 @@ pub enum Error { /// Error occurred during serialization or deserialization of the entity. #[display(fmt = "error performing serialization or deserialization")] Codec, - /// Chain can be initialized once. - #[display(fmt = "Failed to initialize chain")] - ChainAlreadyInitialized, - /// Chain should be initialized before usage. - #[display(fmt = "Chain is not yet initialized")] - ChainUninitialized, /// The version of database or data is invalid (possibly not migrated). #[display( fmt = "Invalid database version, expected {expected:#x}, found {found:#x}" @@ -37,6 +31,33 @@ pub enum Error { /// the database version expected by this build of fuel-core expected: u32, }, + /// Multiple heights found in the commit to the database. + #[display(fmt = "Multiple heights found in the commit {heights:?}")] + MultipleHeightsInCommit { + /// List of heights found in the commit. + heights: Vec, + }, + /// Failed to advance the height. + #[display(fmt = "Failed to advance the height")] + FailedToAdvanceHeight, + /// The new and old heights are not linked. + #[display( + fmt = "New and old heights are not linked: prev_height: {prev_height:#x}, new_height: {new_height:#x}" + )] + HeightsAreNotLinked { + /// The old height. + prev_height: u64, + /// The new height. + new_height: u64, + }, + /// The new height is not found, but the old height is set. + #[display( + fmt = "The new height is not found, but the old height is set: prev_height: {prev_height:#x}" + )] + NewHeightIsNotSet { + /// The old height known by the database. + prev_height: u64, + }, /// Not related to database error. 
#[from] diff --git a/crates/fuel-core/Cargo.toml b/crates/fuel-core/Cargo.toml index eab03e1a43c..5e6f5fd3f15 100644 --- a/crates/fuel-core/Cargo.toml +++ b/crates/fuel-core/Cargo.toml @@ -61,6 +61,7 @@ uuid = { version = "1.1", features = ["v4"] } [dev-dependencies] assert_matches = "1.5" +fuel-core = { path = ".", features = ["test-helpers"] } fuel-core-executor = { workspace = true, features = ["std", "test-helpers"] } fuel-core-services = { path = "./../services", features = ["test-helpers"] } fuel-core-storage = { path = "./../storage", features = ["test-helpers"] } @@ -76,6 +77,6 @@ default = ["rocksdb"] p2p = ["dep:fuel-core-p2p", "dep:fuel-core-sync"] relayer = ["dep:fuel-core-relayer"] rocksdb = ["dep:rocksdb", "dep:tempfile", "dep:num_cpus"] -test-helpers = ["fuel-core-p2p?/test-helpers"] +test-helpers = ["fuel-core-p2p?/test-helpers", "fuel-core-storage/test-helpers"] # features to enable in production, but increase build times rocksdb-production = ["rocksdb", "rocksdb/jemalloc"] diff --git a/crates/fuel-core/src/combined_database.rs b/crates/fuel-core/src/combined_database.rs index b31678c7f51..79a1a3a2bea 100644 --- a/crates/fuel-core/src/combined_database.rs +++ b/crates/fuel-core/src/combined_database.rs @@ -1,4 +1,5 @@ use crate::{ + database, database::{ database_description::{ off_chain::OffChain, @@ -11,10 +12,6 @@ use crate::{ service::DbType, }; use fuel_core_storage::Result as StorageResult; -use fuel_core_types::{ - blockchain::primitives::DaBlockHeight, - fuel_types::BlockHeight, -}; use std::path::PathBuf; #[derive(Clone, Debug, Eq, PartialEq)] @@ -46,19 +43,22 @@ impl CombinedDatabase { } #[cfg(feature = "rocksdb")] - pub fn prune(path: &std::path::Path) -> DatabaseResult<()> { - Database::::prune(path)?; - Database::::prune(path)?; - Database::::prune(path)?; + pub fn prune(path: &std::path::Path) -> database::Result<()> { + crate::state::rocks_db::RocksDb::::prune(path)?; + crate::state::rocks_db::RocksDb::::prune(path)?; + 
crate::state::rocks_db::RocksDb::::prune(path)?; Ok(()) } #[cfg(feature = "rocksdb")] - pub fn open(path: &std::path::Path, capacity: usize) -> DatabaseResult { + pub fn open( + path: &std::path::Path, + capacity: usize, + ) -> crate::database::Result { // TODO: Use different cache sizes for different databases - let on_chain = Database::open(path, capacity)?; - let off_chain = Database::open(path, capacity)?; - let relayer = Database::open(path, capacity)?; + let on_chain = Database::open_rocksdb(path, capacity)?; + let off_chain = Database::open_rocksdb(path, capacity)?; + let relayer = Database::open_rocksdb(path, capacity)?; Ok(Self { on_chain, off_chain, @@ -104,14 +104,10 @@ impl CombinedDatabase { ) } - pub fn init( - &mut self, - block_height: &BlockHeight, - da_block_height: &DaBlockHeight, - ) -> StorageResult<()> { - self.on_chain.init(block_height)?; - self.off_chain.init(block_height)?; - self.relayer.init(da_block_height)?; + pub fn check_version(&self) -> StorageResult<()> { + self.on_chain.check_version()?; + self.off_chain.check_version()?; + self.relayer.check_version()?; Ok(()) } @@ -136,11 +132,4 @@ impl CombinedDatabase { pub fn relayer(&self) -> &Database { &self.relayer } - - pub fn flush(self) -> DatabaseResult<()> { - self.on_chain.flush()?; - self.off_chain.flush()?; - self.relayer.flush()?; - Ok(()) - } } diff --git a/crates/fuel-core/src/database.rs b/crates/fuel-core/src/database.rs index 94ebe7a8e6b..1d4de06ee3d 100644 --- a/crates/fuel-core/src/database.rs +++ b/crates/fuel-core/src/database.rs @@ -5,11 +5,15 @@ use crate::{ on_chain::OnChain, relayer::Relayer, DatabaseDescription, + DatabaseMetadata, }, - transaction::DatabaseTransaction, + metadata::MetadataTable, + Error as DatabaseError, }, + graphql_api::storage::blocks::FuelBlockIdsToHeights, state::{ in_memory::memory_store::MemoryStore, + ChangesIterator, DataSource, }, }; @@ -21,67 +25,57 @@ use fuel_core_chain_config::{ ContractStateConfig, MessageConfig, }; +use 
fuel_core_services::SharedMutex; use fuel_core_storage::{ - blueprint::Blueprint, - codec::{ - Decode, - Encode, - Encoder, - }, + self, iter::{ BoxedIter, IntoBoxedIter, IterDirection, + IterableStore, + IteratorOverTable, }, kv_store::{ - BatchOperations, - KeyValueStore, + KVItem, + KeyValueInspect, Value, - WriteOperation, }, not_found, - structured_storage::{ - StructuredStorage, - TableWithBlueprint, - }, + tables::FuelBlocks, transactional::{ AtomicView, + Changes, + ConflictPolicy, + Modifiable, StorageTransaction, - Transactional, }, Error as StorageError, - Mappable, Result as StorageResult, + StorageAsMut, + StorageInspect, + StorageMutate, }; use fuel_core_types::{ - blockchain::primitives::DaBlockHeight, + blockchain::{ + block::CompressedBlock, + primitives::DaBlockHeight, + }, fuel_types::BlockHeight, }; use itertools::Itertools; use std::{ - fmt::{ - self, - Debug, - Formatter, - }, - marker::Send, + fmt::Debug, sync::Arc, }; pub use fuel_core_database::Error; pub type Result = core::result::Result; -type DatabaseResult = Result; - // TODO: Extract `Database` and all belongs into `fuel-core-database`. 
#[cfg(feature = "rocksdb")] use crate::state::rocks_db::RocksDb; -use fuel_core_storage::tables::FuelBlocks; -use fuel_core_types::blockchain::block::CompressedBlock; #[cfg(feature = "rocksdb")] use std::path::Path; -#[cfg(feature = "rocksdb")] -use tempfile::TempDir; // Storages implementation pub mod balances; @@ -94,9 +88,7 @@ pub mod message; pub mod metadata; pub mod sealed_block; pub mod state; -pub mod statistic; pub mod storage; -pub mod transaction; pub mod transactions; #[derive(Clone, Debug)] @@ -104,155 +96,69 @@ pub struct Database where Description: DatabaseDescription, { - data: StructuredStorage>, - // used for RAII - _drop: Arc, -} - -type DropFn = Box; -#[derive(Default)] -struct DropResources { - // move resources into this closure to have them dropped when db drops - drop: Option, -} - -impl fmt::Debug for DropResources { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "DropResources") - } -} - -impl From for DropResources { - fn from(closure: F) -> Self { - Self { - drop: Option::Some(Box::new(closure)), - } - } -} - -impl Drop for DropResources { - fn drop(&mut self) { - if let Some(drop) = self.drop.take() { - (drop)() - } - } + height: SharedMutex>, + data: DataSource, } impl Database where Description: DatabaseDescription, + Self: StorageInspect, Error = StorageError>, { - pub fn new(data_source: D) -> Self - where - D: Into>, - { - Self { - data: StructuredStorage::new(data_source.into()), - _drop: Default::default(), - } - } - - pub fn with_drop(mut self, drop: DropFn) -> Self { - self._drop = Arc::new(drop.into()); - self + pub fn new(data_source: DataSource) -> Self { + let mut database = Self { + height: SharedMutex::new(None), + data: data_source, + }; + let height = database + .latest_height() + .expect("Failed to get latest height during creation of the database"); + + database.height = SharedMutex::new(height); + + database } #[cfg(feature = "rocksdb")] - pub fn open(path: &Path, capacity: impl Into>) -> 
DatabaseResult { + pub fn open_rocksdb(path: &Path, capacity: impl Into>) -> Result { use anyhow::Context; let db = RocksDb::::default_open(path, capacity.into()).map_err(Into::::into).context("Failed to open rocksdb, you may need to wipe a pre-existing incompatible db `rm -rf ~/.fuel/db`")?; - Ok(Database { - data: StructuredStorage::new(Arc::new(db).into()), - _drop: Default::default(), - }) - } - - #[cfg(feature = "rocksdb")] - pub fn prune(path: &Path) -> DatabaseResult<()> { - RocksDb::::prune(path) + Ok(Database::new(Arc::new(db))) } +} +impl Database +where + Description: DatabaseDescription, +{ pub fn in_memory() -> Self { + let data = Arc::>::new(MemoryStore::default()); Self { - data: StructuredStorage::new(Arc::new(MemoryStore::default()).into()), - _drop: Default::default(), + height: SharedMutex::new(None), + data, } } #[cfg(feature = "rocksdb")] - pub fn rocksdb() -> Self { - let tmp_dir = TempDir::new().unwrap(); - let db = RocksDb::::default_open(tmp_dir.path(), None).unwrap(); + pub fn rocksdb_temp() -> Self { + let data = + Arc::>::new(RocksDb::default_open_temp(None).unwrap()); Self { - data: StructuredStorage::new(Arc::new(db).into()), - _drop: Arc::new( - { - move || { - // cleanup temp dir - drop(tmp_dir); - } - } - .into(), - ), + height: SharedMutex::new(None), + data, } } - - pub fn transaction(&self) -> DatabaseTransaction { - self.into() - } - - pub fn flush(self) -> DatabaseResult<()> { - self.data.as_ref().flush() - } - - /// Removes all entries from the column in the database(a.k.a. pruning the one table). 
- pub fn delete_all(&self, column: Description::Column) -> DatabaseResult<()> { - self.data - .as_ref() - .delete_all(column) - .map_err(|e| anyhow::anyhow!(e).into()) - } } -impl KeyValueStore for DataSource +impl KeyValueInspect for Database where Description: DatabaseDescription, { type Column = Description::Column; - fn put(&self, key: &[u8], column: Self::Column, value: Value) -> StorageResult<()> { - self.as_ref().put(key, column, value) - } - - fn replace( - &self, - key: &[u8], - column: Self::Column, - value: Value, - ) -> StorageResult> { - self.as_ref().replace(key, column, value) - } - - fn write( - &self, - key: &[u8], - column: Self::Column, - buf: &[u8], - ) -> StorageResult { - self.as_ref().write(key, column, buf) - } - - fn take(&self, key: &[u8], column: Self::Column) -> StorageResult> { - self.as_ref().take(key, column) - } - - fn delete(&self, key: &[u8], column: Self::Column) -> StorageResult<()> { - self.as_ref().delete(key, column) - } - fn exists(&self, key: &[u8], column: Self::Column) -> StorageResult { - self.as_ref().exists(key, column) + self.data.as_ref().exists(key, column) } fn size_of_value( @@ -260,11 +166,11 @@ where key: &[u8], column: Self::Column, ) -> StorageResult> { - self.as_ref().size_of_value(key, column) + self.data.as_ref().size_of_value(key, column) } fn get(&self, key: &[u8], column: Self::Column) -> StorageResult> { - self.as_ref().get(key, column) + self.data.as_ref().get(key, column) } fn read( @@ -273,135 +179,24 @@ where column: Self::Column, buf: &mut [u8], ) -> StorageResult> { - self.as_ref().read(key, column, buf) + self.data.as_ref().read(key, column, buf) } } -impl BatchOperations for DataSource +impl IterableStore for Database where Description: DatabaseDescription, { - fn batch_write( - &self, - entries: &mut dyn Iterator, Self::Column, WriteOperation)>, - ) -> StorageResult<()> { - self.as_ref().batch_write(entries) - } - - fn delete_all(&self, column: Self::Column) -> StorageResult<()> { - 
self.as_ref().delete_all(column) - } -} - -/// Read-only methods. -impl Database -where - Description: DatabaseDescription, -{ - pub(crate) fn iter_all( - &self, - direction: Option, - ) -> impl Iterator> + '_ - where - M: Mappable + TableWithBlueprint, - M::Blueprint: Blueprint, - { - self.iter_all_filtered::(None, None, direction) - } - - pub(crate) fn iter_all_by_prefix( - &self, - prefix: Option

, - ) -> impl Iterator> + '_ - where - M: Mappable + TableWithBlueprint, - M::Blueprint: Blueprint, - P: AsRef<[u8]>, - { - self.iter_all_filtered::(prefix, None, None) - } - - pub(crate) fn iter_all_by_start( - &self, - start: Option<&M::Key>, - direction: Option, - ) -> impl Iterator> + '_ - where - M: Mappable + TableWithBlueprint, - M::Blueprint: Blueprint, - { - self.iter_all_filtered::(None, start, direction) - } - - pub(crate) fn iter_all_filtered( + fn iter_store( &self, - prefix: Option

, - start: Option<&M::Key>, - direction: Option, - ) -> impl Iterator> + '_ - where - M: Mappable + TableWithBlueprint, - M::Blueprint: Blueprint, - P: AsRef<[u8]>, - { - let encoder = start.map(|start| { - >::KeyCodec::encode(start) - }); - - let start = encoder.as_ref().map(|encoder| encoder.as_bytes()); - + column: Self::Column, + prefix: Option<&[u8]>, + start: Option<&[u8]>, + direction: IterDirection, + ) -> BoxedIter { self.data .as_ref() - .iter_all( - M::column(), - prefix.as_ref().map(|p| p.as_ref()), - start.as_ref().map(|cow| cow.as_ref()), - direction.unwrap_or_default(), - ) - .map(|val| { - val.and_then(|(key, value)| { - let key = - >::KeyCodec::decode( - key.as_slice(), - ) - .map_err(|e| StorageError::Codec(anyhow::anyhow!(e)))?; - let value = - >::ValueCodec::decode( - value.as_slice(), - ) - .map_err(|e| StorageError::Codec(anyhow::anyhow!(e)))?; - Ok((key, value)) - }) - }) - } -} - -impl Transactional for Database -where - Description: DatabaseDescription, -{ - type Storage = Database; - - fn transaction(&self) -> StorageTransaction> { - StorageTransaction::new(self.transaction()) - } -} - -impl AsRef> for Database -where - Description: DatabaseDescription, -{ - fn as_ref(&self) -> &Database { - self - } -} - -impl AsMut> for Database -where - Description: DatabaseDescription, -{ - fn as_mut(&mut self) -> &mut Database { - self + .iter_store(column, prefix, start, direction) } } @@ -419,7 +214,7 @@ where } #[cfg(feature = "rocksdb")] { - Self::rocksdb() + Self::rocksdb_temp() } } } @@ -467,13 +262,13 @@ impl ChainStateDb for Database { fn iter_contract_state_configs( &self, - ) -> BoxedIter> { + ) -> BoxedIter> { Self::iter_contract_state_configs(self).into_boxed() } fn iter_contract_balance_configs( &self, - ) -> BoxedIter> { + ) -> BoxedIter> { Self::iter_contract_balance_configs(self).into_boxed() } @@ -492,11 +287,8 @@ impl AtomicView for Database { type Height = BlockHeight; - fn latest_height(&self) -> BlockHeight { - // TODO: The 
database should track the latest height inside of the database object - // instead of fetching it from the `FuelBlocks` table. As a temporary solution, - // fetch it from the table for now. - self.latest_height().unwrap_or_default() + fn latest_height(&self) -> Option { + *self.height.lock() } fn view_at(&self, _: &BlockHeight) -> StorageResult { @@ -515,11 +307,8 @@ impl AtomicView for Database { type Height = BlockHeight; - fn latest_height(&self) -> BlockHeight { - // TODO: The database should track the latest height inside of the database object - // instead of fetching it from the `FuelBlocks` table. As a temporary solution, - // fetch it from the table for now. - self.latest_height().unwrap_or_default() + fn latest_height(&self) -> Option { + *self.height.lock() } fn view_at(&self, _: &BlockHeight) -> StorageResult { @@ -537,20 +326,8 @@ impl AtomicView for Database { type View = Self; type Height = DaBlockHeight; - fn latest_height(&self) -> Self::Height { - #[cfg(feature = "relayer")] - { - use fuel_core_relayer::ports::RelayerDb; - // TODO: The database should track the latest da height inside of the database object - // instead of fetching it from the `RelayerMetadata` table. As a temporary solution, - // fetch it from the table for now. 
- // https://github.com/FuelLabs/fuel-core/issues/1589 - self.get_finalized_da_height().unwrap_or_default() - } - #[cfg(not(feature = "relayer"))] - { - DaBlockHeight(0) - } + fn latest_height(&self) -> Option { + *self.height.lock() } fn view_at(&self, _: &Self::Height) -> StorageResult { @@ -562,6 +339,176 @@ impl AtomicView for Database { } } +impl Modifiable for Database { + fn commit_changes(&mut self, changes: Changes) -> StorageResult<()> { + commit_changes_with_height_update(self, changes, |iter| { + iter.iter_all::(Some(IterDirection::Reverse)) + .map(|result| result.map(|(height, _)| height)) + .try_collect() + }) + } +} + +impl Modifiable for Database { + fn commit_changes(&mut self, changes: Changes) -> StorageResult<()> { + commit_changes_with_height_update(self, changes, |iter| { + iter.iter_all::(Some(IterDirection::Reverse)) + .map(|result| result.map(|(_, height)| height)) + .try_collect() + }) + } +} + +#[cfg(feature = "relayer")] +impl Modifiable for Database { + fn commit_changes(&mut self, changes: Changes) -> StorageResult<()> { + commit_changes_with_height_update(self, changes, |iter| { + iter.iter_all::(Some( + IterDirection::Reverse, + )) + .map(|result| result.map(|(height, _)| height)) + .try_collect() + }) + } +} + +#[cfg(not(feature = "relayer"))] +impl Modifiable for Database { + fn commit_changes(&mut self, changes: Changes) -> StorageResult<()> { + commit_changes_with_height_update(self, changes, |_| Ok(vec![])) + } +} + +trait DatabaseHeight: Sized { + fn as_u64(&self) -> u64; + + fn advance_height(&self) -> Option; +} + +impl DatabaseHeight for BlockHeight { + fn as_u64(&self) -> u64 { + let height: u32 = (*self).into(); + height as u64 + } + + fn advance_height(&self) -> Option { + self.succ() + } +} + +impl DatabaseHeight for DaBlockHeight { + fn as_u64(&self) -> u64 { + self.0 + } + + fn advance_height(&self) -> Option { + self.0.checked_add(1).map(Into::into) + } +} + +fn commit_changes_with_height_update( + database: &mut 
Database, + changes: Changes, + heights_lookup: impl Fn( + &ChangesIterator, + ) -> StorageResult>, +) -> StorageResult<()> +where + Description: DatabaseDescription, + Description::Height: Debug + PartialOrd + DatabaseHeight, + for<'a> StorageTransaction<&'a &'a mut Database>: + StorageMutate, Error = StorageError>, +{ + // Gets the all new heights from the `changes` + let iterator = ChangesIterator::::new(&changes); + let new_heights = heights_lookup(&iterator)?; + + // Changes for each block should be committed separately. + // If we have more than one height, it means we are mixing commits + // for several heights in one batch - return error in this case. + if new_heights.len() > 1 { + return Err(DatabaseError::MultipleHeightsInCommit { + heights: new_heights.iter().map(DatabaseHeight::as_u64).collect(), + } + .into()); + } + + let new_height = new_heights.into_iter().last(); + let prev_height = *database.height.lock(); + + match (prev_height, new_height) { + (None, None) => { + // We are inside the regenesis process if the old and new heights are not set. + // In this case, we continue to commit until we discover a new height. + // This height will be the start of the database. + } + (Some(prev_height), Some(new_height)) => { + // Each new commit should be linked to the previous commit to create a monotonically growing database. + + let next_expected_height = prev_height + .advance_height() + .ok_or(DatabaseError::FailedToAdvanceHeight)?; + + // TODO: After https://github.com/FuelLabs/fuel-core/issues/451 + // we can replace `next_expected_height > new_height` with `next_expected_height != new_height`. + if next_expected_height > new_height { + return Err(DatabaseError::HeightsAreNotLinked { + prev_height: prev_height.as_u64(), + new_height: new_height.as_u64(), + } + .into()); + } + } + (None, Some(_)) => { + // The new height is finally found; starting at this point, + // all next commits should be linked(the height should increase each time by one). 
+ } + (Some(prev_height), None) => { + // In production, we shouldn't have cases where we call `commit_chagnes` with intermediate changes. + // The commit always should contain all data for the corresponding height. + return Err(DatabaseError::NewHeightIsNotSet { + prev_height: prev_height.as_u64(), + } + .into()); + } + }; + + let updated_changes = if let Some(new_height) = new_height { + // We want to update the metadata table to include a new height. + // For that, we are building a new storage transaction around `changes`. + // Modifying this transaction will include all required updates into the `changes`. + let mut transaction = StorageTransaction::transaction( + &database, + ConflictPolicy::Overwrite, + changes, + ); + transaction + .storage_as_mut::>() + .insert( + &(), + &DatabaseMetadata::V1 { + version: Description::version(), + height: new_height, + }, + )?; + + transaction.into_changes() + } else { + changes + }; + + let mut guard = database.height.lock(); + database + .data + .as_ref() + .commit_changes(new_height, updated_changes)?; + + // Update the block height + *guard = new_height; + + Ok(()) +} + #[cfg(feature = "rocksdb")] pub fn convert_to_rocksdb_direction(direction: IterDirection) -> rocksdb::Direction { match direction { @@ -572,11 +519,14 @@ pub fn convert_to_rocksdb_direction(direction: IterDirection) -> rocksdb::Direct #[cfg(test)] mod tests { - use crate::database::database_description::{ - off_chain::OffChain, - on_chain::OnChain, - relayer::Relayer, - DatabaseDescription, + use super::*; + use crate::database::{ + database_description::DatabaseDescription, + Database, + }; + use fuel_core_storage::{ + tables::FuelBlocks, + StorageAsMut, }; fn column_keys_not_exceed_count() @@ -591,18 +541,491 @@ mod tests { } } - #[test] - fn column_keys_not_exceed_count_test_on_chain() { - column_keys_not_exceed_count::(); + mod on_chain { + use super::*; + use crate::database::{ + database_description::on_chain::OnChain, + DatabaseHeight, + }; + use 
fuel_core_storage::{ + tables::Coins, + transactional::WriteTransaction, + }; + use fuel_core_types::{ + blockchain::block::CompressedBlock, + entities::coins::coin::CompressedCoin, + fuel_tx::UtxoId, + }; + + #[test] + fn column_keys_not_exceed_count_test() { + column_keys_not_exceed_count::(); + } + + #[test] + fn database_advances_with_a_new_block() { + // Given + let mut database = Database::::default(); + assert_eq!(database.latest_height().unwrap(), None); + + // When + let advanced_height = 1.into(); + database + .storage_as_mut::() + .insert(&advanced_height, &CompressedBlock::default()) + .unwrap(); + + // Then + assert_eq!(database.latest_height().unwrap(), Some(advanced_height)); + } + + #[test] + fn database_not_advances_without_block() { + // Given + let mut database = Database::::default(); + assert_eq!(database.latest_height().unwrap(), None); + + // When + database + .storage_as_mut::() + .insert(&UtxoId::default(), &CompressedCoin::default()) + .unwrap(); + + // Then + assert_eq!(database.latest_height().unwrap(), None); + } + + #[test] + fn database_advances_with_linked_blocks() { + // Given + let mut database = Database::::default(); + let starting_height = 1.into(); + database + .storage_as_mut::() + .insert(&starting_height, &CompressedBlock::default()) + .unwrap(); + assert_eq!(database.latest_height().unwrap(), Some(starting_height)); + + // When + let next_height = starting_height.advance_height().unwrap(); + database + .storage_as_mut::() + .insert(&next_height, &CompressedBlock::default()) + .unwrap(); + + // Then + assert_eq!(database.latest_height().unwrap(), Some(next_height)); + } + + #[test] + fn database_fails_with_unlinked_blocks() { + // Given + let mut database = Database::::default(); + let starting_height = 1.into(); + database + .storage_as_mut::() + .insert(&starting_height, &CompressedBlock::default()) + .unwrap(); + + // When + let prev_height = 0.into(); + let result = database + .storage_as_mut::() + .insert(&prev_height, 
&CompressedBlock::default()); + + // Then + assert!(result.is_err()); + assert_eq!( + result.unwrap_err().to_string(), + StorageError::from(DatabaseError::HeightsAreNotLinked { + prev_height: 1, + new_height: 0 + }) + .to_string() + ); + } + + #[test] + fn database_fails_with_non_advancing_commit() { + // Given + let mut database = Database::::default(); + let starting_height = 1.into(); + database + .storage_as_mut::() + .insert(&starting_height, &CompressedBlock::default()) + .unwrap(); + + // When + let result = database + .storage_as_mut::() + .insert(&UtxoId::default(), &CompressedCoin::default()); + + // Then + assert!(result.is_err()); + assert_eq!( + result.unwrap_err().to_string(), + StorageError::from(DatabaseError::NewHeightIsNotSet { prev_height: 1 }) + .to_string() + ); + } + + #[test] + fn database_fails_when_commit_with_several_blocks() { + let mut database = Database::::default(); + let starting_height = 1.into(); + database + .storage_as_mut::() + .insert(&starting_height, &CompressedBlock::default()) + .unwrap(); + + // Given + let mut transaction = database.write_transaction(); + let next_height = starting_height.advance_height().unwrap(); + let next_next_height = next_height.advance_height().unwrap(); + transaction + .storage_as_mut::() + .insert(&next_height, &CompressedBlock::default()) + .unwrap(); + transaction + .storage_as_mut::() + .insert(&next_next_height, &CompressedBlock::default()) + .unwrap(); + + // When + let result = transaction.commit(); + + // Then + assert!(result.is_err()); + assert_eq!( + result.unwrap_err().to_string(), + StorageError::from(DatabaseError::MultipleHeightsInCommit { + heights: vec![3, 2] + }) + .to_string() + ); + } } - #[test] - fn column_keys_not_exceed_count_test_off_chain() { - column_keys_not_exceed_count::(); + mod off_chain { + use super::*; + use crate::{ + database::{ + database_description::off_chain::OffChain, + DatabaseHeight, + }, + fuel_core_graphql_api::storage::messages::OwnedMessageKey, + 
graphql_api::storage::messages::OwnedMessageIds, + }; + use fuel_core_storage::transactional::WriteTransaction; + + #[test] + fn column_keys_not_exceed_count_test() { + column_keys_not_exceed_count::(); + } + + #[test] + fn database_advances_with_a_new_block() { + // Given + let mut database = Database::::default(); + assert_eq!(database.latest_height().unwrap(), None); + + // When + let advanced_height = 1.into(); + database + .storage_as_mut::() + .insert(&Default::default(), &advanced_height) + .unwrap(); + + // Then + assert_eq!(database.latest_height().unwrap(), Some(advanced_height)); + } + + #[test] + fn database_not_advances_without_block() { + // Given + let mut database = Database::::default(); + assert_eq!(database.latest_height().unwrap(), None); + + // When + database + .storage_as_mut::() + .insert(&OwnedMessageKey::default(), &()) + .unwrap(); + + // Then + assert_eq!(database.latest_height().unwrap(), None); + } + + #[test] + fn database_advances_with_linked_blocks() { + // Given + let mut database = Database::::default(); + let starting_height = 1.into(); + database + .storage_as_mut::() + .insert(&Default::default(), &starting_height) + .unwrap(); + assert_eq!(database.latest_height().unwrap(), Some(starting_height)); + + // When + let next_height = starting_height.advance_height().unwrap(); + database + .storage_as_mut::() + .insert(&Default::default(), &next_height) + .unwrap(); + + // Then + assert_eq!(database.latest_height().unwrap(), Some(next_height)); + } + + #[test] + fn database_fails_with_unlinked_blocks() { + // Given + let mut database = Database::::default(); + let starting_height = 1.into(); + database + .storage_as_mut::() + .insert(&Default::default(), &starting_height) + .unwrap(); + + // When + let prev_height = 0.into(); + let result = database + .storage_as_mut::() + .insert(&Default::default(), &prev_height); + + // Then + assert!(result.is_err()); + assert_eq!( + result.unwrap_err().to_string(), + 
StorageError::from(DatabaseError::HeightsAreNotLinked { + prev_height: 1, + new_height: 0 + }) + .to_string() + ); + } + + #[test] + fn database_fails_with_non_advancing_commit() { + // Given + let mut database = Database::::default(); + let starting_height = 1.into(); + database + .storage_as_mut::() + .insert(&Default::default(), &starting_height) + .unwrap(); + + // When + let result = database + .storage_as_mut::() + .insert(&OwnedMessageKey::default(), &()); + + // Then + assert!(result.is_err()); + assert_eq!( + result.unwrap_err().to_string(), + StorageError::from(DatabaseError::NewHeightIsNotSet { prev_height: 1 }) + .to_string() + ); + } + + #[test] + fn database_fails_when_commit_with_several_blocks() { + let mut database = Database::::default(); + let starting_height = 1.into(); + database + .storage_as_mut::() + .insert(&Default::default(), &starting_height) + .unwrap(); + + // Given + let mut transaction = database.write_transaction(); + let next_height = starting_height.advance_height().unwrap(); + let next_next_height = next_height.advance_height().unwrap(); + transaction + .storage_as_mut::() + .insert(&[1; 32].into(), &next_height) + .unwrap(); + transaction + .storage_as_mut::() + .insert(&[2; 32].into(), &next_next_height) + .unwrap(); + + // When + let result = transaction.commit(); + + // Then + assert!(result.is_err()); + assert_eq!( + result.unwrap_err().to_string(), + StorageError::from(DatabaseError::MultipleHeightsInCommit { + heights: vec![3, 2] + }) + .to_string() + ); + } } - #[test] - fn column_keys_not_exceed_count_test_relayer() { - column_keys_not_exceed_count::(); + #[cfg(feature = "relayer")] + mod relayer { + use super::*; + use crate::database::{ + database_description::relayer::Relayer, + DatabaseHeight, + }; + use fuel_core_relayer::storage::{ + DaHeightTable, + EventsHistory, + }; + use fuel_core_storage::transactional::WriteTransaction; + + #[test] + fn column_keys_not_exceed_count_test() { + 
column_keys_not_exceed_count::(); + } + + #[test] + fn database_advances_with_a_new_block() { + // Given + let mut database = Database::::default(); + assert_eq!(database.latest_height().unwrap(), None); + + // When + let advanced_height = 1u64.into(); + database + .storage_as_mut::() + .insert(&advanced_height, &[]) + .unwrap(); + + // Then + assert_eq!(database.latest_height().unwrap(), Some(advanced_height)); + } + + #[test] + fn database_not_advances_without_block() { + // Given + let mut database = Database::::default(); + assert_eq!(database.latest_height().unwrap(), None); + + // When + database + .storage_as_mut::() + .insert(&(), &DaBlockHeight::default()) + .unwrap(); + + // Then + assert_eq!(database.latest_height().unwrap(), None); + } + + #[test] + fn database_advances_with_linked_blocks() { + // Given + let mut database = Database::::default(); + let starting_height = 1u64.into(); + database + .storage_as_mut::() + .insert(&starting_height, &[]) + .unwrap(); + assert_eq!(database.latest_height().unwrap(), Some(starting_height)); + + // When + let next_height = starting_height.advance_height().unwrap(); + database + .storage_as_mut::() + .insert(&next_height, &[]) + .unwrap(); + + // Then + assert_eq!(database.latest_height().unwrap(), Some(next_height)); + } + + #[test] + fn database_fails_with_unlinked_blocks() { + // Given + let mut database = Database::::default(); + let starting_height = 1u64.into(); + database + .storage_as_mut::() + .insert(&starting_height, &[]) + .unwrap(); + + // When + let prev_height = 0u64.into(); + let result = database + .storage_as_mut::() + .insert(&prev_height, &[]); + + // Then + assert!(result.is_err()); + assert_eq!( + result.unwrap_err().to_string(), + StorageError::from(DatabaseError::HeightsAreNotLinked { + prev_height: 1, + new_height: 0 + }) + .to_string() + ); + } + + #[test] + fn database_fails_with_non_advancing_commit() { + // Given + let mut database = Database::::default(); + let starting_height = 
1u64.into(); + database + .storage_as_mut::() + .insert(&starting_height, &[]) + .unwrap(); + + // When + let result = database + .storage_as_mut::() + .insert(&(), &DaBlockHeight::default()); + + // Then + assert!(result.is_err()); + assert_eq!( + result.unwrap_err().to_string(), + StorageError::from(DatabaseError::NewHeightIsNotSet { prev_height: 1 }) + .to_string() + ); + } + + #[test] + fn database_fails_when_commit_with_several_blocks() { + let mut database = Database::::default(); + let starting_height = 1u64.into(); + database + .storage_as_mut::() + .insert(&starting_height, &[]) + .unwrap(); + + // Given + let mut transaction = database.write_transaction(); + let next_height = starting_height.advance_height().unwrap(); + let next_next_height = next_height.advance_height().unwrap(); + transaction + .storage_as_mut::() + .insert(&next_height, &[]) + .unwrap(); + transaction + .storage_as_mut::() + .insert(&next_next_height, &[]) + .unwrap(); + + // When + let result = transaction.commit(); + + // Then + assert!(result.is_err()); + assert_eq!( + result.unwrap_err().to_string(), + StorageError::from(DatabaseError::MultipleHeightsInCommit { + heights: vec![3, 2] + }) + .to_string() + ); + } } } diff --git a/crates/fuel-core/src/database/balances.rs b/crates/fuel-core/src/database/balances.rs index afa097613a6..c40a0522408 100644 --- a/crates/fuel-core/src/database/balances.rs +++ b/crates/fuel-core/src/database/balances.rs @@ -1,4 +1,3 @@ -use crate::database::Database; use fuel_core_chain_config::ContractBalanceConfig; use fuel_core_storage::{ tables::{ @@ -9,6 +8,7 @@ use fuel_core_storage::{ Error as StorageError, StorageAsRef, StorageBatchMutate, + StorageInspect, }; use fuel_core_types::{ fuel_asm::Word, @@ -19,16 +19,35 @@ use fuel_core_types::{ }; use itertools::Itertools; -impl Database { +pub trait BalancesInitializer { /// Initialize the balances of the contract from the all leafs. /// This method is more performant than inserting balances one by one. 
- pub fn init_contract_balances( + fn init_contract_balances( &mut self, contract_id: &ContractId, balances: S, ) -> Result<(), StorageError> where - S: Iterator, + S: Iterator; + + fn update_contract_balances( + &mut self, + balances: impl IntoIterator, + ) -> Result<(), StorageError>; +} + +impl BalancesInitializer for S +where + S: StorageInspect, + S: StorageBatchMutate, +{ + fn init_contract_balances( + &mut self, + contract_id: &ContractId, + balances: I, + ) -> Result<(), StorageError> + where + I: Iterator, { let balances = balances .map(|(asset, balance)| { @@ -37,12 +56,12 @@ impl Database { .collect_vec(); #[allow(clippy::map_identity)] <_ as StorageBatchMutate>::init_storage( - &mut self.data, + self, &mut balances.iter().map(|(key, value)| (key, value)), ) } - pub fn update_contract_balances( + fn update_contract_balances( &mut self, balances: impl IntoIterator, ) -> Result<(), StorageError> { @@ -51,8 +70,30 @@ impl Database { .group_by(|s| s.contract_id) .into_iter() .try_for_each(|(contract_id, entries)| { - if self.assets_present(&contract_id)? { - self.db_insert_contract_balances(entries.into_iter().collect_vec()) + if self + .storage::() + .get(&contract_id)? 
+ .is_some() + { + let balance_entries = entries + .into_iter() + .map(|balance_entry| { + let db_key = ContractsAssetKey::new( + &balance_entry.contract_id, + &balance_entry.asset_id, + ); + (db_key, balance_entry.amount) + }) + .collect_vec(); + + #[allow(clippy::map_identity)] + let balance_entries_iter = + balance_entries.iter().map(|(key, value)| (key, value)); + + <_ as StorageBatchMutate>::insert_batch( + self, + balance_entries_iter, + ) } else { self.init_contract_balances( &contract_id, @@ -63,40 +104,6 @@ impl Database { Ok(()) } - - fn db_insert_contract_balances( - &mut self, - balances: impl IntoIterator, - ) -> Result<(), StorageError> { - let balance_entries = balances - .into_iter() - .map(|balance_entry| { - let db_key = ContractsAssetKey::new( - &balance_entry.contract_id, - &balance_entry.asset_id, - ); - (db_key, balance_entry.amount) - }) - .collect_vec(); - - #[allow(clippy::map_identity)] - let balance_entries_iter = - balance_entries.iter().map(|(key, value)| (key, value)); - - <_ as StorageBatchMutate>::insert_batch( - &mut self.data, - balance_entries_iter, - )?; - - Ok(()) - } - - fn assets_present(&mut self, key: &ContractId) -> Result { - Ok(self - .storage::() - .get(key)? 
- .is_some()) - } } #[cfg(test)] @@ -107,9 +114,12 @@ mod tests { }; use super::*; - use crate::database::database_description::on_chain::OnChain; + use crate::database::{ + database_description::on_chain::OnChain, + Database, + }; use fuel_core_storage::{ - tables::merkle::ContractsAssetsMerkleMetadata, + transactional::IntoTransaction, StorageAsMut, }; use fuel_core_types::fuel_types::AssetId; @@ -141,7 +151,7 @@ mod tests { let data = core::iter::from_fn(gen).take(5_000).collect::>(); let contract_id = ContractId::from([1u8; 32]); - let init_database = &mut Database::default(); + let mut init_database = Database::::default().into_transaction(); init_database .init_contract_balances(&contract_id, data.clone().into_iter()) @@ -151,10 +161,10 @@ mod tests { .root(&contract_id) .expect("Should get root"); - let seq_database = &mut Database::::default(); + let mut seq_database = Database::::default().into_transaction(); for (asset, value) in data.iter() { seq_database - .storage::() + .storage_as_mut::() .insert(&ContractsAssetKey::new(&contract_id, asset), value) .expect("Should insert a state"); } @@ -191,6 +201,10 @@ mod tests { } mod update_contract_balance { + use fuel_core_storage::{ + iter::IteratorOverTable, + transactional::WriteTransaction, + }; use fuel_core_types::{ fuel_merkle::sparse::{ self, @@ -216,14 +230,16 @@ mod tests { .take(10) .collect_vec(); - let database = &mut Database::default(); + let mut database = Database::::default(); + let mut transaction = database.write_transaction(); // when for group in &balance_groups { - database + transaction .update_contract_balances(group.clone()) .expect("Should insert contract balances"); } + transaction.commit().unwrap(); // then let balances_in_db: Vec<_> = database @@ -273,7 +289,7 @@ mod tests { .take(100) .collect_vec(); - let database = &mut Database::default(); + let mut database = Database::::default().into_transaction(); // when database.update_contract_balances(balances.clone()).unwrap(); @@ 
-309,7 +325,7 @@ mod tests { }) .collect_vec(); - let database = &mut Database::default(); + let mut database = Database::::default().into_transaction(); // when let balances = balance_per_contract.clone().into_iter().flatten(); @@ -358,7 +374,7 @@ mod tests { }) .collect_vec(); - let database = &mut Database::default(); + let mut database = Database::::default().into_transaction(); // when use itertools::Itertools; diff --git a/crates/fuel-core/src/database/block.rs b/crates/fuel-core/src/database/block.rs index 9927af08516..05bdf825a9f 100644 --- a/crates/fuel-core/src/database/block.rs +++ b/crates/fuel-core/src/database/block.rs @@ -1,18 +1,15 @@ use crate::{ database::{ - database_description::{ - off_chain::OffChain, - on_chain::OnChain, - DatabaseDescription, - DatabaseMetadata, - }, - metadata::MetadataTable, + database_description::off_chain::OffChain, Database, }, fuel_core_graphql_api::storage::blocks::FuelBlockIdsToHeights, }; use fuel_core_storage::{ - iter::IterDirection, + iter::{ + IterDirection, + IteratorOverTable, + }, not_found, tables::{ merkle::{ @@ -24,13 +21,8 @@ use fuel_core_storage::{ Transactions, }, Error as StorageError, - Mappable, - MerkleRootStorage, Result as StorageResult, - StorageAsMut, StorageAsRef, - StorageInspect, - StorageMutate, }; use fuel_core_types::{ blockchain::{ @@ -47,57 +39,6 @@ use fuel_core_types::{ use itertools::Itertools; use std::borrow::Cow; -impl StorageInspect for Database { - type Error = StorageError; - - fn get( - &self, - key: &::Key, - ) -> Result::OwnedValue>>, Self::Error> { - self.data.storage::().get(key) - } - - fn contains_key( - &self, - key: &::Key, - ) -> Result { - self.data.storage::().contains_key(key) - } -} - -impl StorageMutate for Database { - fn insert( - &mut self, - key: &::Key, - value: &::Value, - ) -> Result::OwnedValue>, Self::Error> { - let prev = self - .data - .storage_as_mut::() - .insert(key, value)?; - - // TODO: Temporary solution to store the block height in the database 
manually here. - // Later it will be controlled by the `commit_changes` function on the `Database` side. - // https://github.com/FuelLabs/fuel-core/issues/1589 - self.storage::>().insert( - &(), - &DatabaseMetadata::V1 { - version: OnChain::version(), - height: *key, - }, - )?; - - Ok(prev) - } - - fn remove( - &mut self, - key: &::Key, - ) -> Result::OwnedValue>, Self::Error> { - self.data.storage_as_mut::().remove(key) - } -} - impl Database { pub fn get_block_height(&self, id: &BlockId) -> StorageResult> { self.storage::() @@ -147,15 +88,6 @@ impl Database { } } -impl MerkleRootStorage for Database { - fn root( - &self, - key: &BlockHeight, - ) -> Result { - self.data.storage_as_ref::().root(key) - } -} - impl Database { pub fn block_history_proof( &self, @@ -202,6 +134,7 @@ impl Database { #[cfg(test)] mod tests { use super::*; + use fuel_core_storage::StorageMutate; use fuel_core_types::{ blockchain::{ block::PartialFuelBlock, diff --git a/crates/fuel-core/src/database/coin.rs b/crates/fuel-core/src/database/coin.rs index a54520e4828..16614512d5b 100644 --- a/crates/fuel-core/src/database/coin.rs +++ b/crates/fuel-core/src/database/coin.rs @@ -10,7 +10,10 @@ use crate::{ }; use fuel_core_chain_config::CoinConfig; use fuel_core_storage::{ - iter::IterDirection, + iter::{ + IterDirection, + IteratorOverTable, + }, not_found, tables::Coins, Result as StorageResult, diff --git a/crates/fuel-core/src/database/contracts.rs b/crates/fuel-core/src/database/contracts.rs index 3e4329308e1..3278bbdcc27 100644 --- a/crates/fuel-core/src/database/contracts.rs +++ b/crates/fuel-core/src/database/contracts.rs @@ -5,7 +5,10 @@ use fuel_core_chain_config::{ ContractStateConfig, }; use fuel_core_storage::{ - iter::IterDirection, + iter::{ + IterDirection, + IteratorOverTable, + }, not_found, tables::{ ContractsAssets, diff --git a/crates/fuel-core/src/database/database_description.rs b/crates/fuel-core/src/database/database_description.rs index 8f2aefec465..f62c6a11695 100644 
--- a/crates/fuel-core/src/database/database_description.rs +++ b/crates/fuel-core/src/database/database_description.rs @@ -10,7 +10,7 @@ pub trait DatabaseDescription: 'static + Clone + Debug + Send + Sync { /// The type of the column used by the database. type Column: StorageColumn + strum::EnumCount + enum_iterator::Sequence; /// The type of the height of the database used to track commits. - type Height: Copy; + type Height: Default + Copy; /// Returns the expected version of the database. fn version() -> u32; diff --git a/crates/fuel-core/src/database/genesis_progress.rs b/crates/fuel-core/src/database/genesis_progress.rs index af9c14ba613..0ce25e1d8d9 100644 --- a/crates/fuel-core/src/database/genesis_progress.rs +++ b/crates/fuel-core/src/database/genesis_progress.rs @@ -5,17 +5,20 @@ use fuel_core_storage::{ blueprint::plain::Plain, codec::postcard::Postcard, column::Column, + iter::IteratorOverTable, structured_storage::TableWithBlueprint, tables::{ Coins, ContractsLatestUtxo, Messages, }, + Error as StorageError, Mappable, MerkleRoot, Result, StorageAsMut, StorageInspect, + StorageMutate, }; use fuel_core_types::fuel_merkle::binary::root_calculator::MerkleRootCalculator; use serde::{ @@ -50,16 +53,36 @@ impl TableWithBlueprint for GenesisMetadata { } } -impl Database { - pub fn genesis_progress(&self, key: &GenesisResource) -> Option { +pub trait GenesisProgressInspect { + fn genesis_progress(&self, key: &GenesisResource) -> Option; +} + +pub trait GenesisProgressMutate { + fn update_genesis_progress( + &mut self, + key: GenesisResource, + processed_group: usize, + ) -> Result<()>; +} + +impl GenesisProgressInspect for S +where + S: StorageInspect, +{ + fn genesis_progress(&self, key: &GenesisResource) -> Option { Some( StorageInspect::::get(self, key) - .unwrap()? + .ok()?? 
.into_owned(), ) } +} - pub fn update_genesis_progress( +impl GenesisProgressMutate for S +where + S: StorageMutate, +{ + fn update_genesis_progress( &mut self, key: GenesisResource, processed_group: usize, @@ -69,7 +92,9 @@ impl Database { Ok(()) } +} +impl Database { pub fn genesis_coins_root(&self) -> Result { let coins = self.iter_all::(None); diff --git a/crates/fuel-core/src/database/message.rs b/crates/fuel-core/src/database/message.rs index 826704d8131..64ddda0569d 100644 --- a/crates/fuel-core/src/database/message.rs +++ b/crates/fuel-core/src/database/message.rs @@ -10,7 +10,10 @@ use crate::{ }; use fuel_core_chain_config::MessageConfig; use fuel_core_storage::{ - iter::IterDirection, + iter::{ + IterDirection, + IteratorOverTable, + }, tables::{ Messages, SpentMessages, diff --git a/crates/fuel-core/src/database/metadata.rs b/crates/fuel-core/src/database/metadata.rs index ef9a70c130d..d9f8672b073 100644 --- a/crates/fuel-core/src/database/metadata.rs +++ b/crates/fuel-core/src/database/metadata.rs @@ -1,29 +1,21 @@ -use crate::{ - database::{ - database_description::{ - DatabaseDescription, - DatabaseMetadata, - }, - storage::UseStructuredImplementation, - Database, - Error as DatabaseError, +use crate::database::{ + database_description::{ + DatabaseDescription, + DatabaseMetadata, }, - state::DataSource, + Database, + Error as DatabaseError, }; use fuel_core_storage::{ blueprint::plain::Plain, codec::postcard::Postcard, - not_found, - structured_storage::{ - StructuredStorage, - TableWithBlueprint, - }, + structured_storage::TableWithBlueprint, Error as StorageError, Mappable, Result as StorageResult, StorageAsRef, + StorageInspect, }; -use fuel_core_types::fuel_merkle::storage::StorageMutate; /// The table that stores all metadata about the database. 
pub struct MetadataTable(core::marker::PhantomData); @@ -50,43 +42,17 @@ where } } -impl UseStructuredImplementation> - for StructuredStorage> -where - Description: DatabaseDescription, -{ -} - impl Database where Description: DatabaseDescription, - Self: StorageMutate, Error = StorageError>, + Self: StorageInspect, Error = StorageError>, { - /// Ensures the database is initialized and that the database version is correct - pub fn init(&mut self, height: &Description::Height) -> StorageResult<()> { - use fuel_core_storage::StorageAsMut; - - if !self - .storage::>() - .contains_key(&())? - { - let old = self.storage::>().insert( - &(), - &DatabaseMetadata::V1 { - version: Description::version(), - height: *height, - }, - )?; - - if old.is_some() { - return Err(DatabaseError::ChainAlreadyInitialized.into()) - } - } - - let metadata = self - .storage::>() - .get(&())? - .expect("We checked its existence above"); + /// Ensures the version is correct. + pub fn check_version(&self) -> StorageResult<()> { + let Some(metadata) = self.storage::>().get(&())? 
+ else { + return Ok(()); + }; if metadata.version() != Description::version() { return Err(DatabaseError::InvalidDatabaseVersion { @@ -99,11 +65,11 @@ where Ok(()) } - pub fn latest_height(&self) -> StorageResult { + pub fn latest_height(&self) -> StorageResult> { let metadata = self.storage::>().get(&())?; - let metadata = metadata.ok_or(not_found!(MetadataTable))?; + let metadata = metadata.map(|metadata| *metadata.height()); - Ok(*metadata.height()) + Ok(metadata) } } diff --git a/crates/fuel-core/src/database/sealed_block.rs b/crates/fuel-core/src/database/sealed_block.rs index c7fec5f5d3e..f7f8cdbd094 100644 --- a/crates/fuel-core/src/database/sealed_block.rs +++ b/crates/fuel-core/src/database/sealed_block.rs @@ -1,6 +1,9 @@ use crate::database::Database; use fuel_core_storage::{ - iter::IterDirection, + iter::{ + IterDirection, + IteratorOverTable, + }, not_found, tables::{ FuelBlocks, diff --git a/crates/fuel-core/src/database/state.rs b/crates/fuel-core/src/database/state.rs index 5a19fbc4c20..7de00a95029 100644 --- a/crates/fuel-core/src/database/state.rs +++ b/crates/fuel-core/src/database/state.rs @@ -1,4 +1,3 @@ -use crate::database::Database; use fuel_core_chain_config::ContractStateConfig; use fuel_core_storage::{ tables::{ @@ -9,6 +8,7 @@ use fuel_core_storage::{ Error as StorageError, StorageAsRef, StorageBatchMutate, + StorageInspect, }; use fuel_core_types::fuel_types::{ Bytes32, @@ -16,23 +16,42 @@ use fuel_core_types::fuel_types::{ }; use itertools::Itertools; -impl Database { +pub trait StateInitializer { /// Initialize the state of the contract from all leaves. /// This method is more performant than inserting state one by one. - pub fn init_contract_state( + fn init_contract_state( &mut self, contract_id: &ContractId, slots: S, ) -> Result<(), StorageError> where - S: Iterator)>, + S: Iterator)>; + + /// Updates the state of multiple contracts based on provided state slots. 
+ fn update_contract_states( + &mut self, + states: impl IntoIterator, + ) -> Result<(), StorageError>; +} + +impl StateInitializer for S +where + S: StorageInspect, + S: StorageBatchMutate, +{ + fn init_contract_state( + &mut self, + contract_id: &ContractId, + slots: I, + ) -> Result<(), StorageError> + where + I: Iterator)>, { let slots = slots .map(|(key, value)| (ContractsStateKey::new(contract_id, &key), value)) .collect_vec(); - #[allow(clippy::map_identity)] <_ as StorageBatchMutate>::init_storage( - &mut self.data, + self, &mut slots.iter().map(|(key, value)| (key, value.as_slice())), ) } @@ -52,7 +71,7 @@ impl Database { /// /// # Errors /// On any error while accessing the database. - pub fn update_contract_states( + fn update_contract_states( &mut self, states: impl IntoIterator, ) -> Result<(), StorageError> { @@ -61,8 +80,30 @@ impl Database { .group_by(|s| s.contract_id) .into_iter() .try_for_each(|(contract_id, entries)| { - if self.state_present(&contract_id)? { - self.db_insert_contract_states(entries.into_iter().collect_vec()) + if self + .storage::() + .get(&contract_id)? 
+ .is_some() + { + let state_entries = entries + .into_iter() + .map(|state_entry| { + let db_key = ContractsStateKey::new( + &state_entry.contract_id, + &state_entry.key, + ); + (db_key, state_entry.value) + }) + .collect_vec(); + + let state_entries_iter = state_entries + .iter() + .map(|(key, value)| (key, value.as_slice())); + + <_ as StorageBatchMutate>::insert_batch( + self, + state_entries_iter, + ) } else { self.init_contract_state( &contract_id, @@ -73,61 +114,21 @@ impl Database { Ok(()) } - - fn db_insert_contract_states( - &mut self, - states: impl IntoIterator, - ) -> Result<(), StorageError> { - let state_entries = states - .into_iter() - .map(|state_entry| { - let db_key = - ContractsStateKey::new(&state_entry.contract_id, &state_entry.key); - (db_key, state_entry.value) - }) - .collect_vec(); - - #[allow(clippy::map_identity)] - let state_entries_iter = state_entries - .iter() - .map(|(key, value)| (key, value.as_slice())); - - <_ as StorageBatchMutate>::insert_batch( - &mut self.data, - state_entries_iter, - )?; - - Ok(()) - } - - fn state_present(&mut self, key: &ContractId) -> Result { - Ok(self - .storage::() - .get(key)? 
- .is_some()) - } } #[cfg(test)] mod tests { - use std::{ - collections::HashSet, - iter::repeat_with, - }; - - use crate::database::database_description::on_chain::OnChain; - use super::*; + use crate::database::{ + database_description::on_chain::OnChain, + Database, + }; use fuel_core_storage::{ - tables::merkle::ContractsStateMerkleMetadata, + transactional::IntoTransaction, StorageAsMut, }; - use rand::{ - self, - rngs::StdRng, - Rng, - SeedableRng, - }; + use fuel_core_types::fuel_types::Bytes32; + use rand::Rng; fn random_bytes32(rng: &mut R) -> Bytes32 where @@ -154,7 +155,7 @@ mod tests { let data = core::iter::from_fn(gen).take(5_000).collect::>(); let contract_id = ContractId::from([1u8; 32]); - let init_database = &mut Database::::default(); + let mut init_database = Database::::default().into_transaction(); init_database .init_contract_state(&contract_id, data.clone().into_iter()) @@ -164,10 +165,10 @@ mod tests { .root(&contract_id) .expect("Should get root"); - let seq_database = &mut Database::::default(); + let mut seq_database = Database::::default().into_transaction(); for (key, value) in data.iter() { seq_database - .storage::() + .storage_as_mut::() .insert(&ContractsStateKey::new(&contract_id, key), value) .expect("Should insert a state"); } @@ -197,10 +198,17 @@ mod tests { } mod update_contract_state { + use core::iter::repeat_with; + use fuel_core_storage::iter::IteratorOverTable; use fuel_core_types::fuel_merkle::sparse::{ self, MerkleTreeKey, }; + use rand::{ + rngs::StdRng, + SeedableRng, + }; + use std::collections::HashSet; use super::*; #[test] @@ -364,7 +372,6 @@ mod tests { let database = &mut Database::::default(); // when - use itertools::Itertools; let contract_0_state = state_per_contract[0] .iter() .chunks(2) diff --git a/crates/fuel-core/src/database/statistic.rs b/crates/fuel-core/src/database/statistic.rs deleted file mode 100644 index 59a18021279..00000000000 --- a/crates/fuel-core/src/database/statistic.rs +++ /dev/null @@ 
-1,77 +0,0 @@ -use crate::{ - database::{ - database_description::off_chain::OffChain, - storage::UseStructuredImplementation, - Database, - }, - fuel_core_graphql_api, - state::DataSource, -}; -use fuel_core_storage::{ - blueprint::plain::Plain, - codec::postcard::Postcard, - structured_storage::{ - StructuredStorage, - TableWithBlueprint, - }, - Mappable, - Result as StorageResult, - StorageMutate, -}; - -/// The table that stores all statistic about blockchain. Each key is a string, while the value -/// depends on the context. -pub struct StatisticTable(core::marker::PhantomData); - -impl Mappable for StatisticTable -where - V: Clone, -{ - type Key = str; - type OwnedKey = String; - type Value = V; - type OwnedValue = V; -} - -impl TableWithBlueprint for StatisticTable -where - V: Clone, -{ - type Blueprint = Plain; - type Column = fuel_core_graphql_api::storage::Column; - - fn column() -> Self::Column { - Self::Column::Statistic - } -} - -impl UseStructuredImplementation> - for StructuredStorage> -where - V: Clone, -{ -} - -/// Tracks the total number of transactions written to the chain -/// It's useful for analyzing TPS or other metrics. -pub(crate) const TX_COUNT: &str = "total_tx_count"; - -impl Database { - pub fn increase_tx_count(&mut self, new_txs: u64) -> StorageResult { - use fuel_core_storage::StorageAsRef; - // TODO: how should tx count be initialized after regenesis? - let current_tx_count: u64 = self - .storage::>() - .get(TX_COUNT)? - .unwrap_or_default() - .into_owned(); - // Using saturating_add because this value doesn't significantly impact the correctness of execution. 
- let new_tx_count = current_tx_count.saturating_add(new_txs); - <_ as StorageMutate>>::insert( - &mut self.data, - TX_COUNT, - &new_tx_count, - )?; - Ok(new_tx_count) - } -} diff --git a/crates/fuel-core/src/database/storage.rs b/crates/fuel-core/src/database/storage.rs index 4f722e845c7..acabb92b5fc 100644 --- a/crates/fuel-core/src/database/storage.rs +++ b/crates/fuel-core/src/database/storage.rs @@ -1,174 +1,116 @@ -use crate::{ - database::{ - database_description::DatabaseDescription, - Database, - }, - fuel_core_graphql_api::storage::{ - blocks::FuelBlockIdsToHeights, - coins::OwnedCoins, - messages::OwnedMessageIds, - transactions::{ - OwnedTransactions, - TransactionStatuses, - }, - }, - state::DataSource, +use crate::database::{ + database_description::DatabaseDescription, + Database, }; use fuel_core_storage::{ structured_storage::StructuredStorage, - tables::{ - merkle::{ - ContractsAssetsMerkleData, - ContractsAssetsMerkleMetadata, - ContractsStateMerkleData, - ContractsStateMerkleMetadata, - FuelBlockMerkleData, - FuelBlockMerkleMetadata, - }, - Coins, - ContractsAssets, - ContractsInfo, - ContractsLatestUtxo, - ContractsRawCode, - ContractsState, - Messages, - ProcessedTransactions, - SealedBlockConsensus, - SpentMessages, - Transactions, - }, Error as StorageError, Mappable, MerkleRoot, MerkleRootStorage, Result as StorageResult, - StorageAsMut, StorageAsRef, - StorageBatchMutate, StorageInspect, - StorageMutate, StorageRead, StorageSize, - StorageWrite, }; use std::borrow::Cow; -use super::genesis_progress::GenesisMetadata; - -/// The trait allows selectively inheriting the implementation of storage traits from `StructuredStorage` -/// for the `Database`. Not all default implementations of the `StructuredStorage` are suitable -/// for the `Database`. Sometimes we want to override some of them and add a custom implementation -/// with additional logic. 
For example, we want to override the `StorageMutate` trait for the `Messages` -/// table to also track the owner of messages. -pub trait UseStructuredImplementation -where - M: Mappable, -{ -} - -/// The trait allows to implementation of `UseStructuredImplementation` for the `StructuredStorage` for multiple tables. -macro_rules! use_structured_implementation { - ($($m:ty),*) => { - $( - impl UseStructuredImplementation<$m> for StructuredStorage> - where - Description: DatabaseDescription, - {} - )* - }; -} - -use_structured_implementation!( - ContractsRawCode, - ContractsAssets, - ContractsState, - ContractsLatestUtxo, - ContractsInfo, - SpentMessages, - SealedBlockConsensus, - Transactions, - ProcessedTransactions, - ContractsStateMerkleMetadata, - ContractsStateMerkleData, - ContractsAssetsMerkleMetadata, - ContractsAssetsMerkleData, - Coins, - OwnedCoins, - Messages, - OwnedMessageIds, - OwnedTransactions, - TransactionStatuses, - FuelBlockIdsToHeights, - FuelBlockMerkleData, - FuelBlockMerkleMetadata, - GenesisMetadata -); - -#[cfg(feature = "relayer")] -use_structured_implementation!( - fuel_core_relayer::storage::DaHeightTable, - fuel_core_relayer::storage::EventsHistory -); +#[cfg(feature = "test-helpers")] +use fuel_core_storage::transactional::{ + ConflictPolicy, + Modifiable, + StorageTransaction, +}; +#[cfg(feature = "test-helpers")] +use fuel_core_storage::{ + StorageAsMut, + StorageBatchMutate, + StorageMutate, + StorageWrite, +}; impl StorageInspect for Database where Description: DatabaseDescription, M: Mappable, - StructuredStorage>: - StorageInspect + UseStructuredImplementation, + for<'a> StructuredStorage<&'a Self>: StorageInspect, { type Error = StorageError; fn get(&self, key: &M::Key) -> StorageResult>> { - self.data.storage::().get(key) + let storage = StructuredStorage::new(self); + let value = storage.storage::().get(key)?; + + if let Some(cow) = value { + Ok(Some(Cow::Owned(cow.into_owned()))) + } else { + Ok(None) + } } fn 
contains_key(&self, key: &M::Key) -> StorageResult { - self.data.storage::().contains_key(key) + StructuredStorage::new(self) + .storage::() + .contains_key(key) } } +#[cfg(feature = "test-helpers")] impl StorageMutate for Database where Description: DatabaseDescription, M: Mappable, - StructuredStorage>: - StorageMutate + UseStructuredImplementation, + for<'a> StructuredStorage<&'a Self>: StorageInspect, + for<'a> StorageTransaction<&'a Self>: StorageMutate, + Self: Modifiable, { fn insert( &mut self, key: &M::Key, value: &M::Value, ) -> StorageResult> { - self.data.storage_as_mut::().insert(key, value) + let mut transaction = StorageTransaction::transaction( + &*self, + ConflictPolicy::Overwrite, + Default::default(), + ); + let prev = transaction.storage_as_mut::().insert(key, value)?; + self.commit_changes(transaction.into_changes())?; + Ok(prev) } fn remove(&mut self, key: &M::Key) -> StorageResult> { - self.data.storage_as_mut::().remove(key) + let mut transaction = StorageTransaction::transaction( + &*self, + ConflictPolicy::Overwrite, + Default::default(), + ); + let prev = transaction.storage_as_mut::().remove(key)?; + self.commit_changes(transaction.into_changes())?; + Ok(prev) } } -impl MerkleRootStorage for Database +impl StorageSize for Database where Description: DatabaseDescription, M: Mappable, - StructuredStorage>: - MerkleRootStorage + UseStructuredImplementation, + for<'a> StructuredStorage<&'a Self>: StorageSize, { - fn root(&self, key: &Key) -> StorageResult { - self.data.storage::().root(key) + fn size_of_value(&self, key: &M::Key) -> StorageResult> { + <_ as StorageSize>::size_of_value(&StructuredStorage::new(self), key) } } -impl StorageSize for Database +impl MerkleRootStorage for Database where Description: DatabaseDescription, M: Mappable, - StructuredStorage>: - StorageSize + UseStructuredImplementation, + for<'a> StructuredStorage<&'a Self>: MerkleRootStorage, { - fn size_of_value(&self, key: &M::Key) -> StorageResult> { - <_ as 
StorageSize>::size_of_value(&self.data, key) + fn root(&self, key: &Key) -> StorageResult { + StructuredStorage::new(self).storage::().root(key) } } @@ -176,27 +118,35 @@ impl StorageRead for Database where Description: DatabaseDescription, M: Mappable, - StructuredStorage>: - StorageRead + UseStructuredImplementation, + for<'a> StructuredStorage<&'a Self>: StorageRead, { fn read(&self, key: &M::Key, buf: &mut [u8]) -> StorageResult> { - self.data.storage::().read(key, buf) + StructuredStorage::new(self).storage::().read(key, buf) } fn read_alloc(&self, key: &M::Key) -> StorageResult>> { - self.data.storage::().read_alloc(key) + StructuredStorage::new(self).storage::().read_alloc(key) } } +#[cfg(feature = "test-helpers")] impl StorageWrite for Database where Description: DatabaseDescription, M: Mappable, - StructuredStorage>: - StorageWrite + UseStructuredImplementation, + for<'a> StructuredStorage<&'a Self>: StorageInspect, + for<'a> StorageTransaction<&'a Self>: StorageWrite, + Self: Modifiable, { fn write(&mut self, key: &M::Key, buf: &[u8]) -> Result { - <_ as StorageWrite>::write(&mut self.data, key, buf) + let mut transaction = StorageTransaction::transaction( + &*self, + ConflictPolicy::Overwrite, + Default::default(), + ); + let prev = <_ as StorageWrite>::write(&mut transaction, key, buf)?; + self.commit_changes(transaction.into_changes())?; + Ok(prev) } fn replace( @@ -204,19 +154,36 @@ where key: &M::Key, buf: &[u8], ) -> Result<(usize, Option>), Self::Error> { - <_ as StorageWrite>::replace(&mut self.data, key, buf) + let mut transaction = StorageTransaction::transaction( + &*self, + ConflictPolicy::Overwrite, + Default::default(), + ); + let prev = <_ as StorageWrite>::replace(&mut transaction, key, buf)?; + self.commit_changes(transaction.into_changes())?; + Ok(prev) } fn take(&mut self, key: &M::Key) -> Result>, Self::Error> { - <_ as StorageWrite>::take(&mut self.data, key) + let mut transaction = StorageTransaction::transaction( + &*self, + 
ConflictPolicy::Overwrite, + Default::default(), + ); + let prev = <_ as StorageWrite>::take(&mut transaction, key)?; + self.commit_changes(transaction.into_changes())?; + Ok(prev) } } -impl StorageBatchMutate for Database +#[cfg(feature = "test-helpers")] +impl StorageBatchMutate for Database where + Description: DatabaseDescription, M: Mappable, - StructuredStorage: - StorageBatchMutate + UseStructuredImplementation, + for<'a> StructuredStorage<&'a Self>: StorageInspect, + for<'a> StorageTransaction<&'a Self>: StorageBatchMutate, + Self: Modifiable, { fn init_storage<'a, Iter>(&mut self, set: Iter) -> StorageResult<()> where @@ -224,7 +191,14 @@ where M::Key: 'a, M::Value: 'a, { - StorageBatchMutate::init_storage(&mut self.data, set) + let mut transaction = StorageTransaction::transaction( + &*self, + ConflictPolicy::Overwrite, + Default::default(), + ); + StorageBatchMutate::init_storage(&mut transaction, set)?; + self.commit_changes(transaction.into_changes())?; + Ok(()) } fn insert_batch<'a, Iter>(&mut self, set: Iter) -> StorageResult<()> @@ -233,7 +207,14 @@ where M::Key: 'a, M::Value: 'a, { - StorageBatchMutate::insert_batch(&mut self.data, set) + let mut transaction = StorageTransaction::transaction( + &*self, + ConflictPolicy::Overwrite, + Default::default(), + ); + StorageBatchMutate::insert_batch(&mut transaction, set)?; + self.commit_changes(transaction.into_changes())?; + Ok(()) } fn remove_batch<'a, Iter>(&mut self, set: Iter) -> StorageResult<()> @@ -241,6 +222,13 @@ where Iter: 'a + Iterator, M::Key: 'a, { - StorageBatchMutate::remove_batch(&mut self.data, set) + let mut transaction = StorageTransaction::transaction( + &*self, + ConflictPolicy::Overwrite, + Default::default(), + ); + StorageBatchMutate::remove_batch(&mut transaction, set)?; + self.commit_changes(transaction.into_changes())?; + Ok(()) } } diff --git a/crates/fuel-core/src/database/transaction.rs b/crates/fuel-core/src/database/transaction.rs deleted file mode 100644 index 
26fd488fd69..00000000000 --- a/crates/fuel-core/src/database/transaction.rs +++ /dev/null @@ -1,104 +0,0 @@ -use crate::{ - database::{ - database_description::DatabaseDescription, - Database, - }, - state::{ - in_memory::transaction::MemoryTransactionView, - DataSource, - }, -}; -use fuel_core_storage::{ - transactional::Transaction, - Result as StorageResult, -}; -use std::{ - fmt::Debug, - ops::{ - Deref, - DerefMut, - }, - sync::Arc, -}; - -#[derive(Clone, Debug)] -pub struct DatabaseTransaction -where - Description: DatabaseDescription, -{ - // The primary datastores - changes: Arc>, - // The inner db impl using these stores - database: Database, -} - -impl AsRef> for DatabaseTransaction -where - Description: DatabaseDescription, -{ - fn as_ref(&self) -> &Database { - &self.database - } -} - -impl AsMut> for DatabaseTransaction -where - Description: DatabaseDescription, -{ - fn as_mut(&mut self) -> &mut Database { - &mut self.database - } -} - -impl Deref for DatabaseTransaction -where - Description: DatabaseDescription, -{ - type Target = Database; - - fn deref(&self) -> &Self::Target { - &self.database - } -} - -impl DerefMut for DatabaseTransaction -where - Description: DatabaseDescription, -{ - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.database - } -} - -impl Default for DatabaseTransaction -where - Description: DatabaseDescription, -{ - fn default() -> Self { - Database::::default().transaction() - } -} - -impl Transaction> for DatabaseTransaction -where - Description: DatabaseDescription, -{ - fn commit(&mut self) -> StorageResult<()> { - // TODO: should commit be fallible if this api is meant to be atomic? 
- self.changes.commit() - } -} - -impl From<&Database> for DatabaseTransaction -where - Description: DatabaseDescription, -{ - fn from(source: &Database) -> Self { - let database: &DataSource = source.data.as_ref(); - let data = Arc::new(MemoryTransactionView::new(database.clone())); - Self { - changes: data.clone(), - database: Database::::new(data), - } - } -} diff --git a/crates/fuel-core/src/database/transactions.rs b/crates/fuel-core/src/database/transactions.rs index db5c82ee41e..20caa5f28b0 100644 --- a/crates/fuel-core/src/database/transactions.rs +++ b/crates/fuel-core/src/database/transactions.rs @@ -7,12 +7,14 @@ use crate::{ OwnedTransactionIndexCursor, OwnedTransactionIndexKey, OwnedTransactions, - TransactionIndex, TransactionStatuses, }, }; use fuel_core_storage::{ - iter::IterDirection, + iter::{ + IterDirection, + IteratorOverTable, + }, tables::Transactions, Result as StorageResult, }; @@ -23,10 +25,7 @@ use fuel_core_types::{ Transaction, TxPointer, }, - fuel_types::{ - Address, - BlockHeight, - }, + fuel_types::Address, services::txpool::TransactionStatus, }; @@ -65,29 +64,6 @@ impl Database { }) } - pub fn record_tx_id_owner( - &mut self, - owner: &Address, - block_height: BlockHeight, - tx_idx: TransactionIndex, - tx_id: &Bytes32, - ) -> StorageResult> { - use fuel_core_storage::StorageAsMut; - self.storage::().insert( - &OwnedTransactionIndexKey::new(owner, block_height, tx_idx), - tx_id, - ) - } - - pub fn update_tx_status( - &mut self, - id: &Bytes32, - status: TransactionStatus, - ) -> StorageResult> { - use fuel_core_storage::StorageAsMut; - self.storage::().insert(id, &status) - } - pub fn get_tx_status( &self, id: &Bytes32, diff --git a/crates/fuel-core/src/executor.rs b/crates/fuel-core/src/executor.rs index 989c0689605..bdceda7abc5 100644 --- a/crates/fuel-core/src/executor.rs +++ b/crates/fuel-core/src/executor.rs @@ -2,7 +2,8 @@ #[allow(clippy::cast_possible_truncation)] #[cfg(test)] mod tests { - use crate::database::Database; + 
use crate as fuel_core; + use fuel_core::database::Database; use fuel_core_executor::{ executor::{ block_component::PartialBlockComponent, @@ -134,10 +135,7 @@ mod tests { Rng, SeedableRng, }; - use std::{ - ops::DerefMut, - sync::Arc, - }; + use std::sync::Arc; #[derive(Clone, Debug)] struct DisabledRelayer; @@ -156,8 +154,8 @@ mod tests { type View = Self; type Height = DaBlockHeight; - fn latest_height(&self) -> Self::Height { - 0u64.into() + fn latest_height(&self) -> Option { + Some(0u64.into()) } fn view_at(&self, _: &Self::Height) -> StorageResult { @@ -302,8 +300,8 @@ mod tests { // Happy path test case that a produced block will also validate #[test] fn executor_validates_correctly_produced_block() { - let producer = create_executor(Default::default(), Default::default()); - let verifier = create_executor(Default::default(), Default::default()); + let mut producer = create_executor(Default::default(), Default::default()); + let mut verifier = create_executor(Default::default(), Default::default()); let block = test_block(1u32.into(), 0u64.into(), 10); let ExecutionResult { @@ -326,7 +324,7 @@ mod tests { // Ensure transaction commitment != default after execution #[test] fn executor_commits_transactions_to_block() { - let producer = create_executor(Default::default(), Default::default()); + let mut producer = create_executor(Default::default(), Default::default()); let block = test_block(1u32.into(), 0u64.into(), 10); let start_block = block.clone(); @@ -370,7 +368,10 @@ mod tests { mod coinbase { use super::*; - use fuel_core_storage::transactional::AtomicView; + use fuel_core_storage::transactional::{ + AtomicView, + Modifiable, + }; #[test] fn executor_commits_transactions_with_non_zero_coinbase_generation() { @@ -418,7 +419,7 @@ mod tests { .insert(&recipient, &[]) .expect("Should insert coinbase contract"); - let producer = create_executor(database.clone(), config); + let mut producer = create_executor(database.clone(), config); let 
expected_fee_amount_1 = TransactionFee::checked_from_tx( producer.config.consensus_parameters.gas_costs(), @@ -441,18 +442,23 @@ mod tests { }, changes, ) = producer - .execute_without_commit(ExecutionTypes::Production(Components { - header_to_produce: header, - transactions_source: OnceTransactionsSource::new(vec![ - script.into(), - invalid_duplicate_tx, - ]), - gas_price: price, - gas_limit: u64::MAX, - })) + .execute_without_commit_with_source(ExecutionTypes::Production( + Components { + header_to_produce: header, + transactions_source: OnceTransactionsSource::new(vec![ + script.into(), + invalid_duplicate_tx, + ]), + gas_price: price, + gas_limit: u64::MAX, + }, + )) .unwrap() .into(); - changes.commit().unwrap(); + producer + .database_view_provider + .commit_changes(changes) + .unwrap(); assert_eq!(skipped_transactions.len(), 1); assert_eq!(block.transactions().len(), 2); @@ -518,15 +524,22 @@ mod tests { }, changes, ) = producer - .execute_without_commit(ExecutionTypes::Production(Components { - header_to_produce: header, - transactions_source: OnceTransactionsSource::new(vec![script.into()]), - gas_price: price, - gas_limit: u64::MAX, - })) + .execute_without_commit_with_source(ExecutionTypes::Production( + Components { + header_to_produce: header, + transactions_source: OnceTransactionsSource::new(vec![ + script.into() + ]), + gas_price: price, + gas_limit: u64::MAX, + }, + )) .unwrap() .into(); - changes.commit().unwrap(); + producer + .database_view_provider + .commit_changes(changes) + .unwrap(); assert_eq!(skipped_transactions.len(), 0); assert_eq!(block.transactions().len(), 2); @@ -599,7 +612,7 @@ mod tests { let producer = create_executor(Default::default(), config); let result = producer - .execute_without_commit(ExecutionTypes::DryRun(Components { + .execute_without_commit_with_source(ExecutionTypes::DryRun(Components { header_to_produce: Default::default(), transactions_source: OnceTransactionsSource::new(vec![script.into()]), gas_price: 0, @@ 
-653,18 +666,22 @@ mod tests { skipped_transactions, .. } = producer - .execute_without_commit(ExecutionTypes::Production(Components { - header_to_produce: PartialBlockHeader::default(), - transactions_source: OnceTransactionsSource::new(vec![script.into()]), - gas_price: price, - gas_limit: u64::MAX, - })) + .execute_without_commit_with_source(ExecutionTypes::Production( + Components { + header_to_produce: PartialBlockHeader::default(), + transactions_source: OnceTransactionsSource::new(vec![ + script.into() + ]), + gas_price: price, + gas_limit: u64::MAX, + }, + )) .unwrap() .into_result(); assert!(skipped_transactions.is_empty()); let produced_txs = produced_block.transactions().to_vec(); - let validator = create_executor( + let mut validator = create_executor( Default::default(), // Use the same config as block producer producer.config.as_ref().clone(), @@ -733,7 +750,7 @@ mod tests { coinbase_recipient: config_coinbase, ..Default::default() }; - let producer = create_executor(Default::default(), config); + let mut producer = create_executor(Default::default(), config); let mut block = Block::default(); *block.transactions_mut() = vec![script.clone().into()]; @@ -786,7 +803,7 @@ mod tests { *block.transactions_mut() = vec![mint.into()]; block.header_mut().recalculate_metadata(); - let validator = create_executor( + let mut validator = create_executor( Default::default(), Config { utxo_validation_default: false, @@ -818,7 +835,7 @@ mod tests { *block.transactions_mut() = vec![mint.into(), tx]; block.header_mut().recalculate_metadata(); - let validator = create_executor(Default::default(), Default::default()); + let mut validator = create_executor(Default::default(), Default::default()); let validation_err = validator .execute_and_commit(ExecutionBlock::Validation(block), Default::default()) .expect_err("Expected error because coinbase if invalid"); @@ -832,7 +849,7 @@ mod tests { fn invalidate_block_missed_coinbase() { let block = Block::default(); - let 
validator = create_executor(Default::default(), Default::default()); + let mut validator = create_executor(Default::default(), Default::default()); let validation_err = validator .execute_and_commit(ExecutionBlock::Validation(block), Default::default()) .expect_err("Expected error because coinbase is missing"); @@ -854,7 +871,7 @@ mod tests { *block.transactions_mut() = vec![mint.into()]; block.header_mut().recalculate_metadata(); - let validator = create_executor(Default::default(), Default::default()); + let mut validator = create_executor(Default::default(), Default::default()); let validation_err = validator .execute_and_commit(ExecutionBlock::Validation(block), Default::default()) .expect_err("Expected error because coinbase if invalid"); @@ -884,7 +901,7 @@ mod tests { let mut config = Config::default(); config.consensus_parameters.base_asset_id = [1u8; 32].into(); - let validator = create_executor(Default::default(), config); + let mut validator = create_executor(Default::default(), config); let validation_err = validator .execute_and_commit(ExecutionBlock::Validation(block), Default::default()) .expect_err("Expected error because coinbase if invalid"); @@ -912,7 +929,7 @@ mod tests { *block.transactions_mut() = vec![mint.into()]; block.header_mut().recalculate_metadata(); - let validator = create_executor(Default::default(), Default::default()); + let mut validator = create_executor(Default::default(), Default::default()); let validation_err = validator .execute_and_commit(ExecutionBlock::Validation(block), Default::default()) .expect_err("Expected error because coinbase if invalid"); @@ -1163,9 +1180,9 @@ mod tests { let tx_id = tx.id(&ChainId::default()); - let producer = create_executor(Default::default(), Default::default()); + let mut producer = create_executor(Default::default(), Default::default()); - let verifier = create_executor(Default::default(), Default::default()); + let mut verifier = create_executor(Default::default(), Default::default()); 
let mut block = Block::default(); *block.transactions_mut() = vec![tx]; @@ -1206,9 +1223,9 @@ mod tests { .clone() .into(); - let producer = create_executor(Default::default(), Default::default()); + let mut producer = create_executor(Default::default(), Default::default()); - let verifier = create_executor(Default::default(), Default::default()); + let mut verifier = create_executor(Default::default(), Default::default()); let mut block = Block::default(); *block.transactions_mut() = vec![tx]; @@ -1236,7 +1253,7 @@ mod tests { let mut tx: Script = Script::default(); tx.policies_mut().set(PolicyType::MaxFee, Some(0)); - let executor = create_executor( + let mut executor = create_executor( Database::default(), Config { utxo_validation_default: true, @@ -1311,7 +1328,7 @@ mod tests { db.storage::() .insert(&second_input.utxo_id().unwrap().clone(), &second_coin) .unwrap(); - let executor = create_executor( + let mut executor = create_executor( db.clone(), Config { utxo_validation_default: true, @@ -1387,7 +1404,7 @@ mod tests { db.storage::() .insert(&input.utxo_id().unwrap().clone(), &coin) .unwrap(); - let executor = create_executor( + let mut executor = create_executor( db.clone(), Config { utxo_validation_default: true, @@ -1443,7 +1460,7 @@ mod tests { db.storage::() .insert(&input.utxo_id().unwrap().clone(), &coin) .unwrap(); - let executor = create_executor( + let mut executor = create_executor( db.clone(), Config { utxo_validation_default: true, @@ -1494,7 +1511,7 @@ mod tests { .finalize_as_transaction(); let (tx2, tx3) = setup_executable_script(); - let executor = create_executor(Default::default(), Default::default()); + let mut executor = create_executor(Default::default(), Default::default()); let block = PartialFuelBlock { header: Default::default(), @@ -1543,7 +1560,7 @@ mod tests { .into(); let mut db = &Database::default(); - let executor = create_executor(db.clone(), Default::default()); + let mut executor = create_executor(db.clone(), 
Default::default()); let block = PartialFuelBlock { header: Default::default(), @@ -1588,7 +1605,7 @@ mod tests { .into(); let db = &mut Database::default(); - let executor = create_executor( + let mut executor = create_executor( db.clone(), Config { utxo_validation_default: false, @@ -1645,7 +1662,7 @@ mod tests { .into(); let db = &mut Database::default(); - let executor = create_executor( + let mut executor = create_executor( db.clone(), Config { utxo_validation_default: false, @@ -1746,9 +1763,9 @@ mod tests { .build() .transaction() .clone(); - let db = &mut Database::default(); + let db = Database::default(); - let executor = create_executor( + let mut executor = create_executor( db.clone(), Config { utxo_validation_default: false, @@ -1852,9 +1869,9 @@ mod tests { .build() .transaction() .clone(); - let db = &mut Database::default(); + let db = Database::default(); - let executor = create_executor( + let mut executor = create_executor( db.clone(), Config { utxo_validation_default: false, @@ -1901,7 +1918,7 @@ mod tests { let ExecutionResult { block, tx_status, .. 
} = executor - .execute_without_commit(ExecutionTypes::Production(Components { + .execute_without_commit_with_source(ExecutionTypes::Production(Components { header_to_produce: block.header, transactions_source: OnceTransactionsSource::new(block.transactions), gas_price: 0, @@ -1917,10 +1934,10 @@ mod tests { assert_eq!(tx.inputs()[0].balance_root(), balance_root); assert_eq!(tx.inputs()[0].state_root(), state_root); - executor - .execute_without_commit::(ExecutionTypes::Validation( - block, - )) + let _ = executor + .execute_without_commit_with_source::( + ExecutionTypes::Validation(block), + ) .expect("Validation of block should be successful"); } @@ -1954,7 +1971,7 @@ mod tests { } let db = &mut Database::default(); - let executor = create_executor(db.clone(), Default::default()); + let mut executor = create_executor(db.clone(), Default::default()); let block = PartialFuelBlock { header: PartialBlockHeader { @@ -2022,7 +2039,7 @@ mod tests { db.storage::().insert(&utxo_id, &coin).unwrap(); } - let executor = create_executor( + let mut executor = create_executor( db.clone(), Config { utxo_validation_default: true, @@ -2097,7 +2114,7 @@ mod tests { let db = Database::default(); - let setup = create_executor(db.clone(), Default::default()); + let mut setup = create_executor(db.clone(), Default::default()); let ExecutionResult { skipped_transactions, @@ -2110,22 +2127,22 @@ mod tests { .unwrap(); assert!(skipped_transactions.is_empty()); - let producer_view = db.transaction().deref_mut().clone(); - let producer = create_executor(producer_view, Default::default()); + let producer = create_executor(db.clone(), Default::default()); let ExecutionResult { block: second_block, skipped_transactions, .. 
} = producer - .execute_and_commit( + .execute_without_commit( ExecutionBlock::Production(second_block), Default::default(), ) - .unwrap(); + .unwrap() + .into_result(); assert!(skipped_transactions.is_empty()); let verifier = create_executor(db, Default::default()); - let verify_result = verifier.execute_and_commit( + let verify_result = verifier.execute_without_commit( ExecutionBlock::Validation(second_block), Default::default(), ); @@ -2179,7 +2196,7 @@ mod tests { let db = Database::default(); - let setup = create_executor(db.clone(), Default::default()); + let mut setup = create_executor(db.clone(), Default::default()); setup .execute_and_commit( @@ -2188,18 +2205,18 @@ mod tests { ) .unwrap(); - let producer_view = db.transaction().deref_mut().clone(); - let producer = create_executor(producer_view, Default::default()); + let producer = create_executor(db.clone(), Default::default()); let ExecutionResult { block: mut second_block, .. } = producer - .execute_and_commit( + .execute_without_commit( ExecutionBlock::Production(second_block), Default::default(), ) - .unwrap(); + .unwrap() + .into_result(); // Corrupt the utxo_id of the contract output if let Transaction::Script(script) = &mut second_block.transactions_mut()[0] { if let Input::Contract(contract::Contract { utxo_id, .. 
}) = @@ -2211,7 +2228,7 @@ mod tests { } let verifier = create_executor(db, Default::default()); - let verify_result = verifier.execute_and_commit( + let verify_result = verifier.execute_without_commit( ExecutionBlock::Validation(second_block), Default::default(), ); @@ -2230,7 +2247,7 @@ mod tests { let script_id = script.id(&ChainId::default()); let mut database = &Database::default(); - let executor = create_executor(database.clone(), Default::default()); + let mut executor = create_executor(database.clone(), Default::default()); let block = PartialFuelBlock { header: Default::default(), @@ -2281,7 +2298,7 @@ mod tests { let tx_id = tx.id(&ChainId::default()); let mut database = &Database::default(); - let executor = create_executor(database.clone(), Default::default()); + let mut executor = create_executor(database.clone(), Default::default()); let block = PartialFuelBlock { header: Default::default(), @@ -2405,7 +2422,7 @@ mod tests { transactions: vec![tx.into()], }; - let exec = make_executor(&messages); + let mut exec = make_executor(&messages); let view = exec.database_view_provider.latest_view(); assert!(!view.message_is_spent(message_coin.nonce()).unwrap()); assert!(!view.message_is_spent(message_data.nonce()).unwrap()); @@ -2459,7 +2476,7 @@ mod tests { transactions: vec![tx.into()], }; - let exec = make_executor(&messages); + let mut exec = make_executor(&messages); let view = exec.database_view_provider.latest_view(); assert!(!view.message_is_spent(message_coin.nonce()).unwrap()); assert!(!view.message_is_spent(message_data.nonce()).unwrap()); @@ -2747,7 +2764,7 @@ mod tests { .unwrap(); // make executor with db - let executor = create_executor( + let mut executor = create_executor( database.clone(), Config { utxo_validation_default: true, @@ -2816,7 +2833,7 @@ mod tests { .unwrap(); // make executor with db - let executor = create_executor( + let mut executor = create_executor( database.clone(), Config { utxo_validation_default: true, @@ -2899,7 
+2916,7 @@ mod tests { skipped_transactions, .. } = producer - .execute_without_commit(ExecutionTypes::Production(Components { + .execute_without_commit_with_source(ExecutionTypes::Production(Components { header_to_produce: PartialBlockHeader::default(), transactions_source: OnceTransactionsSource::new(vec![tx.into()]), gas_price: 1, @@ -2910,52 +2927,50 @@ mod tests { assert!(skipped_transactions.is_empty()); let validator = create_executor(db.clone(), config); - let result = validator.execute_without_commit::( - ExecutionTypes::Validation(block), - ); + let result = validator + .execute_without_commit_with_source::( + ExecutionTypes::Validation(block), + ); assert!(result.is_ok(), "{result:?}") } #[cfg(feature = "relayer")] mod relayer { use super::*; - use crate::database::database_description::{ - on_chain::OnChain, - relayer::Relayer, + use crate::{ + database::database_description::{ + on_chain::OnChain, + relayer::Relayer, + }, + state::ChangesIterator, }; use fuel_core_relayer::storage::EventsHistory; use fuel_core_storage::{ + iter::IteratorOverTable, tables::{ FuelBlocks, SpentMessages, }, - transactional::Transaction, StorageAsMut, }; fn database_with_genesis_block(da_block_height: u64) -> Database { - let db = Database::default(); + let mut db = Database::default(); let mut block = Block::default(); block.header_mut().set_da_height(da_block_height.into()); block.header_mut().recalculate_metadata(); - let mut db_transaction = db.transaction(); - db_transaction - .as_mut() - .storage::() + db.storage::() .insert(&0.into(), &block) .expect("Should insert genesis block without any problems"); - db_transaction.commit().expect("Should commit"); db } fn add_message_to_relayer(db: &mut Database, message: Message) { - let mut db_transaction = db.transaction(); let da_height = message.da_height(); db.storage::() .insert(&da_height, &[Event::Message(message)]) .expect("Should insert event"); - db_transaction.commit().expect("Should commit events"); } fn 
add_messages_to_relayer(db: &mut Database, relayer_da_height: u64) { @@ -3055,13 +3070,15 @@ mod tests { // When let producer = create_relayer_executor(on_chain_db, relayer_db); let block = test_block(block_height.into(), block_da_height.into(), 0); - let result = producer.execute_and_commit( - ExecutionTypes::Production(block.into()), - Default::default(), - )?; + let (result, changes) = producer + .execute_without_commit( + ExecutionTypes::Production(block.into()), + Default::default(), + )? + .into(); // Then - let view = producer.database_view_provider.latest_view(); + let view = ChangesIterator::::new(&changes); assert_eq!( view.iter_all::(None).count() as u64, block_da_height - genesis_da_height @@ -3098,15 +3115,16 @@ mod tests { // When let producer = create_relayer_executor(on_chain_db, relayer_db); let block = test_block(block_height.into(), block_da_height.into(), 10); - let result = producer - .execute_and_commit( + let (result, changes) = producer + .execute_without_commit( ExecutionTypes::Production(block.into()), Default::default(), ) - .unwrap(); + .unwrap() + .into(); // Then - let view = producer.database_view_provider.latest_view(); + let view = ChangesIterator::::new(&changes); assert!(result.skipped_transactions.is_empty()); assert_eq!(view.iter_all::(None).count() as u64, 0); } @@ -3143,15 +3161,16 @@ mod tests { let mut block = test_block(block_height.into(), block_da_height.into(), 0); *block.transactions_mut() = vec![tx]; let producer = create_relayer_executor(on_chain_db, relayer_db); - let result = producer - .execute_and_commit( + let (result, changes) = producer + .execute_without_commit( ExecutionTypes::Production(block.into()), Default::default(), ) - .unwrap(); + .unwrap() + .into(); // Then - let view = producer.database_view_provider.latest_view(); + let view = ChangesIterator::::new(&changes); assert!(result.skipped_transactions.is_empty()); assert_eq!(view.iter_all::(None).count() as u64, 0); // Message added during this block 
immediately became spent. diff --git a/crates/fuel-core/src/graphql_api/database/arc_wrapper.rs b/crates/fuel-core/src/graphql_api/database/arc_wrapper.rs index a86043bd80a..5355bde48d8 100644 --- a/crates/fuel-core/src/graphql_api/database/arc_wrapper.rs +++ b/crates/fuel-core/src/graphql_api/database/arc_wrapper.rs @@ -38,7 +38,7 @@ where type View = OnChainView; type Height = Height; - fn latest_height(&self) -> Self::Height { + fn latest_height(&self) -> Option { self.inner.latest_height() } @@ -60,7 +60,7 @@ where type View = OffChainView; type Height = Height; - fn latest_height(&self) -> Self::Height { + fn latest_height(&self) -> Option { self.inner.latest_height() } diff --git a/crates/fuel-core/src/graphql_api/ports.rs b/crates/fuel-core/src/graphql_api/ports.rs index d90c90de24c..069852e5964 100644 --- a/crates/fuel-core/src/graphql_api/ports.rs +++ b/crates/fuel-core/src/graphql_api/ports.rs @@ -199,19 +199,12 @@ pub trait P2pPort: Send + Sync { pub mod worker { use super::super::storage::blocks::FuelBlockIdsToHeights; - use crate::{ - database::{ - database_description::off_chain::OffChain, - metadata::MetadataTable, - }, - fuel_core_graphql_api::storage::{ - coins::OwnedCoins, - messages::OwnedMessageIds, - }, + use crate::fuel_core_graphql_api::storage::{ + coins::OwnedCoins, + messages::OwnedMessageIds, }; use fuel_core_services::stream::BoxStream; use fuel_core_storage::{ - transactional::Transactional, Error as StorageError, Result as StorageResult, StorageMutate, @@ -228,14 +221,19 @@ pub mod worker { }, }; + pub trait Transactional: Send + Sync { + type Transaction<'a>: OffChainDatabase + where + Self: 'a; + + /// Creates a write database transaction. 
+ fn transaction(&mut self) -> Self::Transaction<'_>; + } + pub trait OffChainDatabase: - Send - + Sync - + StorageMutate + StorageMutate + StorageMutate - + StorageMutate, Error = StorageError> + StorageMutate - + Transactional { fn record_tx_id_owner( &mut self, @@ -254,6 +252,9 @@ pub mod worker { /// Update metadata about the total number of transactions on the chain. /// Returns the total count after the update. fn increase_tx_count(&mut self, new_txs_count: u64) -> StorageResult; + + /// Commits the underlying changes into the database. + fn commit(self) -> StorageResult<()>; } pub trait BlockImporter { diff --git a/crates/fuel-core/src/graphql_api/storage.rs b/crates/fuel-core/src/graphql_api/storage.rs index d06df8bfedf..ef277cda84a 100644 --- a/crates/fuel-core/src/graphql_api/storage.rs +++ b/crates/fuel-core/src/graphql_api/storage.rs @@ -1,8 +1,44 @@ -use fuel_core_storage::kv_store::StorageColumn; +use crate::{ + fuel_core_graphql_api::storage::{ + blocks::FuelBlockIdsToHeights, + coins::OwnedCoins, + messages::OwnedMessageIds, + transactions::{ + OwnedTransactionIndexKey, + OwnedTransactions, + TransactionStatuses, + }, + }, + graphql_api::ports::worker::OffChainDatabase, +}; +use fuel_core_storage::{ + kv_store::{ + KeyValueInspect, + StorageColumn, + }, + transactional::{ + Modifiable, + StorageTransaction, + }, + Error as StorageError, + Result as StorageResult, + StorageAsMut, + StorageMutate, +}; +use fuel_core_types::{ + fuel_tx::{ + Address, + Bytes32, + }, + fuel_types::BlockHeight, + services::txpool::TransactionStatus, +}; +use statistic::StatisticTable; pub mod blocks; pub mod coins; pub mod messages; +pub mod statistic; pub mod transactions; /// GraphQL database tables column ids to the corresponding [`fuel_core_storage::Mappable`] table. 
@@ -54,3 +90,54 @@ impl StorageColumn for Column { self.as_u32() } } + +impl OffChainDatabase for StorageTransaction +where + S: KeyValueInspect + Modifiable, + StorageTransaction: StorageMutate + + StorageMutate + + StorageMutate, +{ + fn record_tx_id_owner( + &mut self, + owner: &Address, + block_height: BlockHeight, + tx_idx: u16, + tx_id: &Bytes32, + ) -> StorageResult> { + self.storage::().insert( + &OwnedTransactionIndexKey::new(owner, block_height, tx_idx), + tx_id, + ) + } + + fn update_tx_status( + &mut self, + id: &Bytes32, + status: TransactionStatus, + ) -> StorageResult> { + self.storage::().insert(id, &status) + } + + fn increase_tx_count(&mut self, new_txs_count: u64) -> StorageResult { + /// Tracks the total number of transactions written to the chain + /// It's useful for analyzing TPS or other metrics. + const TX_COUNT: &str = "total_tx_count"; + + // TODO: how should tx count be initialized after regenesis? + let current_tx_count: u64 = self + .storage::>() + .get(TX_COUNT)? + .unwrap_or_default() + .into_owned(); + // Using saturating_add because this value doesn't significantly impact the correctness of execution. + let new_tx_count = current_tx_count.saturating_add(new_txs_count); + <_ as StorageMutate>>::insert(self, TX_COUNT, &new_tx_count)?; + Ok(new_tx_count) + } + + fn commit(self) -> StorageResult<()> { + self.commit()?; + Ok(()) + } +} diff --git a/crates/fuel-core/src/graphql_api/storage/statistic.rs b/crates/fuel-core/src/graphql_api/storage/statistic.rs new file mode 100644 index 00000000000..8294a9b3d42 --- /dev/null +++ b/crates/fuel-core/src/graphql_api/storage/statistic.rs @@ -0,0 +1,32 @@ +use fuel_core_storage::{ + blueprint::plain::Plain, + codec::postcard::Postcard, + structured_storage::TableWithBlueprint, + Mappable, +}; + +/// The table that stores all statistic about blockchain. Each key is a string, while the value +/// depends on the context. 
+pub struct StatisticTable(core::marker::PhantomData); + +impl Mappable for StatisticTable +where + V: Clone, +{ + type Key = str; + type OwnedKey = String; + type Value = V; + type OwnedValue = V; +} + +impl TableWithBlueprint for StatisticTable +where + V: Clone, +{ + type Blueprint = Plain; + type Column = super::Column; + + fn column() -> Self::Column { + Self::Column::Statistic + } +} diff --git a/crates/fuel-core/src/graphql_api/worker_service.rs b/crates/fuel-core/src/graphql_api/worker_service.rs index 734c48f38f3..3b5c206989f 100644 --- a/crates/fuel-core/src/graphql_api/worker_service.rs +++ b/crates/fuel-core/src/graphql_api/worker_service.rs @@ -1,24 +1,15 @@ -use crate::{ - database::{ - database_description::{ - off_chain::OffChain, - DatabaseDescription, - DatabaseMetadata, +use crate::fuel_core_graphql_api::{ + ports, + ports::worker::OffChainDatabase, + storage::{ + blocks::FuelBlockIdsToHeights, + coins::{ + owner_coin_id_key, + OwnedCoins, }, - metadata::MetadataTable, - }, - fuel_core_graphql_api::{ - ports, - storage::{ - blocks::FuelBlockIdsToHeights, - coins::{ - owner_coin_id_key, - OwnedCoins, - }, - messages::{ - OwnedMessageIds, - OwnedMessageKey, - }, + messages::{ + OwnedMessageIds, + OwnedMessageKey, }, }, }; @@ -87,48 +78,32 @@ pub struct Task { impl Task where TxPool: ports::worker::TxPool, - D: ports::worker::OffChainDatabase, + D: ports::worker::Transactional, { fn process_block(&mut self, result: SharedImportResult) -> anyhow::Result<()> { let block = &result.sealed_block.entity; let mut transaction = self.database.transaction(); // save the status for every transaction using the finalized block id - persist_transaction_status(&result, transaction.as_mut())?; + persist_transaction_status(&result, &mut transaction)?; // save the associated owner for each transaction in the block - index_tx_owners_for_block(block, transaction.as_mut())?; + index_tx_owners_for_block(block, &mut transaction)?; let height = block.header().height(); let 
block_id = block.id(); transaction - .as_mut() .storage::() .insert(&block_id, height)?; let total_tx_count = transaction - .as_mut() .increase_tx_count(block.transactions().len() as u64) .unwrap_or_default(); process_executor_events( result.events.iter().map(Cow::Borrowed), - transaction.as_mut(), + &mut transaction, )?; - // TODO: Temporary solution to store the block height in the database manually here. - // Later it will be controlled by the `commit_changes` function on the `Database` side. - // https://github.com/FuelLabs/fuel-core/issues/1589 - transaction - .as_mut() - .storage::>() - .insert( - &(), - &DatabaseMetadata::V1 { - version: OffChain::version(), - height: *block.header().height(), - }, - )?; - transaction.commit()?; for status in result.tx_status.iter() { @@ -145,13 +120,13 @@ where } /// Process the executor events and update the indexes for the messages and coins. -pub fn process_executor_events<'a, D, Iter>( +pub fn process_executor_events<'a, Iter, T>( events: Iter, - block_st_transaction: &mut D, + block_st_transaction: &mut T, ) -> anyhow::Result<()> where - D: ports::worker::OffChainDatabase, Iter: Iterator>, + T: OffChainDatabase, { for event in events { match event.deref() { @@ -189,12 +164,12 @@ where } /// Associate all transactions within a block to their respective UTXO owners -fn index_tx_owners_for_block( +fn index_tx_owners_for_block( block: &Block, - block_st_transaction: &mut D, + block_st_transaction: &mut T, ) -> anyhow::Result<()> where - D: ports::worker::OffChainDatabase, + T: OffChainDatabase, { for (tx_idx, tx) in block.transactions().iter().enumerate() { let block_height = *block.header().height(); @@ -230,16 +205,16 @@ where } /// Index the tx id by owner for all of the inputs and outputs -fn persist_owners_index( +fn persist_owners_index( block_height: BlockHeight, inputs: &[Input], outputs: &[Output], tx_id: &Bytes32, tx_idx: u16, - db: &mut D, + db: &mut T, ) -> StorageResult<()> where - D: 
ports::worker::OffChainDatabase, + T: OffChainDatabase, { let mut owners = vec![]; for input in inputs { @@ -272,12 +247,12 @@ where Ok(()) } -fn persist_transaction_status( +fn persist_transaction_status( import_result: &ImportResult, - db: &mut D, + db: &mut T, ) -> StorageResult<()> where - D: ports::worker::OffChainDatabase, + T: OffChainDatabase, { for TransactionExecutionStatus { id, result } in import_result.tx_status.iter() { let status = @@ -298,7 +273,7 @@ where impl RunnableService for Task where TxPool: ports::worker::TxPool, - D: ports::worker::OffChainDatabase, + D: ports::worker::Transactional, { const NAME: &'static str = "GraphQL_Off_Chain_Worker"; type SharedData = EmptyShared; @@ -314,7 +289,9 @@ where _: &StateWatcher, _: Self::TaskParams, ) -> anyhow::Result { - let total_tx_count = self.database.increase_tx_count(0).unwrap_or_default(); + let mut db_tx = self.database.transaction(); + let total_tx_count = db_tx.increase_tx_count(0).unwrap_or_default(); + db_tx.commit()?; graphql_metrics().total_txs_count.set(total_tx_count as i64); // TODO: It is possible that the node was shut down before we processed all imported blocks. 
@@ -332,7 +309,7 @@ where impl RunnableTask for Task where TxPool: ports::worker::TxPool, - D: ports::worker::OffChainDatabase, + D: ports::worker::Transactional, { async fn run(&mut self, watcher: &mut StateWatcher) -> anyhow::Result { let should_continue; @@ -379,7 +356,7 @@ pub fn new_service( where TxPool: ports::worker::TxPool, I: ports::worker::BlockImporter, - D: ports::worker::OffChainDatabase, + D: ports::worker::Transactional, { let block_importer = block_importer.block_events(); ServiceRunner::new(Task { diff --git a/crates/fuel-core/src/schema/dap.rs b/crates/fuel-core/src/schema/dap.rs index bb6d3450fa8..acceb76ce14 100644 --- a/crates/fuel-core/src/schema/dap.rs +++ b/crates/fuel-core/src/schema/dap.rs @@ -1,7 +1,6 @@ use crate::{ database::{ database_description::on_chain::OnChain, - transaction::DatabaseTransaction, Database, }, schema::scalars::{ @@ -18,6 +17,10 @@ use async_graphql::{ }; use fuel_core_storage::{ not_found, + transactional::{ + IntoTransaction, + StorageTransaction, + }, vm_storage::VmStorage, InterpreterStorage, }; @@ -65,11 +68,12 @@ pub struct Config { debug_enabled: bool, } +type FrozenDatabase = VmStorage>>; + #[derive(Debug, Clone, Default)] pub struct ConcreteStorage { - vm: HashMap, Script>>, + vm: HashMap>, tx: HashMap>, - db: HashMap>, params: ConsensusParameters, } @@ -104,12 +108,12 @@ impl ConcreteStorage { pub fn init( &mut self, txs: &[Script], - storage: DatabaseTransaction, + storage: Database, ) -> anyhow::Result { let id = Uuid::new_v4(); let id = ID::from(id); - let vm_database = Self::vm_database(&storage)?; + let vm_database = Self::vm_database(storage)?; let tx = Self::dummy_tx(); let checked_tx = tx .into_checked_basic(vm_database.block_height()?, &self.params) @@ -134,23 +138,17 @@ impl ConcreteStorage { let mut vm = Interpreter::with_storage(vm_database, interpreter_params); vm.transact(ready_tx).map_err(|e| anyhow::anyhow!(e))?; self.vm.insert(id.clone(), vm); - self.db.insert(id.clone(), storage); Ok(id) 
} pub fn kill(&mut self, id: &ID) -> bool { self.tx.remove(id); - self.vm.remove(id); - self.db.remove(id).is_some() + self.vm.remove(id).is_some() } - pub fn reset( - &mut self, - id: &ID, - storage: DatabaseTransaction, - ) -> anyhow::Result<()> { - let vm_database = Self::vm_database(&storage)?; + pub fn reset(&mut self, id: &ID, storage: Database) -> anyhow::Result<()> { + let vm_database = Self::vm_database(storage)?; let tx = self .tx .get(id) @@ -177,7 +175,6 @@ impl ConcreteStorage { self.vm.insert(id.clone(), vm).ok_or_else(|| { io::Error::new(io::ErrorKind::NotFound, "The VM instance was not found") })?; - self.db.insert(id.clone(), storage); Ok(()) } @@ -191,15 +188,13 @@ impl ConcreteStorage { .ok_or_else(|| anyhow::anyhow!("The VM instance was not found")) } - fn vm_database( - storage: &DatabaseTransaction, - ) -> anyhow::Result> { + fn vm_database(storage: Database) -> anyhow::Result { let block = storage .get_current_block()? .ok_or(not_found!("Block for VMDatabase"))?; let vm_database = VmStorage::new( - storage.as_ref().clone(), + storage.into_transaction(), block.header().consensus(), // TODO: Use a real coinbase address Default::default(), @@ -304,7 +299,7 @@ impl DapMutation { .data_unchecked::() .lock() .await - .init(&[], db.transaction())?; + .init(&[], db.clone())?; debug!("Session {:?} initialized", id); @@ -333,7 +328,7 @@ impl DapMutation { ctx.data_unchecked::() .lock() .await - .reset(&id, db.transaction())?; + .reset(&id, db.clone())?; debug!("Session {:?} was reset", id); @@ -417,21 +412,18 @@ impl DapMutation { .map_err(|_| async_graphql::Error::new("Invalid transaction JSON"))?; let mut locked = ctx.data_unchecked::().lock().await; - - let db = locked.db.get(&id).ok_or("Invalid debugging session ID")?; - let params = locked.params.clone(); - let checked_tx = tx - .into_checked_basic(db.latest_height()?, ¶ms) - .map_err(|err| anyhow::anyhow!("{:?}", err))? 
- .into(); - let vm = locked .vm .get_mut(&id) .ok_or_else(|| async_graphql::Error::new("VM not found"))?; + let checked_tx = tx + .into_checked_basic(vm.as_ref().block_height()?, ¶ms) + .map_err(|err| anyhow::anyhow!("{:?}", err))? + .into(); + let gas_costs = params.gas_costs(); let fee_params = params.fee_params(); diff --git a/crates/fuel-core/src/service.rs b/crates/fuel-core/src/service.rs index 92c735948d1..b976ec96de5 100644 --- a/crates/fuel-core/src/service.rs +++ b/crates/fuel-core/src/service.rs @@ -196,12 +196,10 @@ pub struct Task { impl Task { /// Private inner method for initializing the fuel service task - pub fn new(mut database: CombinedDatabase, config: Config) -> anyhow::Result { + pub fn new(database: CombinedDatabase, config: Config) -> anyhow::Result { // initialize state tracing::info!("Initializing database"); - let block_height = config.state_reader.block_height(); - let da_block_height = 0u64.into(); - database.init(&block_height, &da_block_height)?; + database.check_version()?; // initialize sub services tracing::info!("Initializing sub services"); @@ -232,7 +230,7 @@ impl RunnableService for Task { _: Self::TaskParams, ) -> anyhow::Result { let on_view = self.shared.database.on_chain().latest_view(); - let off_view = self.shared.database.off_chain().latest_view(); + let mut off_view = self.shared.database.off_chain().latest_view(); // check if chain is initialized if let Err(err) = on_view.get_genesis() { if err.is_not_found() { @@ -242,7 +240,7 @@ impl RunnableService for Task { genesis::off_chain::execute_genesis_block( &self.shared.config, - &off_view, + &mut off_view, )?; } } @@ -287,7 +285,6 @@ impl RunnableTask for Task { ); } } - self.shared.database.flush()?; Ok(()) } } diff --git a/crates/fuel-core/src/service/adapters.rs b/crates/fuel-core/src/service/adapters.rs index 19d042338b3..a688439c715 100644 --- a/crates/fuel-core/src/service/adapters.rs +++ b/crates/fuel-core/src/service/adapters.rs @@ -27,6 +27,8 @@ pub mod 
graphql_api; #[cfg(feature = "p2p")] pub mod p2p; pub mod producer; +#[cfg(feature = "relayer")] +pub mod relayer; #[cfg(feature = "p2p")] pub mod sync; pub mod txpool; diff --git a/crates/fuel-core/src/service/adapters/block_importer.rs b/crates/fuel-core/src/service/adapters/block_importer.rs index 62d9968a7f0..3b50758a9e0 100644 --- a/crates/fuel-core/src/service/adapters/block_importer.rs +++ b/crates/fuel-core/src/service/adapters/block_importer.rs @@ -11,22 +11,27 @@ use fuel_core_importer::{ ports::{ BlockVerifier, Executor, - ExecutorDatabase, ImporterDatabase, }, Config, Importer, }; use fuel_core_storage::{ - iter::IterDirection, + iter::{ + IterDirection, + IteratorOverTable, + }, tables::{ + merkle::{ + DenseMetadataKey, + FuelBlockMerkleMetadata, + }, FuelBlocks, - SealedBlockConsensus, - Transactions, }, - transactional::StorageTransaction, + transactional::Changes, + MerkleRoot, Result as StorageResult, - StorageAsMut, + StorageAsRef, }; use fuel_core_types::{ blockchain::{ @@ -34,11 +39,7 @@ use fuel_core_types::{ consensus::Consensus, SealedBlock, }, - fuel_tx::UniqueIdentifier, - fuel_types::{ - BlockHeight, - ChainId, - }, + fuel_types::BlockHeight, services::executor::{ ExecutionTypes, Result as ExecutorResult, @@ -88,43 +89,20 @@ impl ImporterDatabase for Database { .transpose()? .map(|(height, _)| height)) } -} - -impl ExecutorDatabase for Database { - fn store_new_block( - &mut self, - chain_id: &ChainId, - block: &SealedBlock, - ) -> StorageResult { - let height = block.entity.header().height(); - let mut found = self - .storage::() - .insert(height, &block.entity.compress(chain_id))? - .is_some(); - found |= self - .storage::() - .insert(height, &block.consensus)? - .is_some(); - // TODO: Use `batch_insert` from https://github.com/FuelLabs/fuel-core/pull/1576 - for tx in block.entity.transactions() { - found |= self - .storage::() - .insert(&tx.id(chain_id), tx)? 
- .is_some(); - } - Ok(!found) + fn latest_block_root(&self) -> StorageResult> { + Ok(self + .storage_as_ref::() + .get(&DenseMetadataKey::Latest)? + .map(|cow| *cow.root())) } } impl Executor for ExecutorAdapter { - type Database = Database; - fn execute_without_commit( &self, block: Block, - ) -> ExecutorResult>> - { + ) -> ExecutorResult> { self._execute_without_commit::(ExecutionTypes::Validation( block, )) diff --git a/crates/fuel-core/src/service/adapters/consensus_module/poa.rs b/crates/fuel-core/src/service/adapters/consensus_module/poa.rs index 2cb294d01da..32220003fc6 100644 --- a/crates/fuel-core/src/service/adapters/consensus_module/poa.rs +++ b/crates/fuel-core/src/service/adapters/consensus_module/poa.rs @@ -1,5 +1,4 @@ use crate::{ - database::Database, fuel_core_graphql_api::ports::ConsensusModulePort, service::adapters::{ BlockImporterAdapter, @@ -23,7 +22,7 @@ use fuel_core_poa::{ }, }; use fuel_core_services::stream::BoxStream; -use fuel_core_storage::transactional::StorageTransaction; +use fuel_core_storage::transactional::Changes; use fuel_core_types::{ fuel_asm::Word, fuel_tx::TxId, @@ -103,15 +102,13 @@ impl TransactionPool for TxPoolAdapter { #[async_trait::async_trait] impl fuel_core_poa::ports::BlockProducer for BlockProducerAdapter { - type Database = Database; - async fn produce_and_execute_block( &self, height: BlockHeight, block_time: Tai64, source: TransactionsSource, max_gas: Word, - ) -> anyhow::Result>> { + ) -> anyhow::Result> { match source { TransactionsSource::TxPool => { self.block_producer @@ -131,11 +128,9 @@ impl fuel_core_poa::ports::BlockProducer for BlockProducerAdapter { #[async_trait::async_trait] impl BlockImporter for BlockImporterAdapter { - type Database = Database; - async fn commit_result( &self, - result: UncommittedImporterResult>, + result: UncommittedImporterResult, ) -> anyhow::Result<()> { self.block_importer .commit_result(result) diff --git a/crates/fuel-core/src/service/adapters/executor.rs 
b/crates/fuel-core/src/service/adapters/executor.rs index 388ae396479..7250c20e374 100644 --- a/crates/fuel-core/src/service/adapters/executor.rs +++ b/crates/fuel-core/src/service/adapters/executor.rs @@ -12,10 +12,7 @@ use fuel_core_executor::{ executor::ExecutionBlockWithSource, ports::MaybeCheckedTransaction, }; -use fuel_core_storage::{ - transactional::StorageTransaction, - Error as StorageError, -}; +use fuel_core_storage::transactional::Changes; use fuel_core_types::{ blockchain::primitives::DaBlockHeight, fuel_tx, @@ -44,11 +41,11 @@ impl ExecutorAdapter { pub(crate) fn _execute_without_commit( &self, block: ExecutionBlockWithSource, - ) -> ExecutorResult>> + ) -> ExecutorResult> where TxSource: fuel_core_executor::ports::TransactionsSource, { - self.executor.execute_without_commit(block) + self.executor.execute_without_commit_with_source(block) } pub(crate) fn _dry_run( @@ -60,13 +57,6 @@ impl ExecutorAdapter { } } -/// Implemented to satisfy: `GenesisCommitment for ContractRef<&'a mut Database>` -impl fuel_core_executor::refs::ContractStorageTrait for Database { - type InnerError = StorageError; -} - -impl fuel_core_executor::ports::ExecutorDatabaseTrait for Database {} - impl fuel_core_executor::ports::RelayerPort for Database { fn enabled(&self) -> bool { #[cfg(feature = "relayer")] diff --git a/crates/fuel-core/src/service/adapters/graphql_api/off_chain.rs b/crates/fuel-core/src/service/adapters/graphql_api/off_chain.rs index 418d2f755b9..6c01a6f9c24 100644 --- a/crates/fuel-core/src/service/adapters/graphql_api/off_chain.rs +++ b/crates/fuel-core/src/service/adapters/graphql_api/off_chain.rs @@ -5,7 +5,7 @@ use crate::{ }, fuel_core_graphql_api::{ ports::{ - worker, + worker::Transactional, OffChainDatabase, }, storage::transactions::OwnedTransactionIndexCursor, @@ -18,6 +18,10 @@ use fuel_core_storage::{ IterDirection, }, not_found, + transactional::{ + IntoTransaction, + StorageTransaction, + }, Error as StorageError, Result as StorageResult, }; @@ 
-26,7 +30,6 @@ use fuel_core_types::{ blockchain::primitives::BlockId, fuel_tx::{ Address, - Bytes32, TxPointer, UtxoId, }, @@ -87,26 +90,10 @@ impl OffChainDatabase for Database { } } -impl worker::OffChainDatabase for Database { - fn record_tx_id_owner( - &mut self, - owner: &Address, - block_height: BlockHeight, - tx_idx: u16, - tx_id: &Bytes32, - ) -> StorageResult> { - Database::record_tx_id_owner(self, owner, block_height, tx_idx, tx_id) - } - - fn update_tx_status( - &mut self, - id: &Bytes32, - status: TransactionStatus, - ) -> StorageResult> { - Database::update_tx_status(self, id, status) - } +impl Transactional for Database { + type Transaction<'a> = StorageTransaction<&'a mut Self> where Self: 'a; - fn increase_tx_count(&mut self, new_txs_count: u64) -> StorageResult { - Database::increase_tx_count(self, new_txs_count) + fn transaction(&mut self) -> Self::Transaction<'_> { + self.into_transaction() } } diff --git a/crates/fuel-core/src/service/adapters/graphql_api/on_chain.rs b/crates/fuel-core/src/service/adapters/graphql_api/on_chain.rs index 19ab02bea69..1662133ecc0 100644 --- a/crates/fuel-core/src/service/adapters/graphql_api/on_chain.rs +++ b/crates/fuel-core/src/service/adapters/graphql_api/on_chain.rs @@ -14,6 +14,7 @@ use fuel_core_storage::{ BoxedIter, IntoBoxedIter, IterDirection, + IteratorOverTable, }, not_found, tables::FuelBlocks, diff --git a/crates/fuel-core/src/service/adapters/producer.rs b/crates/fuel-core/src/service/adapters/producer.rs index 7680776e797..573bc66879d 100644 --- a/crates/fuel-core/src/service/adapters/producer.rs +++ b/crates/fuel-core/src/service/adapters/producer.rs @@ -16,7 +16,7 @@ use fuel_core_producer::ports::TxPool; use fuel_core_storage::{ not_found, tables::FuelBlocks, - transactional::StorageTransaction, + transactional::Changes, Result as StorageResult, StorageAsRef, }; @@ -64,23 +64,19 @@ impl TxPool for TxPoolAdapter { } impl fuel_core_producer::ports::Executor for ExecutorAdapter { - type Database = 
Database; - fn execute_without_commit( &self, component: Components, - ) -> ExecutorResult>> { + ) -> ExecutorResult> { self._execute_without_commit(ExecutionTypes::Production(component)) } } impl fuel_core_producer::ports::Executor> for ExecutorAdapter { - type Database = Database; - fn execute_without_commit( &self, component: Components>, - ) -> ExecutorResult>> { + ) -> ExecutorResult> { let Components { header_to_produce, transactions_source, diff --git a/crates/fuel-core/src/service/adapters/relayer.rs b/crates/fuel-core/src/service/adapters/relayer.rs new file mode 100644 index 00000000000..83623436d58 --- /dev/null +++ b/crates/fuel-core/src/service/adapters/relayer.rs @@ -0,0 +1,17 @@ +use crate::database::{ + database_description::relayer::Relayer, + Database, +}; +use fuel_core_relayer::ports::Transactional; +use fuel_core_storage::transactional::{ + IntoTransaction, + StorageTransaction, +}; + +impl Transactional for Database { + type Transaction<'a> = StorageTransaction<&'a mut Self> where Self: 'a; + + fn transaction(&mut self) -> Self::Transaction<'_> { + self.into_transaction() + } +} diff --git a/crates/fuel-core/src/service/genesis.rs b/crates/fuel-core/src/service/genesis.rs index aeed6509f47..f8dc1db03d2 100644 --- a/crates/fuel-core/src/service/genesis.rs +++ b/crates/fuel-core/src/service/genesis.rs @@ -14,7 +14,6 @@ use fuel_core_chain_config::{ MessageConfig, }; use fuel_core_storage::{ - structured_storage::TableWithBlueprint, tables::{ Coins, ContractsInfo, @@ -23,8 +22,9 @@ use fuel_core_storage::{ Messages, }, transactional::{ + Changes, + ReadTransaction, StorageTransaction, - Transactional, }, StorageAsMut, }; @@ -64,11 +64,13 @@ use fuel_core_types::{ UncommittedResult as UncommittedImportResult, }, }; +use strum::IntoEnumIterator; pub mod off_chain; mod runner; mod workers; +use crate::database::genesis_progress::GenesisResource; pub use runner::{ GenesisRunner, TransactionOpener, @@ -78,7 +80,7 @@ pub use runner::{ pub async fn 
execute_genesis_block( config: &Config, original_database: &Database, -) -> anyhow::Result>> { +) -> anyhow::Result> { let workers = GenesisWorkers::new(original_database.clone(), config.state_reader.clone()); @@ -98,12 +100,12 @@ pub async fn execute_genesis_block( consensus, }; - let mut database_transaction = Transactional::transaction(original_database); - cleanup_genesis_progress(database_transaction.as_mut())?; + let mut database_transaction = original_database.read_transaction(); + cleanup_genesis_progress(&mut database_transaction)?; let result = UncommittedImportResult::new( ImportResult::new_from_local(block, vec![], vec![]), - database_transaction, + database_transaction.into_changes(), ); Ok(result) @@ -125,10 +127,16 @@ async fn import_chain_state(workers: GenesisWorkers) -> anyhow::Result<()> { Ok(()) } -fn cleanup_genesis_progress(database: &mut Database) -> anyhow::Result<()> { - database - .delete_all(GenesisMetadata::column()) - .map_err(|e| e.into()) +fn cleanup_genesis_progress( + transaction: &mut StorageTransaction<&Database>, +) -> anyhow::Result<()> { + for resource in GenesisResource::iter() { + transaction + .storage_as_mut::() + .remove(&resource) + .map_err(|e| anyhow::anyhow!("{e}"))?; + } + Ok(()) } pub fn create_genesis_block(config: &Config) -> Block { @@ -172,7 +180,7 @@ pub async fn execute_and_commit_genesis_block( } fn init_coin( - db: &mut Database, + transaction: &mut StorageTransaction<&mut Database>, coin: &CoinConfig, height: BlockHeight, ) -> anyhow::Result<()> { @@ -195,7 +203,7 @@ fn init_coin( )); } - if db + if transaction .storage::() .insert(&utxo_id, &compressed_coin)? 
.is_some() @@ -207,7 +215,7 @@ fn init_coin( } fn init_contract( - db: &mut Database, + transaction: &mut StorageTransaction<&mut Database>, contract_config: &ContractConfig, height: BlockHeight, ) -> anyhow::Result<()> { @@ -225,7 +233,7 @@ fn init_contract( } // insert contract code - if db + if transaction .storage::() .insert(&contract_id, contract.as_ref())? .is_some() @@ -234,14 +242,14 @@ fn init_contract( } // insert contract salt - if db + if transaction .storage::() .insert(&contract_id, &ContractsInfoType::V1(salt.into()))? .is_some() { return Err(anyhow!("Contract info should not exist")); } - if db + if transaction .storage::() .insert( &contract_id, @@ -256,7 +264,7 @@ fn init_contract( } fn init_da_message( - db: &mut Database, + transaction: &mut StorageTransaction<&mut Database>, msg: MessageConfig, da_height: DaBlockHeight, ) -> anyhow::Result<()> { @@ -268,7 +276,7 @@ fn init_da_message( )); } - if db + if transaction .storage::() .insert(message.id(), &message)? .is_some() @@ -285,7 +293,10 @@ mod tests { use crate::{ combined_database::CombinedDatabase, - database::genesis_progress::GenesisResource, + database::genesis_progress::{ + GenesisProgressInspect, + GenesisResource, + }, service::{ config::Config, FuelService, @@ -354,6 +365,7 @@ mod tests { assert_eq!( block_height, db.latest_height() + .unwrap() .expect("Expected a block height to be set") ) } @@ -595,15 +607,16 @@ mod tests { ..Config::local_node() }; - let db = &Database::default(); + let db = Database::default(); - execute_and_commit_genesis_block(&config, db).await.unwrap(); + execute_and_commit_genesis_block(&config, &db) + .await + .unwrap(); let expected_msg: Message = msg.into(); let ret_msg = db - .as_ref() - .storage::() + .storage::() .get(expected_msg.id()) .unwrap() .unwrap() diff --git a/crates/fuel-core/src/service/genesis/off_chain.rs b/crates/fuel-core/src/service/genesis/off_chain.rs index 82bfbc9b9bc..fda95590689 100644 --- 
a/crates/fuel-core/src/service/genesis/off_chain.rs +++ b/crates/fuel-core/src/service/genesis/off_chain.rs @@ -10,7 +10,7 @@ use fuel_core_chain_config::{ CoinConfig, MessageConfig, }; -use fuel_core_storage::transactional::Transactional; +use fuel_core_storage::transactional::WriteTransaction; use fuel_core_types::{ entities::coins::coin::Coin, services::executor::Event, @@ -18,27 +18,27 @@ use fuel_core_types::{ use std::borrow::Cow; fn process_messages( - db: &Database, + original_database: &mut Database, messages: Vec, ) -> anyhow::Result<()> { - let mut database_transaction = Transactional::transaction(db); + let mut database_transaction = original_database.write_transaction(); let message_events = messages.iter().map(|config| { let message = config.clone().into(); Cow::Owned(Event::MessageImported(message)) }); - worker_service::process_executor_events( - message_events, - database_transaction.as_mut(), - )?; + worker_service::process_executor_events(message_events, &mut database_transaction)?; database_transaction.commit()?; Ok(()) } -fn process_coins(db: &Database, coins: Vec) -> anyhow::Result<()> { - let mut database_transaction = Transactional::transaction(db); +fn process_coins( + original_database: &mut Database, + coins: Vec, +) -> anyhow::Result<()> { + let mut database_transaction = original_database.write_transaction(); let coin_events = coins.iter().map(|config| { let coin = Coin { @@ -51,7 +51,7 @@ fn process_coins(db: &Database, coins: Vec) -> anyhow::Res Cow::Owned(Event::CoinCreated(coin)) }); - worker_service::process_executor_events(coin_events, database_transaction.as_mut())?; + worker_service::process_executor_events(coin_events, &mut database_transaction)?; database_transaction.commit()?; Ok(()) @@ -62,7 +62,7 @@ fn process_coins(db: &Database, coins: Vec) -> anyhow::Res // https://github.com/FuelLabs/fuel-core/issues/1619 pub fn execute_genesis_block( config: &Config, - original_database: &Database, + original_database: &mut Database, ) 
-> anyhow::Result<()> { for message_group in config.state_reader.messages()? { process_messages(original_database, message_group?.data)?; diff --git a/crates/fuel-core/src/service/genesis/runner.rs b/crates/fuel-core/src/service/genesis/runner.rs index b89c039c450..bba91399ae0 100644 --- a/crates/fuel-core/src/service/genesis/runner.rs +++ b/crates/fuel-core/src/service/genesis/runner.rs @@ -1,24 +1,30 @@ use fuel_core_chain_config::Group; -use fuel_core_storage::transactional::Transaction; +use fuel_core_storage::transactional::{ + StorageTransaction, + WriteTransaction, +}; use std::sync::Arc; use tokio::sync::Notify; use tokio_util::sync::CancellationToken; use crate::database::{ - database_description::on_chain::OnChain, - genesis_progress::GenesisResource, - transaction::DatabaseTransaction, + genesis_progress::{ + GenesisProgressInspect, + GenesisProgressMutate, + GenesisResource, + }, Database, }; pub trait TransactionOpener { - fn transaction(&mut self) -> DatabaseTransaction; + fn transaction(&mut self) -> StorageTransaction<&mut Database>; + fn view_only(&self) -> &Database; } impl TransactionOpener for Database { - fn transaction(&mut self) -> DatabaseTransaction { - Database::transaction(self) + fn transaction(&mut self) -> StorageTransaction<&mut Database> { + self.write_transaction() } fn view_only(&self) -> &Database { @@ -39,10 +45,11 @@ pub trait ProcessState { type Item; fn genesis_resource() -> GenesisResource; + fn process( &mut self, group: Vec, - tx: &mut Database, + tx: &mut StorageTransaction<&mut Database>, ) -> anyhow::Result<()>; } @@ -85,7 +92,7 @@ where let mut tx = self.tx_opener.transaction(); let group = group?; let group_num = group.index; - self.handler.process(group.data, tx.as_mut())?; + self.handler.process(group.data, &mut tx)?; tx.update_genesis_progress(Logic::genesis_resource(), group_num)?; tx.commit()?; Ok(()) @@ -116,17 +123,25 @@ mod tests { use fuel_core_chain_config::Group; use fuel_core_storage::{ column::Column, - 
iter::IteratorableStore, + iter::{ + BoxedIter, + IterDirection, + IterableStore, + }, kv_store::{ - BatchOperations, - KeyValueStore, + KVItem, + KeyValueInspect, Value, - WriteOperation, }, tables::Coins, - Error, + transactional::{ + Changes, + StorageTransaction, + WriteTransaction, + }, Result as StorageResult, StorageAsMut, + StorageAsRef, StorageInspect, }; use fuel_core_types::{ @@ -135,15 +150,18 @@ mod tests { CompressedCoinV1, }, fuel_tx::UtxoId, + fuel_types::BlockHeight, }; use tokio::sync::Notify; use tokio_util::sync::CancellationToken; use crate::{ database::{ - database_description::on_chain::OnChain, - genesis_progress::GenesisResource, - transaction::DatabaseTransaction, + genesis_progress::{ + GenesisProgressInspect, + GenesisProgressMutate, + GenesisResource, + }, Database, }, service::genesis::runner::{ @@ -152,18 +170,22 @@ mod tests { }, state::{ in_memory::memory_store::MemoryStore, - DataSource, TransactableStorage, }, }; use super::ProcessState; - type TestHandler<'a, K> = - Box anyhow::Result<()> + Send + 'a>; + type TestHandler<'a, K> = Box< + dyn FnMut(K, &mut StorageTransaction<&mut Database>) -> anyhow::Result<()> + + Send + + 'a, + >; fn to_handler<'a, K: 'a>( - closure: impl FnMut(K, &mut Database) -> anyhow::Result<()> + Send + 'a, + closure: impl FnMut(K, &mut StorageTransaction<&mut Database>) -> anyhow::Result<()> + + Send + + 'a, ) -> TestHandler<'a, K> { Box::new(closure) } @@ -178,7 +200,7 @@ mod tests { fn process( &mut self, group: Vec, - tx: &mut Database, + tx: &mut StorageTransaction<&mut Database>, ) -> anyhow::Result<()> { group.into_iter().try_for_each(|item| self(item, tx)) } @@ -206,7 +228,7 @@ mod tests { let runner = GenesisRunner::new( Some(Arc::new(Notify::new())), CancellationToken::new(), - to_handler(|group: usize, _: &mut Database| { + to_handler(|group: usize, _: _| { called_with_groups.push(group); Ok(()) }), @@ -233,7 +255,7 @@ mod tests { let runner = GenesisRunner::new( Some(Arc::new(Notify::new())), 
CancellationToken::new(), - to_handler(|element, _: &mut Database| { + to_handler(|element, _: _| { called_with.push(element); Ok(()) }), @@ -255,22 +277,22 @@ mod tests { let outer_db = Database::default(); let utxo_id = UtxoId::new(Default::default(), 0); - let is_coin_present = - |db: &Database| StorageInspect::::contains_key(&db, &utxo_id).unwrap(); - let runner = GenesisRunner::new( Some(Arc::new(Notify::new())), CancellationToken::new(), - to_handler(|_, tx: &mut Database| { + to_handler(|_, tx: _| { insert_a_coin(tx, &utxo_id); assert!( - is_coin_present(tx), + tx.storage::().contains_key(&utxo_id).unwrap(), "Coin should be present in the tx db view" ); assert!( - !is_coin_present(&outer_db), + !outer_db + .storage_as_ref::() + .contains_key(&utxo_id) + .unwrap(), "Coin should not be present in the outer db " ); @@ -284,10 +306,13 @@ mod tests { runner.run().unwrap(); // then - assert!(is_coin_present(&outer_db)); + assert!(outer_db + .storage_as_ref::() + .contains_key(&utxo_id) + .unwrap()); } - fn insert_a_coin(tx: &mut Database, utxo_id: &UtxoId) { + fn insert_a_coin(tx: &mut StorageTransaction<&mut Database>, utxo_id: &UtxoId) { let coin: CompressedCoin = CompressedCoinV1::default().into(); tx.storage_as_mut::().insert(utxo_id, &coin).unwrap(); @@ -303,7 +328,7 @@ mod tests { let runner = GenesisRunner::new( Some(Arc::new(Notify::new())), CancellationToken::new(), - to_handler(|_, tx: &mut Database| { + to_handler(|_, tx: _| { insert_a_coin(tx, &utxo_id); bail!("Some error") }), @@ -325,7 +350,7 @@ mod tests { let runner = GenesisRunner::new( Some(Arc::new(Notify::new())), CancellationToken::new(), - to_handler(|_, _: &mut Database| bail!("Some error")), + to_handler(|_, _: _| bail!("Some error")), groups, Database::default(), ); @@ -344,7 +369,7 @@ mod tests { let runner = GenesisRunner::new( Some(Arc::new(Notify::new())), CancellationToken::new(), - to_handler(|_: (), _: &mut Database| Ok(())), + to_handler(|_: (), _: _| Ok(())), groups, 
Database::default(), ); @@ -364,7 +389,7 @@ mod tests { let runner = GenesisRunner::new( Some(Arc::new(Notify::new())), CancellationToken::new(), - to_handler(|_, _: &mut Database| Ok(())), + to_handler(|_, _: _| Ok(())), groups, db.clone(), ); @@ -383,10 +408,10 @@ mod tests { counter: usize, } impl TransactionOpener for OnlyOneTransactionAllowed { - fn transaction(&mut self) -> DatabaseTransaction { + fn transaction(&mut self) -> StorageTransaction<&mut Database> { if self.counter == 0 { self.counter += 1; - Database::transaction(&self.db) + self.db.write_transaction() } else { panic!("Only one transaction should be opened") } @@ -409,7 +434,7 @@ mod tests { let runner = GenesisRunner::new( Some(Arc::new(Notify::new())), CancellationToken::new(), - to_handler(|_, tx: &mut Database| { + to_handler(|_, tx: _| { insert_a_coin(tx, &utxo_id); Ok(()) }), @@ -439,7 +464,7 @@ mod tests { GenesisRunner::new( Some(Arc::clone(&finished_signal)), cancel_token.clone(), - to_handler(move |el, _: &mut Database| { + to_handler(move |el, _: _| { read_groups.lock().unwrap().push(el); Ok(()) }), @@ -498,7 +523,8 @@ mod tests { let runner = GenesisRunner::new( Some(Arc::clone(&finished_signal)), CancellationToken::new(), - Box::new(|_: (), _: &mut Database| Ok(())) as TestHandler<()>, + Box::new(|_: (), _: &mut StorageTransaction<&mut Database>| Ok(())) + as TestHandler<()>, groups, Database::default(), ); @@ -526,56 +552,33 @@ mod tests { } } - impl KeyValueStore for BrokenTransactions { + impl KeyValueInspect for BrokenTransactions { type Column = Column; - fn write(&self, key: &[u8], column: Column, buf: &[u8]) -> StorageResult { - self.store.write(key, column, buf) - } - - fn delete(&self, key: &[u8], column: Column) -> StorageResult<()> { - self.store.delete(key, column) - } fn get(&self, key: &[u8], column: Column) -> StorageResult> { self.store.get(key, column) } } - impl BatchOperations for BrokenTransactions { - fn batch_write( - &self, - _entries: &mut dyn Iterator, 
Column, WriteOperation)>, - ) -> StorageResult<()> { - Err(Error::Other(anyhow!("I refuse to work!"))) - } - - fn delete_all(&self, _column: Column) -> StorageResult<()> { - Err(Error::Other(anyhow!("I refuse to work!"))) - } - } - - impl IteratorableStore for BrokenTransactions { - fn iter_all( + impl IterableStore for BrokenTransactions { + fn iter_store( &self, - _column: Self::Column, - _prefix: Option<&[u8]>, - _start: Option<&[u8]>, - _direction: fuel_core_storage::iter::IterDirection, - ) -> fuel_core_storage::iter::BoxedIter - { + _: Self::Column, + _: Option<&[u8]>, + _: Option<&[u8]>, + _: IterDirection, + ) -> BoxedIter { unimplemented!() } } - impl From for DataSource { - fn from(inner: BrokenTransactions) -> Self { - DataSource::new(Arc::new(inner)) - } - } - - impl TransactableStorage for BrokenTransactions { - fn flush(&self) -> crate::database::Result<()> { - unimplemented!() + impl TransactableStorage for BrokenTransactions { + fn commit_changes( + &self, + _: Option, + _: Changes, + ) -> StorageResult<()> { + Err(anyhow::anyhow!("I refuse to work!").into()) } } @@ -586,9 +589,9 @@ mod tests { let runner = GenesisRunner::new( Some(Arc::new(Notify::new())), CancellationToken::new(), - to_handler(|_, _: &mut Database| Ok(())), + to_handler(|_, _: _| Ok(())), groups, - Database::new(BrokenTransactions::new()), + Database::new(Arc::new(BrokenTransactions::new())), ); // when let result = runner.run(); diff --git a/crates/fuel-core/src/service/genesis/workers.rs b/crates/fuel-core/src/service/genesis/workers.rs index c38cad2f11f..b8a3f86a1f2 100644 --- a/crates/fuel-core/src/service/genesis/workers.rs +++ b/crates/fuel-core/src/service/genesis/workers.rs @@ -11,7 +11,9 @@ use std::{ }; use crate::database::{ + balances::BalancesInitializer, genesis_progress::GenesisResource, + state::StateInitializer, Database, }; use fuel_core_chain_config::{ @@ -23,6 +25,7 @@ use fuel_core_chain_config::{ MessageConfig, StateReader, }; +use 
fuel_core_storage::transactional::StorageTransaction; use fuel_core_types::{ blockchain::primitives::DaBlockHeight, fuel_types::BlockHeight, @@ -198,7 +201,7 @@ impl ProcessState for Handler { fn process( &mut self, group: Vec, - tx: &mut Database, + tx: &mut StorageTransaction<&mut Database>, ) -> anyhow::Result<()> { group.into_iter().try_for_each(|coin| { init_coin(tx, &coin, self.block_height)?; @@ -217,7 +220,7 @@ impl ProcessState for Handler { fn process( &mut self, group: Vec, - tx: &mut Database, + tx: &mut StorageTransaction<&mut Database>, ) -> anyhow::Result<()> { group .into_iter() @@ -235,7 +238,7 @@ impl ProcessState for Handler { fn process( &mut self, group: Vec, - tx: &mut Database, + tx: &mut StorageTransaction<&mut Database>, ) -> anyhow::Result<()> { group.into_iter().try_for_each(|contract| { init_contract(tx, &contract, self.block_height)?; @@ -254,7 +257,7 @@ impl ProcessState for Handler { fn process( &mut self, group: Vec, - tx: &mut Database, + tx: &mut StorageTransaction<&mut Database>, ) -> anyhow::Result<()> { tx.update_contract_states(group)?; Ok(()) @@ -271,7 +274,7 @@ impl ProcessState for Handler { fn process( &mut self, group: Vec, - tx: &mut Database, + tx: &mut StorageTransaction<&mut Database>, ) -> anyhow::Result<()> { tx.update_contract_balances(group)?; Ok(()) diff --git a/crates/fuel-core/src/state.rs b/crates/fuel-core/src/state.rs index 34a05bf7abe..a707ba30b27 100644 --- a/crates/fuel-core/src/state.rs +++ b/crates/fuel-core/src/state.rs @@ -1,22 +1,20 @@ -use crate::{ - database::{ - database_description::{ - on_chain::OnChain, - DatabaseDescription, - }, - Result as DatabaseResult, - }, - state::in_memory::{ - memory_store::MemoryStore, - transaction::MemoryTransactionView, - }, -}; +use crate::database::database_description::DatabaseDescription; use fuel_core_storage::{ iter::{ + BoxedIter, + IntoBoxedIter, IterDirection, - IteratorableStore, + IterableStore, }, - kv_store::BatchOperations, + kv_store::{ + KVItem, + 
KeyValueInspect, + StorageColumn, + Value, + WriteOperation, + }, + transactional::Changes, + Result as StorageResult, }; use std::{ fmt::Debug, @@ -27,70 +25,88 @@ pub mod in_memory; #[cfg(feature = "rocksdb")] pub mod rocks_db; -type DataSourceInner = Arc>; - -#[derive(Clone, Debug)] -pub struct DataSource(DataSourceInner) +#[allow(type_alias_bounds)] +pub type DataSource where - Description: DatabaseDescription; + Description: DatabaseDescription, += Arc>; -impl DataSource { - pub fn new(inner: DataSourceInner<::Column>) -> Self { - Self(inner) - } +pub trait TransactableStorage: IterableStore + Debug + Send + Sync { + /// Commits the changes into the storage. + fn commit_changes( + &self, + height: Option, + changes: Changes, + ) -> StorageResult<()>; } -impl From>> - for DataSource +// It is used only to allow conversion of the `StorageTransaction` into the `DataSource`. +#[cfg(feature = "test-helpers")] +impl TransactableStorage + for fuel_core_storage::transactional::StorageTransaction where - Description: DatabaseDescription, + S: IterableStore + Debug + Send + Sync, { - fn from(inner: Arc>) -> Self { - Self(inner) + fn commit_changes(&self, _: Option, _: Changes) -> StorageResult<()> { + unimplemented!() } } -#[cfg(feature = "rocksdb")] -impl From>> for DataSource -where - Description: DatabaseDescription, -{ - fn from(inner: Arc>) -> Self { - Self(inner) - } +/// A type that allows to iterate over the `Changes`. +pub struct ChangesIterator<'a, Description> { + changes: &'a Changes, + _marker: core::marker::PhantomData, } -impl From>> for DataSource -where - Description: DatabaseDescription, -{ - fn from(inner: Arc>) -> Self { - Self(inner) +impl<'a, Description> ChangesIterator<'a, Description> { + /// Creates a new instance of the `ChangesIterator`. 
+ pub fn new(changes: &'a Changes) -> Self { + Self { + changes, + _marker: Default::default(), + } } } -impl core::ops::Deref for DataSource +impl<'a, Description> KeyValueInspect for ChangesIterator<'a, Description> where Description: DatabaseDescription, { - type Target = DataSourceInner; + type Column = Description::Column; - fn deref(&self) -> &Self::Target { - &self.0 + fn get(&self, key: &[u8], column: Self::Column) -> StorageResult> { + Ok(self + .changes + .get(&column.id()) + .and_then(|tree| tree.get(&key.to_vec())) + .and_then(|operation| match operation { + WriteOperation::Insert(value) => Some(value.clone()), + WriteOperation::Remove => None, + })) } } -impl core::ops::DerefMut for DataSource +impl<'a, Description> IterableStore for ChangesIterator<'a, Description> where Description: DatabaseDescription, { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 + fn iter_store( + &self, + column: Self::Column, + prefix: Option<&[u8]>, + start: Option<&[u8]>, + direction: IterDirection, + ) -> BoxedIter { + if let Some(tree) = self.changes.get(&column.id()) { + fuel_core_storage::iter::iterator(tree, prefix, start, direction) + .filter_map(|(key, value)| match value { + WriteOperation::Insert(value) => Some((key.clone(), value.clone())), + WriteOperation::Remove => None, + }) + .map(Ok) + .into_boxed() + } else { + core::iter::empty().into_boxed() + } } } - -pub trait TransactableStorage: - IteratorableStore + BatchOperations + Debug + Send + Sync -{ - fn flush(&self) -> DatabaseResult<()>; -} diff --git a/crates/fuel-core/src/state/in_memory.rs b/crates/fuel-core/src/state/in_memory.rs index cf705ac02a3..83dfa95baa9 100644 --- a/crates/fuel-core/src/state/in_memory.rs +++ b/crates/fuel-core/src/state/in_memory.rs @@ -1,2 +1,2 @@ +// TODO: Move the implementation from the module to here. 
pub mod memory_store; -pub mod transaction; diff --git a/crates/fuel-core/src/state/in_memory/memory_store.rs b/crates/fuel-core/src/state/in_memory/memory_store.rs index 48e42e433ec..28f1fca30ce 100644 --- a/crates/fuel-core/src/state/in_memory/memory_store.rs +++ b/crates/fuel-core/src/state/in_memory/memory_store.rs @@ -1,38 +1,34 @@ use crate::{ - database::{ - database_description::{ - on_chain::OnChain, - DatabaseDescription, - }, - Result as DatabaseResult, + database::database_description::{ + on_chain::OnChain, + DatabaseDescription, }, state::{ - BatchOperations, IterDirection, TransactableStorage, }, }; use fuel_core_storage::{ iter::{ + iterator, BoxedIter, IntoBoxedIter, - IteratorableStore, + IterableStore, }, kv_store::{ KVItem, - KeyValueStore, + KeyValueInspect, StorageColumn, Value, + WriteOperation, }, + transactional::Changes, Result as StorageResult, }; use std::{ collections::BTreeMap, fmt::Debug, - sync::{ - Arc, - Mutex, - }, + sync::Mutex, }; #[derive(Debug)] @@ -40,7 +36,6 @@ pub struct MemoryStore where Description: DatabaseDescription, { - // TODO: Remove `Mutex`. inner: Vec, Value>>>, _marker: core::marker::PhantomData, } @@ -77,116 +72,34 @@ where (kv.0.clone(), kv.1.clone()) } - let collection: Vec<_> = match (prefix, start) { - (None, None) => { - if direction == IterDirection::Forward { - lock.iter().map(clone).collect() - } else { - lock.iter().rev().map(clone).collect() - } - } - (Some(prefix), None) => { - if direction == IterDirection::Forward { - lock.range(prefix.to_vec()..) - .take_while(|(key, _)| key.starts_with(prefix)) - .map(clone) - .collect() - } else { - let mut vec: Vec<_> = lock - .range(prefix.to_vec()..) 
- .into_boxed() - .take_while(|(key, _)| key.starts_with(prefix)) - .map(clone) - .collect(); - - vec.reverse(); - vec - } - } - (None, Some(start)) => { - if direction == IterDirection::Forward { - lock.range(start.to_vec()..).map(clone).collect() - } else { - lock.range(..=start.to_vec()).rev().map(clone).collect() - } - } - (Some(prefix), Some(start)) => { - if direction == IterDirection::Forward { - lock.range(start.to_vec()..) - .take_while(|(key, _)| key.starts_with(prefix)) - .map(clone) - .collect() - } else { - lock.range(..=start.to_vec()) - .rev() - .take_while(|(key, _)| key.starts_with(prefix)) - .map(clone) - .collect() - } - } - }; + let collection: Vec<_> = iterator(&lock, prefix, start, direction) + .map(clone) + .collect(); collection.into_iter().map(Ok) } } -impl KeyValueStore for MemoryStore +impl KeyValueInspect for MemoryStore where Description: DatabaseDescription, { type Column = Description::Column; - fn replace( - &self, - key: &[u8], - column: Self::Column, - value: Value, - ) -> StorageResult> { - Ok(self.inner[column.as_usize()] - .lock() - .expect("poisoned") - .insert(key.to_vec(), value)) - } - - fn write( - &self, - key: &[u8], - column: Self::Column, - buf: &[u8], - ) -> StorageResult { - let len = buf.len(); - self.inner[column.as_usize()] - .lock() - .expect("poisoned") - .insert(key.to_vec(), Arc::new(buf.to_vec())); - Ok(len) - } - - fn take(&self, key: &[u8], column: Self::Column) -> StorageResult> { - Ok(self.inner[column.as_usize()] - .lock() - .expect("poisoned") - .remove(&key.to_vec())) - } - - fn delete(&self, key: &[u8], column: Self::Column) -> StorageResult<()> { - self.take(key, column).map(|_| ()) - } - fn get(&self, key: &[u8], column: Self::Column) -> StorageResult> { Ok(self.inner[column.as_usize()] .lock() - .expect("poisoned") + .map_err(|e| anyhow::anyhow!("The lock is poisoned: {}", e))? 
.get(&key.to_vec()) .cloned()) } } -impl IteratorableStore for MemoryStore +impl IterableStore for MemoryStore where Description: DatabaseDescription, { - fn iter_all( + fn iter_store( &self, column: Self::Column, prefix: Option<&[u8]>, @@ -197,27 +110,30 @@ where } } -impl BatchOperations for MemoryStore +impl TransactableStorage for MemoryStore where Description: DatabaseDescription, { - fn delete_all(&self, column: Self::Column) -> StorageResult<()> { - self.inner[column.as_usize()] - .lock() - .expect("poisoned") - .clear(); - - Ok(()) - } -} - -impl TransactableStorage for MemoryStore -where - Description: DatabaseDescription, -{ - fn flush(&self) -> DatabaseResult<()> { - for lock in self.inner.iter() { - lock.lock().expect("poisoned").clear(); + fn commit_changes( + &self, + _: Option, + changes: Changes, + ) -> StorageResult<()> { + for (column, btree) in changes.into_iter() { + let mut lock = self.inner[column as usize] + .lock() + .map_err(|e| anyhow::anyhow!("The lock is poisoned: {}", e))?; + + for (key, operation) in btree.into_iter() { + match operation { + WriteOperation::Insert(value) => { + lock.insert(key, value); + } + WriteOperation::Remove => { + lock.remove(&key); + } + } + } } Ok(()) } @@ -226,14 +142,44 @@ where #[cfg(test)] mod tests { use super::*; - use fuel_core_storage::column::Column; + use fuel_core_storage::{ + column::Column, + kv_store::KeyValueMutate, + transactional::ReadTransaction, + }; use std::sync::Arc; + impl KeyValueMutate for MemoryStore + where + Description: DatabaseDescription, + { + fn write( + &mut self, + key: &[u8], + column: Self::Column, + buf: &[u8], + ) -> StorageResult { + let mut transaction = self.read_transaction(); + let len = transaction.write(key, column, buf)?; + let changes = transaction.into_changes(); + self.commit_changes(Default::default(), changes)?; + Ok(len) + } + + fn delete(&mut self, key: &[u8], column: Self::Column) -> StorageResult<()> { + let mut transaction = self.read_transaction(); + 
transaction.delete(key, column)?; + let changes = transaction.into_changes(); + self.commit_changes(Default::default(), changes)?; + Ok(()) + } + } + #[test] fn can_use_unit_value() { let key = vec![0x00]; - let db = MemoryStore::::default(); + let mut db = MemoryStore::::default(); let expected = Arc::new(vec![]); db.put(&key.to_vec(), Column::Metadata, expected.clone()) .unwrap(); @@ -258,7 +204,7 @@ mod tests { fn can_use_unit_key() { let key: Vec = Vec::with_capacity(0); - let db = MemoryStore::::default(); + let mut db = MemoryStore::::default(); let expected = Arc::new(vec![1, 2, 3]); db.put(&key, Column::Metadata, expected.clone()).unwrap(); @@ -282,7 +228,7 @@ mod tests { fn can_use_unit_key_and_value() { let key: Vec = Vec::with_capacity(0); - let db = MemoryStore::::default(); + let mut db = MemoryStore::::default(); let expected = Arc::new(vec![]); db.put(&key, Column::Metadata, expected.clone()).unwrap(); diff --git a/crates/fuel-core/src/state/in_memory/transaction.rs b/crates/fuel-core/src/state/in_memory/transaction.rs deleted file mode 100644 index 56be0b36bfd..00000000000 --- a/crates/fuel-core/src/state/in_memory/transaction.rs +++ /dev/null @@ -1,725 +0,0 @@ -use crate::{ - database::{ - database_description::{ - on_chain::OnChain, - DatabaseDescription, - }, - Result as DatabaseResult, - }, - state::{ - in_memory::memory_store::MemoryStore, - BatchOperations, - DataSource, - IterDirection, - TransactableStorage, - }, -}; -use fuel_core_storage::{ - iter::{ - BoxedIter, - IntoBoxedIter, - IteratorableStore, - }, - kv_store::{ - KVItem, - KeyValueStore, - StorageColumn, - Value, - WriteOperation, - }, - Result as StorageResult, -}; -use itertools::{ - EitherOrBoth, - Itertools, -}; -use std::{ - cmp::Ordering, - collections::HashMap, - fmt::Debug, - ops::DerefMut, - sync::{ - Arc, - Mutex, - }, -}; - -#[derive(Debug)] -pub struct MemoryTransactionView -where - Description: DatabaseDescription, -{ - view_layer: MemoryStore, - // TODO: Remove 
`Mutex`. - // use hashmap to collapse changes (e.g. insert then remove the same key) - changes: Vec, WriteOperation>>>, - data_source: DataSource, -} - -impl MemoryTransactionView -where - Description: DatabaseDescription, -{ - pub fn new(source: D) -> Self - where - D: Into>, - { - use strum::EnumCount; - Self { - view_layer: MemoryStore::default(), - changes: (0..Description::Column::COUNT) - .map(|_| Mutex::new(HashMap::new())) - .collect(), - data_source: source.into(), - } - } - - pub fn commit(&self) -> StorageResult<()> { - let mut iter = self - .changes - .iter() - .zip(enum_iterator::all::()) - .flat_map(|(column_map, column)| { - let mut map = column_map.lock().expect("poisoned lock"); - let changes = core::mem::take(map.deref_mut()); - - changes.into_iter().map(move |t| (t.0, column, t.1)) - }); - - self.data_source.batch_write(&mut iter) - } -} - -impl KeyValueStore for MemoryTransactionView -where - Description: DatabaseDescription, -{ - type Column = Description::Column; - - fn replace( - &self, - key: &[u8], - column: Self::Column, - value: Value, - ) -> StorageResult> { - let key_vec = key.to_vec(); - let contained_key = self.changes[column.as_usize()] - .lock() - .expect("poisoned lock") - .insert(key_vec, WriteOperation::Insert(value.clone())) - .is_some(); - let res = self.view_layer.replace(key, column, value); - if contained_key { - res - } else { - self.data_source.get(key, column) - } - } - - fn write( - &self, - key: &[u8], - column: Self::Column, - buf: &[u8], - ) -> StorageResult { - let k = key.to_vec(); - self.changes[column.as_usize()] - .lock() - .expect("poisoned lock") - .insert(k, WriteOperation::Insert(Arc::new(buf.to_vec()))); - self.view_layer.write(key, column, buf) - } - - fn take(&self, key: &[u8], column: Self::Column) -> StorageResult> { - let k = key.to_vec(); - let contained_key = { - let mut lock = self.changes[column.as_usize()] - .lock() - .expect("poisoned lock"); - lock.insert(k, WriteOperation::Remove).is_some() - }; 
- let res = self.view_layer.take(key, column); - if contained_key { - res - } else { - self.data_source.get(key, column) - } - } - - fn delete(&self, key: &[u8], column: Self::Column) -> StorageResult<()> { - let k = key.to_vec(); - self.changes[column.as_usize()] - .lock() - .expect("poisoned lock") - .insert(k, WriteOperation::Remove); - self.view_layer.delete(key, column) - } - - fn size_of_value( - &self, - key: &[u8], - column: Self::Column, - ) -> StorageResult> { - // try to fetch data from View layer if any changes to the key - if self.changes[column.as_usize()] - .lock() - .expect("poisoned lock") - .contains_key(&key.to_vec()) - { - self.view_layer.size_of_value(key, column) - } else { - // fall-through to original data source - // Note: The getting size from original database may be more performant than from `get` - self.data_source.size_of_value(key, column) - } - } - - fn get(&self, key: &[u8], column: Self::Column) -> StorageResult> { - // try to fetch data from View layer if any changes to the key - if self.changes[column.as_usize()] - .lock() - .expect("poisoned lock") - .contains_key(&key.to_vec()) - { - self.view_layer.get(key, column) - } else { - // fall-through to original data source - self.data_source.get(key, column) - } - } - - fn read( - &self, - key: &[u8], - column: Self::Column, - buf: &mut [u8], - ) -> StorageResult> { - // try to fetch data from View layer if any changes to the key - if self.changes[column.as_usize()] - .lock() - .expect("poisoned lock") - .contains_key(&key.to_vec()) - { - self.view_layer.read(key, column, buf) - } else { - // fall-through to original data source - // Note: The read from original database may be more performant than from `get` - self.data_source.read(key, column, buf) - } - } -} - -impl IteratorableStore for MemoryTransactionView -where - Description: DatabaseDescription, -{ - fn iter_all( - &self, - column: Self::Column, - prefix: Option<&[u8]>, - start: Option<&[u8]>, - direction: IterDirection, - 
) -> BoxedIter { - // iterate over inmemory + db while also filtering deleted entries - self.view_layer - // iter_all returns items in sorted order - .iter_all(column, prefix, start, direction) - // Merge two sorted iterators (our current view overlay + backing data source) - .merge_join_by( - self.data_source.iter_all(column, prefix, start, direction), - move |i, j| { - if let (Ok(i), Ok(j)) = (i, j) { - if IterDirection::Forward == direction { - i.0.cmp(&j.0) - } else { - j.0.cmp(&i.0) - } - } else { - // prioritize errors from db result first - if j.is_err() { - Ordering::Greater - } else { - Ordering::Less - } - } - }, - ) - .map(|either_both| { - match either_both { - // in the case of overlap, choose the left-side (our view overlay) - EitherOrBoth::Both(v, _) - | EitherOrBoth::Left(v) - | EitherOrBoth::Right(v) => v, - } - }) - // filter entries which have been deleted over the course of this transaction - .filter(move |item| { - if let Ok((key, _)) = item { - !matches!( - self.changes[column.as_usize()] - .lock() - .expect("poisoned") - .get(key), - Some(WriteOperation::Remove) - ) - } else { - // ensure errors are propagated - true - } - }).into_boxed() - } -} - -impl BatchOperations for MemoryTransactionView -where - Description: DatabaseDescription, -{ - fn delete_all(&self, column: Self::Column) -> StorageResult<()> { - self.view_layer.delete_all(column)?; - - let ops: Vec<_> = self - .data_source - .iter_all(column, None, None, IterDirection::Forward) - .map_ok(|(key, _)| (key, WriteOperation::Remove)) - .try_collect()?; - - self.changes[column.as_usize()] - .lock() - .expect("poisoned lock") - .extend(ops); - - Ok(()) - } -} - -impl TransactableStorage for MemoryTransactionView -where - Description: DatabaseDescription, -{ - fn flush(&self) -> DatabaseResult<()> { - for lock in self.changes.iter() { - lock.lock().expect("poisoned lock").clear(); - } - self.view_layer.flush()?; - self.data_source.flush() - } -} - -#[cfg(test)] -mod tests { - use 
super::*; - use fuel_core_storage::column::Column; - use std::sync::Arc; - - type MemoryTransactionView = super::MemoryTransactionView; - - #[test] - fn get_returns_from_view() { - // setup - let store = Arc::new(MemoryStore::default()); - let view = MemoryTransactionView::new(store); - let key = vec![0xA, 0xB, 0xC]; - let expected = Arc::new(vec![1, 2, 3]); - view.put(&key, Column::Metadata, expected.clone()).unwrap(); - // test - let ret = view.get(&key, Column::Metadata).unwrap(); - // verify - assert_eq!(ret, Some(expected)) - } - - #[test] - fn get_returns_from_data_store_when_key_not_in_view() { - // setup - let store = Arc::new(MemoryStore::default()); - let key = vec![0xA, 0xB, 0xC]; - let expected = Arc::new(vec![1, 2, 3]); - store.put(&key, Column::Metadata, expected.clone()).unwrap(); - let view = MemoryTransactionView::new(store); - // test - let ret = view.get(&key, Column::Metadata).unwrap(); - // verify - assert_eq!(ret, Some(expected)) - } - - #[test] - fn get_does_not_fetch_from_datastore_if_intentionally_deleted_from_view() { - // setup - let store = Arc::new(MemoryStore::default()); - let key = vec![0xA, 0xB, 0xC]; - let expected = Arc::new(vec![1, 2, 3]); - store.put(&key, Column::Metadata, expected.clone()).unwrap(); - let view = MemoryTransactionView::new(store.clone()); - view.delete(&key, Column::Metadata).unwrap(); - // test - let ret = view.get(&key, Column::Metadata).unwrap(); - let original = store.get(&key, Column::Metadata).unwrap(); - // verify - assert_eq!(ret, None); - // also ensure the original value is still intact and we aren't just passing - // through None from the data store - assert_eq!(original, Some(expected)) - } - - #[test] - fn can_insert_value_into_view() { - // setup - let store = Arc::new(MemoryStore::default()); - let view = MemoryTransactionView::new(store); - let expected = Arc::new(vec![1, 2, 3]); - view.put(&[0xA, 0xB, 0xC], Column::Metadata, expected.clone()) - .unwrap(); - // test - let ret = view - 
.replace(&[0xA, 0xB, 0xC], Column::Metadata, Arc::new(vec![2, 4, 6])) - .unwrap(); - // verify - assert_eq!(ret, Some(expected)) - } - - #[test] - fn delete_value_from_view_returns_value() { - // setup - let store = Arc::new(MemoryStore::default()); - let view = MemoryTransactionView::new(store); - let key = vec![0xA, 0xB, 0xC]; - let expected = Arc::new(vec![1, 2, 3]); - view.put(&key, Column::Metadata, expected.clone()).unwrap(); - // test - let ret = view.take(&key, Column::Metadata).unwrap(); - let get = view.get(&key, Column::Metadata).unwrap(); - // verify - assert_eq!(ret, Some(expected)); - assert_eq!(get, None) - } - - #[test] - fn delete_returns_datastore_value_when_not_in_view() { - // setup - let store = Arc::new(MemoryStore::default()); - let key = vec![0xA, 0xB, 0xC]; - let expected = Arc::new(vec![1, 2, 3]); - store.put(&key, Column::Metadata, expected.clone()).unwrap(); - let view = MemoryTransactionView::new(store); - // test - let ret = view.take(&key, Column::Metadata).unwrap(); - let get = view.get(&key, Column::Metadata).unwrap(); - // verify - assert_eq!(ret, Some(expected)); - assert_eq!(get, None) - } - - #[test] - fn delete_does_not_return_datastore_value_when_deleted_twice() { - // setup - let store = Arc::new(MemoryStore::default()); - let key = vec![0xA, 0xB, 0xC]; - let expected = Arc::new(vec![1, 2, 3]); - store.put(&key, Column::Metadata, expected.clone()).unwrap(); - let view = MemoryTransactionView::new(store); - // test - let ret1 = view.take(&key, Column::Metadata).unwrap(); - let ret2 = view.take(&key, Column::Metadata).unwrap(); - let get = view.get(&key, Column::Metadata).unwrap(); - // verify - assert_eq!(ret1, Some(expected)); - assert_eq!(ret2, None); - assert_eq!(get, None) - } - - #[test] - fn exists_checks_view_values() { - // setup - let store = Arc::new(MemoryStore::default()); - let view = MemoryTransactionView::new(store); - let key = vec![0xA, 0xB, 0xC]; - let expected = Arc::new(vec![1, 2, 3]); - view.put(&key, 
Column::Metadata, expected).unwrap(); - // test - let ret = view.exists(&key, Column::Metadata).unwrap(); - // verify - assert!(ret) - } - - #[test] - fn exists_checks_data_store_when_not_in_view() { - // setup - let store = Arc::new(MemoryStore::default()); - let key = vec![0xA, 0xB, 0xC]; - let expected = Arc::new(vec![1, 2, 3]); - store.put(&key, Column::Metadata, expected).unwrap(); - let view = MemoryTransactionView::new(store); - // test - let ret = view.exists(&key, Column::Metadata).unwrap(); - // verify - assert!(ret) - } - - #[test] - fn exists_does_not_check_data_store_after_intentional_removal_from_view() { - // setup - let store = Arc::new(MemoryStore::default()); - let key = vec![0xA, 0xB, 0xC]; - let expected = Arc::new(vec![1, 2, 3]); - store.put(&key, Column::Metadata, expected).unwrap(); - let view = MemoryTransactionView::new(store.clone()); - view.delete(&key, Column::Metadata).unwrap(); - // test - let ret = view.exists(&key, Column::Metadata).unwrap(); - let original = store.exists(&key, Column::Metadata).unwrap(); - // verify - assert!(!ret); - // also ensure the original value is still intact and we aren't just passing - // through None from the data store - assert!(original) - } - - #[test] - fn commit_applies_puts() { - // setup - let store = Arc::new(MemoryStore::default()); - let view = MemoryTransactionView::new(store.clone()); - let key = vec![0xA, 0xB, 0xC]; - let expected = Arc::new(vec![1, 2, 3]); - view.put(&key, Column::Metadata, expected.clone()).unwrap(); - // test - view.commit().unwrap(); - let ret = store.get(&key, Column::Metadata).unwrap(); - // verify - assert_eq!(ret, Some(expected)) - } - - #[test] - fn commit_applies_deletes() { - // setup - let store = Arc::new(MemoryStore::default()); - let key = vec![0xA, 0xB, 0xC]; - let expected = Arc::new(vec![1, 2, 3]); - store.put(&key, Column::Metadata, expected).unwrap(); - let view = MemoryTransactionView::new(store.clone()); - // test - view.delete(&key, 
Column::Metadata).unwrap(); - view.commit().unwrap(); - let ret = store.get(&key, Column::Metadata).unwrap(); - // verify - assert_eq!(ret, None) - } - - #[test] - fn iter_all_is_sorted_across_source_and_view() { - // setup - let store = Arc::new(MemoryStore::default()); - (0..10).step_by(2).for_each(|i| { - store - .put(&[i], Column::Metadata, Arc::new(vec![1])) - .unwrap(); - }); - - let view = MemoryTransactionView::new(store); - // test - (0..10).step_by(3).for_each(|i| { - view.put(&[i], Column::Metadata, Arc::new(vec![2])).unwrap(); - }); - - let ret: Vec<_> = view - .iter_all(Column::Metadata, None, None, IterDirection::Forward) - .map_ok(|(k, _)| k[0]) - .try_collect() - .unwrap(); - // verify - assert_eq!(ret, vec![0, 2, 3, 4, 6, 8, 9]) - } - - #[test] - fn iter_all_is_reversible() { - // setup - let store = Arc::new(MemoryStore::default()); - (0..10).step_by(2).for_each(|i| { - store - .put(&[i], Column::Metadata, Arc::new(vec![1])) - .unwrap(); - }); - - let view = MemoryTransactionView::new(store); - // test - (0..10).step_by(3).for_each(|i| { - view.put(&[i], Column::Metadata, Arc::new(vec![2])).unwrap(); - }); - - let ret: Vec<_> = view - .iter_all(Column::Metadata, None, None, IterDirection::Reverse) - .map_ok(|(k, _)| k[0]) - .try_collect() - .unwrap(); - // verify - assert_eq!(ret, vec![9, 8, 6, 4, 3, 2, 0]) - } - - #[test] - fn iter_all_overrides_data_source_keys() { - // setup - let store = Arc::new(MemoryStore::default()); - (0..10).step_by(2).for_each(|i| { - store - .put(&[i], Column::Metadata, Arc::new(vec![0xA])) - .unwrap(); - }); - - let view = MemoryTransactionView::new(store); - // test - (0..10).step_by(2).for_each(|i| { - view.put(&[i], Column::Metadata, Arc::new(vec![0xB])) - .unwrap(); - }); - - let ret: Vec<_> = view - .iter_all(Column::Metadata, None, None, IterDirection::Forward) - // return all the values from the iterator - .map_ok(|(_, v)| v[0]) - .try_collect() - .unwrap(); - // verify - assert_eq!(ret, vec![0xB, 0xB, 0xB, 
0xB, 0xB]) - } - - #[test] - fn iter_all_hides_deleted_data_source_keys() { - // setup - let store = Arc::new(MemoryStore::default()); - (0..10).step_by(2).for_each(|i| { - store - .put(&[i], Column::Metadata, Arc::new(vec![0xA])) - .unwrap(); - }); - - let view = MemoryTransactionView::new(store); - // test - view.delete(&[0], Column::Metadata).unwrap(); - view.delete(&[6], Column::Metadata).unwrap(); - - let ret: Vec<_> = view - .iter_all(Column::Metadata, None, None, IterDirection::Forward) - // return all the values from the iterator - .map_ok(|(k, _)| k[0]) - .try_collect() - .unwrap(); - // verify - assert_eq!(ret, vec![2, 4, 8]) - } - - #[test] - fn can_use_unit_value() { - let key = vec![0x00]; - - let store = Arc::new(MemoryStore::default()); - let db = MemoryTransactionView::new(store.clone()); - let expected = Arc::new(vec![]); - db.put(&key, Column::Metadata, expected.clone()).unwrap(); - - assert_eq!(db.get(&key, Column::Metadata).unwrap().unwrap(), expected); - - assert!(db.exists(&key, Column::Metadata).unwrap()); - - assert_eq!( - db.iter_all(Column::Metadata, None, None, IterDirection::Forward) - .collect::, _>>() - .unwrap(), - vec![(key.clone(), expected.clone())] - ); - - assert_eq!(db.take(&key, Column::Metadata).unwrap().unwrap(), expected); - - assert!(!db.exists(&key, Column::Metadata).unwrap()); - - db.commit().unwrap(); - - assert!(!store.exists(&key, Column::Metadata).unwrap()); - - let store = Arc::new(MemoryStore::default()); - let db = MemoryTransactionView::new(store.clone()); - db.put(&key, Column::Metadata, expected.clone()).unwrap(); - db.commit().unwrap(); - - assert_eq!( - store.get(&key, Column::Metadata).unwrap().unwrap(), - expected - ); - } - - #[test] - fn can_use_unit_key() { - let key: Vec = Vec::with_capacity(0); - - let store = Arc::new(MemoryStore::default()); - let db = MemoryTransactionView::new(store.clone()); - let expected = Arc::new(vec![]); - db.put(&key, Column::Metadata, expected.clone()).unwrap(); - - 
assert_eq!(db.get(&key, Column::Metadata).unwrap().unwrap(), expected); - - assert!(db.exists(&key, Column::Metadata).unwrap()); - - assert_eq!( - db.iter_all(Column::Metadata, None, None, IterDirection::Forward) - .collect::, _>>() - .unwrap(), - vec![(key.clone(), expected.clone())] - ); - - assert_eq!(db.take(&key, Column::Metadata).unwrap().unwrap(), expected); - - assert!(!db.exists(&key, Column::Metadata).unwrap()); - - db.commit().unwrap(); - - assert!(!store.exists(&key, Column::Metadata).unwrap()); - - let store = Arc::new(MemoryStore::default()); - let db = MemoryTransactionView::new(store.clone()); - db.put(&key, Column::Metadata, expected.clone()).unwrap(); - db.commit().unwrap(); - - assert_eq!( - store.get(&key, Column::Metadata).unwrap().unwrap(), - expected - ); - } - - #[test] - fn can_use_unit_key_and_value() { - let key: Vec = Vec::with_capacity(0); - - let store = Arc::new(MemoryStore::default()); - let db = MemoryTransactionView::new(store.clone()); - let expected = Arc::new(vec![]); - db.put(&key, Column::Metadata, expected.clone()).unwrap(); - - assert_eq!(db.get(&key, Column::Metadata).unwrap().unwrap(), expected); - - assert!(db.exists(&key, Column::Metadata).unwrap()); - - assert_eq!( - db.iter_all(Column::Metadata, None, None, IterDirection::Forward) - .collect::, _>>() - .unwrap(), - vec![(key.clone(), expected.clone())] - ); - - assert_eq!(db.take(&key, Column::Metadata).unwrap().unwrap(), expected); - - assert!(!db.exists(&key, Column::Metadata).unwrap()); - - db.commit().unwrap(); - - assert!(!store.exists(&key, Column::Metadata).unwrap()); - - let store = Arc::new(MemoryStore::default()); - let db = MemoryTransactionView::new(store.clone()); - db.put(&key, Column::Metadata, expected.clone()).unwrap(); - db.commit().unwrap(); - - assert_eq!( - store.get(&key, Column::Metadata).unwrap().unwrap(), - expected - ); - } -} diff --git a/crates/fuel-core/src/state/rocks_db.rs b/crates/fuel-core/src/state/rocks_db.rs index 
9155b0b03c4..6d7dcc8e136 100644 --- a/crates/fuel-core/src/state/rocks_db.rs +++ b/crates/fuel-core/src/state/rocks_db.rs @@ -6,7 +6,6 @@ use crate::{ Result as DatabaseResult, }, state::{ - BatchOperations, IterDirection, TransactableStorage, }, @@ -16,16 +15,16 @@ use fuel_core_storage::{ iter::{ BoxedIter, IntoBoxedIter, - IteratorableStore, + IterableStore, }, kv_store::{ KVItem, - KeyValueStore, + KeyValueInspect, StorageColumn, Value, WriteOperation, }, - Error as StorageError, + transactional::Changes, Result as StorageResult, }; use rand::RngCore; @@ -46,7 +45,11 @@ use rocksdb::{ use std::{ cmp, env, - fmt::Debug, + fmt, + fmt::{ + Debug, + Formatter, + }, iter, path::{ Path, @@ -54,6 +57,7 @@ use std::{ }, sync::Arc, }; +use tempfile::TempDir; type DB = DBWithThreadMode; @@ -92,9 +96,40 @@ impl Drop for ShallowTempDir { } } +type DropFn = Box; +#[derive(Default)] +struct DropResources { + // move resources into this closure to have them dropped when db drops + drop: Option, +} + +impl fmt::Debug for DropResources { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "DropResources") + } +} + +impl From for DropResources { + fn from(closure: F) -> Self { + Self { + drop: Option::Some(Box::new(closure)), + } + } +} + +impl Drop for DropResources { + fn drop(&mut self) { + if let Some(drop) = self.drop.take() { + (drop)() + } + } +} + #[derive(Debug)] pub struct RocksDb { db: DB, + // used for RAII + _drop: DropResources, _marker: core::marker::PhantomData, } @@ -102,6 +137,27 @@ impl RocksDb where Description: DatabaseDescription, { + pub fn default_open_temp(capacity: Option) -> DatabaseResult { + let tmp_dir = TempDir::new().unwrap(); + let path = tmp_dir.path(); + let result = Self::open( + path, + enum_iterator::all::().collect::>(), + capacity, + ); + let mut db = result?; + + db._drop = { + move || { + // cleanup temp dir + drop(tmp_dir); + } + } + .into(); + + Ok(db) + } + pub fn default_open>( path: P, capacity: Option, @@ -114,6 
+170,7 @@ where } pub fn prune(path: &Path) -> DatabaseResult<()> { + let path = path.join(Description::name()); DB::destroy(&Options::default(), path) .map_err(|e| DatabaseError::Other(e.into()))?; Ok(()) @@ -146,7 +203,10 @@ where block_opts.set_bloom_filter(10.0, true); let cf_descriptors = columns.clone().into_iter().map(|i| { - ColumnFamilyDescriptor::new(Self::col_name(i), Self::cf_opts(i, &block_opts)) + ColumnFamilyDescriptor::new( + Self::col_name(i.id()), + Self::cf_opts(i, &block_opts), + ) }); let mut opts = Options::default(); @@ -174,7 +234,7 @@ where Ok(db) => { for i in columns { let opts = Self::cf_opts(i, &block_opts); - db.create_cf(Self::col_name(i), &opts) + db.create_cf(Self::col_name(i.id()), &opts) .map_err(|e| DatabaseError::Other(e.into()))?; } Ok(db) @@ -186,7 +246,7 @@ where let cf_descriptors = columns.clone().into_iter().map(|i| { ColumnFamilyDescriptor::new( - Self::col_name(i), + Self::col_name(i.id()), Self::cf_opts(i, &block_opts), ) }); @@ -199,19 +259,24 @@ where .map_err(|e| DatabaseError::Other(e.into()))?; let rocks_db = RocksDb { db, + _drop: Default::default(), _marker: Default::default(), }; Ok(rocks_db) } fn cf(&self, column: Description::Column) -> Arc { + self.cf_u32(column.id()) + } + + fn cf_u32(&self, column: u32) -> Arc { self.db .cf_handle(&Self::col_name(column)) .expect("invalid column state") } - fn col_name(column: Description::Column) -> String { - format!("col-{}", column.as_usize()) + fn col_name(column: u32) -> String { + format!("col-{}", column) } fn cf_opts(column: Description::Column, block_opts: &BlockBasedOptions) -> Options { @@ -242,7 +307,7 @@ where ) -> impl Iterator + '_ { let maybe_next_item = next_prefix(prefix.to_vec()) .and_then(|next_prefix| { - self.iter_all( + self.iter_store( column, Some(next_prefix.as_slice()), None, @@ -310,35 +375,12 @@ where } } -impl KeyValueStore for RocksDb +impl KeyValueInspect for RocksDb where Description: DatabaseDescription, { type Column = 
Description::Column; - fn write( - &self, - key: &[u8], - column: Self::Column, - buf: &[u8], - ) -> StorageResult { - let r = buf.len(); - self.db - .put_cf(&self.cf(column), key, buf) - .map_err(|e| DatabaseError::Other(e.into()))?; - - database_metrics().write_meter.inc(); - database_metrics().bytes_written.observe(r as f64); - - Ok(r) - } - - fn delete(&self, key: &[u8], column: Self::Column) -> StorageResult<()> { - self.db - .delete_cf(&self.cf(column), key) - .map_err(|e| DatabaseError::Other(e.into()).into()) - } - fn size_of_value( &self, key: &[u8], @@ -396,11 +438,11 @@ where } } -impl IteratorableStore for RocksDb +impl IterableStore for RocksDb where Description: DatabaseDescription, { - fn iter_all( + fn iter_store( &self, column: Self::Column, prefix: Option<&[u8]>, @@ -467,23 +509,27 @@ where } } -impl BatchOperations for RocksDb +impl TransactableStorage for RocksDb where Description: DatabaseDescription, { - fn batch_write( + fn commit_changes( &self, - entries: &mut dyn Iterator, Self::Column, WriteOperation)>, + _: Option, + changes: Changes, ) -> StorageResult<()> { let mut batch = WriteBatch::default(); - for (key, column, op) in entries { - match op { - WriteOperation::Insert(value) => { - batch.put_cf(&self.cf(column), key, value.as_ref()); - } - WriteOperation::Remove => { - batch.delete_cf(&self.cf(column), key); + for (column, ops) in changes { + let cf = self.cf_u32(column); + for (key, op) in ops { + match op { + WriteOperation::Insert(value) => { + batch.put_cf(&cf, key, value.as_ref()); + } + WriteOperation::Remove => { + batch.delete_cf(&cf, key); + } } } } @@ -497,51 +543,6 @@ where .write(batch) .map_err(|e| DatabaseError::Other(e.into()).into()) } - - // use delete_range to delete all keys in a column - fn delete_all(&self, column: Self::Column) -> StorageResult<()> { - let mut batch = WriteBatch::default(); - let first = self - .iter_all(column, None, None, IterDirection::Forward) - .next() - .transpose()? 
- .map(|(key, _)| key) - .unwrap_or_default(); - let last = self - .iter_all(column, None, None, IterDirection::Reverse) - .next() - .transpose()? - .map(|(key, _)| key) - .unwrap_or_default(); - batch.delete_range_cf(&self.cf(column), first, last.clone()); - - // delete_range doesn't delete the last key, so we need to delete it separately - batch.delete_cf(&self.cf(column), last); - - database_metrics().write_meter.inc(); - database_metrics() - .bytes_written - .observe(batch.size_in_bytes() as f64); - - self.db - .write(batch) - .map_err(|e| StorageError::Other(e.into())) - } -} - -impl TransactableStorage for RocksDb -where - Description: DatabaseDescription, -{ - fn flush(&self) -> DatabaseResult<()> { - self.db - .flush_wal(true) - .map_err(|e| anyhow::anyhow!("Unable to flush WAL file: {}", e))?; - self.db - .flush() - .map_err(|e| anyhow::anyhow!("Unable to flush SST files: {}", e))?; - Ok(()) - } } /// The `None` means overflow, so there is not following prefix. @@ -559,9 +560,44 @@ fn next_prefix(mut prefix: Vec) -> Option> { mod tests { use super::*; use crate::database::database_description::on_chain::OnChain; - use fuel_core_storage::column::Column; + use fuel_core_storage::{ + column::Column, + kv_store::KeyValueMutate, + transactional::ReadTransaction, + }; + use std::collections::{ + BTreeMap, + HashMap, + }; use tempfile::TempDir; + impl KeyValueMutate for RocksDb + where + Description: DatabaseDescription, + { + fn write( + &mut self, + key: &[u8], + column: Self::Column, + buf: &[u8], + ) -> StorageResult { + let mut transaction = self.read_transaction(); + let len = transaction.write(key, column, buf)?; + let changes = transaction.into_changes(); + self.commit_changes(Default::default(), changes)?; + + Ok(len) + } + + fn delete(&mut self, key: &[u8], column: Self::Column) -> StorageResult<()> { + let mut transaction = self.read_transaction(); + transaction.delete(key, column)?; + let changes = transaction.into_changes(); + 
self.commit_changes(Default::default(), changes)?; + Ok(()) + } + } + fn create_db() -> (RocksDb, TempDir) { let tmp_dir = TempDir::new().unwrap(); ( @@ -574,7 +610,7 @@ mod tests { fn can_put_and_read() { let key = vec![0xA, 0xB, 0xC]; - let (db, _tmp) = create_db(); + let (mut db, _tmp) = create_db(); let expected = Arc::new(vec![1, 2, 3]); db.put(&key, Column::Metadata, expected.clone()).unwrap(); @@ -585,7 +621,7 @@ mod tests { fn put_returns_previous_value() { let key = vec![0xA, 0xB, 0xC]; - let (db, _tmp) = create_db(); + let (mut db, _tmp) = create_db(); let expected = Arc::new(vec![1, 2, 3]); db.put(&key, Column::Metadata, expected.clone()).unwrap(); let prev = db @@ -599,7 +635,7 @@ mod tests { fn delete_and_get() { let key = vec![0xA, 0xB, 0xC]; - let (db, _tmp) = create_db(); + let (mut db, _tmp) = create_db(); let expected = Arc::new(vec![1, 2, 3]); db.put(&key, Column::Metadata, expected.clone()).unwrap(); assert_eq!(db.get(&key, Column::Metadata).unwrap().unwrap(), expected); @@ -612,70 +648,54 @@ mod tests { fn key_exists() { let key = vec![0xA, 0xB, 0xC]; - let (db, _tmp) = create_db(); + let (mut db, _tmp) = create_db(); let expected = Arc::new(vec![1, 2, 3]); db.put(&key, Column::Metadata, expected).unwrap(); assert!(db.exists(&key, Column::Metadata).unwrap()); } #[test] - fn batch_write_inserts() { + fn commit_changes_inserts() { let key = vec![0xA, 0xB, 0xC]; let value = Arc::new(vec![1, 2, 3]); let (db, _tmp) = create_db(); let ops = vec![( - key.clone(), - Column::Metadata, - WriteOperation::Insert(value.clone()), + Column::Metadata.id(), + BTreeMap::from_iter(vec![( + key.clone(), + WriteOperation::Insert(value.clone()), + )]), )]; - db.batch_write(&mut ops.into_iter()).unwrap(); + db.commit_changes(Default::default(), HashMap::from_iter(ops)) + .unwrap(); assert_eq!(db.get(&key, Column::Metadata).unwrap().unwrap(), value) } #[test] - fn batch_write_removes() { + fn commit_changes_removes() { let key = vec![0xA, 0xB, 0xC]; let value = 
Arc::new(vec![1, 2, 3]); - let (db, _tmp) = create_db(); + let (mut db, _tmp) = create_db(); db.put(&key, Column::Metadata, value).unwrap(); - let ops = vec![(key.clone(), Column::Metadata, WriteOperation::Remove)]; - db.batch_write(&mut ops.into_iter()).unwrap(); + let ops = vec![( + Column::Metadata.id(), + BTreeMap::from_iter(vec![(key.clone(), WriteOperation::Remove)]), + )]; + db.commit_changes(Default::default(), HashMap::from_iter(ops)) + .unwrap(); assert_eq!(db.get(&key, Column::Metadata).unwrap(), None); } - #[test] - fn delete_all_with_different_key_lengths() { - let (db, _tmp) = create_db(); - - let keys = vec![ - vec![], // unit key - vec![0xA], - vec![0xB, 0xC], - vec![0xD, 0xE, 0xF], - ]; - let value = Arc::new(vec![1, 2, 3]); - - for key in &keys { - db.put(key, Column::Metadata, value.clone()).unwrap(); - } - - db.delete_all(Column::Metadata).unwrap(); - - for key in &keys { - assert_eq!(db.get(key, Column::Metadata).unwrap(), None); - } - } - #[test] fn can_use_unit_value() { let key = vec![0x00]; - let (db, _tmp) = create_db(); + let (mut db, _tmp) = create_db(); let expected = Arc::new(vec![]); db.put(&key, Column::Metadata, expected.clone()).unwrap(); @@ -684,7 +704,7 @@ mod tests { assert!(db.exists(&key, Column::Metadata).unwrap()); assert_eq!( - db.iter_all(Column::Metadata, None, None, IterDirection::Forward) + db.iter_store(Column::Metadata, None, None, IterDirection::Forward) .collect::, _>>() .unwrap()[0], (key.clone(), expected.clone()) @@ -699,7 +719,7 @@ mod tests { fn can_use_unit_key() { let key: Vec = Vec::with_capacity(0); - let (db, _tmp) = create_db(); + let (mut db, _tmp) = create_db(); let expected = Arc::new(vec![1, 2, 3]); db.put(&key, Column::Metadata, expected.clone()).unwrap(); @@ -708,7 +728,7 @@ mod tests { assert!(db.exists(&key, Column::Metadata).unwrap()); assert_eq!( - db.iter_all(Column::Metadata, None, None, IterDirection::Forward) + db.iter_store(Column::Metadata, None, None, IterDirection::Forward) .collect::, 
_>>() .unwrap()[0], (key.clone(), expected.clone()) @@ -723,7 +743,7 @@ mod tests { fn can_use_unit_key_and_value() { let key: Vec = Vec::with_capacity(0); - let (db, _tmp) = create_db(); + let (mut db, _tmp) = create_db(); let expected = Arc::new(vec![]); db.put(&key, Column::Metadata, expected.clone()).unwrap(); @@ -732,7 +752,7 @@ mod tests { assert!(db.exists(&key, Column::Metadata).unwrap()); assert_eq!( - db.iter_all(Column::Metadata, None, None, IterDirection::Forward) + db.iter_store(Column::Metadata, None, None, IterDirection::Forward) .collect::, _>>() .unwrap()[0], (key.clone(), expected.clone()) diff --git a/crates/services/consensus_module/poa/src/ports.rs b/crates/services/consensus_module/poa/src/ports.rs index b02f0e39e7f..e3d7519078f 100644 --- a/crates/services/consensus_module/poa/src/ports.rs +++ b/crates/services/consensus_module/poa/src/ports.rs @@ -1,6 +1,6 @@ use fuel_core_services::stream::BoxStream; use fuel_core_storage::{ - transactional::StorageTransaction, + transactional::Changes, Result as StorageResult, }; use fuel_core_types::{ @@ -43,9 +43,6 @@ pub trait TransactionPool: Send + Sync { fn transaction_status_events(&self) -> BoxStream; } -#[cfg(test)] -use fuel_core_storage::test_helpers::EmptyStorage; - /// The source of transactions for the block. pub enum TransactionsSource { /// The source of transactions for the block is the `TxPool`. 
@@ -54,28 +51,24 @@ pub enum TransactionsSource { SpecificTransactions(Vec), } -#[cfg_attr(test, mockall::automock(type Database=EmptyStorage;))] +#[cfg_attr(test, mockall::automock)] #[async_trait::async_trait] pub trait BlockProducer: Send + Sync { - type Database; - async fn produce_and_execute_block( &self, height: BlockHeight, block_time: Tai64, source: TransactionsSource, max_gas: Word, - ) -> anyhow::Result>>; + ) -> anyhow::Result>; } -#[cfg_attr(test, mockall::automock(type Database=EmptyStorage;))] +#[cfg_attr(test, mockall::automock)] #[async_trait::async_trait] pub trait BlockImporter: Send + Sync { - type Database; - async fn commit_result( &self, - result: UncommittedImportResult>, + result: UncommittedImportResult, ) -> anyhow::Result<()>; fn block_stream(&self) -> BoxStream; diff --git a/crates/services/consensus_module/poa/src/service.rs b/crates/services/consensus_module/poa/src/service.rs index 03dcc948563..296d8ceeae9 100644 --- a/crates/services/consensus_module/poa/src/service.rs +++ b/crates/services/consensus_module/poa/src/service.rs @@ -29,7 +29,7 @@ use fuel_core_services::{ ServiceRunner, StateWatcher, }; -use fuel_core_storage::transactional::StorageTransaction; +use fuel_core_storage::transactional::Changes; use fuel_core_types::{ blockchain::{ block::Block, @@ -245,11 +245,11 @@ where } } -impl MainTask +impl MainTask where T: TransactionPool, - B: BlockProducer, - I: BlockImporter, + B: BlockProducer, + I: BlockImporter, { // Request the block producer to make a new block, and return it when ready async fn signal_produce_block( @@ -257,7 +257,7 @@ where height: BlockHeight, block_time: Tai64, source: TransactionsSource, - ) -> anyhow::Result>> { + ) -> anyhow::Result> { self.block_producer .produce_and_execute_block(height, block_time, source, self.block_gas_limit) .await @@ -333,7 +333,7 @@ where tx_status, events, }, - db_transaction, + changes, ) = self .signal_produce_block(height, block_time, source) .await? 
@@ -360,7 +360,7 @@ where self.block_importer .commit_result(Uncommitted::new( ImportResult::new_from_local(block, tx_status, events), - db_transaction, + changes, )) .await?; @@ -455,11 +455,11 @@ where } #[async_trait::async_trait] -impl RunnableTask for MainTask +impl RunnableTask for MainTask where T: TransactionPool, - B: BlockProducer, - I: BlockImporter, + B: BlockProducer, + I: BlockImporter, { async fn run(&mut self, watcher: &mut StateWatcher) -> anyhow::Result { let should_continue; @@ -530,7 +530,7 @@ where } } -pub fn new_service( +pub fn new_service( last_block: &BlockHeader, config: Config, txpool: T, @@ -540,8 +540,8 @@ pub fn new_service( ) -> Service where T: TransactionPool + 'static, - B: BlockProducer + 'static, - I: BlockImporter + 'static, + B: BlockProducer + 'static, + I: BlockImporter + 'static, P: P2pPort, { Service::new(MainTask::new( diff --git a/crates/services/consensus_module/poa/src/service_test.rs b/crates/services/consensus_module/poa/src/service_test.rs index 59cbc4fb591..c864a5187c0 100644 --- a/crates/services/consensus_module/poa/src/service_test.rs +++ b/crates/services/consensus_module/poa/src/service_test.rs @@ -18,10 +18,6 @@ use fuel_core_services::{ Service as StorageTrait, State, }; -use fuel_core_storage::{ - test_helpers::EmptyStorage, - transactional::StorageTransaction, -}; use fuel_core_types::{ blockchain::{ header::BlockHeader, @@ -131,7 +127,7 @@ impl TestContextBuilder { tx_status: Default::default(), events: Default::default(), }, - StorageTransaction::new(EmptyStorage), + Default::default(), )) }); producer @@ -290,7 +286,7 @@ async fn remove_skipped_transactions() { tx_status: Default::default(), events: Default::default(), }, - StorageTransaction::new(EmptyStorage), + Default::default(), )) }); diff --git a/crates/services/consensus_module/poa/src/service_test/manually_produce_tests.rs b/crates/services/consensus_module/poa/src/service_test/manually_produce_tests.rs index c08035dfdb7..c562d61c288 100644 --- 
a/crates/services/consensus_module/poa/src/service_test/manually_produce_tests.rs +++ b/crates/services/consensus_module/poa/src/service_test/manually_produce_tests.rs @@ -94,7 +94,7 @@ async fn can_manually_produce_block( tx_status: Default::default(), events: Default::default(), }, - StorageTransaction::new(EmptyStorage), + Default::default(), )) }); ctx_builder.with_importer(importer); diff --git a/crates/services/executor/src/executor.rs b/crates/services/executor/src/executor.rs index c34cd74c9b8..413ac0a8719 100644 --- a/crates/services/executor/src/executor.rs +++ b/crates/services/executor/src/executor.rs @@ -1,6 +1,5 @@ use crate::{ ports::{ - ExecutorDatabaseTrait, MaybeCheckedTransaction, RelayerPort, TransactionsSource, @@ -10,6 +9,9 @@ use crate::{ }; use block_component::*; use fuel_core_storage::{ + column::Column, + kv_store::KeyValueInspect, + structured_storage::StructuredStorage, tables::{ Coins, ContractsInfo, @@ -21,8 +23,12 @@ use fuel_core_storage::{ }, transactional::{ AtomicView, + Changes, + ConflictPolicy, + Modifiable, + ReadTransaction, StorageTransaction, - Transactional, + WriteTransaction, }, vm_storage::VmStorage, StorageAsMut, @@ -179,25 +185,59 @@ impl Executor where R: AtomicView, R::View: RelayerPort, - D: AtomicView, - D::View: ExecutorDatabaseTrait, + D: AtomicView + Modifiable, + D::View: KeyValueInspect, { #[cfg(any(test, feature = "test-helpers"))] /// Executes the block and commits the result of the execution into the inner `Database`. 
pub fn execute_and_commit( - &self, + &mut self, block: fuel_core_types::services::executor::ExecutionBlock, options: ExecutionOptions, ) -> ExecutorResult { + let (result, changes) = self.execute_without_commit(block, options)?.into(); + + self.database_view_provider.commit_changes(changes)?; + Ok(result) + } + + #[cfg(any(test, feature = "test-helpers"))] + pub fn execute_without_commit( + &self, + block: fuel_core_types::services::executor::ExecutionBlock, + options: ExecutionOptions, + ) -> ExecutorResult> { let executor = ExecutionInstance { database: self.database_view_provider.latest_view(), relayer: self.relayer_view_provider.latest_view(), config: self.config.clone(), options, }; - executor.execute_and_commit(block) + + let component = match block { + ExecutionTypes::DryRun(_) => { + panic!("It is not possible to commit the dry run result"); + } + ExecutionTypes::Production(block) => ExecutionTypes::Production(Components { + header_to_produce: block.header, + transactions_source: OnceTransactionsSource::new(block.transactions), + gas_price: 0, + gas_limit: u64::MAX, + }), + ExecutionTypes::Validation(block) => ExecutionTypes::Validation(block), + }; + + executor.execute_without_commit(component) } +} +impl Executor +where + R: AtomicView, + R::View: RelayerPort, + D: AtomicView, + D::View: KeyValueInspect, +{ /// Executes the partial block and returns `ExecutionData` as a result. 
#[cfg(any(test, feature = "test-helpers"))] pub fn execute_block( @@ -214,14 +254,13 @@ where config: self.config.clone(), options, }; - let mut block_transaction = executor.database.transaction(); - executor.execute_block(block_transaction.as_mut(), block) + executor.execute_block(block) } - pub fn execute_without_commit( + pub fn execute_without_commit_with_source( &self, block: ExecutionBlockWithSource, - ) -> ExecutorResult>> + ) -> ExecutorResult> where TxSource: TransactionsSource, { @@ -265,6 +304,7 @@ pub struct ExecutionData { message_ids: Vec, tx_status: Vec, events: Vec, + changes: Changes, pub skipped_transactions: Vec<(TxId, ExecutorError)>, } @@ -298,42 +338,12 @@ struct ExecutionInstance { impl ExecutionInstance where R: RelayerPort, - D: ExecutorDatabaseTrait, -{ - #[cfg(any(test, feature = "test-helpers"))] - /// Executes the block and commits the result of the execution into the inner `Database`. - fn execute_and_commit( - self, - block: fuel_core_types::services::executor::ExecutionBlock, - ) -> ExecutorResult { - let component = match block { - ExecutionTypes::DryRun(_) => { - panic!("It is not possible to commit the dry run result"); - } - ExecutionTypes::Production(block) => ExecutionTypes::Production(Components { - header_to_produce: block.header, - transactions_source: OnceTransactionsSource::new(block.transactions), - gas_price: 0, - gas_limit: u64::MAX, - }), - ExecutionTypes::Validation(block) => ExecutionTypes::Validation(block), - }; - - let (result, db_transaction) = self.execute_without_commit(component)?.into(); - db_transaction.commit()?; - Ok(result) - } -} - -impl ExecutionInstance -where - R: RelayerPort, - D: ExecutorDatabaseTrait, + D: KeyValueInspect, { pub fn execute_without_commit( self, block: ExecutionBlockWithSource, - ) -> ExecutorResult>> + ) -> ExecutorResult> where TxSource: TransactionsSource, { @@ -429,13 +439,13 @@ pub mod block_component { impl ExecutionInstance where R: RelayerPort, - D: ExecutorDatabaseTrait, + 
D: KeyValueInspect, { #[tracing::instrument(skip_all)] fn execute_inner( self, block: ExecutionBlockWithSource, - ) -> ExecutorResult>> + ) -> ExecutorResult> where TxSource: TransactionsSource, { @@ -446,9 +456,6 @@ where // a partial header. let block = block.map_v(PartialFuelBlock::from); - // Create a new storage transaction. - let mut block_st_transaction = self.database.transaction(); - let (block, execution_data) = match block { ExecutionTypes::DryRun(component) => { let mut block = @@ -460,10 +467,8 @@ where component.gas_limit, ); - let execution_data = self.execute_block( - block_st_transaction.as_mut(), - ExecutionType::DryRun(component), - )?; + let execution_data = + self.execute_block(ExecutionType::DryRun(component))?; (block, execution_data) } ExecutionTypes::Production(component) => { @@ -476,18 +481,14 @@ where component.gas_limit, ); - let execution_data = self.execute_block( - block_st_transaction.as_mut(), - ExecutionType::Production(component), - )?; + let execution_data = + self.execute_block(ExecutionType::Production(component))?; (block, execution_data) } ExecutionTypes::Validation(mut block) => { let component = PartialBlockComponent::from_partial_block(&mut block); - let execution_data = self.execute_block( - block_st_transaction.as_mut(), - ExecutionType::Validation(component), - )?; + let execution_data = + self.execute_block(ExecutionType::Validation(component))?; (block, execution_data) } }; @@ -499,6 +500,7 @@ where tx_status, skipped_transactions, events, + changes, .. } = execution_data; @@ -531,19 +533,22 @@ where }; // Get the complete fuel block. - Ok(UncommittedResult::new(result, block_st_transaction)) + Ok(UncommittedResult::new(result, changes)) } #[tracing::instrument(skip_all)] /// Execute the fuel block with all transactions. 
fn execute_block( &self, - block_st_transaction: &mut D, block: ExecutionType>, ) -> ExecutorResult where TxSource: TransactionsSource, { + let mut block_st_transaction = self + .database + .read_transaction() + .with_policy(ConflictPolicy::Overwrite); let mut data = ExecutionData { coinbase: 0, used_gas: 0, @@ -552,6 +557,7 @@ where message_ids: Vec::new(), tx_status: Vec::new(), events: Vec::new(), + changes: Default::default(), skipped_transactions: Vec::new(), }; let execution_data = &mut data; @@ -565,9 +571,22 @@ where let block_height = *block.header.height(); if self.relayer.enabled() { - self.process_da(block_st_transaction, &block.header, execution_data)?; + self.process_da(&mut block_st_transaction, &block.header, execution_data)?; } + // The block level storage transaction that also contains data from the relayer. + // Starting from this point, modifications from each thread should be independent + // and shouldn't touch the same data. + let mut block_with_relayer_data_transaction = block_st_transaction.write_transaction() + // Enforces independent changes from each thread. + .with_policy(ConflictPolicy::Fail); + + // We execute transactions in a single thread right now, but later, + // we will execute them in parallel with a separate independent storage transaction per thread. + let mut thread_block_transaction = block_with_relayer_data_transaction + .read_transaction() + .with_policy(ConflictPolicy::Overwrite); + // ALl transactions should be in the `TxSource`. // We use `block.transactions` to store executed transactions. 
debug_assert!(block.transactions.is_empty()); @@ -578,7 +597,9 @@ where -> ExecutorResult<()> { let tx_count = execution_data.tx_count; let tx = { - let mut tx_st_transaction = block_st_transaction.transaction(); + let mut tx_st_transaction = thread_block_transaction + .write_transaction() + .with_policy(ConflictPolicy::Overwrite); let tx_id = tx.id(&self.config.consensus_parameters.chain_id); let result = self.execute_transaction( tx, @@ -669,19 +690,28 @@ where )?; } + let changes_from_thread = thread_block_transaction.into_changes(); + block_with_relayer_data_transaction.commit_changes(changes_from_thread)?; + block_with_relayer_data_transaction.commit()?; + if execution_kind != ExecutionKind::DryRun && !data.found_mint { return Err(ExecutorError::MintMissing) } + data.changes = block_st_transaction.into_changes(); + Ok(data) } - fn process_da( + fn process_da( &self, - block_st_transaction: &mut D, + block_st_transaction: &mut StorageTransaction, header: &PartialBlockHeader, execution_data: &mut ExecutionData, - ) -> ExecutorResult<()> { + ) -> ExecutorResult<()> + where + T: KeyValueInspect, + { let block_height = *header.height(); let prev_block_height = block_height .pred() @@ -723,7 +753,7 @@ where } #[allow(clippy::too_many_arguments)] - fn execute_transaction( + fn execute_transaction( &self, tx: MaybeCheckedTransaction, tx_id: &TxId, @@ -731,15 +761,17 @@ where gas_price: Word, execution_data: &mut ExecutionData, execution_kind: ExecutionKind, - tx_st_transaction: &mut StorageTransaction, - ) -> ExecutorResult { + tx_st_transaction: &mut StorageTransaction, + ) -> ExecutorResult + where + T: KeyValueInspect, + { if execution_data.found_mint { return Err(ExecutorError::MintIsNotLastTransaction) } // Throw a clear error if the transaction id is a duplicate if tx_st_transaction - .as_ref() .storage::() .contains_key(tx_id)? 
{ @@ -782,15 +814,18 @@ where } } - fn execute_mint( + fn execute_mint( &self, checked_mint: Checked, header: &PartialBlockHeader, gas_price: Word, execution_data: &mut ExecutionData, - block_st_transaction: &mut StorageTransaction, + block_st_transaction: &mut StorageTransaction, execution_kind: ExecutionKind, - ) -> ExecutorResult { + ) -> ExecutorResult + where + T: KeyValueInspect, + { execution_data.found_mint = true; if checked_mint.transaction().tx_pointer().tx_index() != execution_data.tx_count { @@ -843,7 +878,7 @@ where if self.options.utxo_validation { // validate utxos exist self.verify_input_state( - block_st_transaction.as_ref(), + block_st_transaction, inputs.as_mut_slice(), header.da_height, )?; @@ -851,24 +886,23 @@ where match execution_kind { ExecutionKind::DryRun | ExecutionKind::Production => { - self.compute_inputs( - inputs.as_mut_slice(), - block_st_transaction.as_mut(), - )?; + self.compute_inputs(inputs.as_mut_slice(), block_st_transaction)?; } ExecutionKind::Validation => { self.validate_inputs_state( inputs.as_mut_slice(), coinbase_id, - block_st_transaction.as_mut(), + block_st_transaction, )?; } } - let mut sub_block_db_commit = block_st_transaction.transaction(); + let mut sub_block_db_commit = block_st_transaction + .write_transaction() + .with_policy(ConflictPolicy::Overwrite); let mut vm_db = VmStorage::new( - sub_block_db_commit.as_mut(), + &mut sub_block_db_commit, &header.consensus, self.config.coinbase_recipient, ); @@ -887,7 +921,7 @@ where block_height, execution_data, &coinbase_id, - block_st_transaction.as_mut(), + block_st_transaction, inputs.as_slice(), outputs.as_slice(), )?; @@ -895,7 +929,7 @@ where outputs.as_mut_slice(), inputs.as_slice(), coinbase_id, - block_st_transaction.as_mut(), + block_st_transaction, )?; let Input::Contract(input) = core::mem::take(&mut inputs[0]) else { unreachable!() @@ -925,7 +959,6 @@ where }); if block_st_transaction - .as_mut() .storage::() .insert(&coinbase_id, &())? 
.is_some() @@ -936,18 +969,19 @@ where } #[allow(clippy::too_many_arguments)] - fn execute_create_or_script( + fn execute_create_or_script( &self, mut checked_tx: Checked, header: &PartialBlockHeader, gas_price: Word, execution_data: &mut ExecutionData, - tx_st_transaction: &mut StorageTransaction, + tx_st_transaction: &mut StorageTransaction, execution_kind: ExecutionKind, ) -> ExecutorResult where Tx: ExecutableTransaction + PartialEq + Cacheable + Send + Sync + 'static, ::Metadata: CheckedMetadata + Clone + Send + Sync, + T: KeyValueInspect, { let tx_id = checked_tx.id(); let max_fee = checked_tx.transaction().max_fee_limit(); @@ -966,7 +1000,7 @@ where // validate utxos exist and maturity is properly set self.verify_input_state( - tx_st_transaction.as_ref(), + tx_st_transaction, checked_tx.transaction().inputs(), header.da_height, )?; @@ -981,18 +1015,19 @@ where self.validate_inputs_state( checked_tx.transaction().inputs(), tx_id, - tx_st_transaction.as_mut(), + tx_st_transaction, )?; } // execute transaction // setup database view that only lives for the duration of vm execution - let mut sub_block_db_commit = tx_st_transaction.transaction(); - let sub_db_view = sub_block_db_commit.as_mut(); + let mut sub_block_db_commit = tx_st_transaction + .read_transaction() + .with_policy(ConflictPolicy::Overwrite); // execution vm let vm_db = VmStorage::new( - sub_db_view.clone(), + &mut sub_block_db_commit, &header.consensus, self.config.coinbase_recipient, ); @@ -1061,11 +1096,13 @@ where // We always need to update inputs with storage state before execution, // because VM zeroes malleable fields during the execution. 
- self.compute_inputs(tx.inputs_mut(), tx_st_transaction.as_mut())?; + self.compute_inputs(tx.inputs_mut(), tx_st_transaction)?; // only commit state changes if execution was a success if !reverted { - sub_block_db_commit.commit()?; + self.log_backtrace(&vm, &receipts); + let changes = sub_block_db_commit.into_changes(); + tx_st_transaction.commit_changes(changes)?; } // update block commitment @@ -1073,19 +1110,14 @@ where self.total_fee_paid(&tx, max_fee, &receipts, gas_price)?; // change the spent status of the tx inputs - self.spend_input_utxos( - tx.inputs(), - tx_st_transaction.as_mut(), - reverted, - execution_data, - )?; + self.spend_input_utxos(tx.inputs(), tx_st_transaction, reverted, execution_data)?; // Persist utxos first and after calculate the not utxo outputs self.persist_output_utxos( *header.height(), execution_data, &tx_id, - tx_st_transaction.as_mut(), + tx_st_transaction, tx.inputs(), tx.outputs(), )?; @@ -1096,7 +1128,7 @@ where &mut outputs, tx.inputs(), tx_id, - tx_st_transaction.as_mut(), + tx_st_transaction, )?; *tx.outputs_mut() = outputs; @@ -1117,7 +1149,6 @@ where .contract_id; let salt = *create.salt(); tx_st_transaction - .as_mut() .storage::() .insert(&contract_id, &(salt.into()))?; } @@ -1126,7 +1157,6 @@ where // Store tx into the block db transaction tx_st_transaction - .as_mut() .storage::() .insert(&tx_id, &())?; @@ -1141,7 +1171,6 @@ where .extend(receipts.iter().filter_map(|r| r.message_id())); let status = if reverted { - self.log_backtrace(&vm, &receipts); TransactionExecutionResult::Failed { result: Some(state), receipts, @@ -1163,12 +1192,15 @@ where Ok(final_tx) } - fn verify_input_state( + fn verify_input_state( &self, - db: &D, + db: &StorageTransaction, inputs: &[Input], block_da_height: DaBlockHeight, - ) -> ExecutorResult<()> { + ) -> ExecutorResult<()> + where + T: KeyValueInspect, + { for input in inputs { match input { Input::CoinSigned(CoinSigned { utxo_id, .. 
}) @@ -1238,13 +1270,16 @@ where } /// Mark input utxos as spent - fn spend_input_utxos( + fn spend_input_utxos( &self, inputs: &[Input], - db: &mut D, + db: &mut StorageTransaction, reverted: bool, execution_data: &mut ExecutionData, - ) -> ExecutorResult<()> { + ) -> ExecutorResult<()> + where + T: KeyValueInspect, + { for input in inputs { match input { Input::CoinSigned(CoinSigned { @@ -1347,7 +1382,14 @@ where /// Computes all zeroed or variable inputs. /// In production mode, updates the inputs with computed values. /// In validation mode, compares the inputs with computed inputs. - fn compute_inputs(&self, inputs: &mut [Input], db: &mut D) -> ExecutorResult<()> { + fn compute_inputs( + &self, + inputs: &mut [Input], + db: &StorageTransaction, + ) -> ExecutorResult<()> + where + T: KeyValueInspect, + { for input in inputs { match input { Input::CoinSigned(CoinSigned { @@ -1378,7 +1420,8 @@ where ref contract_id, .. }) => { - let mut contract = ContractRef::new(&mut *db, *contract_id); + let contract = + ContractRef::new(StructuredStorage::new(db), *contract_id); let utxo_info = contract.validated_utxo(self.options.utxo_validation)?; *utxo_id = *utxo_info.utxo_id(); @@ -1392,12 +1435,15 @@ where Ok(()) } - fn validate_inputs_state( + fn validate_inputs_state( &self, inputs: &[Input], tx_id: TxId, - db: &mut D, - ) -> ExecutorResult<()> { + db: &StorageTransaction, + ) -> ExecutorResult<()> + where + T: KeyValueInspect, + { for input in inputs { match input { Input::CoinSigned(CoinSigned { @@ -1432,7 +1478,8 @@ where tx_pointer, .. }) => { - let mut contract = ContractRef::new(&mut *db, *contract_id); + let contract = + ContractRef::new(StructuredStorage::new(db), *contract_id); let provided_info = ContractUtxoInfo::V1((*utxo_id, *tx_pointer).into()); if provided_info @@ -1464,13 +1511,16 @@ where /// Computes all zeroed or variable outputs. /// In production mode, updates the outputs with computed values. 
/// In validation mode, compares the outputs with computed inputs. - fn compute_state_of_not_utxo_outputs( + fn compute_state_of_not_utxo_outputs( &self, outputs: &mut [Output], inputs: &[Input], tx_id: TxId, - db: &mut D, - ) -> ExecutorResult<()> { + db: &StorageTransaction, + ) -> ExecutorResult<()> + where + T: KeyValueInspect, + { for output in outputs { if let Output::Contract(contract_output) = output { let contract_id = @@ -1484,7 +1534,7 @@ where }) }; - let mut contract = ContractRef::new(&mut *db, *contract_id); + let contract = ContractRef::new(StructuredStorage::new(db), *contract_id); contract_output.balance_root = contract.balance_root()?; contract_output.state_root = contract.state_root()?; } @@ -1493,14 +1543,17 @@ where } #[allow(clippy::too_many_arguments)] - pub fn get_coin_or_default( + pub fn get_coin_or_default( &self, - db: &mut D, + db: &StorageTransaction, utxo_id: UtxoId, owner: Address, amount: u64, asset_id: AssetId, - ) -> ExecutorResult { + ) -> ExecutorResult + where + T: KeyValueInspect, + { if self.options.utxo_validation { db.storage::() .get(&utxo_id)? 
@@ -1522,9 +1575,9 @@ where } /// Log a VM backtrace if configured to do so - fn log_backtrace( + fn log_backtrace( &self, - vm: &Interpreter, Tx>, + vm: &Interpreter, Tx>, receipts: &[Receipt], ) { if self.config.backtrace { @@ -1551,15 +1604,18 @@ where } } - fn persist_output_utxos( + fn persist_output_utxos( &self, block_height: BlockHeight, execution_data: &mut ExecutionData, tx_id: &Bytes32, - db: &mut D, + db: &mut StorageTransaction, inputs: &[Input], outputs: &[Output], - ) -> ExecutorResult<()> { + ) -> ExecutorResult<()> + where + T: KeyValueInspect, + { let tx_idx = execution_data.tx_count; for (output_index, output) in outputs.iter().enumerate() { let index = u8::try_from(output_index) @@ -1632,15 +1688,18 @@ where Ok(()) } - fn insert_coin( + fn insert_coin( block_height: BlockHeight, execution_data: &mut ExecutionData, utxo_id: UtxoId, amount: &Word, asset_id: &AssetId, to: &Address, - db: &mut D, - ) -> ExecutorResult<()> { + db: &mut StorageTransaction, + ) -> ExecutorResult<()> + where + T: KeyValueInspect, + { // Only insert a coin output if it has some amount. // This is because variable or transfer outputs won't have any value // if there's a revert or panic and shouldn't be added to the utxo set. 
diff --git a/crates/services/executor/src/ports.rs b/crates/services/executor/src/ports.rs index f831d5031d3..40304a4d375 100644 --- a/crates/services/executor/src/ports.rs +++ b/crates/services/executor/src/ports.rs @@ -1,31 +1,7 @@ -use fuel_core_storage::{ - tables::{ - Coins, - ContractsAssets, - ContractsInfo, - ContractsLatestUtxo, - ContractsRawCode, - ContractsState, - FuelBlocks, - Messages, - ProcessedTransactions, - SpentMessages, - }, - transactional::Transactional, - Error as StorageError, - MerkleRootStorage, - StorageBatchMutate, - StorageMutate, - StorageRead, - StorageSize, - StorageWrite, -}; use fuel_core_types::{ blockchain::primitives::DaBlockHeight, - fuel_merkle::storage::StorageInspect, fuel_tx, fuel_tx::{ - ContractId, TxId, UniqueIdentifier, }, @@ -69,27 +45,3 @@ pub trait RelayerPort { /// Get events from the relayer at a given da height. fn get_events(&self, da_height: &DaBlockHeight) -> anyhow::Result>; } - -// TODO: Remove `Clone` bound -pub trait ExecutorDatabaseTrait: - StorageInspect - + StorageMutate - + StorageMutate - + StorageMutate - + StorageMutate - + StorageMutate - + StorageWrite - + StorageSize - + StorageRead - + StorageWrite - + StorageSize - + StorageRead - + MerkleRootStorage - + StorageMutate - + StorageMutate - + MerkleRootStorage - + StorageBatchMutate - + Transactional - + Clone -{ -} diff --git a/crates/services/executor/src/refs/contract.rs b/crates/services/executor/src/refs/contract.rs index 373e6ebc880..8917c702297 100644 --- a/crates/services/executor/src/refs/contract.rs +++ b/crates/services/executor/src/refs/contract.rs @@ -7,6 +7,7 @@ use fuel_core_storage::{ ContractsLatestUtxo, ContractsState, }, + Error as StorageError, Mappable, MerkleRoot, MerkleRootStorage, @@ -92,7 +93,7 @@ where Database: MerkleRootStorage, { pub fn balance_root( - &mut self, + &self, ) -> Result>::Error> { self.database.root(&self.contract_id).map(Into::into) } @@ -103,7 +104,7 @@ where Database: MerkleRootStorage, { pub fn 
state_root( - &mut self, + &self, ) -> Result>::Error> { self.database.root(&self.contract_id).map(Into::into) } @@ -117,6 +118,15 @@ pub trait ContractStorageTrait: type InnerError: fmt::Debug + fmt::Display + Send + Sync + 'static; } +impl ContractStorageTrait for D +where + D: StorageInspect + + MerkleRootStorage + + MerkleRootStorage, +{ + type InnerError = StorageError; +} + impl<'a, Database> GenesisCommitment for ContractRef<&'a Database> where Database: ContractStorageTrait, diff --git a/crates/services/importer/src/importer.rs b/crates/services/importer/src/importer.rs index cde1c539ee5..638d36f4ff0 100644 --- a/crates/services/importer/src/importer.rs +++ b/crates/services/importer/src/importer.rs @@ -1,7 +1,7 @@ use crate::{ - ports, ports::{ BlockVerifier, + DatabaseTransaction, Executor, ImporterDatabase, }, @@ -10,8 +10,9 @@ use crate::{ use fuel_core_metrics::importer::importer_metrics; use fuel_core_storage::{ not_found, - transactional::StorageTransaction, + transactional::Changes, Error as StorageError, + MerkleRoot, }; use fuel_core_types::{ blockchain::{ @@ -38,7 +39,10 @@ use fuel_core_types::{ }, }; use std::{ - ops::Deref, + ops::{ + Deref, + DerefMut, + }, sync::{ Arc, Mutex, @@ -66,9 +70,9 @@ pub enum Error { fmt = "The wrong state of database during insertion of the genesis block." 
)] InvalidUnderlyingDatabaseGenesisState, - #[display(fmt = "The wrong state of database after execution of the block.\ - The actual height is {_1:?}, when the next expected height is {_0:?}.")] - InvalidDatabaseStateAfterExecution(Option, Option), + #[display(fmt = "The wrong state of storage after execution of the block.\ + The actual root is {_1:?}, when the expected root is {_0:?}.")] + InvalidDatabaseStateAfterExecution(Option, Option), #[display(fmt = "Got overflow during increasing the height.")] Overflow, #[display(fmt = "The non-generic block can't have zero height.")] @@ -110,7 +114,7 @@ impl PartialEq for Error { } pub struct Importer { - database: D, + database: Mutex, executor: Arc, verifier: Arc, chain_id: ChainId, @@ -127,7 +131,7 @@ impl Importer { let (broadcast, _) = broadcast::channel(config.max_block_notify_buffer); Self { - database, + database: Mutex::new(database), executor: Arc::new(executor), verifier: Arc::new(verifier), chain_id: config.chain_id, @@ -173,13 +177,10 @@ where /// /// Only one commit may be in progress at the time. All other calls will fail. /// Returns an error if called while another call is in progress. - pub async fn commit_result( + pub async fn commit_result( &self, - result: UncommittedResult>, - ) -> Result<(), Error> - where - ExecutorDatabase: ports::ExecutorDatabase, - { + result: UncommittedResult, + ) -> Result<(), Error> { let _guard = self.lock()?; // It is safe to unwrap the channel because we have the `_guard`. let previous_block_result = self @@ -192,8 +193,13 @@ where if let Some(channel) = previous_block_result { let _ = channel.await; } + let mut guard = self + .database + .try_lock() + .expect("Semaphore prevents concurrent access to the database"); + let database = guard.deref_mut(); - self._commit_result(result) + self._commit_result(result, database) } /// The method commits the result of the block execution and notifies about a new imported block. 
@@ -206,14 +212,12 @@ where ), err )] - fn _commit_result( + fn _commit_result( &self, - result: UncommittedResult>, - ) -> Result<(), Error> - where - ExecutorDatabase: ports::ExecutorDatabase, - { - let (result, mut db_tx) = result.into(); + result: UncommittedResult, + database: &mut D, + ) -> Result<(), Error> { + let (result, changes) = result.into(); let block = &result.sealed_block.entity; let consensus = &result.sealed_block.consensus; let actual_next_height = *block.header().height(); @@ -224,7 +228,7 @@ where // database height + 1. let expected_next_height = match consensus { Consensus::Genesis(_) => { - let result = self.database.latest_block_height()?; + let result = database.latest_block_height()?; let found = result.is_some(); // Because the genesis block is not committed, it should return `None`. // If we find the latest height, something is wrong with the state of the database. @@ -238,8 +242,7 @@ where return Err(Error::ZeroNonGenericHeight) } - let last_db_height = self - .database + let last_db_height = database .latest_block_height()? .ok_or(not_found!("Latest block height"))?; last_db_height @@ -262,16 +265,16 @@ where )) } - let db_after_execution = db_tx.as_mut(); - // Importer expects that `UncommittedResult` contains the result of block // execution without block itself. 
- let expected_height = self.database.latest_block_height()?; - let actual_height = db_after_execution.latest_block_height()?; - if expected_height != actual_height { + let expected_block_root = database.latest_block_root()?; + + let mut db_after_execution = database.storage_transaction(changes); + let actual_block_root = db_after_execution.latest_block_root()?; + if actual_block_root != expected_block_root { return Err(Error::InvalidDatabaseStateAfterExecution( - expected_height, - actual_height, + expected_block_root, + actual_block_root, )) } @@ -279,7 +282,7 @@ where return Err(Error::NotUnique(expected_next_height)) } - db_tx.commit()?; + db_after_execution.commit()?; // update the importer metrics after the block is successfully committed importer_metrics() @@ -313,6 +316,8 @@ where // correctly in more mission critical areas (such as _commit_result) let current_block_height = self .database + .try_lock() + .expect("Init function is the first to access the database") .latest_block_height() .unwrap_or_default() .unwrap_or_default(); @@ -349,7 +354,7 @@ where pub fn verify_and_execute_block( &self, sealed_block: SealedBlock, - ) -> Result>, Error> { + ) -> Result, Error> { Self::verify_and_execute_block_inner( self.executor.clone(), self.verifier.clone(), @@ -361,7 +366,7 @@ where executor: Arc, verifier: Arc, sealed_block: SealedBlock, - ) -> Result>, Error> { + ) -> Result, Error> { let consensus = sealed_block.consensus; let block = sealed_block.entity; let sealed_block_id = block.id(); @@ -386,7 +391,7 @@ where tx_status, events, }, - db_tx, + changes, ) = executor .execute_without_commit(block) .map_err(Error::FailedExecution)? 
@@ -411,7 +416,7 @@ where let import_result = ImportResult::new_from_network(sealed_block, tx_status, events); - Ok(Uncommitted::new(import_result, db_tx)) + Ok(Uncommitted::new(import_result, changes)) } } @@ -455,7 +460,13 @@ where } let start = Instant::now(); - let commit_result = self._commit_result(result); + + let mut guard = self + .database + .try_lock() + .expect("Semaphore prevents concurrent access to the database"); + let database = guard.deref_mut(); + let commit_result = self._commit_result(result, database); let commit_time = start.elapsed().as_secs_f64(); let time = execute_time + commit_time; importer_metrics().execute_and_commit_duration.observe(time); diff --git a/crates/services/importer/src/importer/test.rs b/crates/services/importer/src/importer/test.rs index 5e2f64da752..b52a0772652 100644 --- a/crates/services/importer/src/importer/test.rs +++ b/crates/services/importer/src/importer/test.rs @@ -1,20 +1,19 @@ use crate::{ importer::Error, ports::{ - ExecutorDatabase, ImporterDatabase, MockBlockVerifier, + MockDatabaseTransaction, MockExecutor, + Transactional, }, Importer, }; use anyhow::anyhow; use fuel_core_storage::{ - transactional::{ - StorageTransaction, - Transaction as TransactionTrait, - }, + transactional::Changes, Error as StorageError, + MerkleRoot, Result as StorageResult, }; use fuel_core_types::{ @@ -24,10 +23,7 @@ use fuel_core_types::{ SealedBlock, }, fuel_tx::TxId, - fuel_types::{ - BlockHeight, - ChainId, - }, + fuel_types::BlockHeight, services::{ block_importer::{ ImportResult, @@ -50,33 +46,25 @@ use tokio::sync::{ mockall::mock! 
{ pub Database {} - impl ImporterDatabase for Database { - fn latest_block_height(&self) -> StorageResult>; - } + impl Transactional for Database { + type Transaction<'a> = MockDatabaseTransaction + where + Self: 'a; - impl ExecutorDatabase for Database { - fn store_new_block( - &mut self, - chain_id: &ChainId, - block: &SealedBlock, - ) -> StorageResult; + fn storage_transaction(&mut self, changes: Changes) -> MockDatabaseTransaction; } - impl TransactionTrait for Database { - fn commit(&mut self) -> StorageResult<()>; - } -} + impl ImporterDatabase for Database { + fn latest_block_height(&self) -> StorageResult>; -impl AsMut for MockDatabase { - fn as_mut(&mut self) -> &mut MockDatabase { - self + fn latest_block_root(&self) -> StorageResult>; } } -impl AsRef for MockDatabase { - fn as_ref(&self) -> &MockDatabase { - self - } +fn u32_to_merkle_root(number: u32) -> MerkleRoot { + let mut root = [0; 32]; + root[0..4].copy_from_slice(&number.to_be_bytes()); + MerkleRoot::from(root) } #[derive(Clone, Debug)] @@ -112,19 +100,22 @@ where R: Fn() -> StorageResult> + Send + Clone + 'static, { move || { - let result = result.clone(); + let result_height = result.clone(); + let result_root = result.clone(); let mut db = MockDatabase::default(); db.expect_latest_block_height() - .returning(move || result().map(|v| v.map(Into::into))); + .returning(move || result_height().map(|v| v.map(Into::into))); + db.expect_latest_block_root() + .returning(move || result_root().map(|v| v.map(u32_to_merkle_root))); db } } -fn executor_db( +fn db_transaction( height: H, store_block: B, commits: usize, -) -> impl Fn() -> MockDatabase +) -> impl Fn() -> MockDatabaseTransaction where H: Fn() -> StorageResult> + Send + Clone + 'static, B: Fn() -> StorageResult + Send + Clone + 'static, @@ -132,9 +123,9 @@ where move || { let height = height.clone(); let store_block = store_block.clone(); - let mut db = MockDatabase::default(); - db.expect_latest_block_height() - .returning(move || 
height().map(|v| v.map(Into::into))); + let mut db = MockDatabaseTransaction::default(); + db.expect_latest_block_root() + .returning(move || height().map(|v| v.map(u32_to_merkle_root))); db.expect_store_new_block() .returning(move |_, _| store_block()); db.expect_commit().times(commits).returning(|| Ok(())); @@ -169,7 +160,7 @@ fn execution_failure_error() -> Error { Error::FailedExecution(ExecutorError::InvalidBlockId) } -fn executor(result: R, database: MockDatabase) -> MockExecutor +fn executor(result: R) -> MockExecutor where R: Fn() -> ExecutorResult + Send + 'static, { @@ -188,7 +179,7 @@ where tx_status: vec![], events: vec![], }, - StorageTransaction::new(database), + Default::default(), )) }); @@ -220,42 +211,42 @@ where #[test_case( genesis(0), underlying_db(ok(None)), - executor_db(ok(None), ok(true), 1) + db_transaction(ok(None), ok(true), 1) => Ok(()); "successfully imports genesis block when latest block not found" )] #[test_case( genesis(113), underlying_db(ok(None)), - executor_db(ok(None), ok(true), 1) + db_transaction(ok(None), ok(true), 1) => Ok(()); "successfully imports block at arbitrary height when executor db expects it and last block not found" )] #[test_case( genesis(0), underlying_db(storage_failure), - executor_db(ok(Some(0)), ok(true), 0) + db_transaction(ok(Some(0)), ok(true), 0) => Err(storage_failure_error()); "fails to import genesis when underlying database fails" )] #[test_case( genesis(0), underlying_db(ok(Some(0))), - executor_db(ok(Some(0)), ok(true), 0) + db_transaction(ok(Some(0)), ok(true), 0) => Err(Error::InvalidUnderlyingDatabaseGenesisState); "fails to import genesis block when already exists" )] #[test_case( genesis(1), underlying_db(ok(None)), - executor_db(ok(Some(0)), ok(true), 0) - => Err(Error::InvalidDatabaseStateAfterExecution(None, Some(0u32.into()))); + db_transaction(ok(Some(0)), ok(true), 0) + => Err(Error::InvalidDatabaseStateAfterExecution(None, Some(u32_to_merkle_root(0)))); "fails to import genesis block 
when next height is not 0" )] #[test_case( genesis(0), underlying_db(ok(None)), - executor_db(ok(None), ok(false), 0) + db_transaction(ok(None), ok(false), 0) => Err(Error::NotUnique(0u32.into())); "fails to import genesis block when block exists for height 0" )] @@ -263,72 +254,72 @@ where async fn commit_result_genesis( sealed_block: SealedBlock, underlying_db: impl Fn() -> MockDatabase, - executor_db: impl Fn() -> MockDatabase, + db_transaction: impl Fn() -> MockDatabaseTransaction, ) -> Result<(), Error> { - commit_result_assert(sealed_block, underlying_db(), executor_db()).await + commit_result_assert(sealed_block, underlying_db(), db_transaction()).await } //////////////////////////// PoA Block //////////////////////////// #[test_case( poa_block(1), underlying_db(ok(Some(0))), - executor_db(ok(Some(0)), ok(true), 1) + db_transaction(ok(Some(0)), ok(true), 1) => Ok(()); "successfully imports block at height 1 when latest block is genesis" )] #[test_case( poa_block(113), underlying_db(ok(Some(112))), - executor_db(ok(Some(112)), ok(true), 1) + db_transaction(ok(Some(112)), ok(true), 1) => Ok(()); "successfully imports block at arbitrary height when latest block height is one fewer and executor db expects it" )] #[test_case( poa_block(0), underlying_db(ok(Some(0))), - executor_db(ok(Some(1)), ok(true), 0) + db_transaction(ok(Some(1)), ok(true), 0) => Err(Error::ZeroNonGenericHeight); "fails to import PoA block with height 0" )] #[test_case( poa_block(113), underlying_db(ok(Some(111))), - executor_db(ok(Some(113)), ok(true), 0) + db_transaction(ok(Some(113)), ok(true), 0) => Err(Error::IncorrectBlockHeight(112u32.into(), 113u32.into())); "fails to import block at height 113 when latest block height is 111" )] #[test_case( poa_block(113), underlying_db(ok(Some(114))), - executor_db(ok(Some(113)), ok(true), 0) + db_transaction(ok(Some(113)), ok(true), 0) => Err(Error::IncorrectBlockHeight(115u32.into(), 113u32.into())); "fails to import block at height 113 when 
latest block height is 114" )] #[test_case( poa_block(113), underlying_db(ok(Some(112))), - executor_db(ok(Some(114)), ok(true), 0) - => Err(Error::InvalidDatabaseStateAfterExecution(Some(112u32.into()), Some(114u32.into()))); + db_transaction(ok(Some(114)), ok(true), 0) + => Err(Error::InvalidDatabaseStateAfterExecution(Some(u32_to_merkle_root(112u32)), Some(u32_to_merkle_root(114u32)))); "fails to import block 113 when executor db expects height 114" )] #[test_case( poa_block(113), underlying_db(ok(Some(112))), - executor_db(storage_failure, ok(true), 0) + db_transaction(storage_failure, ok(true), 0) => Err(storage_failure_error()); "fails to import block when executor db fails to find latest block" )] #[test_case( poa_block(113), underlying_db(ok(Some(112))), - executor_db(ok(Some(112)), ok(false), 0) + db_transaction(ok(Some(112)), ok(false), 0) => Err(Error::NotUnique(113u32.into())); "fails to import block when block exists" )] #[test_case( poa_block(113), underlying_db(ok(Some(112))), - executor_db(ok(Some(112)), storage_failure, 0) + db_transaction(ok(Some(112)), storage_failure, 0) => Err(storage_failure_error()); "fails to import block when executor db fails to find block" )] @@ -336,17 +327,21 @@ async fn commit_result_genesis( async fn commit_result_and_execute_and_commit_poa( sealed_block: SealedBlock, underlying_db: impl Fn() -> MockDatabase, - executor_db: impl Fn() -> MockDatabase, + db_transaction: impl Fn() -> MockDatabaseTransaction, ) -> Result<(), Error> { // `execute_and_commit` and `commit_result` should have the same // validation rules(-> test cases) during committing the result. 
let height = *sealed_block.entity.header().height(); + let transaction = db_transaction(); let commit_result = - commit_result_assert(sealed_block.clone(), underlying_db(), executor_db()).await; + commit_result_assert(sealed_block.clone(), underlying_db(), transaction).await; + let transaction = db_transaction(); + let mut db = underlying_db(); + db.expect_storage_transaction().return_once(|_| transaction); let execute_and_commit_result = execute_and_commit_assert( sealed_block, - underlying_db(), - executor(ok(ex_result(height.into(), 0)), executor_db()), + db, + executor(ok(ex_result(height.into(), 0))), verifier(ok(())), ) .await; @@ -356,14 +351,17 @@ async fn commit_result_and_execute_and_commit_poa( async fn commit_result_assert( sealed_block: SealedBlock, - underlying_db: MockDatabase, - executor_db: MockDatabase, + mut underlying_db: MockDatabase, + db_transaction: MockDatabaseTransaction, ) -> Result<(), Error> { + underlying_db + .expect_storage_transaction() + .return_once(|_| db_transaction); let expected_to_broadcast = sealed_block.clone(); let importer = Importer::new(Default::default(), underlying_db, (), ()); let uncommitted_result = UncommittedResult::new( ImportResult::new_from_local(sealed_block, vec![], vec![]), - StorageTransaction::new(executor_db), + Default::default(), ); let mut imported_blocks = importer.subscribe(); @@ -411,10 +409,8 @@ async fn execute_and_commit_assert( #[tokio::test] async fn commit_result_fail_when_locked() { let importer = Importer::new(Default::default(), MockDatabase::default(), (), ()); - let uncommitted_result = UncommittedResult::new( - ImportResult::default(), - StorageTransaction::new(MockDatabase::default()), - ); + let uncommitted_result = + UncommittedResult::new(ImportResult::default(), Default::default()); let _guard = importer.lock(); assert_eq!( @@ -514,13 +510,14 @@ where // databases to always pass the committing part. 
let expected_height: u32 = (*sealed_block.entity.header().height()).into(); let previous_height = expected_height.checked_sub(1).unwrap_or_default(); + let mut db = underlying_db(ok(Some(previous_height)))(); + db.expect_storage_transaction().return_once(move |_| { + db_transaction(ok(Some(previous_height)), ok(true), commits)() + }); let execute_and_commit_result = execute_and_commit_assert( sealed_block, - underlying_db(ok(Some(previous_height)))(), - executor( - block_after_execution, - executor_db(ok(Some(previous_height)), ok(true), commits)(), - ), + db, + executor(block_after_execution), verifier(verifier_result), ) .await; @@ -540,7 +537,7 @@ where let importer = Importer::new( Default::default(), MockDatabase::default(), - executor(block_after_execution, MockDatabase::default()), + executor(block_after_execution), verifier(verifier_result), ); @@ -552,7 +549,7 @@ fn verify_and_execute_allowed_when_locked() { let importer = Importer::new( Default::default(), MockDatabase::default(), - executor(ok(ex_result(13, 0)), MockDatabase::default()), + executor(ok(ex_result(13, 0))), verifier(ok(())), ); diff --git a/crates/services/importer/src/ports.rs b/crates/services/importer/src/ports.rs index a8947ab8c3f..c4a3c809f43 100644 --- a/crates/services/importer/src/ports.rs +++ b/crates/services/importer/src/ports.rs @@ -1,6 +1,26 @@ use fuel_core_storage::{ - transactional::StorageTransaction, + column::Column, + kv_store::KeyValueInspect, + tables::{ + merkle::{ + DenseMetadataKey, + FuelBlockMerkleMetadata, + }, + FuelBlocks, + SealedBlockConsensus, + Transactions, + }, + transactional::{ + Changes, + ConflictPolicy, + Modifiable, + StorageTransaction, + WriteTransaction, + }, + MerkleRoot, Result as StorageResult, + StorageAsMut, + StorageAsRef, }; use fuel_core_types::{ blockchain::{ @@ -8,6 +28,7 @@ use fuel_core_types::{ consensus::Consensus, SealedBlock, }, + fuel_tx::UniqueIdentifier, fuel_types::{ BlockHeight, ChainId, @@ -21,25 +42,40 @@ use 
fuel_core_types::{ #[cfg_attr(test, mockall::automock(type Database = crate::importer::test::MockDatabase;))] /// The executors port. pub trait Executor: Send + Sync { - /// The database used by the executor. - type Database: ExecutorDatabase; - /// Executes the block and returns the result of execution with uncommitted database /// transaction. fn execute_without_commit( &self, block: Block, - ) -> ExecutorResult>>; + ) -> ExecutorResult>; +} + +/// The trait indicates that the type supports storage transactions. +pub trait Transactional { + /// The type of the storage transaction; + type Transaction<'a>: DatabaseTransaction + where + Self: 'a; + + /// Returns the storage transaction based on the `Changes`. + fn storage_transaction(&mut self, changes: Changes) -> Self::Transaction<'_>; } -/// The database port used by the block importer. -pub trait ImporterDatabase: Send + Sync { +/// The alias port used by the block importer. +pub trait ImporterDatabase: Transactional + Send + Sync { /// Returns the latest block height. fn latest_block_height(&self) -> StorageResult>; + + /// Returns the latest block root. + fn latest_block_root(&self) -> StorageResult>; } -/// The port for returned database from the executor. -pub trait ExecutorDatabase: ImporterDatabase { +/// The port of the storage transaction required by the importer. +#[cfg_attr(test, mockall::automock)] +pub trait DatabaseTransaction { + /// Returns the latest block root. + fn latest_block_root(&self) -> StorageResult>; + /// Inserts the `SealedBlock`. /// /// The method returns `true` if the block is a new, otherwise `false`. @@ -50,6 +86,9 @@ pub trait ExecutorDatabase: ImporterDatabase { chain_id: &ChainId, block: &SealedBlock, ) -> StorageResult; + + /// Commits the changes to the underlying storage. 
+ fn commit(self) -> StorageResult<()>; } #[cfg_attr(test, mockall::automock)] @@ -66,3 +105,60 @@ pub trait BlockVerifier: Send + Sync { block: &Block, ) -> anyhow::Result<()>; } + +impl Transactional for S +where + S: KeyValueInspect + Modifiable, +{ + type Transaction<'a> = StorageTransaction<&'a mut S> where Self: 'a; + + fn storage_transaction(&mut self, changes: Changes) -> Self::Transaction<'_> { + self.write_transaction() + .with_changes(changes) + .with_policy(ConflictPolicy::Fail) + } +} + +impl DatabaseTransaction for StorageTransaction +where + S: KeyValueInspect + Modifiable, +{ + fn latest_block_root(&self) -> StorageResult> { + Ok(self + .storage_as_ref::() + .get(&DenseMetadataKey::Latest)? + .map(|cow| *cow.root())) + } + + fn store_new_block( + &mut self, + chain_id: &ChainId, + block: &SealedBlock, + ) -> StorageResult { + let mut storage = self.write_transaction(); + let height = block.entity.header().height(); + let mut found = storage + .storage_as_mut::() + .insert(height, &block.entity.compress(chain_id))? + .is_some(); + found |= storage + .storage_as_mut::() + .insert(height, &block.consensus)? + .is_some(); + + // TODO: Use `batch_insert` from https://github.com/FuelLabs/fuel-core/pull/1576 + for tx in block.entity.transactions() { + found |= storage + .storage_as_mut::() + .insert(&tx.id(chain_id), tx)? 
+ .is_some(); + } + storage.commit()?; + Ok(!found) + } + + fn commit(self) -> StorageResult<()> { + self.commit()?; + Ok(()) + } +} diff --git a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index 8b28e6f27f4..f9be6da2af2 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -895,8 +895,8 @@ pub mod tests { type Height = BlockHeight; - fn latest_height(&self) -> Self::Height { - BlockHeight::default() + fn latest_height(&self) -> Option { + Some(BlockHeight::default()) } fn view_at(&self, _: &BlockHeight) -> StorageResult { @@ -1024,8 +1024,8 @@ pub mod tests { type Height = BlockHeight; - fn latest_height(&self) -> Self::Height { - BlockHeight::default() + fn latest_height(&self) -> Option { + Some(BlockHeight::default()) } fn view_at(&self, _: &BlockHeight) -> StorageResult { diff --git a/crates/services/producer/src/block_producer.rs b/crates/services/producer/src/block_producer.rs index 11693baea57..7326b647d21 100644 --- a/crates/services/producer/src/block_producer.rs +++ b/crates/services/producer/src/block_producer.rs @@ -9,7 +9,7 @@ use anyhow::{ }; use fuel_core_storage::transactional::{ AtomicView, - StorageTransaction, + Changes, }; use fuel_core_types::{ blockchain::{ @@ -44,6 +44,8 @@ mod tests; #[derive(Debug, derive_more::Display)] pub enum Error { + #[display(fmt = "Genesis block is absent")] + NoGenesisBlock, #[display( fmt = "The block height {height} should be higher than the previous block height {previous_block}" )] @@ -85,15 +87,15 @@ where ViewProvider::View: BlockProducerDatabase, { /// Produces and execute block for the specified height. 
- async fn produce_and_execute( + async fn produce_and_execute( &self, height: BlockHeight, block_time: Tai64, tx_source: impl FnOnce(BlockHeight) -> TxSource, max_gas: Word, - ) -> anyhow::Result>> + ) -> anyhow::Result> where - Executor: ports::Executor + 'static, + Executor: ports::Executor + 'static, { // - get previous block info (hash, root, etc) // - select best da_height from relayer @@ -132,13 +134,12 @@ where } } -impl - Producer +impl Producer where ViewProvider: AtomicView + 'static, ViewProvider::View: BlockProducerDatabase, TxPool: ports::TxPool + 'static, - Executor: ports::Executor + 'static, + Executor: ports::Executor + 'static, { /// Produces and execute block for the specified height with transactions from the `TxPool`. pub async fn produce_and_execute_block_txpool( @@ -146,7 +147,7 @@ where height: BlockHeight, block_time: Tai64, max_gas: Word, - ) -> anyhow::Result>> { + ) -> anyhow::Result> { self.produce_and_execute( height, block_time, @@ -157,11 +158,11 @@ where } } -impl Producer +impl Producer where ViewProvider: AtomicView + 'static, ViewProvider::View: BlockProducerDatabase, - Executor: ports::Executor, Database = ExecutorDB> + 'static, + Executor: ports::Executor> + 'static, { /// Produces and execute block for the specified height with `transactions`. 
pub async fn produce_and_execute_block_transactions( @@ -170,7 +171,7 @@ where block_time: Tai64, transactions: Vec, max_gas: Word, - ) -> anyhow::Result>> { + ) -> anyhow::Result> { self.produce_and_execute(height, block_time, |_| transactions, max_gas) .await } @@ -195,6 +196,7 @@ where let height = height.unwrap_or_else(|| { self.view_provider .latest_height() + .unwrap_or_default() .succ() .expect("It is impossible to overflow the current block height") }); @@ -298,7 +300,10 @@ where &self, height: BlockHeight, ) -> anyhow::Result { - let latest_height = self.view_provider.latest_height(); + let latest_height = self + .view_provider + .latest_height() + .ok_or(Error::NoGenesisBlock)?; // block 0 is reserved for genesis if height <= latest_height { Err(Error::BlockHeightShouldBeHigherThanPrevious { diff --git a/crates/services/producer/src/mocks.rs b/crates/services/producer/src/mocks.rs index 5ae5740db44..1409bbabc6b 100644 --- a/crates/services/producer/src/mocks.rs +++ b/crates/services/producer/src/mocks.rs @@ -8,8 +8,7 @@ use fuel_core_storage::{ not_found, transactional::{ AtomicView, - StorageTransaction, - Transaction, + Changes, }, Result as StorageResult, }; @@ -87,12 +86,6 @@ struct DatabaseTransaction { database: MockDb, } -impl Transaction for DatabaseTransaction { - fn commit(&mut self) -> StorageResult<()> { - Ok(()) - } -} - impl AsMut for DatabaseTransaction { fn as_mut(&mut self) -> &mut MockDb { &mut self.database @@ -105,12 +98,6 @@ impl AsRef for DatabaseTransaction { } } -impl Transaction for MockDb { - fn commit(&mut self) -> StorageResult<()> { - Ok(()) - } -} - impl AsMut for MockDb { fn as_mut(&mut self) -> &mut MockDb { self @@ -133,12 +120,10 @@ fn to_block(component: Components>) -> Block { } impl Executor> for MockExecutor { - type Database = MockDb; - fn execute_without_commit( &self, component: Components>, - ) -> ExecutorResult>> { + ) -> ExecutorResult> { let block = to_block(component); // simulate executor inserting a block 
let mut block_db = self.0.blocks.lock().unwrap(); @@ -153,7 +138,7 @@ impl Executor> for MockExecutor { tx_status: vec![], events: vec![], }, - StorageTransaction::new(self.0.clone()), + Default::default(), )) } } @@ -161,12 +146,10 @@ impl Executor> for MockExecutor { pub struct FailingMockExecutor(pub Mutex>); impl Executor> for FailingMockExecutor { - type Database = MockDb; - fn execute_without_commit( &self, component: Components>, - ) -> ExecutorResult>> { + ) -> ExecutorResult> { // simulate an execution failure let mut err = self.0.lock().unwrap(); if let Some(err) = err.take() { @@ -180,7 +163,7 @@ impl Executor> for FailingMockExecutor { tx_status: vec![], events: vec![], }, - StorageTransaction::new(MockDb::default()), + Default::default(), )) } } @@ -196,10 +179,10 @@ impl AtomicView for MockDb { type Height = BlockHeight; - fn latest_height(&self) -> BlockHeight { + fn latest_height(&self) -> Option { let blocks = self.blocks.lock().unwrap(); - blocks.keys().max().cloned().unwrap_or_default() + blocks.keys().max().cloned() } fn view_at(&self, _: &BlockHeight) -> StorageResult { diff --git a/crates/services/producer/src/ports.rs b/crates/services/producer/src/ports.rs index 99fef9447c3..b1827e8c7a7 100644 --- a/crates/services/producer/src/ports.rs +++ b/crates/services/producer/src/ports.rs @@ -1,6 +1,6 @@ use async_trait::async_trait; use fuel_core_storage::{ - transactional::StorageTransaction, + transactional::Changes, Result as StorageResult, }; use fuel_core_types::{ @@ -56,15 +56,12 @@ pub trait Relayer: Send + Sync { } pub trait Executor: Send + Sync { - /// The database used by the executor. - type Database; - /// Executes the block and returns the result of execution with uncommitted database /// transaction. 
fn execute_without_commit( &self, component: Components, - ) -> ExecutorResult>>; + ) -> ExecutorResult>; } pub trait DryRunner: Send + Sync { diff --git a/crates/services/relayer/src/ports.rs b/crates/services/relayer/src/ports.rs index f1c1a2d8d81..77447023b34 100644 --- a/crates/services/relayer/src/ports.rs +++ b/crates/services/relayer/src/ports.rs @@ -32,3 +32,21 @@ pub trait RelayerDb: Send + Sync { /// Panics if height is not set as of initialization of database. fn get_finalized_da_height(&self) -> StorageResult; } + +/// The trait that should be implemented by the database transaction returned by the database. +#[cfg_attr(test, mockall::automock)] +pub trait DatabaseTransaction { + /// Commits the changes to the underlying storage. + fn commit(self) -> StorageResult<()>; +} + +/// The trait indicates that the type supports storage transactions. +pub trait Transactional { + /// The type of the storage transaction; + type Transaction<'a>: DatabaseTransaction + where + Self: 'a; + + /// Returns the storage transaction. 
+ fn transaction(&mut self) -> Self::Transaction<'_>; +} diff --git a/crates/services/relayer/src/ports/tests.rs b/crates/services/relayer/src/ports/tests.rs index 50c9f11af52..57a2757124a 100644 --- a/crates/services/relayer/src/ports/tests.rs +++ b/crates/services/relayer/src/ports/tests.rs @@ -1,31 +1,68 @@ use crate::{ - ports::RelayerDb, + ports::{ + DatabaseTransaction, + MockDatabaseTransaction, + RelayerDb, + Transactional, + }, storage::{ DaHeightTable, EventsHistory, }, }; -use fuel_core_storage::test_helpers::MockStorage; +use fuel_core_storage::test_helpers::{ + MockBasic, + MockStorage, +}; use fuel_core_types::entities::message::Message; use std::borrow::Cow; use test_case::test_case; +type DBTx = MockStorage; +type ReturnDB = Box DBTx + Send + Sync>; + +impl DatabaseTransaction for DBTx { + fn commit(self) -> fuel_core_storage::Result<()> { + self.data.commit() + } +} + +type MockDatabase = MockStorage; + +impl Transactional for MockDatabase { + type Transaction<'a> = DBTx; + + fn transaction(&mut self) -> Self::Transaction<'_> { + (self.data)() + } +} + #[test] fn test_insert_events() { let same_height = 12; - let mut db = MockStorage::default(); - db.expect_insert::() - .times(1) - .returning(|_, _| Ok(None)); - db.expect_insert::() - .times(1) - .withf(move |_, v| **v == same_height) - .returning(|_, _| Ok(None)); - db.expect_commit().returning(|| Ok(())); - db.expect_get::() - .once() - .returning(|_| Ok(Some(std::borrow::Cow::Owned(9u64.into())))); - let mut db = db.into_transactional(); + let return_db_tx = move || { + let mut db = DBTx::default(); + db.storage + .expect_insert::() + .times(1) + .returning(|_, _| Ok(None)); + db.storage + .expect_insert::() + .times(1) + .withf(move |_, v| **v == same_height) + .returning(|_, _| Ok(None)); + db.data.expect_commit().returning(|| Ok(())); + db.storage + .expect_get::() + .once() + .returning(|_| Ok(Some(std::borrow::Cow::Owned(9u64.into())))); + db + }; + + let mut db = MockDatabase { + data: 
Box::new(return_db_tx), + storage: Default::default(), + }; let mut m = Message::default(); m.set_amount(10); @@ -52,20 +89,29 @@ fn insert_always_raises_da_height_monotonically() { .map(Into::into) .collect(); - let mut db = MockStorage::default(); - db.expect_insert::() - .returning(|_, _| Ok(None)); - db.expect_insert::() - .once() - .withf(move |_, v| *v == same_height) - .returning(|_, _| Ok(None)); - db.expect_commit().returning(|| Ok(())); - db.expect_get::() - .once() - .returning(|_| Ok(None)); + let return_db_tx = move || { + let mut db = DBTx::default(); + db.storage + .expect_insert::() + .returning(|_, _| Ok(None)); + db.storage + .expect_insert::() + .once() + .withf(move |_, v| *v == same_height) + .returning(|_, _| Ok(None)); + db.data.expect_commit().returning(|| Ok(())); + db.storage + .expect_get::() + .once() + .returning(|_| Ok(None)); + db + }; // When - let mut db = db.into_transactional(); + let mut db = MockDatabase { + data: Box::new(return_db_tx), + storage: Default::default(), + }; let result = db.insert_events(&same_height, &events); // Then @@ -85,10 +131,12 @@ fn insert_fails_for_messages_with_different_height() { }) .collect(); - let db = MockStorage::default(); + let mut db = MockDatabase { + data: Box::new(DBTx::default), + storage: Default::default(), + }; // When - let mut db = db.into_transactional(); let result = db.insert_events(&last_height.into(), &events); // Then @@ -111,10 +159,11 @@ fn insert_fails_for_messages_same_height_but_on_different_height() { }) .collect(); - let db = MockStorage::default(); - // When - let mut db = db.into_transactional(); + let mut db = MockDatabase { + data: Box::new(DBTx::default), + storage: Default::default(), + }; let next_height = last_height + 1; let result = db.insert_events(&next_height.into(), &events); @@ -136,20 +185,30 @@ fn set_raises_da_height_monotonically( inserts: impl Into>, new_height: u64, ) { - let mut db = MockStorage::default(); - if let Some(h) = inserts.into() { - 
db.expect_insert::() + let inserts = inserts.into(); + let get = get.into(); + let return_db_tx = move || { + let mut db = DBTx::default(); + if let Some(h) = inserts { + db.storage + .expect_insert::() + .once() + .withf(move |_, v| **v == h) + .returning(|_, _| Ok(None)); + } + let get = get.map(|g| Cow::Owned(g.into())); + db.storage + .expect_get::() .once() - .withf(move |_, v| **v == h) - .returning(|_, _| Ok(None)); - } - let get = get.into().map(|g| Cow::Owned(g.into())); - db.expect_get::() - .once() - .returning(move |_| Ok(get.clone())); - db.expect_commit().returning(|| Ok(())); - - let mut db = db.into_transactional(); + .returning(move |_| Ok(get.clone())); + db.data.expect_commit().returning(|| Ok(())); + db + }; + + let mut db = MockDatabase { + data: Box::new(return_db_tx), + storage: Default::default(), + }; db.set_finalized_da_height_to_at_least(&new_height.into()) .unwrap(); } diff --git a/crates/services/relayer/src/service/get_logs.rs b/crates/services/relayer/src/service/get_logs.rs index 8ceb079f576..515def95fd9 100644 --- a/crates/services/relayer/src/service/get_logs.rs +++ b/crates/services/relayer/src/service/get_logs.rs @@ -95,6 +95,8 @@ where } } + // TODO: For https://github.com/FuelLabs/fuel-core/issues/451 we need to write each height + // (not only the last height), even if it's empty. if !inserted_last_height { database.insert_events(&last_height, &[])?; } diff --git a/crates/services/relayer/src/storage.rs b/crates/services/relayer/src/storage.rs index bbe50e530da..3ef5438d46f 100644 --- a/crates/services/relayer/src/storage.rs +++ b/crates/services/relayer/src/storage.rs @@ -1,6 +1,10 @@ //! The module provides definition and implementation of the relayer storage. 
-use crate::ports::RelayerDb; +use crate::ports::{ + DatabaseTransaction, + RelayerDb, + Transactional, +}; use fuel_core_storage::{ blueprint::plain::Plain, codec::{ @@ -9,12 +13,15 @@ use fuel_core_storage::{ }, kv_store::StorageColumn, structured_storage::TableWithBlueprint, - transactional::Transactional, + transactional::{ + Modifiable, + StorageTransaction, + }, Error as StorageError, Mappable, Result as StorageResult, StorageAsMut, - StorageAsRef, + StorageInspect, StorageMutate, }; use fuel_core_types::{ @@ -78,6 +85,7 @@ impl Mappable for DaHeightTable { /// changed from a unit value. const METADATA_KEY: () = (); +// TODO: Remove `DaHeightTable` and logic associated with it, since the height tracking is controlled by the database impl TableWithBlueprint for DaHeightTable { type Blueprint = Plain>; type Column = Column; @@ -108,12 +116,12 @@ impl TableWithBlueprint for EventsHistory { } } -impl RelayerDb for T +impl RelayerDb for T where T: Send + Sync, - T: Transactional, - T: StorageMutate, - Storage: StorageMutate + T: Transactional, + T: StorageInspect, + for<'a> T::Transaction<'a>: StorageMutate + StorageMutate, { fn insert_events( @@ -126,7 +134,6 @@ where // height. Also so that the messages are inserted atomically // with the height. let mut db_tx = self.transaction(); - let db = db_tx.as_mut(); for event in events { if da_height != &event.da_height() { @@ -134,9 +141,9 @@ where } } - db.storage::().insert(da_height, events)?; + db_tx.storage::().insert(da_height, events)?; - grow_monotonically(db, da_height)?; + grow_monotonically(&mut db_tx, da_height)?; db_tx.commit()?; // TODO: Think later about how to clean up the history of the relayer. // Since we don't have too much information on the relayer and it can be useful @@ -153,19 +160,30 @@ where // set atomically with the insertion based on the current // height. 
let mut db_tx = self.transaction(); - let db = db_tx.as_mut(); - grow_monotonically(db, height)?; + grow_monotonically(&mut db_tx, height)?; db_tx.commit()?; Ok(()) } fn get_finalized_da_height(&self) -> StorageResult { - Ok(*StorageAsRef::storage::(&self) + use fuel_core_storage::StorageAsRef; + Ok(*self + .storage::() .get(&METADATA_KEY)? .unwrap_or_default()) } } +impl DatabaseTransaction for StorageTransaction +where + S: Modifiable, +{ + fn commit(self) -> StorageResult<()> { + self.commit()?; + Ok(()) + } +} + fn grow_monotonically( s: &mut Storage, height: &DaBlockHeight, diff --git a/crates/services/src/service.rs b/crates/services/src/service.rs index 14cb155fa4e..76a0f31b000 100644 --- a/crates/services/src/service.rs +++ b/crates/services/src/service.rs @@ -3,6 +3,7 @@ use crate::state::{ StateWatcher, }; use anyhow::anyhow; +use core::ops::Deref; use fuel_core_metrics::{ future_tracker::FutureTracker, services::{ @@ -27,6 +28,14 @@ impl Clone for SharedMutex { } } +impl Deref for SharedMutex { + type Target = Shared>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + /// Used if services have no asynchronously shared data #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct EmptyShared; diff --git a/crates/services/txpool/src/mock_db.rs b/crates/services/txpool/src/mock_db.rs index 0da650294e5..bbaa5bc15e9 100644 --- a/crates/services/txpool/src/mock_db.rs +++ b/crates/services/txpool/src/mock_db.rs @@ -103,8 +103,8 @@ impl AtomicView for MockDBProvider { type Height = BlockHeight; - fn latest_height(&self) -> Self::Height { - BlockHeight::default() + fn latest_height(&self) -> Option { + Some(BlockHeight::default()) } fn view_at(&self, _: &BlockHeight) -> StorageResult { diff --git a/crates/storage/src/blueprint.rs b/crates/storage/src/blueprint.rs index 1afc45cf66c..c561254b11c 100644 --- a/crates/storage/src/blueprint.rs +++ b/crates/storage/src/blueprint.rs @@ -11,7 +11,8 @@ use crate::{ }, kv_store::{ BatchOperations, - KeyValueStore, + 
KeyValueInspect, + KeyValueMutate, }, Mappable, Result as StorageResult, @@ -31,43 +32,16 @@ pub mod sparse; /// /// The blueprint is responsible for encoding/decoding(usually it is done via `KeyCodec` and `ValueCodec`) /// the key and value and putting/extracting it to/from the storage. -pub trait Blueprint +pub trait BlueprintInspect where M: Mappable, - S: KeyValueStore, + S: KeyValueInspect, { /// The codec used to encode and decode storage key. type KeyCodec: Encode + Decode; /// The codec used to encode and decode storage value. type ValueCodec: Encode + Decode; - /// Puts the key-value pair into the storage. - fn put( - storage: &mut S, - key: &M::Key, - column: S::Column, - value: &M::Value, - ) -> StorageResult<()>; - - /// Puts the key-value pair into the storage and returns the old value. - fn replace( - storage: &mut S, - key: &M::Key, - column: S::Column, - value: &M::Value, - ) -> StorageResult>; - - /// Takes the value from the storage and returns it. - /// The value is removed from the storage. - fn take( - storage: &mut S, - key: &M::Key, - column: S::Column, - ) -> StorageResult>; - - /// Removes the value from the storage. - fn delete(storage: &mut S, key: &M::Key, column: S::Column) -> StorageResult<()>; - /// Checks if the value exists in the storage. fn exists(storage: &S, key: &M::Key, column: S::Column) -> StorageResult { let key_encoder = Self::KeyCodec::encode(key); @@ -103,9 +77,43 @@ where } } +/// It is an extension of the [`BlueprintInspect`] that allows mutating the storage. +pub trait BlueprintMutate: BlueprintInspect +where + M: Mappable, + S: KeyValueMutate, +{ + /// Puts the key-value pair into the storage. + fn put( + storage: &mut S, + key: &M::Key, + column: S::Column, + value: &M::Value, + ) -> StorageResult<()>; + + /// Puts the key-value pair into the storage and returns the old value. 
+ fn replace( + storage: &mut S, + key: &M::Key, + column: S::Column, + value: &M::Value, + ) -> StorageResult>; + + /// Takes the value from the storage and returns it. + /// The value is removed from the storage. + fn take( + storage: &mut S, + key: &M::Key, + column: S::Column, + ) -> StorageResult>; + + /// Removes the value from the storage. + fn delete(storage: &mut S, key: &M::Key, column: S::Column) -> StorageResult<()>; +} + /// It is an extension of the blueprint that allows supporting batch operations. /// Usually, they are more performant than initializing/inserting/removing values one by one. -pub trait SupportsBatching: Blueprint +pub trait SupportsBatching: BlueprintMutate where M: Mappable, S: BatchOperations, @@ -141,11 +149,11 @@ where } /// It is an extension of the blueprint that supporting creation of the Merkle tree over the storage. -pub trait SupportsMerkle: Blueprint +pub trait SupportsMerkle: BlueprintInspect where Key: ?Sized, M: Mappable, - S: KeyValueStore, + S: KeyValueInspect, { /// Returns the root of the Merkle tree. 
fn root(storage: &S, key: &Key) -> StorageResult; diff --git a/crates/storage/src/blueprint/merklized.rs b/crates/storage/src/blueprint/merklized.rs index 5583eced49e..81cd86d1842 100644 --- a/crates/storage/src/blueprint/merklized.rs +++ b/crates/storage/src/blueprint/merklized.rs @@ -4,7 +4,8 @@ use crate::{ blueprint::{ - Blueprint, + BlueprintInspect, + BlueprintMutate, SupportsBatching, SupportsMerkle, }, @@ -15,7 +16,8 @@ use crate::{ }, kv_store::{ BatchOperations, - KeyValueStore, + KeyValueInspect, + KeyValueMutate, }, not_found, structured_storage::StructuredStorage, @@ -53,7 +55,11 @@ impl where Nodes: Mappable, { - fn insert_into_tree(storage: &mut S, key: K, value: &V) -> StorageResult<()> + fn insert_into_tree( + mut storage: &mut S, + key: K, + value: &V, + ) -> StorageResult<()> where V: ?Sized, Metadata: Mappable< @@ -61,11 +67,12 @@ where Value = DenseMerkleMetadata, OwnedValue = DenseMerkleMetadata, >, + S: StorageMutate + + StorageMutate, for<'a> StructuredStorage<&'a mut S>: StorageMutate + StorageMutate, Encoder: Encode, { - let mut storage = StructuredStorage::new(storage); // Get latest metadata entry let prev_metadata = storage .storage::() @@ -99,7 +106,7 @@ where fn remove(storage: &mut S, key: &[u8], column: S::Column) -> StorageResult<()> where - S: KeyValueStore, + S: KeyValueMutate, { if storage.exists(key, column)? 
{ Err(anyhow::anyhow!( @@ -112,11 +119,23 @@ where } } -impl Blueprint +impl BlueprintInspect for Merklized where M: Mappable, - S: KeyValueStore, + S: KeyValueInspect, + KeyCodec: Encode + Decode, + ValueCodec: Encode + Decode, +{ + type KeyCodec = KeyCodec; + type ValueCodec = ValueCodec; +} + +impl BlueprintMutate + for Merklized +where + M: Mappable, + S: KeyValueMutate, KeyCodec: Encode + Decode, ValueCodec: Encode + Decode, Encoder: Encode, @@ -127,12 +146,11 @@ where OwnedValue = DenseMerkleMetadata, >, Nodes: Mappable, + S: StorageMutate + + StorageMutate, for<'a> StructuredStorage<&'a mut S>: StorageMutate + StorageMutate, { - type KeyCodec = KeyCodec; - type ValueCodec = ValueCodec; - fn put( storage: &mut S, key: &M::Key, @@ -200,19 +218,18 @@ impl SupportsMerkle where M: Mappable, - S: KeyValueStore, + S: KeyValueInspect, Metadata: Mappable< Key = DenseMetadataKey, OwnedKey = DenseMetadataKey, Value = DenseMerkleMetadata, OwnedValue = DenseMerkleMetadata, >, - Self: Blueprint, - for<'a> StructuredStorage<&'a S>: StorageInspect, + Self: BlueprintInspect, + S: StorageInspect, { fn root(storage: &S, key: &M::Key) -> StorageResult { use crate::StorageAsRef; - let storage = StructuredStorage::new(storage); let key = key.to_owned().into(); let metadata = storage .storage_as_ref::() @@ -237,6 +254,8 @@ where OwnedValue = DenseMerkleMetadata, >, Nodes: Mappable, + S: StorageMutate + + StorageMutate, for<'a> StructuredStorage<&'a mut S>: StorageMutate + StorageMutate, { @@ -260,7 +279,7 @@ where M::Value: 'a, { for (key, value) in set { - >::replace(storage, key, column, value)?; + >::replace(storage, key, column, value)?; } Ok(()) @@ -296,10 +315,8 @@ macro_rules! 
basic_merklelized_storage_tests { mod [< $table:snake _basic_tests >] { use super::*; use $crate::{ - structured_storage::{ - test::InMemoryStorage, - StructuredStorage, - }, + structured_storage::test::InMemoryStorage, + transactional::WriteTransaction, StorageAsMut, }; use $crate::StorageInspect; @@ -321,16 +338,16 @@ macro_rules! basic_merklelized_storage_tests { #[test] fn get() { let mut storage = InMemoryStorage::default(); - let mut structured_storage = StructuredStorage::new(&mut storage); + let mut storage_transaction = storage.write_transaction(); let key = $key; - structured_storage + storage_transaction .storage_as_mut::<$table>() .insert(&key, &$value_insert) .unwrap(); assert_eq!( - structured_storage + storage_transaction .storage_as_mut::<$table>() .get(&key) .expect("Should get without errors") @@ -343,15 +360,15 @@ macro_rules! basic_merklelized_storage_tests { #[test] fn insert() { let mut storage = InMemoryStorage::default(); - let mut structured_storage = StructuredStorage::new(&mut storage); + let mut storage_transaction = storage.write_transaction(); let key = $key; - structured_storage + storage_transaction .storage_as_mut::<$table>() .insert(&key, &$value_insert) .unwrap(); - let returned = structured_storage + let returned = storage_transaction .storage_as_mut::<$table>() .get(&key) .unwrap() @@ -363,15 +380,15 @@ macro_rules! basic_merklelized_storage_tests { #[test] fn remove_returns_error() { let mut storage = InMemoryStorage::default(); - let mut structured_storage = StructuredStorage::new(&mut storage); + let mut storage_transaction = storage.write_transaction(); let key = $key; - structured_storage + storage_transaction .storage_as_mut::<$table>() .insert(&key, &$value_insert) .unwrap(); - let result = structured_storage.storage_as_mut::<$table>().remove(&key); + let result = storage_transaction.storage_as_mut::<$table>().remove(&key); assert!(result.is_err()); } @@ -379,23 +396,23 @@ macro_rules! 
basic_merklelized_storage_tests { #[test] fn exists() { let mut storage = InMemoryStorage::default(); - let mut structured_storage = StructuredStorage::new(&mut storage); + let mut storage_transaction = storage.write_transaction(); let key = $key; // Given - assert!(!structured_storage + assert!(!storage_transaction .storage_as_mut::<$table>() .contains_key(&key) .unwrap()); // When - structured_storage + storage_transaction .storage_as_mut::<$table>() .insert(&key, &$value_insert) .unwrap(); // Then - assert!(structured_storage + assert!(storage_transaction .storage_as_mut::<$table>() .contains_key(&key) .unwrap()); @@ -413,7 +430,7 @@ macro_rules! basic_merklelized_storage_tests { let empty_storage = InMemoryStorage::default(); let mut init_storage = InMemoryStorage::default(); - let mut init_structured_storage = StructuredStorage::new(&mut init_storage); + let mut init_structured_storage = init_storage.write_transaction(); let mut rng = &mut StdRng::seed_from_u64(1234); let gen = || Some($random_key(&mut rng)); @@ -427,9 +444,10 @@ macro_rules! basic_merklelized_storage_tests { (k, value) }) ).expect("Should initialize the storage successfully"); + init_structured_storage.commit().expect("Should commit the storage"); let mut insert_storage = InMemoryStorage::default(); - let mut insert_structured_storage = StructuredStorage::new(&mut insert_storage); + let mut insert_structured_storage = insert_storage.write_transaction(); <_ as $crate::StorageBatchMutate<$table>>::insert_batch( &mut insert_structured_storage, @@ -438,6 +456,7 @@ macro_rules! basic_merklelized_storage_tests { (k, value) }) ).expect("Should insert batch successfully"); + insert_structured_storage.commit().expect("Should commit the storage"); assert_eq!(init_storage, insert_storage); assert_ne!(init_storage, empty_storage); @@ -454,7 +473,7 @@ macro_rules! 
basic_merklelized_storage_tests { }; let mut init_storage = InMemoryStorage::default(); - let mut init_structured_storage = StructuredStorage::new(&mut init_storage); + let mut init_structured_storage = init_storage.write_transaction(); let mut rng = &mut StdRng::seed_from_u64(1234); let gen = || Some($random_key(&mut rng)); @@ -480,9 +499,9 @@ macro_rules! basic_merklelized_storage_tests { #[test] fn root_returns_error_empty_metadata() { let mut storage = InMemoryStorage::default(); - let mut structured_storage = StructuredStorage::new(&mut storage); + let mut storage_transaction = storage.write_transaction(); - let root = structured_storage + let root = storage_transaction .storage_as_mut::<$table>() .root(&$key); assert!(root.is_err()) @@ -491,15 +510,15 @@ macro_rules! basic_merklelized_storage_tests { #[test] fn update_produces_non_zero_root() { let mut storage = InMemoryStorage::default(); - let mut structured_storage = StructuredStorage::new(&mut storage); + let mut storage_transaction = storage.write_transaction(); let mut rng = rand::rngs::StdRng::seed_from_u64(1234); let key = $random_key(&mut rng); let value = $value_insert; - structured_storage.storage_as_mut::<$table>().insert(&key, &value) + storage_transaction.storage_as_mut::<$table>().insert(&key, &value) .unwrap(); - let root = structured_storage.storage_as_mut::<$table>().root(&key) + let root = storage_transaction.storage_as_mut::<$table>().root(&key) .expect("Should get the root"); let empty_root = fuel_core_types::fuel_merkle::binary::in_memory::MerkleTree::new().root(); assert_ne!(root, empty_root); @@ -508,7 +527,7 @@ macro_rules! basic_merklelized_storage_tests { #[test] fn has_different_root_after_each_update() { let mut storage = InMemoryStorage::default(); - let mut structured_storage = StructuredStorage::new(&mut storage); + let mut storage_transaction = storage.write_transaction(); let mut rng = rand::rngs::StdRng::seed_from_u64(1234); @@ -517,10 +536,10 @@ macro_rules! 
basic_merklelized_storage_tests { for _ in 0..10 { let key = $random_key(&mut rng); let value = $value_insert; - structured_storage.storage_as_mut::<$table>().insert(&key, &value) + storage_transaction.storage_as_mut::<$table>().insert(&key, &value) .unwrap(); - let root = structured_storage.storage_as_mut::<$table>().root(&key) + let root = storage_transaction.storage_as_mut::<$table>().root(&key) .expect("Should get the root"); assert_ne!(root, prev_root); prev_root = root; diff --git a/crates/storage/src/blueprint/plain.rs b/crates/storage/src/blueprint/plain.rs index 51a5dcc7d31..509fc1e9dab 100644 --- a/crates/storage/src/blueprint/plain.rs +++ b/crates/storage/src/blueprint/plain.rs @@ -5,7 +5,8 @@ use crate::{ blueprint::{ - Blueprint, + BlueprintInspect, + BlueprintMutate, SupportsBatching, }, codec::{ @@ -15,7 +16,8 @@ use crate::{ }, kv_store::{ BatchOperations, - KeyValueStore, + KeyValueInspect, + KeyValueMutate, StorageColumn, WriteOperation, }, @@ -31,16 +33,24 @@ pub struct Plain { _marker: core::marker::PhantomData<(KeyCodec, ValueCodec)>, } -impl Blueprint for Plain +impl BlueprintInspect for Plain where M: Mappable, - S: KeyValueStore, + S: KeyValueInspect, KeyCodec: Encode + Decode, ValueCodec: Encode + Decode, { type KeyCodec = KeyCodec; type ValueCodec = ValueCodec; +} +impl BlueprintMutate for Plain +where + M: Mappable, + S: KeyValueMutate, + KeyCodec: Encode + Decode, + ValueCodec: Encode + Decode, +{ fn put( storage: &mut S, key: &M::Key, @@ -97,9 +107,8 @@ impl SupportsBatching where Column: StorageColumn, S: BatchOperations, - M: Mappable - + TableWithBlueprint, Column = Column>, - M::Blueprint: Blueprint, + M: TableWithBlueprint, Column = Column>, + M::Blueprint: BlueprintMutate, { fn init<'a, Iter>(storage: &mut S, column: S::Column, set: Iter) -> StorageResult<()> where @@ -120,13 +129,19 @@ where M::Key: 'a, M::Value: 'a, { - storage.batch_write(&mut set.map(|(key, value)| { - let key_encoder = >::KeyCodec::encode(key); - let 
key_bytes = key_encoder.as_bytes().to_vec(); - let value = - >::ValueCodec::encode_as_value(value); - (key_bytes, column, WriteOperation::Insert(value)) - })) + storage.batch_write( + column, + set.map(|(key, value)| { + let key_encoder = + >::KeyCodec::encode(key); + let key_bytes = key_encoder.as_bytes().to_vec(); + let value = + >::ValueCodec::encode_as_value( + value, + ); + (key_bytes, WriteOperation::Insert(value)) + }), + ) } fn remove<'a, Iter>( @@ -138,11 +153,15 @@ where Iter: 'a + Iterator, M::Key: 'a, { - storage.batch_write(&mut set.map(|key| { - let key_encoder = >::KeyCodec::encode(key); - let key_bytes = key_encoder.as_bytes().to_vec(); - (key_bytes, column, WriteOperation::Remove) - })) + storage.batch_write( + column, + set.map(|key| { + let key_encoder = + >::KeyCodec::encode(key); + let key_bytes = key_encoder.as_bytes().to_vec(); + (key_bytes, WriteOperation::Remove) + }), + ) } } @@ -158,10 +177,8 @@ macro_rules! basic_storage_tests { mod [< $table:snake _basic_tests >] { use super::*; use $crate::{ - structured_storage::{ - test::InMemoryStorage, - StructuredStorage, - }, + structured_storage::test::InMemoryStorage, + transactional::WriteTransaction, StorageAsMut, }; use $crate::StorageInspect; @@ -181,16 +198,16 @@ macro_rules! basic_storage_tests { #[test] fn get() { let mut storage = InMemoryStorage::default(); - let mut structured_storage = StructuredStorage::new(&mut storage); + let mut storage_transaction = storage.write_transaction(); let key = $key; - structured_storage + storage_transaction .storage_as_mut::<$table>() .insert(&key, &$value_insert) .unwrap(); assert_eq!( - structured_storage + storage_transaction .storage_as_mut::<$table>() .get(&key) .expect("Should get without errors") @@ -203,15 +220,15 @@ macro_rules! 
basic_storage_tests { #[test] fn insert() { let mut storage = InMemoryStorage::default(); - let mut structured_storage = StructuredStorage::new(&mut storage); + let mut storage_transaction = storage.write_transaction(); let key = $key; - structured_storage + storage_transaction .storage_as_mut::<$table>() .insert(&key, &$value_insert) .unwrap(); - let returned = structured_storage + let returned = storage_transaction .storage_as_mut::<$table>() .get(&key) .unwrap() @@ -223,17 +240,17 @@ macro_rules! basic_storage_tests { #[test] fn remove() { let mut storage = InMemoryStorage::default(); - let mut structured_storage = StructuredStorage::new(&mut storage); + let mut storage_transaction = storage.write_transaction(); let key = $key; - structured_storage + storage_transaction .storage_as_mut::<$table>() .insert(&key, &$value_insert) .unwrap(); - structured_storage.storage_as_mut::<$table>().remove(&key).unwrap(); + storage_transaction.storage_as_mut::<$table>().remove(&key).unwrap(); - assert!(!structured_storage + assert!(!storage_transaction .storage_as_mut::<$table>() .contains_key(&key) .unwrap()); @@ -242,23 +259,23 @@ macro_rules! basic_storage_tests { #[test] fn exists() { let mut storage = InMemoryStorage::default(); - let mut structured_storage = StructuredStorage::new(&mut storage); + let mut storage_transaction = storage.write_transaction(); let key = $key; // Given - assert!(!structured_storage + assert!(!storage_transaction .storage_as_mut::<$table>() .contains_key(&key) .unwrap()); // When - structured_storage + storage_transaction .storage_as_mut::<$table>() .insert(&key, &$value_insert) .unwrap(); // Then - assert!(structured_storage + assert!(storage_transaction .storage_as_mut::<$table>() .contains_key(&key) .unwrap()); @@ -267,23 +284,23 @@ macro_rules! 
basic_storage_tests { #[test] fn exists_false_after_removing() { let mut storage = InMemoryStorage::default(); - let mut structured_storage = StructuredStorage::new(&mut storage); + let mut storage_transaction = storage.write_transaction(); let key = $key; // Given - structured_storage + storage_transaction .storage_as_mut::<$table>() .insert(&key, &$value_insert) .unwrap(); // When - structured_storage + storage_transaction .storage_as_mut::<$table>() .remove(&key) .unwrap(); // Then - assert!(!structured_storage + assert!(!storage_transaction .storage_as_mut::<$table>() .contains_key(&key) .unwrap()); @@ -301,7 +318,7 @@ macro_rules! basic_storage_tests { let empty_storage = InMemoryStorage::default(); let mut init_storage = InMemoryStorage::default(); - let mut init_structured_storage = StructuredStorage::new(&mut init_storage); + let mut init_structured_storage = init_storage.write_transaction(); let mut rng = &mut StdRng::seed_from_u64(1234); let gen = || Some($random_key(&mut rng)); @@ -315,9 +332,10 @@ macro_rules! basic_storage_tests { (k, value) }) ).expect("Should initialize the storage successfully"); + init_structured_storage.commit().expect("Should commit changes"); let mut insert_storage = InMemoryStorage::default(); - let mut insert_structured_storage = StructuredStorage::new(&mut insert_storage); + let mut insert_structured_storage = insert_storage.write_transaction(); <_ as $crate::StorageBatchMutate<$table>>::insert_batch( &mut insert_structured_storage, @@ -326,24 +344,27 @@ macro_rules! 
basic_storage_tests { (k, value) }) ).expect("Should insert batch successfully"); + insert_structured_storage.commit().expect("Should commit changes"); assert_eq!(init_storage, insert_storage); assert_ne!(init_storage, empty_storage); assert_ne!(insert_storage, empty_storage); - let mut remove_from_insert_structured_storage = StructuredStorage::new(&mut insert_storage); + let mut remove_from_insert_structured_storage = insert_storage.write_transaction(); <_ as $crate::StorageBatchMutate<$table>>::remove_batch( &mut remove_from_insert_structured_storage, &mut data.iter() ).expect("Should remove all entries successfully from insert storage"); + remove_from_insert_structured_storage.commit().expect("Should commit changes"); assert_ne!(init_storage, insert_storage); assert_eq!(insert_storage, empty_storage); - let mut remove_from_init_structured_storage = StructuredStorage::new(&mut init_storage); + let mut remove_from_init_structured_storage = init_storage.write_transaction(); <_ as $crate::StorageBatchMutate<$table>>::remove_batch( &mut remove_from_init_structured_storage, &mut data.iter() ).expect("Should remove all entries successfully from init storage"); + remove_from_init_structured_storage.commit().expect("Should commit changes"); assert_eq!(init_storage, insert_storage); assert_eq!(init_storage, empty_storage); } diff --git a/crates/storage/src/blueprint/sparse.rs b/crates/storage/src/blueprint/sparse.rs index 9bcaaeaf124..0e77cf4a8cc 100644 --- a/crates/storage/src/blueprint/sparse.rs +++ b/crates/storage/src/blueprint/sparse.rs @@ -5,7 +5,8 @@ use crate::{ blueprint::{ - Blueprint, + BlueprintInspect, + BlueprintMutate, SupportsBatching, SupportsMerkle, }, @@ -16,14 +17,12 @@ use crate::{ }, kv_store::{ BatchOperations, - KeyValueStore, + KeyValueInspect, + KeyValueMutate, StorageColumn, WriteOperation, }, - structured_storage::{ - StructuredStorage, - TableWithBlueprint, - }, + structured_storage::TableWithBlueprint, tables::merkle::SparseMerkleMetadata, 
Error as StorageError, Mappable, @@ -91,11 +90,10 @@ where ) -> StorageResult<()> where K: ?Sized, - for<'a> StructuredStorage<&'a mut S>: StorageMutate + S: StorageMutate + StorageMutate, KeyConverter: PrimaryKey, { - let mut storage = StructuredStorage::new(storage); let primary_key = KeyConverter::primary_key(key); // Get latest metadata entry for this `primary_key` let prev_metadata: Cow = storage @@ -104,7 +102,7 @@ where .unwrap_or_default(); let root = *prev_metadata.root(); - let mut tree: MerkleTree = MerkleTree::load(&mut storage, &root) + let mut tree: MerkleTree = MerkleTree::load(storage, &root) .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; tree.update(MerkleTreeKey::new(key_bytes), value_bytes) @@ -112,6 +110,7 @@ where // Generate new metadata for the updated tree let root = tree.root(); + let storage = tree.into_storage(); let metadata = SparseMerkleMetadata::new(root); storage .storage::() @@ -126,11 +125,10 @@ where ) -> StorageResult<()> where K: ?Sized, - for<'a> StructuredStorage<&'a mut S>: StorageMutate + S: StorageMutate + StorageMutate, KeyConverter: PrimaryKey, { - let mut storage = StructuredStorage::new(storage); let primary_key = KeyConverter::primary_key(key); // Get latest metadata entry for this `primary_key` let prev_metadata: Option> = @@ -139,13 +137,14 @@ where if let Some(prev_metadata) = prev_metadata { let root = *prev_metadata.root(); - let mut tree: MerkleTree = MerkleTree::load(&mut storage, &root) + let mut tree: MerkleTree = MerkleTree::load(storage, &root) .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; tree.delete(MerkleTreeKey::new(key_bytes)) .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; let root = tree.root(); + let storage = tree.into_storage(); if &root == MerkleTree::::empty_root() { // The tree is now empty; remove the metadata storage.storage::().remove(primary_key)?; @@ -162,11 +161,23 @@ where } } -impl Blueprint +impl BlueprintInspect for Sparse where 
M: Mappable, - S: KeyValueStore, + S: KeyValueInspect, + KeyCodec: Encode + Decode, + ValueCodec: Encode + Decode, +{ + type KeyCodec = KeyCodec; + type ValueCodec = ValueCodec; +} + +impl BlueprintMutate + for Sparse +where + M: Mappable, + S: KeyValueMutate, KeyCodec: Encode + Decode, ValueCodec: Encode + Decode, Metadata: Mappable, @@ -176,12 +187,9 @@ where OwnedValue = sparse::Primitive, >, KeyConverter: PrimaryKey, - for<'a> StructuredStorage<&'a mut S>: StorageMutate + S: StorageMutate + StorageMutate, { - type KeyCodec = KeyCodec; - type ValueCodec = ValueCodec; - fn put( storage: &mut S, key: &M::Key, @@ -245,14 +253,13 @@ impl for Sparse where M: Mappable, - S: KeyValueStore, + S: KeyValueInspect, Metadata: Mappable, - Self: Blueprint, - for<'a> StructuredStorage<&'a S>: StorageInspect, + Self: BlueprintInspect, + S: StorageInspect, { fn root(storage: &S, key: &Metadata::Key) -> StorageResult { use crate::StorageAsRef; - let storage = StructuredStorage::new(storage); let metadata: Option> = storage.storage_as_ref::().get(key)?; let root = metadata @@ -263,20 +270,19 @@ where } type NodeKeyCodec = - <::Blueprint as Blueprint>::KeyCodec; + <::Blueprint as BlueprintInspect>::KeyCodec; type NodeValueCodec = - <::Blueprint as Blueprint>::ValueCodec; + <::Blueprint as BlueprintInspect>::ValueCodec; impl SupportsBatching for Sparse where Column: StorageColumn, S: BatchOperations, - M: Mappable - + TableWithBlueprint< - Blueprint = Sparse, - Column = Column, - >, + M: TableWithBlueprint< + Blueprint = Sparse, + Column = Column, + >, KeyCodec: Encode + Decode, ValueCodec: Encode + Decode, Metadata: Mappable, @@ -286,8 +292,8 @@ where OwnedValue = sparse::Primitive, > + TableWithBlueprint, KeyConverter: PrimaryKey, - Nodes::Blueprint: Blueprint, - for<'a> StructuredStorage<&'a mut S>: StorageMutate + Nodes::Blueprint: BlueprintInspect, + S: StorageMutate + StorageMutate + StorageMutate, { @@ -306,8 +312,6 @@ where return Ok(()) } - let mut storage = 
StructuredStorage::new(storage); - if storage.storage::().contains_key(primary_key)? { return Err(anyhow::anyhow!( "The {} is already initialized", @@ -330,20 +334,21 @@ where .map(|(key, value)| (MerkleTreeKey::new(key), value)), ); - storage.as_mut().batch_write( - &mut encoded_set + storage.batch_write( + column, + encoded_set .into_iter() - .map(|(key, value)| (key, column, WriteOperation::Insert(value.into()))), + .map(|(key, value)| (key, WriteOperation::Insert(value.into()))), )?; - let mut nodes = nodes.iter().map(|(key, value)| { + let nodes = nodes.iter().map(|(key, value)| { let key = NodeKeyCodec::::encode(key) .as_bytes() .into_owned(); let value = NodeValueCodec::::encode_as_value(value); - (key, Nodes::column(), WriteOperation::Insert(value)) + (key, WriteOperation::Insert(value)) }); - storage.as_mut().batch_write(&mut nodes)?; + storage.batch_write(Nodes::column(), nodes)?; let metadata = SparseMerkleMetadata::new(root); storage @@ -372,14 +377,13 @@ where return Ok(()) } - let mut storage = StructuredStorage::new(storage); let prev_metadata: Cow = storage .storage::() .get(primary_key)? 
.unwrap_or_default(); let root = *prev_metadata.root(); - let mut tree: MerkleTree = MerkleTree::load(&mut storage, &root) + let mut tree: MerkleTree = MerkleTree::load(storage, &root) .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; let encoded_set = set @@ -395,11 +399,13 @@ where .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; } let root = tree.root(); + let storage = tree.into_storage(); - storage.as_mut().batch_write( - &mut encoded_set + storage.batch_write( + column, + encoded_set .into_iter() - .map(|(key, value)| (key, column, WriteOperation::Insert(value.into()))), + .map(|(key, value)| (key, WriteOperation::Insert(value.into()))), )?; // Generate new metadata for the updated tree @@ -429,14 +435,13 @@ where return Ok(()) } - let mut storage = StructuredStorage::new(storage); let prev_metadata: Cow = storage .storage::() .get(primary_key)? .unwrap_or_default(); let root = *prev_metadata.root(); - let mut tree: MerkleTree = MerkleTree::load(&mut storage, &root) + let mut tree: MerkleTree = MerkleTree::load(storage, &root) .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; let encoded_set = set @@ -448,11 +453,13 @@ where .map_err(|err| StorageError::Other(anyhow::anyhow!("{err:?}")))?; } let root = tree.root(); + let storage = tree.into_storage(); - storage.as_mut().batch_write( - &mut encoded_set + storage.batch_write( + column, + encoded_set .into_iter() - .map(|key| (key, column, WriteOperation::Remove)), + .map(|key| (key, WriteOperation::Remove)), )?; if &root == MerkleTree::::empty_root() { @@ -480,10 +487,8 @@ macro_rules! root_storage_tests { mod [< $table:snake _root_tests >] { use super::*; use $crate::{ - structured_storage::{ - test::InMemoryStorage, - StructuredStorage, - }, + structured_storage::test::InMemoryStorage, + transactional::WriteTransaction, StorageAsMut, }; use $crate::rand::{ @@ -494,25 +499,25 @@ macro_rules! 
root_storage_tests { #[test] fn root() { let mut storage = InMemoryStorage::default(); - let mut structured_storage = StructuredStorage::new(&mut storage); + let mut storage_transaction = storage.write_transaction(); let rng = &mut StdRng::seed_from_u64(1234); let key = $generate_key(&$current_key, rng); let value = $generate_value(rng); - structured_storage.storage_as_mut::<$table>().insert(&key, &value) + storage_transaction.storage_as_mut::<$table>().insert(&key, &value) .unwrap(); - let root = structured_storage.storage_as_mut::<$table>().root(&$current_key); + let root = storage_transaction.storage_as_mut::<$table>().root(&$current_key); assert!(root.is_ok()) } #[test] fn root_returns_empty_root_for_empty_metadata() { let mut storage = InMemoryStorage::default(); - let mut structured_storage = StructuredStorage::new(&mut storage); + let mut storage_transaction = storage.write_transaction(); let empty_root = fuel_core_types::fuel_merkle::sparse::in_memory::MerkleTree::new().root(); - let root = structured_storage + let root = storage_transaction .storage_as_mut::<$table>() .root(&$current_key) .unwrap(); @@ -522,20 +527,20 @@ macro_rules! root_storage_tests { #[test] fn put_updates_the_state_merkle_root_for_the_given_metadata() { let mut storage = InMemoryStorage::default(); - let mut structured_storage = StructuredStorage::new(&mut storage); + let mut storage_transaction = storage.write_transaction(); let rng = &mut StdRng::seed_from_u64(1234); let key = $generate_key(&$current_key, rng); let state = $generate_value(rng); // Write the first contract state - structured_storage + storage_transaction .storage_as_mut::<$table>() .insert(&key, &state) .unwrap(); // Read the first Merkle root - let root_1 = structured_storage + let root_1 = storage_transaction .storage_as_mut::<$table>() .root(&$current_key) .unwrap(); @@ -543,13 +548,13 @@ macro_rules! 
root_storage_tests { // Write the second contract state let key = $generate_key(&$current_key, rng); let state = $generate_value(rng); - structured_storage + storage_transaction .storage_as_mut::<$table>() .insert(&key, &state) .unwrap(); // Read the second Merkle root - let root_2 = structured_storage + let root_2 = storage_transaction .storage_as_mut::<$table>() .root(&$current_key) .unwrap(); @@ -560,18 +565,18 @@ macro_rules! root_storage_tests { #[test] fn remove_updates_the_state_merkle_root_for_the_given_metadata() { let mut storage = InMemoryStorage::default(); - let mut structured_storage = StructuredStorage::new(&mut storage); + let mut storage_transaction = storage.write_transaction(); let rng = &mut StdRng::seed_from_u64(1234); // Write the first contract state let first_key = $generate_key(&$current_key, rng); let first_state = $generate_value(rng); - structured_storage + storage_transaction .storage_as_mut::<$table>() .insert(&first_key, &first_state) .unwrap(); - let root_0 = structured_storage + let root_0 = storage_transaction .storage_as_mut::<$table>() .root(&$current_key) .unwrap(); @@ -579,22 +584,22 @@ macro_rules! root_storage_tests { // Write the second contract state let second_key = $generate_key(&$current_key, rng); let second_state = $generate_value(rng); - structured_storage + storage_transaction .storage_as_mut::<$table>() .insert(&second_key, &second_state) .unwrap(); // Read the first Merkle root - let root_1 = structured_storage + let root_1 = storage_transaction .storage_as_mut::<$table>() .root(&$current_key) .unwrap(); // Remove the second contract state - structured_storage.storage_as_mut::<$table>().remove(&second_key).unwrap(); + storage_transaction.storage_as_mut::<$table>().remove(&second_key).unwrap(); // Read the second Merkle root - let root_2 = structured_storage + let root_2 = storage_transaction .storage_as_mut::<$table>() .root(&$current_key) .unwrap(); @@ -609,7 +614,7 @@ macro_rules! 
root_storage_tests { let foreign_primary_key = $foreign_key; let mut storage = InMemoryStorage::default(); - let mut structured_storage = StructuredStorage::new(&mut storage); + let mut storage_transaction = storage.write_transaction(); let rng = &mut StdRng::seed_from_u64(1234); @@ -618,23 +623,23 @@ macro_rules! root_storage_tests { // Given let given_key = $generate_key(&given_primary_key, rng); let foreign_key = $generate_key(&foreign_primary_key, rng); - structured_storage + storage_transaction .storage_as_mut::<$table>() .insert(&given_key, &state_value) .unwrap(); // When - structured_storage + storage_transaction .storage_as_mut::<$table>() .insert(&foreign_key, &state_value) .unwrap(); - structured_storage + storage_transaction .storage_as_mut::<$table>() .remove(&foreign_key) .unwrap(); // Then - let result = structured_storage + let result = storage_transaction .storage_as_mut::<$table>() .insert(&given_key, &state_value) .unwrap(); @@ -645,7 +650,7 @@ macro_rules! root_storage_tests { #[test] fn put_creates_merkle_metadata_when_empty() { let mut storage = InMemoryStorage::default(); - let mut structured_storage = StructuredStorage::new(&mut storage); + let mut storage_transaction = storage.write_transaction(); let rng = &mut StdRng::seed_from_u64(1234); @@ -654,13 +659,13 @@ macro_rules! root_storage_tests { let state = $generate_value(rng); // Write a contract state - structured_storage + storage_transaction .storage_as_mut::<$table>() .insert(&key, &state) .unwrap(); // Read the Merkle metadata - let metadata = structured_storage + let metadata = storage_transaction .storage_as_mut::<$metadata_table>() .get(&$current_key) .unwrap(); @@ -671,7 +676,7 @@ macro_rules! 
root_storage_tests { #[test] fn remove_deletes_merkle_metadata_when_empty() { let mut storage = InMemoryStorage::default(); - let mut structured_storage = StructuredStorage::new(&mut storage); + let mut storage_transaction = storage.write_transaction(); let rng = &mut StdRng::seed_from_u64(1234); @@ -680,23 +685,23 @@ macro_rules! root_storage_tests { let state = $generate_value(rng); // Write a contract state - structured_storage + storage_transaction .storage_as_mut::<$table>() .insert(&key, &state) .unwrap(); // Read the Merkle metadata - structured_storage + storage_transaction .storage_as_mut::<$metadata_table>() .get(&$current_key) .unwrap() .expect("Expected Merkle metadata to be present"); // Remove the contract asset - structured_storage.storage_as_mut::<$table>().remove(&key).unwrap(); + storage_transaction.storage_as_mut::<$table>().remove(&key).unwrap(); // Read the Merkle metadata - let metadata = structured_storage + let metadata = storage_transaction .storage_as_mut::<$metadata_table>() .get(&$current_key) .unwrap(); diff --git a/crates/storage/src/column.rs b/crates/storage/src/column.rs index 8f496aee5fc..695ef191e60 100644 --- a/crates/storage/src/column.rs +++ b/crates/storage/src/column.rs @@ -67,14 +67,6 @@ pub enum Column { /// Table for genesis state import progress tracking. GenesisMetadata = 19, - /// Table for coin roots during genesis state import. - GenesisCoinRoots = 20, - /// Table for message roots during genesis state import. - GenesisMessageRoots = 21, - /// Table for contract roots during genesis state import. - GenesisContractRoots = 22, - /// Table for contract ids during genesis state import. - GenesisContractIds = 23, } impl Column { diff --git a/crates/storage/src/iter.rs b/crates/storage/src/iter.rs index ee07b3a32d6..a66d6a3ff0a 100644 --- a/crates/storage/src/iter.rs +++ b/crates/storage/src/iter.rs @@ -1,8 +1,22 @@ //! The module defines primitives that allow iterating of the storage. 
-use crate::kv_store::{ - KVItem, - KeyValueStore, +use crate::{ + blueprint::BlueprintInspect, + codec::{ + Decode, + Encode, + Encoder, + }, + kv_store::{ + KVItem, + KeyValueInspect, + }, + structured_storage::TableWithBlueprint, +}; +use fuel_vm_private::fuel_storage::Mappable; +use std::{ + collections::BTreeMap, + sync::Arc, }; // TODO: BoxedIter to be used until RPITIT lands in stable rust. @@ -51,10 +65,11 @@ impl Default for IterDirection { } } -/// A trait for iterating over the storage of [`KeyValueStore`]. -pub trait IteratorableStore: KeyValueStore { +/// A trait for iterating over the storage of [`KeyValueInspect`]. +#[impl_tools::autoimpl(for &T, &mut T, Box, Arc)] +pub trait IterableStore: KeyValueInspect { /// Returns an iterator over the values in the storage. - fn iter_all( + fn iter_store( &self, column: Self::Column, prefix: Option<&[u8]>, @@ -62,3 +77,178 @@ pub trait IteratorableStore: KeyValueStore { direction: IterDirection, ) -> BoxedIter; } + +/// A trait for iterating over the `Mappable` table. +pub trait IterableTable +where + M: Mappable, +{ + /// Returns an iterator over the all entries in the table with a prefix after a specific start key. + fn iter_table

( + &self, + prefix: Option

, + start: Option<&M::Key>, + direction: Option, + ) -> BoxedIter> + where + P: AsRef<[u8]>; +} + +impl IterableTable for S +where + M: TableWithBlueprint, + M::Blueprint: BlueprintInspect, + S: IterableStore, +{ + fn iter_table

( + &self, + prefix: Option

, + start: Option<&M::Key>, + direction: Option, + ) -> BoxedIter> + where + P: AsRef<[u8]>, + { + let encoder = start.map(|start| { + >::KeyCodec::encode(start) + }); + + let start = encoder.as_ref().map(|encoder| encoder.as_bytes()); + + IterableStore::iter_store( + self, + M::column(), + prefix.as_ref().map(|p| p.as_ref()), + start.as_ref().map(|cow| cow.as_ref()), + direction.unwrap_or_default(), + ) + .map(|val| { + val.and_then(|(key, value)| { + let key = >::KeyCodec::decode( + key.as_slice(), + ) + .map_err(|e| crate::Error::Codec(anyhow::anyhow!(e)))?; + let value = + >::ValueCodec::decode( + value.as_slice(), + ) + .map_err(|e| crate::Error::Codec(anyhow::anyhow!(e)))?; + Ok((key, value)) + }) + }) + .into_boxed() + } +} + +/// A helper trait to provide a user-friendly API over table iteration. +pub trait IteratorOverTable { + /// Returns an iterator over the all entries in the table. + fn iter_all( + &self, + direction: Option, + ) -> BoxedIter> + where + M: Mappable, + Self: IterableTable, + { + self.iter_all_filtered::(None, None, direction) + } + + /// Returns an iterator over the all entries in the table with the specified prefix. + fn iter_all_by_prefix( + &self, + prefix: Option

, + ) -> BoxedIter> + where + M: Mappable, + P: AsRef<[u8]>, + Self: IterableTable, + { + self.iter_all_filtered::(prefix, None, None) + } + + /// Returns an iterator over the all entries in the table after a specific start key. + fn iter_all_by_start( + &self, + start: Option<&M::Key>, + direction: Option, + ) -> BoxedIter> + where + M: Mappable, + Self: IterableTable, + { + self.iter_all_filtered::(None, start, direction) + } + + /// Returns an iterator over the all entries in the table with a prefix after a specific start key. + fn iter_all_filtered( + &self, + prefix: Option

, + start: Option<&M::Key>, + direction: Option, + ) -> BoxedIter> + where + M: Mappable, + P: AsRef<[u8]>, + Self: IterableTable, + { + self.iter_table(prefix, start, direction) + } +} + +impl IteratorOverTable for S {} + +/// Returns an iterator over the values in the `BTreeMap`. +pub fn iterator<'a, V>( + tree: &'a BTreeMap, V>, + prefix: Option<&[u8]>, + start: Option<&[u8]>, + direction: IterDirection, +) -> impl Iterator, &'a V)> + 'a { + match (prefix, start) { + (None, None) => { + if direction == IterDirection::Forward { + tree.iter().into_boxed() + } else { + tree.iter().rev().into_boxed() + } + } + (Some(prefix), None) => { + let prefix = prefix.to_vec(); + if direction == IterDirection::Forward { + tree.range(prefix.clone()..) + .take_while(move |(key, _)| key.starts_with(prefix.as_slice())) + .into_boxed() + } else { + let mut vec: Vec<_> = tree + .range(prefix.clone()..) + .into_boxed() + .take_while(|(key, _)| key.starts_with(prefix.as_slice())) + .collect(); + + vec.reverse(); + vec.into_iter().into_boxed() + } + } + (None, Some(start)) => { + if direction == IterDirection::Forward { + tree.range(start.to_vec()..).into_boxed() + } else { + tree.range(..=start.to_vec()).rev().into_boxed() + } + } + (Some(prefix), Some(start)) => { + let prefix = prefix.to_vec(); + if direction == IterDirection::Forward { + tree.range(start.to_vec()..) + .take_while(move |(key, _)| key.starts_with(prefix.as_slice())) + .into_boxed() + } else { + tree.range(..=start.to_vec()) + .rev() + .take_while(move |(key, _)| key.starts_with(prefix.as_slice())) + .into_boxed() + } + } + } +} diff --git a/crates/storage/src/kv_store.rs b/crates/storage/src/kv_store.rs index cb51499d0fe..ae60b9a012b 100644 --- a/crates/storage/src/kv_store.rs +++ b/crates/storage/src/kv_store.rs @@ -25,47 +25,12 @@ pub trait StorageColumn: Copy + core::fmt::Debug { } } -// TODO: Use `&mut self` for all mutable methods. 
-// It requires refactoring of all services because right now, most of them work with `&self` storage. -/// The definition of the key-value store. +/// The definition of the key-value inspection store. #[impl_tools::autoimpl(for &T, &mut T, Box, Arc)] -pub trait KeyValueStore { +pub trait KeyValueInspect { /// The type of the column. type Column: StorageColumn; - /// Inserts the `Value` into the storage. - fn put(&self, key: &[u8], column: Self::Column, value: Value) -> StorageResult<()> { - self.write(key, column, value.as_ref()).map(|_| ()) - } - - /// Put the `Value` into the storage and return the old value. - fn replace( - &self, - key: &[u8], - column: Self::Column, - value: Value, - ) -> StorageResult> { - // FIXME: This is a race condition. We should use a transaction. - let old_value = self.get(key, column)?; - self.put(key, column, value)?; - Ok(old_value) - } - - /// Writes the `buf` into the storage and returns the number of written bytes. - fn write(&self, key: &[u8], column: Self::Column, buf: &[u8]) - -> StorageResult; - - /// Removes the value from the storage and returns it. - fn take(&self, key: &[u8], column: Self::Column) -> StorageResult> { - // FIXME: This is a race condition. We should use a transaction. - let old_value = self.get(key, column)?; - self.delete(key, column)?; - Ok(old_value) - } - - /// Removes the value from the storage. - fn delete(&self, key: &[u8], column: Self::Column) -> StorageResult<()>; - /// Checks if the value exists in the storage. fn exists(&self, key: &[u8], column: Self::Column) -> StorageResult { Ok(self.size_of_value(key, column)?.is_some()) @@ -105,8 +70,52 @@ pub trait KeyValueStore { } } +/// The definition of the key-value mutation store. +#[impl_tools::autoimpl(for &mut T, Box)] +pub trait KeyValueMutate: KeyValueInspect { + /// Inserts the `Value` into the storage. 
+ fn put( + &mut self, + key: &[u8], + column: Self::Column, + value: Value, + ) -> StorageResult<()> { + self.write(key, column, value.as_ref()).map(|_| ()) + } + + /// Put the `Value` into the storage and return the old value. + fn replace( + &mut self, + key: &[u8], + column: Self::Column, + value: Value, + ) -> StorageResult> { + let old_value = self.get(key, column)?; + self.put(key, column, value)?; + Ok(old_value) + } + + /// Writes the `buf` into the storage and returns the number of written bytes. + fn write( + &mut self, + key: &[u8], + column: Self::Column, + buf: &[u8], + ) -> StorageResult; + + /// Removes the value from the storage and returns it. + fn take(&mut self, key: &[u8], column: Self::Column) -> StorageResult> { + let old_value = self.get(key, column)?; + self.delete(key, column)?; + Ok(old_value) + } + + /// Removes the value from the storage. + fn delete(&mut self, key: &[u8], column: Self::Column) -> StorageResult<()>; +} + /// The operation to write into the storage. -#[derive(Debug)] +#[derive(Clone, Debug, PartialEq, Eq)] pub enum WriteOperation { /// Insert the value into the storage. Insert(Value), @@ -115,28 +124,10 @@ pub enum WriteOperation { } /// The definition of the key-value store with batch operations. -#[impl_tools::autoimpl(for &T, &mut T, Box, Arc)] -pub trait BatchOperations: KeyValueStore { +#[impl_tools::autoimpl(for &mut T, Box)] +pub trait BatchOperations: KeyValueMutate { /// Writes the batch of the entries into the storage. - // TODO: Replace `dyn Iterator` with a generic iterator when `Database` will not use `dyn BatchOperations`. - fn batch_write( - &self, - entries: &mut dyn Iterator, Self::Column, WriteOperation)>, - ) -> StorageResult<()> { - // TODO: Optimize implementation for in-memory storages. 
- for (key, column, op) in entries { - match op { - WriteOperation::Insert(value) => { - self.put(&key, column, value)?; - } - WriteOperation::Remove => { - self.delete(&key, column)?; - } - } - } - Ok(()) - } - - /// Deletes all values from the storage. - fn delete_all(&self, column: Self::Column) -> StorageResult<()>; + fn batch_write(&mut self, column: Self::Column, entries: I) -> StorageResult<()> + where + I: Iterator, WriteOperation)>; } diff --git a/crates/storage/src/lib.rs b/crates/storage/src/lib.rs index ac1b437b668..4d25f4d4b97 100644 --- a/crates/storage/src/lib.rs +++ b/crates/storage/src/lib.rs @@ -119,6 +119,7 @@ impl IsNotFound for Result { /// The traits allow work with the storage in batches. /// Some implementations can perform batch operations faster than one by one. +#[impl_tools::autoimpl(for &mut T)] pub trait StorageBatchMutate: StorageMutate { /// Initialize the storage with batch insertion. This method is more performant than /// [`Self::insert_batch`] in some cases. diff --git a/crates/storage/src/structured_storage.rs b/crates/storage/src/structured_storage.rs index 5b0e186d666..727c43f8852 100644 --- a/crates/storage/src/structured_storage.rs +++ b/crates/storage/src/structured_storage.rs @@ -3,7 +3,8 @@ use crate::{ blueprint::{ - Blueprint, + BlueprintInspect, + BlueprintMutate, SupportsBatching, SupportsMerkle, }, @@ -12,15 +13,29 @@ use crate::{ Encode, Encoder, }, + iter::{ + BoxedIter, + IterDirection, + IterableStore, + }, kv_store::{ BatchOperations, - KeyValueStore, + KVItem, + KeyValueInspect, + KeyValueMutate, StorageColumn, + Value, + WriteOperation, + }, + transactional::{ + Changes, + Modifiable, }, Error as StorageError, Mappable, MerkleRoot, MerkleRootStorage, + Result as StorageResult, StorageBatchMutate, StorageInspect, StorageMutate, @@ -58,92 +73,191 @@ pub trait TableWithBlueprint: Mappable + Sized { /// The wrapper around the key-value storage that implements the storage traits for the tables /// with blueprint. 
-#[derive(Clone, Debug)] +#[derive(Default, Debug, Clone)] pub struct StructuredStorage { - pub(crate) storage: S, + pub(crate) inner: S, } impl StructuredStorage { /// Creates a new instance of the structured storage. pub fn new(storage: S) -> Self { - Self { storage } + Self { inner: storage } } } impl AsRef for StructuredStorage { fn as_ref(&self) -> &S { - &self.storage + &self.inner } } impl AsMut for StructuredStorage { fn as_mut(&mut self) -> &mut S { - &mut self.storage + &mut self.inner + } +} + +impl KeyValueInspect for StructuredStorage +where + S: KeyValueInspect, +{ + type Column = S::Column; + + fn exists(&self, key: &[u8], column: Self::Column) -> StorageResult { + self.inner.exists(key, column) + } + + fn size_of_value( + &self, + key: &[u8], + column: Self::Column, + ) -> StorageResult> { + self.inner.size_of_value(key, column) + } + + fn get(&self, key: &[u8], column: Self::Column) -> StorageResult> { + self.inner.get(key, column) + } + + fn read( + &self, + key: &[u8], + column: Self::Column, + buf: &mut [u8], + ) -> StorageResult> { + self.inner.read(key, column, buf) + } +} + +impl KeyValueMutate for StructuredStorage +where + S: KeyValueMutate, +{ + fn put( + &mut self, + key: &[u8], + column: Self::Column, + value: Value, + ) -> StorageResult<()> { + self.inner.put(key, column, value) + } + + fn replace( + &mut self, + key: &[u8], + column: Self::Column, + value: Value, + ) -> StorageResult> { + self.inner.replace(key, column, value) + } + + fn write( + &mut self, + key: &[u8], + column: Self::Column, + buf: &[u8], + ) -> StorageResult { + self.inner.write(key, column, buf) + } + + fn take(&mut self, key: &[u8], column: Self::Column) -> StorageResult> { + self.inner.take(key, column) + } + + fn delete(&mut self, key: &[u8], column: Self::Column) -> StorageResult<()> { + self.inner.delete(key, column) + } +} + +impl BatchOperations for StructuredStorage +where + S: BatchOperations, +{ + fn batch_write(&mut self, column: Self::Column, entries: 
I) -> StorageResult<()> + where + I: Iterator, WriteOperation)>, + { + self.inner.batch_write(column, entries) + } +} + +impl IterableStore for StructuredStorage +where + S: IterableStore, +{ + fn iter_store( + &self, + column: Self::Column, + prefix: Option<&[u8]>, + start: Option<&[u8]>, + direction: IterDirection, + ) -> BoxedIter { + self.inner.iter_store(column, prefix, start, direction) + } +} + +impl Modifiable for StructuredStorage +where + S: Modifiable, +{ + fn commit_changes(&mut self, changes: Changes) -> StorageResult<()> { + self.inner.commit_changes(changes) } } impl StorageInspect for StructuredStorage where - S: KeyValueStore, - M: Mappable + TableWithBlueprint, - M::Blueprint: Blueprint, + S: KeyValueInspect, + M: TableWithBlueprint, + M::Blueprint: BlueprintInspect>, { type Error = StorageError; fn get(&self, key: &M::Key) -> Result>, Self::Error> { - ::Blueprint::get(&self.storage, key, M::column()) + ::Blueprint::get(self, key, M::column()) .map(|value| value.map(Cow::Owned)) } fn contains_key(&self, key: &M::Key) -> Result { - ::Blueprint::exists(&self.storage, key, M::column()) + ::Blueprint::exists(self, key, M::column()) } } impl StorageMutate for StructuredStorage where - S: KeyValueStore, - M: Mappable + TableWithBlueprint, - M::Blueprint: Blueprint, + S: KeyValueMutate, + M: TableWithBlueprint, + M::Blueprint: BlueprintMutate>, { fn insert( &mut self, key: &M::Key, value: &M::Value, ) -> Result, Self::Error> { - ::Blueprint::replace( - &mut self.storage, - key, - M::column(), - value, - ) + ::Blueprint::replace(self, key, M::column(), value) } fn remove(&mut self, key: &M::Key) -> Result, Self::Error> { - ::Blueprint::take(&mut self.storage, key, M::column()) + ::Blueprint::take(self, key, M::column()) } } impl StorageSize for StructuredStorage where - S: KeyValueStore, - M: Mappable + TableWithBlueprint, - M::Blueprint: Blueprint, + S: KeyValueInspect, + M: TableWithBlueprint, + M::Blueprint: BlueprintInspect>, { fn size_of_value(&self, 
key: &M::Key) -> Result, Self::Error> { - ::Blueprint::size_of_value( - &self.storage, - key, - M::column(), - ) + ::Blueprint::size_of_value(self, key, M::column()) } } impl StorageBatchMutate for StructuredStorage where S: BatchOperations, - M: Mappable + TableWithBlueprint, - M::Blueprint: SupportsBatching, + M: TableWithBlueprint, + M::Blueprint: SupportsBatching>, { fn init_storage<'a, Iter>(&mut self, set: Iter) -> Result<(), Self::Error> where @@ -151,7 +265,7 @@ where M::Key: 'a, M::Value: 'a, { - ::Blueprint::init(&mut self.storage, M::column(), set) + ::Blueprint::init(self, M::column(), set) } fn insert_batch<'a, Iter>(&mut self, set: Iter) -> Result<(), Self::Error> @@ -160,7 +274,7 @@ where M::Key: 'a, M::Value: 'a, { - ::Blueprint::insert(&mut self.storage, M::column(), set) + ::Blueprint::insert(self, M::column(), set) } fn remove_batch<'a, Iter>(&mut self, set: Iter) -> Result<(), Self::Error> @@ -168,35 +282,38 @@ where Iter: 'a + Iterator, M::Key: 'a, { - ::Blueprint::remove(&mut self.storage, M::column(), set) + ::Blueprint::remove(self, M::column(), set) } } impl MerkleRootStorage for StructuredStorage where - S: KeyValueStore, - M: Mappable + TableWithBlueprint, - M::Blueprint: SupportsMerkle, + S: KeyValueInspect, + M: TableWithBlueprint, + M::Blueprint: SupportsMerkle>, { fn root(&self, key: &Key) -> Result { - ::Blueprint::root(&self.storage, key) + ::Blueprint::root(self, key) } } impl StorageRead for StructuredStorage where - S: KeyValueStore, + S: KeyValueInspect, M: Mappable + TableWithBlueprint, - M::Blueprint: Blueprint, + M::Blueprint: BlueprintInspect, ValueCodec = Raw>, { fn read( &self, key: &::Key, buf: &mut [u8], ) -> Result, Self::Error> { - let key_encoder = >::KeyCodec::encode(key); + let key_encoder = + >>::KeyCodec::encode( + key, + ); let key_bytes = key_encoder.as_bytes(); - self.storage + self.inner .read(key_bytes.as_ref(), ::column(), buf) } @@ -204,9 +321,12 @@ where &self, key: &::Key, ) -> Result>, Self::Error> { - 
let key_encoder = >::KeyCodec::encode(key); + let key_encoder = + >>::KeyCodec::encode( + key, + ); let key_bytes = key_encoder.as_bytes(); - self.storage + self.inner .get(key_bytes.as_ref(), ::column()) // TODO: Return `Value` instead of cloned `Vec`. .map(|value| value.map(|value| value.deref().clone())) @@ -215,21 +335,16 @@ where impl StorageWrite for StructuredStorage where - S: KeyValueStore, + S: KeyValueMutate, M: TableWithBlueprint, - M::Blueprint: Blueprint, + M::Blueprint: BlueprintMutate, ValueCodec = Raw>, // TODO: Add new methods to the `Blueprint` that allows work with bytes directly // without deserialization into `OwnedValue`. M::OwnedValue: Into>, { fn write(&mut self, key: &M::Key, buf: &[u8]) -> Result { - ::Blueprint::put( - &mut self.storage, - key, - M::column(), - buf, - ) - .map(|_| buf.len()) + ::Blueprint::put(self, key, M::column(), buf) + .map(|_| buf.len()) } fn replace( @@ -238,24 +353,16 @@ where buf: &[u8], ) -> Result<(usize, Option>), Self::Error> { let bytes_written = buf.len(); - let prev = ::Blueprint::replace( - &mut self.storage, - key, - M::column(), - buf, - )? - .map(|prev| prev.into()); + let prev = + ::Blueprint::replace(self, key, M::column(), buf)? + .map(|prev| prev.into()); let result = (bytes_written, prev); Ok(result) } fn take(&mut self, key: &M::Key) -> Result>, Self::Error> { - let take = ::Blueprint::take( - &mut self.storage, - key, - M::column(), - )? - .map(|value| value.into()); + let take = ::Blueprint::take(self, key, M::column())? 
+ .map(|value| value.into()); Ok(take) } } @@ -264,80 +371,50 @@ where #[cfg(feature = "test-helpers")] pub mod test { use crate as fuel_core_storage; - use crate::kv_store::StorageColumn; + use crate::kv_store::{ + KeyValueInspect, + StorageColumn, + }; use fuel_core_storage::{ - kv_store::{ - BatchOperations, - KeyValueStore, - Value, - }, + kv_store::Value, Result as StorageResult, }; - use std::{ - cell::RefCell, - collections::HashMap, - }; + use std::collections::HashMap; - type Storage = RefCell), Vec>>; + type Storage = HashMap<(u32, Vec), Value>; /// The in-memory storage for testing purposes. #[derive(Debug, PartialEq, Eq)] pub struct InMemoryStorage { - storage: Storage, + pub(crate) storage: Storage, _marker: core::marker::PhantomData, } + impl InMemoryStorage { + /// Returns the inner storage. + pub fn storage(&self) -> &Storage { + &self.storage + } + } + impl Default for InMemoryStorage { fn default() -> Self { Self { - storage: Storage::default(), + storage: Default::default(), _marker: Default::default(), } } } - impl KeyValueStore for InMemoryStorage + impl KeyValueInspect for InMemoryStorage where Column: StorageColumn, { type Column = Column; - fn write( - &self, - key: &[u8], - column: Self::Column, - buf: &[u8], - ) -> StorageResult { - let write = buf.len(); - self.storage - .borrow_mut() - .insert((column.id(), key.to_vec()), buf.to_vec()); - Ok(write) - } - - fn delete(&self, key: &[u8], column: Self::Column) -> StorageResult<()> { - self.storage - .borrow_mut() - .remove(&(column.id(), key.to_vec())); - Ok(()) - } - fn get(&self, key: &[u8], column: Self::Column) -> StorageResult> { - Ok(self - .storage - .borrow_mut() - .get(&(column.id(), key.to_vec())) - .map(|v| v.clone().into())) - } - } - - impl BatchOperations for InMemoryStorage - where - Column: StorageColumn, - { - fn delete_all(&self, column: Self::Column) -> StorageResult<()> { - self.storage.borrow_mut().retain(|k, _| k.0 != column.id()); - Ok(()) + let value = 
self.storage.get(&(column.id(), key.to_vec())).cloned(); + Ok(value) } } } diff --git a/crates/storage/src/structured_storage/blocks.rs b/crates/storage/src/structured_storage/blocks.rs index a7edac6cf92..b1720c12715 100644 --- a/crates/storage/src/structured_storage/blocks.rs +++ b/crates/storage/src/structured_storage/blocks.rs @@ -52,10 +52,10 @@ mod tests { use crate::{ structured_storage::{ test::InMemoryStorage, - StructuredStorage, TableWithBlueprint, }, tables::FuelBlocks, + transactional::ReadTransaction, StorageAsMut, StorageMutate, }; @@ -85,9 +85,9 @@ mod tests { #[test_case::test_case(&[0, 2, 5, 7, 11]; "five non-sequential blocks starting from height 0")] #[test_case::test_case(&[100, 102, 105, 107, 111]; "five non-sequential blocks starting from height 100")] fn can_get_merkle_root_of_inserted_blocks(heights: &[u32]) { - let mut storage = + let storage = InMemoryStorage::<::Column>::default(); - let mut database = StructuredStorage::new(&mut storage); + let mut storage = storage.read_transaction(); let blocks = heights .iter() .copied() @@ -108,7 +108,7 @@ mod tests { // metadata, including a new root. 
for block in &blocks { StorageMutate::::insert( - &mut database, + &mut storage, block.header().height(), &block.compress(&ChainId::default()), ) @@ -124,7 +124,7 @@ mod tests { // Check that root for the version is present let last_block = blocks.last().unwrap(); - let actual_root = database + let actual_root = storage .storage::() .root(last_block.header().height()) .expect("root to exist") @@ -140,7 +140,7 @@ mod tests { let storage = InMemoryStorage::<::Column>::default(); - let database = StructuredStorage::new(&storage); + let database = storage.read_transaction(); // check that root is not present let err = database diff --git a/crates/storage/src/structured_storage/state.rs b/crates/storage/src/structured_storage/state.rs index 2c6a8a55682..68d5d79ef65 100644 --- a/crates/storage/src/structured_storage/state.rs +++ b/crates/storage/src/structured_storage/state.rs @@ -98,10 +98,8 @@ mod test { mod structured_storage_tests { use crate::{ column::Column, - structured_storage::{ - test::InMemoryStorage, - StructuredStorage, - }, + structured_storage::test::InMemoryStorage, + transactional::ReadTransaction, StorageAsMut, StorageMutate, StorageWrite, @@ -125,7 +123,6 @@ mod structured_storage_tests { #[test] fn storage_write__write__generates_the_same_merkle_root_as_storage_insert() { type Storage = InMemoryStorage; - type Structure = StructuredStorage; let mut rng = StdRng::seed_from_u64(1234); @@ -140,18 +137,14 @@ mod structured_storage_tests { // When let merkle_root_write = { let storage = Storage::default(); - let mut structure = StructuredStorage::new(storage); + let mut structure = storage.read_transaction(); let mut merkle_root = structure .storage::() .root(&contract_id) .expect("Unable to retrieve Merkle root"); for key in keys.iter() { - >::write( - &mut structure, - key, - &value, - ) - .expect("Unable to write storage"); + <_ as StorageWrite>::write(&mut structure, key, &value) + .expect("Unable to write storage"); let new_merkle_root = structure 
.storage::() .root(&contract_id) @@ -169,14 +162,10 @@ mod structured_storage_tests { // Then let merkle_root_insert = { let storage = Storage::default(); - let mut structure = StructuredStorage::new(storage); + let mut structure = storage.read_transaction(); for key in keys.iter() { - >::insert( - &mut structure, - key, - &value, - ) - .expect("Unable to write storage"); + <_ as StorageMutate>::insert(&mut structure, key, &value) + .expect("Unable to write storage"); } structure @@ -191,7 +180,6 @@ mod structured_storage_tests { #[test] fn storage_write__replace__generates_the_same_merkle_root_as_storage_insert() { type Storage = InMemoryStorage; - type Structure = StructuredStorage; let mut rng = StdRng::seed_from_u64(1234); @@ -206,18 +194,14 @@ mod structured_storage_tests { // When let merkle_root_replace = { let storage = Storage::default(); - let mut structure = StructuredStorage::new(storage); + let mut structure = storage.read_transaction(); let mut merkle_root = structure .storage::() .root(&contract_id) .expect("Unable to retrieve Merkle root"); for key in keys.iter() { - >::replace( - &mut structure, - key, - &value, - ) - .expect("Unable to write storage"); + <_ as StorageWrite>::replace(&mut structure, key, &value) + .expect("Unable to write storage"); let new_merkle_root = structure .storage::() .root(&contract_id) @@ -235,14 +219,10 @@ mod structured_storage_tests { // Then let merkle_root_insert = { let storage = Storage::default(); - let mut structure = StructuredStorage::new(storage); + let mut structure = storage.read_transaction(); for key in keys.iter() { - >::insert( - &mut structure, - key, - &value, - ) - .expect("Unable to write storage"); + <_ as StorageMutate>::insert(&mut structure, key, &value) + .expect("Unable to write storage"); } structure @@ -257,7 +237,6 @@ mod structured_storage_tests { #[test] fn storage_write__take__generates_the_same_merkle_root_as_storage_remove() { type Storage = InMemoryStorage; - type Structure = 
StructuredStorage; let mut rng = StdRng::seed_from_u64(1234); @@ -270,18 +249,14 @@ mod structured_storage_tests { let value = vec![0u8; 32]; let storage = Storage::default(); - let mut structure = StructuredStorage::new(storage); + let mut structure = storage.read_transaction(); let mut merkle_root = structure .storage::() .root(&contract_id) .expect("Unable to retrieve Merkle root"); for key in keys.iter() { - >::replace( - &mut structure, - key, - &value, - ) - .expect("Unable to write storage"); + <_ as StorageWrite>::replace(&mut structure, key, &value) + .expect("Unable to write storage"); let new_merkle_root = structure .storage::() @@ -296,14 +271,10 @@ mod structured_storage_tests { let key = ContractsStateKey::from((&contract_id, &state_key)); let merkle_root_replace = { - >::write( - &mut structure, - &key, - &value, - ) - .expect("Unable to write storage"); - - >::take(&mut structure, &key) + <_ as StorageWrite>::write(&mut structure, &key, &value) + .expect("Unable to write storage"); + + <_ as StorageWrite>::take(&mut structure, &key) .expect("Unable to take value from storage"); structure @@ -314,12 +285,8 @@ mod structured_storage_tests { // Then let merkle_root_remove = { - >::write( - &mut structure, - &key, - &value, - ) - .expect("Unable to write storage"); + <_ as StorageWrite>::write(&mut structure, &key, &value) + .expect("Unable to write storage"); structure .storage::() diff --git a/crates/storage/src/test_helpers.rs b/crates/storage/src/test_helpers.rs index 40b82ad0054..9e11db66f08 100644 --- a/crates/storage/src/test_helpers.rs +++ b/crates/storage/src/test_helpers.rs @@ -2,9 +2,8 @@ use crate::{ transactional::{ - StorageTransaction, - Transaction, - Transactional, + Changes, + Modifiable, }, Error as StorageError, Mappable, @@ -15,36 +14,6 @@ use crate::{ StorageMutate, }; -/// The empty transactional storage. 
-#[derive(Default, Clone, Copy, Debug)] -pub struct EmptyStorage; - -impl AsRef for EmptyStorage { - fn as_ref(&self) -> &EmptyStorage { - self - } -} - -impl AsMut for EmptyStorage { - fn as_mut(&mut self) -> &mut EmptyStorage { - self - } -} - -impl Transactional for EmptyStorage { - type Storage = EmptyStorage; - - fn transaction(&self) -> StorageTransaction { - StorageTransaction::new(EmptyStorage) - } -} - -impl Transaction for EmptyStorage { - fn commit(&mut self) -> StorageResult<()> { - Ok(()) - } -} - /// The trait is used to provide a generic mocked implementation for all possible `StorageInspect`, /// `StorageMutate`, and `MerkleRootStorage` traits. pub trait MockStorageMethods { @@ -77,12 +46,21 @@ pub trait MockStorageMethods { ) -> StorageResult; } +/// The mocked storage is useful to test functionality build on top of the `StorageInspect`, +/// `StorageMutate`, and `MerkleRootStorage` traits. +#[derive(Default, Debug, Clone)] +pub struct MockStorage { + /// The mocked storage. + pub storage: Storage, + /// Additional data to be used in the tests. + pub data: Data, +} + mockall::mock! { - /// The mocked storage is useful to test functionality build on top of the `StorageInspect`, - /// `StorageMutate`, and `MerkleRootStorage` traits. - pub Storage {} + /// The basic mocked storage + pub Basic {} - impl MockStorageMethods for Storage { + impl MockStorageMethods for Basic { fn get( &self, key: &M::Key, @@ -104,42 +82,15 @@ mockall::mock! { fn root(&self, key: &Key) -> StorageResult; } - impl Transactional for Storage { - type Storage = Self; - - fn transaction(&self) -> StorageTransaction; - } - - impl Transaction for Storage { - fn commit(&mut self) -> StorageResult<()>; - } -} - -impl MockStorage { - /// Packs `self` into one more `MockStorage` and implements `Transactional` trait by this move. 
- pub fn into_transactional(self) -> MockStorage { - let mut db = MockStorage::default(); - db.expect_transaction() - .return_once(move || StorageTransaction::new(self)); - db - } -} - -impl AsRef for MockStorage { - fn as_ref(&self) -> &MockStorage { - self - } -} - -impl AsMut for MockStorage { - fn as_mut(&mut self) -> &mut MockStorage { - self + impl Modifiable for Basic { + fn commit_changes(&mut self, changes: Changes) -> StorageResult<()>; } } -impl StorageInspect for MockStorage +impl StorageInspect for MockStorage where M: Mappable + 'static, + Storage: MockStorageMethods, { type Error = StorageError; @@ -147,37 +98,39 @@ where &self, key: &M::Key, ) -> StorageResult>> { - MockStorageMethods::get::(self, key) + MockStorageMethods::get::(&self.storage, key) } fn contains_key(&self, key: &M::Key) -> StorageResult { - MockStorageMethods::contains_key::(self, key) + MockStorageMethods::contains_key::(&self.storage, key) } } -impl StorageMutate for MockStorage +impl StorageMutate for MockStorage where M: Mappable + 'static, + Storage: MockStorageMethods, { fn insert( &mut self, key: &M::Key, value: &M::Value, ) -> StorageResult> { - MockStorageMethods::insert::(self, key, value) + MockStorageMethods::insert::(&mut self.storage, key, value) } fn remove(&mut self, key: &M::Key) -> StorageResult> { - MockStorageMethods::remove::(self, key) + MockStorageMethods::remove::(&mut self.storage, key) } } -impl MerkleRootStorage for MockStorage +impl MerkleRootStorage for MockStorage where Key: 'static, M: Mappable + 'static, + Storage: MockStorageMethods, { fn root(&self, key: &Key) -> StorageResult { - MockStorageMethods::root::(self, key) + MockStorageMethods::root::(&self.storage, key) } } diff --git a/crates/storage/src/transactional.rs b/crates/storage/src/transactional.rs index f533c9a48f3..4351fde6d3b 100644 --- a/crates/storage/src/transactional.rs +++ b/crates/storage/src/transactional.rs @@ -1,96 +1,810 @@ //! 
The primitives to work with storage in transactional mode. -use crate::Result as StorageResult; +use crate::{ + kv_store::{ + BatchOperations, + KeyValueInspect, + KeyValueMutate, + StorageColumn, + Value, + WriteOperation, + }, + structured_storage::StructuredStorage, + Result as StorageResult, +}; +use std::{ + collections::{ + btree_map::Entry, + BTreeMap, + HashMap, + }, + sync::Arc, +}; -#[cfg_attr(feature = "test-helpers", mockall::automock(type Storage = crate::test_helpers::EmptyStorage;))] -/// The types is transactional and may create `StorageTransaction`. -pub trait Transactional { - /// The storage used when creating the transaction. - type Storage: ?Sized; - /// Creates and returns the storage transaction. - fn transaction(&self) -> StorageTransaction; +#[cfg(feature = "test-helpers")] +use crate::{ + iter::{ + BoxedIter, + IterDirection, + IterableStore, + }, + kv_store::KVItem, +}; + +/// Provides a view of the storage at the given height. +/// It guarantees to be atomic, meaning the view is immutable to outside modifications. +pub trait AtomicView: Send + Sync { + /// The type of the storage view. + type View; + + /// The type used by the storage to track the commitments at a specific height. + type Height; + + /// Returns the latest block height. + fn latest_height(&self) -> Option; + + /// Returns the view of the storage at the given `height`. + fn view_at(&self, height: &Self::Height) -> StorageResult; + + /// Returns the view of the storage for the latest block height. + fn latest_view(&self) -> Self::View; +} + +/// Storage transaction on top of the storage. +pub type StorageTransaction = StructuredStorage>; + +/// In memory transaction accumulates `Changes` over the storage. +#[derive(Default, Debug, Clone)] +pub struct InMemoryTransaction { + pub(crate) changes: Changes, + pub(crate) policy: ConflictPolicy, + pub(crate) storage: S, +} + +impl StorageTransaction { + /// Creates a new instance of the storage transaction. 
+ pub fn transaction(storage: S, policy: ConflictPolicy, changes: Changes) -> Self { + StructuredStorage::new(InMemoryTransaction { + changes, + policy, + storage, + }) + } + + /// Creates a new instance of the structured storage with a `ConflictPolicy`. + pub fn with_policy(self, policy: ConflictPolicy) -> Self { + StructuredStorage::new(InMemoryTransaction { + changes: self.inner.changes, + policy, + storage: self.inner.storage, + }) + } + + /// Creates a new instance of the structured storage with a `Changes`. + pub fn with_changes(self, changes: Changes) -> Self { + StructuredStorage::new(InMemoryTransaction { + changes, + policy: self.inner.policy, + storage: self.inner.storage, + }) + } + + /// Returns the changes to the storage. + pub fn into_changes(self) -> Changes { + self.inner.changes + } + + /// Resets the changes to the storage. + pub fn reset_changes(&mut self) { + self.inner.changes = Default::default(); + } +} + +/// The policy to resolve the conflict during committing of the changes. +#[derive(Default, Debug, Clone, Copy, PartialEq, Eq)] +pub enum ConflictPolicy { + /// The transaction will fail if there is a conflict. + Fail, + /// The transaction will overwrite the conflicting data. + #[default] + Overwrite, +} + +/// The type is modifiable and may commit the changes into the storage. +#[impl_tools::autoimpl(for &mut T, Box)] +pub trait Modifiable { + /// Commits the changes into the storage. + fn commit_changes(&mut self, changes: Changes) -> StorageResult<()>; +} + +/// The type describing the list of changes to the storage. +pub type Changes = HashMap, WriteOperation>>; + +impl From> for Changes { + fn from(transaction: StorageTransaction) -> Self { + transaction.into_changes() + } +} + +/// The trait to convert the type into the storage transaction. +pub trait IntoTransaction: Sized { + /// Converts the type into the storage transaction consuming it. 
+ fn into_transaction(self) -> StorageTransaction; } -/// The type is storage transaction and holds uncommitted state. -pub trait Transaction: - AsRef + AsMut + Send + Sync +impl IntoTransaction for S +where + S: KeyValueInspect, { - /// Commits the pending state changes into the storage. - fn commit(&mut self) -> StorageResult<()>; + fn into_transaction(self) -> StorageTransaction { + StorageTransaction::transaction( + self, + ConflictPolicy::Overwrite, + Default::default(), + ) + } } -/// The storage transaction for the `Storage` type. -pub struct StorageTransaction { - transaction: Box>, +/// Creates a new instance of the storage read transaction. +pub trait ReadTransaction { + /// Returns the read transaction without ability to commit the changes. + fn read_transaction(&self) -> StorageTransaction<&Self>; } -impl StorageTransaction { - /// Create a new storage transaction. - pub fn new + 'static>(t: T) -> Self { - Self { - transaction: Box::new(t), - } +impl ReadTransaction for S +where + S: KeyValueInspect, +{ + fn read_transaction(&self) -> StorageTransaction<&Self> { + StorageTransaction::transaction( + self, + ConflictPolicy::Overwrite, + Default::default(), + ) } } -impl Transactional for StorageTransaction { - type Storage = Storage::Storage; +/// Creates a new instance of the storage write transaction. +pub trait WriteTransaction { + /// Returns the write transaction that can commit the changes. 
+ fn write_transaction(&mut self) -> StorageTransaction<&mut Self>; +} - fn transaction(&self) -> StorageTransaction { - self.as_ref().transaction() +impl WriteTransaction for S +where + S: KeyValueInspect + Modifiable, +{ + fn write_transaction(&mut self) -> StorageTransaction<&mut Self> { + StorageTransaction::transaction( + self, + ConflictPolicy::Overwrite, + Default::default(), + ) } } -impl Transaction for StorageTransaction { - fn commit(&mut self) -> StorageResult<()> { - self.transaction.commit() +impl StorageTransaction +where + Storage: Modifiable, +{ + /// Commits the changes into the storage. + pub fn commit(mut self) -> StorageResult { + let changes = core::mem::take(&mut self.inner.changes); + self.inner.storage.commit_changes(changes)?; + Ok(self.inner.storage) + } +} + +impl Modifiable for InMemoryTransaction { + fn commit_changes(&mut self, changes: Changes) -> StorageResult<()> { + for (column, value) in changes.into_iter() { + let btree = self.changes.entry(column).or_default(); + for (k, v) in value { + match &self.policy { + ConflictPolicy::Fail => { + let entry = btree.entry(k); + + match entry { + Entry::Occupied(occupied) => { + return Err(anyhow::anyhow!( + "Conflicting operation {v:?} for the {:?}", + occupied.key() + ) + .into()); + } + Entry::Vacant(vacant) => { + vacant.insert(v); + } + } + } + ConflictPolicy::Overwrite => { + btree.insert(k, v); + } + } + } + } + Ok(()) } } -impl core::fmt::Debug - for StorageTransaction +impl KeyValueInspect for InMemoryTransaction +where + Column: StorageColumn, + S: KeyValueInspect, { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("StorageTransaction") - .field("database", &self.transaction.as_ref().as_ref()) - .finish() + type Column = Column; + + fn get(&self, key: &[u8], column: Self::Column) -> StorageResult> { + let k = key.to_vec(); + if let Some(operation) = self + .changes + .get(&column.id()) + .and_then(|btree| btree.get(&k)) + { + match 
operation { + WriteOperation::Insert(value) => Ok(Some(value.clone())), + WriteOperation::Remove => Ok(None), + } + } else { + self.storage.get(key, column) + } } } -impl AsRef for StorageTransaction { - fn as_ref(&self) -> &Storage { - (*self.transaction).as_ref() +impl KeyValueMutate for InMemoryTransaction +where + Column: StorageColumn, + S: KeyValueInspect, +{ + fn put( + &mut self, + key: &[u8], + column: Self::Column, + value: Value, + ) -> StorageResult<()> { + let k = key.to_vec(); + self.changes + .entry(column.id()) + .or_default() + .insert(k, WriteOperation::Insert(value)); + Ok(()) + } + + fn replace( + &mut self, + key: &[u8], + column: Self::Column, + value: Value, + ) -> StorageResult> { + let k = key.to_vec(); + let entry = self.changes.entry(column.id()).or_default().entry(k); + + match entry { + Entry::Occupied(mut occupied) => { + let old = occupied.insert(WriteOperation::Insert(value)); + + match old { + WriteOperation::Insert(value) => Ok(Some(value)), + WriteOperation::Remove => Ok(None), + } + } + Entry::Vacant(vacant) => { + vacant.insert(WriteOperation::Insert(value)); + self.storage.get(key, column) + } + } + } + + fn write( + &mut self, + key: &[u8], + column: Self::Column, + buf: &[u8], + ) -> StorageResult { + let k = key.to_vec(); + self.changes + .entry(column.id()) + .or_default() + .insert(k, WriteOperation::Insert(Arc::new(buf.to_vec()))); + Ok(buf.len()) + } + + fn take(&mut self, key: &[u8], column: Self::Column) -> StorageResult> { + let k = key.to_vec(); + let entry = self.changes.entry(column.id()).or_default().entry(k); + + match entry { + Entry::Occupied(mut occupied) => { + let old = occupied.insert(WriteOperation::Remove); + + match old { + WriteOperation::Insert(value) => Ok(Some(value)), + WriteOperation::Remove => Ok(None), + } + } + Entry::Vacant(vacant) => { + vacant.insert(WriteOperation::Remove); + self.storage.get(key, column) + } + } + } + + fn delete(&mut self, key: &[u8], column: Self::Column) -> 
StorageResult<()> { + let k = key.to_vec(); + self.changes + .entry(column.id()) + .or_default() + .insert(k, WriteOperation::Remove); + Ok(()) } } -impl AsMut for StorageTransaction { - fn as_mut(&mut self) -> &mut Storage { - (*self.transaction).as_mut() +impl BatchOperations for InMemoryTransaction +where + Column: StorageColumn, + S: KeyValueInspect, +{ + fn batch_write(&mut self, column: Column, entries: I) -> StorageResult<()> + where + I: Iterator, WriteOperation)>, + { + let btree = self.changes.entry(column.id()).or_default(); + entries.for_each(|(key, operation)| { + btree.insert(key, operation); + }); + Ok(()) } } -impl StorageTransaction { - /// Committing of the state consumes `Self`. - pub fn commit(mut self) -> StorageResult<()> { - self.transaction.commit() +// The `IterableStore` should only be implemented for the actual storage, +// not the storage transaction, to maximize the performance. +// +// Another reason is that only actual storage with finalized updates should be +// used to iterate over its entries to avoid inconsistent results. +// +// We implement `IterableStore` for `StorageTransactionInner` only to allow +// using it in the tests and benchmarks as a type(not even implementation of it). +#[cfg(feature = "test-helpers")] +impl IterableStore for InMemoryTransaction +where + S: IterableStore, +{ + fn iter_store( + &self, + _: Self::Column, + _: Option<&[u8]>, + _: Option<&[u8]>, + _: IterDirection, + ) -> BoxedIter { + unimplemented!() } } -/// Provides a view of the storage at the given height. -/// It guarantees to be atomic, meaning the view is immutable to outside modifications. -pub trait AtomicView: Send + Sync { - /// The type of the storage view. - type View; +#[cfg(feature = "test-helpers")] +mod test { + use super::*; + use crate::structured_storage::test::InMemoryStorage; + #[cfg(test)] + use crate::{ + tables::Messages, + StorageAsMut, + }; - /// The type used by the storage to track the commitments at a specific height. 
- type Height; + impl Modifiable for InMemoryStorage { + fn commit_changes(&mut self, changes: Changes) -> StorageResult<()> { + for (column, value) in changes.into_iter() { + for (key, value) in value { + match value { + WriteOperation::Insert(value) => { + self.storage.insert((column, key), value); + } + WriteOperation::Remove => { + self.storage.remove(&(column, key)); + } + } + } + } + Ok(()) + } + } - /// Returns the latest block height. - fn latest_height(&self) -> Self::Height; + #[test] + fn committing_changes_modifies_underlying_storage() { + let mut storage = InMemoryStorage::default(); + let mut transaction = storage.write_transaction(); - /// Returns the view of the storage at the given `height`. - fn view_at(&self, height: &Self::Height) -> StorageResult; + let mut sub_transaction = transaction.write_transaction(); - /// Returns the view of the storage for the latest block height. - fn latest_view(&self) -> Self::View; + sub_transaction + .storage_as_mut::() + .insert(&Default::default(), &Default::default()) + .expect("Should work"); + + sub_transaction.commit().unwrap(); + transaction.commit().unwrap(); + assert_eq!(storage.storage().len(), 1); + } + + #[test] + fn committing_changes_from_concurrent_independent_transactions_works() { + let storage = InMemoryStorage::default(); + let mut transactions = + storage.into_transaction().with_policy(ConflictPolicy::Fail); + + let mut sub_transaction1 = transactions.read_transaction(); + let mut sub_transaction2 = transactions.read_transaction(); + + sub_transaction1 + .storage_as_mut::() + .insert(&[0u8; 32].into(), &Default::default()) + .expect("Should work"); + sub_transaction2 + .storage_as_mut::() + .insert(&[1u8; 32].into(), &Default::default()) + .expect("Should work"); + + let changes1 = sub_transaction1.into(); + let changes2 = sub_transaction2.into(); + transactions.commit_changes(changes1).unwrap(); + transactions.commit_changes(changes2).unwrap(); + } + + #[test] + fn 
committing_changes_from_concurrent_overlapping_transactions_fails() { + let storage = InMemoryStorage::default(); + let mut transactions = + storage.into_transaction().with_policy(ConflictPolicy::Fail); + + let mut sub_transaction1 = transactions.read_transaction(); + let mut sub_transaction2 = transactions.read_transaction(); + + sub_transaction1 + .storage_as_mut::() + .insert(&[0u8; 32].into(), &Default::default()) + .expect("Should work"); + sub_transaction2 + .storage_as_mut::() + .insert(&[0u8; 32].into(), &Default::default()) + .expect("Should work"); + + let changes1 = sub_transaction1.into(); + let changes2 = sub_transaction2.into(); + transactions.commit_changes(changes1).unwrap(); + transactions + .commit_changes(changes2) + .expect_err("Should fails because of the modification for the same key"); + } + + #[cfg(test)] + mod key_value_functionality { + use super::*; + use crate::column::Column; + + #[test] + fn get_returns_from_view() { + // setup + let storage = InMemoryStorage::::default(); + let mut view = storage.read_transaction(); + let key = vec![0xA, 0xB, 0xC]; + let expected = Arc::new(vec![1, 2, 3]); + view.put(&key, Column::Metadata, expected.clone()).unwrap(); + // test + let ret = view.get(&key, Column::Metadata).unwrap(); + // verify + assert_eq!(ret, Some(expected)) + } + + #[test] + fn get_returns_from_data_store_when_key_not_in_view() { + // setup + let mut storage = InMemoryStorage::::default(); + let mut write = storage.write_transaction(); + let key = vec![0xA, 0xB, 0xC]; + let expected = Arc::new(vec![1, 2, 3]); + write.put(&key, Column::Metadata, expected.clone()).unwrap(); + write.commit().unwrap(); + + let view = storage.read_transaction(); + // test + let ret = view.get(&key, Column::Metadata).unwrap(); + // verify + assert_eq!(ret, Some(expected)) + } + + #[test] + fn get_does_not_fetch_from_datastore_if_intentionally_deleted_from_view() { + // setup + let mut storage = InMemoryStorage::::default(); + let mut write = 
storage.write_transaction(); + let key = vec![0xA, 0xB, 0xC]; + let expected = Arc::new(vec![1, 2, 3]); + write.put(&key, Column::Metadata, expected.clone()).unwrap(); + write.commit().unwrap(); + + let mut view = storage.read_transaction(); + view.delete(&key, Column::Metadata).unwrap(); + // test + let ret = view.get(&key, Column::Metadata).unwrap(); + let original = storage.get(&key, Column::Metadata).unwrap(); + // verify + assert_eq!(ret, None); + // also ensure the original value is still intact and we aren't just passing + // through None from the data store + assert_eq!(original, Some(expected)) + } + + #[test] + fn can_insert_value_into_view() { + // setup + let mut storage = InMemoryStorage::::default(); + let mut write = storage.write_transaction(); + let expected = Arc::new(vec![1, 2, 3]); + write + .put(&[0xA, 0xB, 0xC], Column::Metadata, expected.clone()) + .unwrap(); + // test + let ret = write + .replace(&[0xA, 0xB, 0xC], Column::Metadata, Arc::new(vec![2, 4, 6])) + .unwrap(); + // verify + assert_eq!(ret, Some(expected)) + } + + #[test] + fn delete_value_from_view_returns_value() { + // setup + let mut storage = InMemoryStorage::::default(); + let mut write = storage.write_transaction(); + let key = vec![0xA, 0xB, 0xC]; + let expected = Arc::new(vec![1, 2, 3]); + write.put(&key, Column::Metadata, expected.clone()).unwrap(); + // test + let ret = write.take(&key, Column::Metadata).unwrap(); + let get = write.get(&key, Column::Metadata).unwrap(); + // verify + assert_eq!(ret, Some(expected)); + assert_eq!(get, None) + } + + #[test] + fn delete_returns_datastore_value_when_not_in_view() { + // setup + let mut storage = InMemoryStorage::::default(); + let mut write = storage.write_transaction(); + let key = vec![0xA, 0xB, 0xC]; + let expected = Arc::new(vec![1, 2, 3]); + write.put(&key, Column::Metadata, expected.clone()).unwrap(); + write.commit().unwrap(); + + let mut view = storage.read_transaction(); + // test + let ret = view.take(&key, 
Column::Metadata).unwrap(); + let get = view.get(&key, Column::Metadata).unwrap(); + // verify + assert_eq!(ret, Some(expected)); + assert_eq!(get, None) + } + + #[test] + fn delete_does_not_return_datastore_value_when_deleted_twice() { + // setup + let mut storage = InMemoryStorage::::default(); + let mut write = storage.write_transaction(); + let key = vec![0xA, 0xB, 0xC]; + let expected = Arc::new(vec![1, 2, 3]); + write.put(&key, Column::Metadata, expected.clone()).unwrap(); + write.commit().unwrap(); + + let mut view = storage.read_transaction(); + // test + let ret1 = view.take(&key, Column::Metadata).unwrap(); + let ret2 = view.take(&key, Column::Metadata).unwrap(); + let get = view.get(&key, Column::Metadata).unwrap(); + // verify + assert_eq!(ret1, Some(expected)); + assert_eq!(ret2, None); + assert_eq!(get, None) + } + + #[test] + fn exists_checks_view_values() { + // setup + let mut storage = InMemoryStorage::::default(); + let mut write = storage.write_transaction(); + let key = vec![0xA, 0xB, 0xC]; + let expected = Arc::new(vec![1, 2, 3]); + write.put(&key, Column::Metadata, expected).unwrap(); + // test + let ret = write.exists(&key, Column::Metadata).unwrap(); + // verify + assert!(ret) + } + + #[test] + fn exists_checks_data_store_when_not_in_view() { + // setup + let mut storage = InMemoryStorage::::default(); + let mut write = storage.write_transaction(); + let key = vec![0xA, 0xB, 0xC]; + let expected = Arc::new(vec![1, 2, 3]); + write.put(&key, Column::Metadata, expected).unwrap(); + write.commit().unwrap(); + + let view = storage.read_transaction(); + // test + let ret = view.exists(&key, Column::Metadata).unwrap(); + // verify + assert!(ret) + } + + #[test] + fn exists_does_not_check_data_store_after_intentional_removal_from_view() { + // setup + let mut storage = InMemoryStorage::::default(); + let mut write = storage.write_transaction(); + let key = vec![0xA, 0xB, 0xC]; + let expected = Arc::new(vec![1, 2, 3]); + write.put(&key, 
Column::Metadata, expected).unwrap(); + write.commit().unwrap(); + + let mut view = storage.read_transaction(); + view.delete(&key, Column::Metadata).unwrap(); + // test + let ret = view.exists(&key, Column::Metadata).unwrap(); + let original = storage.exists(&key, Column::Metadata).unwrap(); + // verify + assert!(!ret); + // also ensure the original value is still intact and we aren't just passing + // through None from the data store + assert!(original) + } + + #[test] + fn commit_applies_puts() { + // setup + let mut storage = InMemoryStorage::::default(); + let mut write = storage.write_transaction(); + let key = vec![0xA, 0xB, 0xC]; + let expected = Arc::new(vec![1, 2, 3]); + write.put(&key, Column::Metadata, expected.clone()).unwrap(); + // test + write.commit().unwrap(); + let ret = storage.get(&key, Column::Metadata).unwrap(); + // verify + assert_eq!(ret, Some(expected)) + } + + #[test] + fn commit_applies_deletes() { + // setup + let mut storage = InMemoryStorage::::default(); + let mut write = storage.write_transaction(); + let key = vec![0xA, 0xB, 0xC]; + let expected = Arc::new(vec![1, 2, 3]); + write.put(&key, Column::Metadata, expected).unwrap(); + write.commit().unwrap(); + + let mut view = storage.write_transaction(); + // test + view.delete(&key, Column::Metadata).unwrap(); + view.commit().unwrap(); + let ret = storage.get(&key, Column::Metadata).unwrap(); + // verify + assert_eq!(ret, None) + } + + #[test] + fn can_use_unit_value() { + let key = vec![0x00]; + + let mut storage = InMemoryStorage::::default(); + let mut write = storage.write_transaction(); + let expected = Arc::new(vec![]); + write.put(&key, Column::Metadata, expected.clone()).unwrap(); + + assert_eq!( + write.get(&key, Column::Metadata).unwrap().unwrap(), + expected + ); + + assert!(write.exists(&key, Column::Metadata).unwrap()); + + assert_eq!( + write.take(&key, Column::Metadata).unwrap().unwrap(), + expected + ); + + assert!(!write.exists(&key, Column::Metadata).unwrap()); + + 
write.commit().unwrap(); + + assert!(!storage.exists(&key, Column::Metadata).unwrap()); + + let mut storage = InMemoryStorage::::default(); + let mut write = storage.write_transaction(); + write.put(&key, Column::Metadata, expected.clone()).unwrap(); + write.commit().unwrap(); + + assert_eq!( + storage.get(&key, Column::Metadata).unwrap().unwrap(), + expected + ); + } + + #[test] + fn can_use_unit_key() { + let key: Vec = Vec::with_capacity(0); + + let mut storage = InMemoryStorage::::default(); + let mut write = storage.write_transaction(); + let expected = Arc::new(vec![]); + write.put(&key, Column::Metadata, expected.clone()).unwrap(); + + assert_eq!( + write.get(&key, Column::Metadata).unwrap().unwrap(), + expected + ); + + assert!(write.exists(&key, Column::Metadata).unwrap()); + + assert_eq!( + write.take(&key, Column::Metadata).unwrap().unwrap(), + expected + ); + + assert!(!write.exists(&key, Column::Metadata).unwrap()); + + write.commit().unwrap(); + + assert!(!storage.exists(&key, Column::Metadata).unwrap()); + + let mut storage = InMemoryStorage::::default(); + let mut write = storage.write_transaction(); + write.put(&key, Column::Metadata, expected.clone()).unwrap(); + write.commit().unwrap(); + + assert_eq!( + storage.get(&key, Column::Metadata).unwrap().unwrap(), + expected + ); + } + + #[test] + fn can_use_unit_key_and_value() { + let key: Vec = Vec::with_capacity(0); + + let mut storage = InMemoryStorage::::default(); + let mut write = storage.write_transaction(); + let expected = Arc::new(vec![]); + write.put(&key, Column::Metadata, expected.clone()).unwrap(); + + assert_eq!( + write.get(&key, Column::Metadata).unwrap().unwrap(), + expected + ); + + assert!(write.exists(&key, Column::Metadata).unwrap()); + + assert_eq!( + write.take(&key, Column::Metadata).unwrap().unwrap(), + expected + ); + + assert!(!write.exists(&key, Column::Metadata).unwrap()); + + write.commit().unwrap(); + + assert!(!storage.exists(&key, Column::Metadata).unwrap()); + + let 
mut storage = InMemoryStorage::::default(); + let mut write = storage.write_transaction(); + write.put(&key, Column::Metadata, expected.clone()).unwrap(); + write.commit().unwrap(); + + assert_eq!( + storage.get(&key, Column::Metadata).unwrap().unwrap(), + expected + ); + } + } } diff --git a/crates/types/src/services.rs b/crates/types/src/services.rs index 0cbaf04827c..aa5bc8ebd35 100644 --- a/crates/types/src/services.rs +++ b/crates/types/src/services.rs @@ -10,23 +10,21 @@ pub mod txpool; // TODO: Define a one common error for all services like -/// The uncommitted `Result` of some action with database transaction. +/// The uncommitted `Result` of some action with storage changes. /// The user should commit the result by itself. #[derive(Debug)] -pub struct Uncommitted { +#[must_use] +pub struct Uncommitted { /// The result of the action. result: Result, - /// The database transaction with not committed state. - database_transaction: DatabaseTransaction, + /// The storage changes. + changes: Changes, } -impl Uncommitted { +impl Uncommitted { /// Create a new instance of `Uncommitted`. - pub fn new(result: Result, database_transaction: DatabaseTransaction) -> Self { - Self { - result, - database_transaction, - } + pub fn new(result: Result, changes: Changes) -> Self { + Self { result, changes } } /// Returns a reference to the `Result`. @@ -34,13 +32,9 @@ impl Uncommitted { &self.result } - /// Return the result and database transaction. - /// - /// The caller can unpack the `Uncommitted`, apply some changes and pack it again into - /// `UncommittedResult`. Because `commit` of the database transaction consumes `self`, - /// after committing it is not possible create `Uncommitted`. - pub fn into(self) -> (Result, DatabaseTransaction) { - (self.result, self.database_transaction) + /// Return the result and storage changes. 
+ pub fn into(self) -> (Result, Changes) { + (self.result, self.changes) } /// Discards the database transaction and returns only the result of the action. @@ -48,8 +42,8 @@ impl Uncommitted { self.result } - /// Discards the result and return database transaction. - pub fn into_transaction(self) -> DatabaseTransaction { - self.database_transaction + /// Discards the result and return storage changes. + pub fn into_changes(self) -> Changes { + self.changes } } diff --git a/tests/tests/blocks.rs b/tests/tests/blocks.rs index 1a6f718a1a3..b9d5867f925 100644 --- a/tests/tests/blocks.rs +++ b/tests/tests/blocks.rs @@ -23,6 +23,7 @@ use fuel_core_storage::{ FuelBlocks, SealedBlockConsensus, }, + transactional::WriteTransaction, vm_storage::VmStorageRequirements, StorageAsMut, }; @@ -58,10 +59,16 @@ async fn block() { .unwrap(); let client = FuelClient::from(srv.bound_address); - db.storage::().insert(&height, &block).unwrap(); - db.storage::() + let mut transaction = db.write_transaction(); + transaction + .storage::() + .insert(&height, &block) + .unwrap(); + transaction + .storage::() .insert(&height, &Consensus::PoA(Default::default())) .unwrap(); + transaction.commit().unwrap(); // run test let block = client.block_by_height(height).await.unwrap(); diff --git a/tests/tests/health.rs b/tests/tests/health.rs index 65115b93151..ce795f9680d 100644 --- a/tests/tests/health.rs +++ b/tests/tests/health.rs @@ -27,7 +27,7 @@ async fn can_restart_node() { // start node once { use fuel_core::service::ServiceTrait; - let database = Database::open(tmp_dir.path(), None).unwrap(); + let database = Database::open_rocksdb(tmp_dir.path(), None).unwrap(); let first_startup = FuelService::from_database(database, Config::local_node()) .await .unwrap(); @@ -35,7 +35,7 @@ async fn can_restart_node() { } { - let database = Database::open(tmp_dir.path(), None).unwrap(); + let database = Database::open_rocksdb(tmp_dir.path(), None).unwrap(); let _second_startup = 
FuelService::from_database(database, Config::local_node()) .await .unwrap();