diff --git a/Cargo.lock b/Cargo.lock
index e9007b857..6341e67da 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1227,6 +1227,7 @@ name = "fc-consensus"
version = "0.1.0"
dependencies = [
"derive_more",
+ "fc-db",
"fp-consensus",
"fp-rpc",
"futures 0.3.12",
@@ -1244,6 +1245,19 @@ dependencies = [
"substrate-prometheus-endpoint",
]
+[[package]]
+name = "fc-db"
+version = "0.1.0"
+dependencies = [
+ "kvdb",
+ "kvdb-rocksdb",
+ "parity-scale-codec",
+ "parking_lot 0.11.1",
+ "sp-core",
+ "sp-database",
+ "sp-runtime",
+]
+
[[package]]
name = "fc-rpc"
version = "0.1.0"
@@ -1251,6 +1265,7 @@ dependencies = [
"ethereum",
"ethereum-types",
"fc-consensus",
+ "fc-db",
"fc-rpc-core",
"fp-rpc",
"futures 0.3.12",
@@ -1546,6 +1561,7 @@ name = "frontier-template-node"
version = "2.0.0-dev"
dependencies = [
"fc-consensus",
+ "fc-db",
"fc-rpc",
"fc-rpc-core",
"fp-consensus",
diff --git a/Cargo.toml b/Cargo.toml
index 48ad5dc9d..31a0566e8 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -12,6 +12,7 @@ members = [
"client/consensus",
"client/rpc-core",
"client/rpc",
+ "client/db",
"primitives/consensus",
"primitives/evm",
"primitives/rpc",
diff --git a/client/consensus/Cargo.toml b/client/consensus/Cargo.toml
index 800961038..3f47f83d2 100644
--- a/client/consensus/Cargo.toml
+++ b/client/consensus/Cargo.toml
@@ -18,6 +18,7 @@ sp-block-builder = { version = "2.0.0", git = "https://github.com/paritytech/sub
sp-inherents = { version = "2.0.0", git = "https://github.com/paritytech/substrate.git", branch = "frontier" }
fp-consensus = { version = "0.1.0", path = "../../primitives/consensus" }
fp-rpc = { path = "../../primitives/rpc" }
+fc-db = { path = "../db" }
sp-consensus = { version = "0.8.0", git = "https://github.com/paritytech/substrate.git", branch = "frontier" }
log = "0.4.8"
futures = { version = "0.3.1", features = ["compat"] }
diff --git a/client/consensus/src/aux_schema.rs b/client/consensus/src/aux_schema.rs
deleted file mode 100644
index 5781a41f9..000000000
--- a/client/consensus/src/aux_schema.rs
+++ /dev/null
@@ -1,108 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
-// This file is part of Frontier.
-//
-// Copyright (c) 2020 Parity Technologies (UK) Ltd.
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-use codec::{Encode, Decode};
-use sp_core::H256;
-use sp_runtime::traits::Block as BlockT;
-use sc_client_api::backend::AuxStore;
-use sp_blockchain::{Result as ClientResult, Error as ClientError};
-
-fn load_decode<B: AuxStore, T: Decode>(backend: &B, key: &[u8]) -> ClientResult<Option<T>> {
- let corrupt = |e: codec::Error| {
- ClientError::Backend(format!("Frontier DB is corrupted. Decode error: {}", e.what()))
- };
- match backend.get_aux(key)? {
- None => Ok(None),
- Some(t) => T::decode(&mut &t[..]).map(Some).map_err(corrupt)
- }
-}
-
-/// Map an Ethereum block hash into a Substrate block hash.
-pub fn block_hash_key(ethereum_block_hash: H256) -> Vec<u8> {
- let mut ret = b"ethereum_block_hash:".to_vec();
- ret.append(&mut ethereum_block_hash.as_ref().to_vec());
- ret
-}
-
-/// Given an Ethereum block hash, get the corresponding Substrate block hash from AuxStore.
-pub fn load_block_hash<Hash: Decode, B: AuxStore>(
- backend: &B,
- hash: H256,
-) -> ClientResult<Option<Vec<Hash>>> {
- let key = block_hash_key(hash);
- load_decode(backend, &key)
-}
-
-/// Update Aux block hash.
-pub fn write_block_hash<Hash: Encode, F, R, Backend: AuxStore>(
- client: &Backend,
- ethereum_hash: H256,
- block_hash: Hash,
- write_aux: F,
-) -> ClientResult<R> where
- F: FnOnce(&[(&[u8], &[u8])]) -> R,
-{
- let key = block_hash_key(ethereum_hash);
-
- let mut data: Vec<Hash> = match load_decode(client, &key) {
- Ok(Some(hashes)) => hashes,
- Ok(None) => Vec::new(),
- Err(e) => return Err(e)
-
- };
- data.push(block_hash);
-
- Ok(write_aux(&[(&key, &data.encode()[..])]))
-}
-
-/// Map an Ethereum transaction hash into its corresponding Ethereum block hash and index.
-pub fn transaction_metadata_key(ethereum_transaction_hash: H256) -> Vec<u8> {
- let mut ret = b"ethereum_transaction_hash:".to_vec();
- ret.append(&mut ethereum_transaction_hash.as_ref().to_vec());
- ret
-}
-
-/// Given an Ethereum transaction hash, get the corresponding Ethereum block hash and index.
-pub fn load_transaction_metadata<B: AuxStore>(
- backend: &B,
- hash: H256,
-) -> ClientResult<Option<Vec<(H256, u32)>>> {
- let key = transaction_metadata_key(hash);
- load_decode(backend, &key)
-}
-
-/// Update Aux transaction metadata.
-pub fn write_transaction_metadata<F, R, Backend: AuxStore>(
- client: &Backend,
- hash: H256,
- metadata: (H256, u32),
- write_aux: F,
-) -> ClientResult<R> where
- F: FnOnce(&[(&[u8], &[u8])]) -> R,
-{
- let key = transaction_metadata_key(hash);
-
- let mut data: Vec<(H256, u32)> = match load_decode(client, &key) {
- Ok(Some(metadata)) => metadata,
- Ok(None) => Vec::new(),
- Err(e) => return Err(e)
- };
- data.push(metadata);
-
- Ok(write_aux(&[(&key, &data.encode()[..])]))
-}
diff --git a/client/consensus/src/lib.rs b/client/consensus/src/lib.rs
index 16fe30074..64cc65a7b 100644
--- a/client/consensus/src/lib.rs
+++ b/client/consensus/src/lib.rs
@@ -16,10 +16,6 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
-mod aux_schema;
-
-pub use crate::aux_schema::{load_block_hash, load_transaction_metadata};
-
use std::sync::Arc;
use std::collections::HashMap;
use std::marker::PhantomData;
@@ -63,6 +59,7 @@ impl std::convert::From<Error> for ConsensusError {
pub struct FrontierBlockImport<B: BlockT, I, C> {
inner: I,
client: Arc<C>,
+ backend: Arc<fc_db::Backend<B>>,
enabled: bool,
_marker: PhantomData<B>,
}
@@ -72,6 +69,7 @@ impl<B: BlockT, I: Clone + BlockImport<B>, C> Clone for FrontierBlockImp
FrontierBlockImport {
inner: self.inner.clone(),
client: self.client.clone(),
+ backend: self.backend.clone(),
enabled: self.enabled,
_marker: PhantomData,
}
@@ -89,11 +87,13 @@ impl<B, I, C> FrontierBlockImport<B, I, C> where
pub fn new(
inner: I,
client: Arc<C>,
+ backend: Arc<fc_db::Backend<B>>,
enabled: bool,
) -> Self {
Self {
inner,
client,
+ backend,
enabled,
_marker: PhantomData,
}
@@ -142,32 +142,27 @@ impl<B, I, C> BlockImport<B> for FrontierBlockImport<B, I, C> where
ConsensusLog::PostBlock(block) => fp_consensus::PostHashes::from_block(block),
};
- let res = aux_schema::write_block_hash(client.as_ref(), post_hashes.block_hash, hash, insert_closure!());
+ let mapping_commitment = fc_db::MappingCommitment {
+ block_hash: hash,
+ ethereum_block_hash: post_hashes.block_hash,
+ ethereum_transaction_hashes: post_hashes.transaction_hashes,
+ };
+ let res = self.backend.mapping_db().write_hashes(mapping_commitment);
if res.is_err() { trace!(target: "frontier-consensus", "{:?}", res); }
- for (index, transaction_hash) in post_hashes.transaction_hashes.into_iter().enumerate() {
- let res = aux_schema::write_transaction_metadata(
- client.as_ref(),
- transaction_hash,
- (post_hashes.block_hash, index as u32),
- insert_closure!(),
- );
- if res.is_err() { trace!(target: "frontier-consensus", "{:?}", res); }
- }
-
// On importing block 1 we also map the genesis block in the auxiliary.
if block.header.number().clone() == One::one() {
let id = BlockId::Number(Zero::zero());
if let Ok(Some(header)) = client.header(id) {
let block = self.client.runtime_api().current_block(&id)
.map_err(|_| Error::RuntimeApiCallFailed)?;
- let block_hash = block.unwrap().header.hash();
- let res = aux_schema::write_block_hash(
- client.as_ref(),
- block_hash,
- header.hash(),
- insert_closure!()
- );
+ let block_hash = block.unwrap().header.hash(); // TODO: shouldn't use unwrap
+ let mapping_commitment = fc_db::MappingCommitment::<B> {
+ block_hash: header.hash(),
+ ethereum_block_hash: block_hash,
+ ethereum_transaction_hashes: Vec::new(),
+ };
+ let res = self.backend.mapping_db().write_hashes(mapping_commitment);
if res.is_err() { trace!(target: "frontier-consensus", "{:?}", res); }
}
}
diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml
new file mode 100644
index 000000000..63c93cfbd
--- /dev/null
+++ b/client/db/Cargo.toml
@@ -0,0 +1,17 @@
+[package]
+name = "fc-db"
+version = "0.1.0"
+authors = ["Parity Technologies "]
+description = "Frontier database backend"
+edition = "2018"
+license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+repository = "https://github.com/paritytech/frontier/"
+
+[dependencies]
+sp-core = { version = "2.0.0", git = "https://github.com/paritytech/substrate.git", branch = "frontier" }
+sp-database = { version = "2.0.0", git = "https://github.com/paritytech/substrate.git", branch = "frontier" }
+sp-runtime = { version = "2.0.0", git = "https://github.com/paritytech/substrate.git", branch = "frontier" }
+kvdb = "0.8.0"
+kvdb-rocksdb = "0.10.0"
+codec = { package = "parity-scale-codec", version = "1.3.6", features = ["derive"] }
+parking_lot = "0.11.1"
\ No newline at end of file
diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs
new file mode 100644
index 000000000..676491b65
--- /dev/null
+++ b/client/db/src/lib.rs
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+// This file is part of Frontier.
+//
+// Copyright (c) 2021 Parity Technologies (UK) Ltd.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+mod utils;
+
+pub use sp_database::Database;
+
+use std::{sync::Arc, path::{Path, PathBuf}, marker::PhantomData};
+use sp_core::H256;
+use sp_runtime::traits::Block as BlockT;
+use parking_lot::Mutex;
+use codec::{Encode, Decode};
+
+const DB_HASH_LEN: usize = 32;
+/// Hash type that this backend uses for the database.
+pub type DbHash = [u8; DB_HASH_LEN];
+
+/// Database settings.
+pub struct DatabaseSettings {
+ /// Where to find the database.
+ pub source: DatabaseSettingsSrc,
+}
+
+/// Where to find the database.
+#[derive(Debug, Clone)]
+pub enum DatabaseSettingsSrc {
+ /// Load a RocksDB database from a given path. Recommended for most uses.
+ RocksDb {
+ /// Path to the database.
+ path: PathBuf,
+ /// Cache size in MiB.
+ cache_size: usize,
+ },
+}
+
+impl DatabaseSettingsSrc {
+ /// Return the database path for databases that are on disk.
+ pub fn path(&self) -> Option<&Path> {
+ match self {
+ DatabaseSettingsSrc::RocksDb { path, .. } => Some(path.as_path()),
+ }
+ }
+}
+
+pub(crate) mod columns {
+ pub const NUM_COLUMNS: u32 = 3;
+
+ pub const META: u32 = 0;
+ pub const BLOCK_MAPPING: u32 = 1;
+ pub const TRANSACTION_MAPPING: u32 = 2;
+}
+
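+/// Frontier database backend, holding the Ethereum to Substrate hash mapping database.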
+pub struct Backend<Block: BlockT> {
+ mapping_db: Arc<MappingDb<Block>>,
+}
+
+impl<Block: BlockT> Backend<Block> {
+ pub fn new(config: &DatabaseSettings) -> Result<Self, String> {
+ let db = utils::open_database(config)?;
+
+ Ok(Self {
+ mapping_db: Arc::new(MappingDb {
+ db: db.clone(),
+ write_lock: Arc::new(Mutex::new(())),
+ _marker: PhantomData,
+ })
+ })
+ }
+
+ pub fn mapping_db(&self) -> &Arc<MappingDb<Block>> {
+ &self.mapping_db
+ }
+}
+
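+/// Hashes committed for one imported block: the Substrate block hash plus the Ethereum block hash and transaction hashes it maps to.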
+pub struct MappingCommitment<Block: BlockT> {
+ pub block_hash: Block::Hash,
+ pub ethereum_block_hash: H256,
+ pub ethereum_transaction_hashes: Vec<H256>,
+}
+
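+/// Stored location of an Ethereum transaction: the containing Substrate and Ethereum block hashes and its index within the Ethereum block.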
+#[derive(Clone, Encode, Decode)]
+pub struct TransactionMetadata<Block: BlockT> {
+ pub block_hash: Block::Hash,
+ pub ethereum_block_hash: H256,
+ pub ethereum_index: u32,
+}
+
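+/// Ethereum to Substrate mapping store; writes take a lock so concurrent read-modify-write cycles do not lose entries.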
+pub struct MappingDb<Block: BlockT> {
+ db: Arc<dyn Database<DbHash>>,
+ write_lock: Arc<Mutex<()>>,
+ _marker: PhantomData<Block>,
+}
+
+impl<Block: BlockT> MappingDb<Block> {
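+ /// Substrate block hashes that map to the given Ethereum block hash (may be more than one if the same Ethereum block was imported on different forks).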
+ pub fn block_hashes(
+ &self,
+ ethereum_block_hash: &H256,
+ ) -> Result<Vec<Block::Hash>, String> {
+ match self.db.get(crate::columns::BLOCK_MAPPING, &ethereum_block_hash.encode()) {
+ Some(raw) => Ok(Vec::<Block::Hash>::decode(&mut &raw[..]).map_err(|e| format!("{:?}", e))?),
+ None => Ok(Vec::new()),
+ }
+ }
+
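+ /// Transaction metadata entries recorded for the given Ethereum transaction hash.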
+ pub fn transaction_metadata(
+ &self,
+ ethereum_transaction_hash: &H256,
+ ) -> Result<Vec<TransactionMetadata<Block>>, String> {
+ match self.db.get(crate::columns::TRANSACTION_MAPPING, &ethereum_transaction_hash.encode()) {
+ Some(raw) => Ok(Vec::<TransactionMetadata<Block>>::decode(&mut &raw[..]).map_err(|e| format!("{:?}", e))?),
+ None => Ok(Vec::new()),
+ }
+ }
+
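+ /// Write the block and transaction mappings of a commitment in a single database transaction, under the write lock.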
+ pub fn write_hashes(
+ &self,
+ commitment: MappingCommitment<Block>,
+ ) -> Result<(), String> {
+ let _lock = self.write_lock.lock();
+
+ let mut transaction = sp_database::Transaction::new();
+
+ let mut block_hashes = self.block_hashes(&commitment.ethereum_block_hash)?;
+ block_hashes.push(commitment.block_hash);
+ transaction.set(
+ crate::columns::BLOCK_MAPPING,
+ &commitment.ethereum_block_hash.encode(),
+ &block_hashes.encode()
+ );
+
+ for (i, ethereum_transaction_hash) in commitment.ethereum_transaction_hashes.into_iter().enumerate() {
+ let mut metadata = self.transaction_metadata(&ethereum_transaction_hash)?;
+ metadata.push(TransactionMetadata::<Block> {
+ block_hash: commitment.block_hash,
+ ethereum_block_hash: commitment.ethereum_block_hash,
+ ethereum_index: i as u32,
+ });
+ transaction.set(
+ crate::columns::TRANSACTION_MAPPING,
+ &ethereum_transaction_hash.encode(),
+ &metadata.encode(),
+ );
+ }
+
+ self.db.commit(transaction).map_err(|e| format!("{:?}", e))?;
+
+ Ok(())
+ }
+}
diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs
new file mode 100644
index 000000000..2a66a22dc
--- /dev/null
+++ b/client/db/src/utils.rs
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+// This file is part of Frontier.
+//
+// Copyright (c) 2020 Parity Technologies (UK) Ltd.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use std::sync::Arc;
+use crate::{Database, DbHash, DatabaseSettings, DatabaseSettingsSrc};
+
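+/// Open (or create) the RocksDB database described by the given settings.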
+pub fn open_database(
+ config: &DatabaseSettings,
+) -> Result<Arc<dyn Database<DbHash>>, String> {
+ let db: Arc<dyn Database<DbHash>> = match &config.source {
+ DatabaseSettingsSrc::RocksDb { path, cache_size: _ } => {
+ let db_config = kvdb_rocksdb::DatabaseConfig::with_columns(crate::columns::NUM_COLUMNS);
+ let path = path.to_str()
+ .ok_or_else(|| "Invalid database path".to_string())?;
+
+ let db = kvdb_rocksdb::Database::open(&db_config, &path)
+ .map_err(|err| format!("{}", err))?;
+ sp_database::as_database(db)
+ }
+ };
+
+ Ok(db)
+}
diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml
index 06f4a640f..6242d6407 100644
--- a/client/rpc/Cargo.toml
+++ b/client/rpc/Cargo.toml
@@ -14,6 +14,7 @@ jsonrpc-pubsub = "15.0.0"
log = "0.4.8"
ethereum-types = "0.10.0"
fc-consensus = { path = "../consensus" }
+fc-db = { path = "../db" }
fc-rpc-core = { path = "../rpc-core" }
fp-rpc = { path = "../../primitives/rpc" }
sp-io = { git = "https://github.com/paritytech/substrate.git", branch = "frontier" }
diff --git a/client/rpc/src/eth.rs b/client/rpc/src/eth.rs
index 7dac05c40..d878f20cb 100644
--- a/client/rpc/src/eth.rs
+++ b/client/rpc/src/eth.rs
@@ -55,6 +55,7 @@ pub struct EthApi<B: BlockT, C, P, CT, BE, H: ExHashT> {
is_authority: bool,
signers: Vec<Box<dyn EthSigner>>,
pending_transactions: PendingTransactions,
+ backend: Arc<fc_db::Backend<B>>,
_marker: PhantomData<(B, BE)>,
}
@@ -66,6 +67,7 @@ impl<B: BlockT, C, P, CT, BE, H: ExHashT> EthApi<B, C, P, CT, BE, H> {
network: Arc<NetworkService<B, H>>,
pending_transactions: PendingTransactions,
signers: Vec<Box<dyn EthSigner>>,
+ backend: Arc<fc_db::Backend<B>>,
is_authority: bool,
) -> Self {
Self {
@@ -76,6 +78,7 @@ impl EthApi {
is_authority,
signers,
pending_transactions,
+ backend,
_marker: PhantomData,
}
}
@@ -298,12 +301,8 @@ impl<B, C, P, CT, BE, H: ExHashT> EthApi<B, C, P, CT, BE, H> where
// Asumes there is only one mapped canonical block in the AuxStore, otherwise something is wrong
fn load_hash(&self, hash: H256) -> Result<Option<BlockId<B>>> {
- let hashes = match fc_consensus::load_block_hash::<B::Hash, _>(self.client.as_ref(), hash)
- .map_err(|err| internal_err(format!("fetch aux store failed: {:?}", err)))?
- {
- Some(hashes) => hashes,
- None => return Ok(None),
- };
+ let hashes = self.backend.mapping_db().block_hashes(&hash)
+ .map_err(|err| internal_err(format!("fetch aux store failed: {:?}", err)))?;
let out: Vec<H256> = hashes.into_iter()
.filter_map(|h| {
if self.is_canon(h) {
@@ -331,30 +330,14 @@ impl<B, C, P, CT, BE, H: ExHashT> EthApi<B, C, P, CT, BE, H> where
}
fn load_transactions(&self, transaction_hash: H256) -> Result<Option<(H256, u32)>> {
- let mut transactions: Vec<(H256, u32)> = Vec::new();
- match fc_consensus::load_transaction_metadata(
- self.client.as_ref(),
- transaction_hash,
- ).map_err(|err| internal_err(format!("fetch aux store failed: {:?}", err)))? {
- Some(metadata) => {
- for (block_hash, index) in metadata {
- match self.load_hash(block_hash)
- .map_err(|err| internal_err(format!("{:?}", err)))?
- {
- Some(_) => {
- transactions.push((block_hash, index));
- },
- _ => {},
- };
- }
- },
- None => return Ok(None),
- };
+ let transaction_metadata = self.backend.mapping_db().transaction_metadata(&transaction_hash)
+ .map_err(|err| internal_err(format!("fetch aux store failed: {:?}", err)))?;
- if transactions.len() == 1 {
- return Ok(Some(transactions[0]));
+ if transaction_metadata.len() == 1 {
+ Ok(Some((transaction_metadata[0].ethereum_block_hash, transaction_metadata[0].ethereum_index)))
+ } else {
+ Ok(None)
}
- Ok(None)
}
}
diff --git a/template/node/Cargo.toml b/template/node/Cargo.toml
index 5545cf102..1e6d0737e 100644
--- a/template/node/Cargo.toml
+++ b/template/node/Cargo.toml
@@ -55,6 +55,7 @@ frontier-template-runtime = { path = "../runtime" }
fc-rpc = { path = "../../client/rpc" }
fp-rpc = { path = "../../primitives/rpc" }
fc-rpc-core = { path = "../../client/rpc-core" }
+fc-db = { path = "../../client/db" }
[build-dependencies]
substrate-build-script-utils = { git = "https://github.com/paritytech/substrate.git", branch = "frontier" }
diff --git a/template/node/src/rpc.rs b/template/node/src/rpc.rs
index 5bc97709d..c3532f087 100644
--- a/template/node/src/rpc.rs
+++ b/template/node/src/rpc.rs
@@ -49,6 +49,8 @@ pub struct FullDeps<C, P> {
pub pending_transactions: PendingTransactions,
/// EthFilterApi pool.
pub filter_pool: Option<FilterPool>,
+ /// Backend.
+ pub backend: Arc<fc_db::Backend<Block>>,
/// Manual seal command sink
pub command_sink: Option<futures::channel::mpsc::Sender<EngineCommand<Hash>>>,
}
@@ -89,6 +91,7 @@ pub fn create_full(
pending_transactions,
filter_pool,
command_sink,
+ backend,
enable_dev_signer,
} = deps;
@@ -111,6 +114,7 @@ pub fn create_full(
network.clone(),
pending_transactions.clone(),
signers,
+ backend,
is_authority,
))
);
diff --git a/template/node/src/service.rs b/template/node/src/service.rs
index 6dc8bdc2b..186ba76b3 100644
--- a/template/node/src/service.rs
+++ b/template/node/src/service.rs
@@ -6,7 +6,7 @@ use sc_client_api::{ExecutorProvider, RemoteBackend, BlockchainEvents};
use sc_consensus_manual_seal::{self as manual_seal};
use fc_consensus::FrontierBlockImport;
use frontier_template_runtime::{self, opaque::Block, RuntimeApi, SLOT_DURATION};
-use sc_service::{error::Error as ServiceError, Configuration, TaskManager};
+use sc_service::{error::Error as ServiceError, Configuration, TaskManager, BasePath};
use sp_inherents::{InherentDataProviders, ProvideInherentData, InherentIdentifier, InherentData};
use sc_executor::native_executor_instance;
pub use sc_executor::NativeExecutor;
@@ -14,6 +14,7 @@ use sp_consensus_aura::sr25519::{AuthorityPair as AuraPair};
use sc_finality_grandpa::SharedVoterState;
use sp_timestamp::InherentError;
use sc_telemetry::TelemetrySpan;
+use sc_cli::SubstrateCli;
use crate::cli::Sealing;
// Our native executor instance.
@@ -72,12 +73,29 @@ impl ProvideInherentData for MockTimestampInherentDataProvider {
}
}
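+/// Open the Frontier (fc-db) backend under the node's config directory, in a "frontier/db" subdirectory.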
+pub fn open_frontier_backend(config: &Configuration) -> Result<Arc<fc_db::Backend<Block>>, String> {
+ let config_dir = config.base_path.as_ref()
+ .map(|base_path| base_path.config_dir(config.chain_spec.id()))
+ .unwrap_or_else(|| {
+ BasePath::from_project("", "", &crate::cli::Cli::executable_name())
+ .config_dir(config.chain_spec.id())
+ });
+ let database_dir = config_dir.join("frontier").join("db");
+
+ Ok(Arc::new(fc_db::Backend::<Block>::new(&fc_db::DatabaseSettings {
+ source: fc_db::DatabaseSettingsSrc::RocksDb {
+ path: database_dir,
+ cache_size: 0,
+ }
+ })?))
+}
+
pub fn new_partial(config: &Configuration, sealing: Option<Sealing>) -> Result<
sc_service::PartialComponents<
FullClient, FullBackend, FullSelectChain,
sp_consensus::import_queue::BasicQueue<Block, sp_api::TransactionFor<FullClient, Block>>,
sc_transaction_pool::FullPool<Block, FullClient>,
- (ConsensusResult, PendingTransactions, Option<TelemetrySpan>, Option<FilterPool>),
+ (ConsensusResult, PendingTransactions, Option<TelemetrySpan>, Option<FilterPool>, Arc<fc_db::Backend<Block>>),
>, ServiceError> {
let inherent_data_providers = sp_inherents::InherentDataProviders::new();
@@ -100,6 +118,8 @@ pub fn new_partial(config: &Configuration, sealing: Option<Sealing>) -> Result<
let filter_pool: Option<FilterPool>
= Some(Arc::new(Mutex::new(BTreeMap::new())));
+ let frontier_backend = open_frontier_backend(config)?;
+
if let Some(sealing) = sealing {
inherent_data_providers
.register_provider(MockTimestampInherentDataProvider)
@@ -109,6 +129,7 @@ pub fn new_partial(config: &Configuration, sealing: Option<Sealing>) -> Result<
let frontier_block_import = FrontierBlockImport::new(
client.clone(),
client.clone(),
+ frontier_backend.clone(),
true,
);
@@ -121,7 +142,7 @@ pub fn new_partial(config: &Configuration, sealing: Option<Sealing>) -> Result<
return Ok(sc_service::PartialComponents {
client, backend, task_manager, import_queue, keystore_container,
select_chain, transaction_pool, inherent_data_providers,
- other: (ConsensusResult::ManualSeal(frontier_block_import, sealing), pending_transactions, telemetry_span, filter_pool)
+ other: (ConsensusResult::ManualSeal(frontier_block_import, sealing), pending_transactions, telemetry_span, filter_pool, frontier_backend)
})
}
@@ -132,6 +153,7 @@ pub fn new_partial(config: &Configuration, sealing: Option<Sealing>) -> Result<
let frontier_block_import = FrontierBlockImport::new(
grandpa_block_import.clone(),
client.clone(),
+ frontier_backend.clone(),
true
);
@@ -153,7 +175,7 @@ pub fn new_partial(config: &Configuration, sealing: Option<Sealing>) -> Result<
Ok(sc_service::PartialComponents {
client, backend, task_manager, import_queue, keystore_container,
select_chain, transaction_pool, inherent_data_providers,
- other: (ConsensusResult::Aura(aura_block_import, grandpa_link), pending_transactions, telemetry_span, filter_pool)
+ other: (ConsensusResult::Aura(aura_block_import, grandpa_link), pending_transactions, telemetry_span, filter_pool, frontier_backend)
})
}
@@ -166,7 +188,7 @@ pub fn new_full(
let sc_service::PartialComponents {
client, backend, mut task_manager, import_queue, keystore_container,
select_chain, transaction_pool, inherent_data_providers,
- other: (consensus_result, pending_transactions, telemetry_span, filter_pool),
+ other: (consensus_result, pending_transactions, telemetry_span, filter_pool, frontier_backend),
} = new_partial(&config, sealing)?;
let (network, network_status_sinks, system_rpc_tx, network_starter) =
@@ -214,6 +236,7 @@ pub fn new_full(
network: network.clone(),
pending_transactions: pending.clone(),
filter_pool: filter_pool.clone(),
+ backend: frontier_backend.clone(),
command_sink: Some(command_sink.clone())
};
crate::rpc::create_full(