From c039ab79b59e418a210f630a5ae485c39171f0e4 Mon Sep 17 00:00:00 2001
From: Wei Tang
Date: Mon, 9 Apr 2018 20:21:37 +0800
Subject: [PATCH] Decouple rocksdb dependency from ethcore (#8320)

* Move client DB opening logic to CLI

* Move restoration DB open logic to CLI

This adds KeyValueDBHandler, which handles opening a new database and thus
allows us to move the restoration DB open logic out of ethcore.

* Move rocksdb's CompactionProfile conversion to CLI

* Move kvdb_rocksdb to a test dependency for ethcore

* Fix tests due to interface change

* Fix service tests

* Remove unused migration dep for ethcore
---
 Cargo.lock                            |  1 -
 ethcore/Cargo.toml                    |  3 +-
 ethcore/service/Cargo.toml            |  2 +-
 ethcore/service/src/lib.rs            |  4 +-
 ethcore/service/src/service.rs        | 61 +++++++++++++++++----------
 ethcore/src/client/config.rs          | 13 ------
 ethcore/src/lib.rs                    |  5 ++-
 ethcore/src/snapshot/service.rs       | 27 ++++++------
 ethcore/src/snapshot/tests/service.rs |  6 +--
 ethcore/src/tests/helpers.rs          | 19 +++++++++
 parity/blockchain.rs                  | 22 +++++++---
 parity/export_hardcoded_sync.rs       |  4 +-
 parity/helpers.rs                     | 52 ++++++++++++++++++++++-
 parity/run.rs                         | 13 ++++--
 parity/snapshot.rs                    | 11 +++--
 util/kvdb/src/lib.rs                  |  9 ++++
 16 files changed, 176 insertions(+), 76 deletions(-)
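KeyValueDBHandler, added to util/kvdb at the end of this patch, is the seam that lets ethcore ask for a database without depending on RocksDB: the CLI implements the trait once over kvdb-rocksdb and hands a boxed handler to ethcore. A minimal sketch of such an implementation, assuming the kvdb and kvdb-rocksdb APIs exercised elsewhere in this patch (the RocksdbHandler name is illustrative):

// Sketch only; mirrors the handler pattern used in parity/helpers.rs below.
extern crate kvdb;
extern crate kvdb_rocksdb;

use std::path::Path;
use std::sync::Arc;
use kvdb::{Error, KeyValueDB, KeyValueDBHandler};
use kvdb_rocksdb::{Database, DatabaseConfig};

/// Opens RocksDB-backed databases with one fixed configuration.
struct RocksdbHandler {
	config: DatabaseConfig,
}

impl KeyValueDBHandler for RocksdbHandler {
	fn open(&self, db_path: &Path) -> Result<Arc<KeyValueDB>, Error> {
		// Every call opens (or creates) a database at `db_path` with the stored config.
		Ok(Arc::new(Database::open(&self.config, &db_path.to_string_lossy())?))
	}
}

ethcore only ever sees the boxed trait object, so kvdb-rocksdb can drop to a dev-dependency there, as the Cargo.toml changes below do.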
diff --git a/Cargo.lock b/Cargo.lock
index c66c6147295..403b468f843 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -535,7 +535,6 @@ dependencies = [
 "macros 0.1.0",
 "memory-cache 0.1.0",
 "memorydb 0.1.1",
- "migration 0.1.0",
 "num 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)",
 "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "parity-machine 0.1.0",
diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml
index ba984f315ff..9c61fd8ea4b 100644
--- a/ethcore/Cargo.toml
+++ b/ethcore/Cargo.toml
@@ -51,12 +51,10 @@ rlp = { path = "../util/rlp" }
rlp_compress = { path = "../util/rlp_compress" }
rlp_derive = { path = "../util/rlp_derive" }
kvdb = { path = "../util/kvdb" }
-kvdb-rocksdb = { path = "../util/kvdb-rocksdb" }
kvdb-memorydb = { path = "../util/kvdb-memorydb" }
util-error = { path = "../util/error" }
snappy = { git = "https://github.com/paritytech/rust-snappy" }
stop-guard = { path = "../util/stop-guard" }
-migration = { path = "../util/migration" }
macros = { path = "../util/macros" }
rust-crypto = "0.2.34"
rustc-hex = "1.0"
@@ -74,6 +72,7 @@ journaldb = { path = "../util/journaldb" }
[dev-dependencies]
tempdir = "0.3"
trie-standardmap = { path = "../util/trie-standardmap" }
+kvdb-rocksdb = { path = "../util/kvdb-rocksdb" }

[features]
evm-debug = ["slow-blocks"]
diff --git a/ethcore/service/Cargo.toml b/ethcore/service/Cargo.toml
index 634769d0b54..4b53f5d00a5 100644
--- a/ethcore/service/Cargo.toml
+++ b/ethcore/service/Cargo.toml
@@ -8,9 +8,9 @@ ansi_term = "0.10"
ethcore = { path = ".." }
ethcore-io = { path = "../../util/io" }
kvdb = { path = "../../util/kvdb" }
-kvdb-rocksdb = { path = "../../util/kvdb-rocksdb" }
log = "0.3"
stop-guard = { path = "../../util/stop-guard" }

[dev-dependencies]
tempdir = "0.3"
+kvdb-rocksdb = { path = "../../util/kvdb-rocksdb" }
diff --git a/ethcore/service/src/lib.rs b/ethcore/service/src/lib.rs
index 907009ba324..83d9a8fe10c 100644
--- a/ethcore/service/src/lib.rs
+++ b/ethcore/service/src/lib.rs
@@ -18,7 +18,6 @@ extern crate ansi_term;
extern crate ethcore;
extern crate ethcore_io as io;
extern crate kvdb;
-extern crate kvdb_rocksdb;
extern crate stop_guard;

#[macro_use]
@@ -27,6 +26,9 @@ extern crate log;
#[cfg(test)]
extern crate tempdir;

+#[cfg(test)]
+extern crate kvdb_rocksdb;
+
mod service;

pub use service::ClientService;
diff --git a/ethcore/service/src/service.rs b/ethcore/service/src/service.rs
index f190d6e6ac3..4337996e2b0 100644
--- a/ethcore/service/src/service.rs
+++ b/ethcore/service/src/service.rs
@@ -21,12 +21,10 @@ use std::path::Path;

use ansi_term::Colour;
use io::{IoContext, TimerToken, IoHandler, IoService, IoError};
-use kvdb::KeyValueDB;
-use kvdb_rocksdb::{Database, DatabaseConfig};
+use kvdb::{KeyValueDB, KeyValueDBHandler};
use stop_guard::StopGuard;

-use ethcore::client::{self, Client, ClientConfig, ChainNotify, ClientIoMessage};
-use ethcore::db;
+use ethcore::client::{Client, ClientConfig, ChainNotify, ClientIoMessage};
use ethcore::error::Error;
use ethcore::miner::Miner;
use ethcore::snapshot::service::{Service as SnapshotService, ServiceParams as SnapServiceParams};
@@ -38,7 +36,7 @@ pub struct ClientService {
	io_service: Arc<IoService<ClientIoMessage>>,
	client: Arc<Client>,
	snapshot: Arc<SnapshotService>,
-	database: Arc<Database>,
+	database: Arc<KeyValueDB>,
	_stop_guard: StopGuard,
}

@@ -47,8 +45,9 @@ impl ClientService {
	pub fn start(
		config: ClientConfig,
		spec: &Spec,
-		client_path: &Path,
+		client_db: Arc<KeyValueDB>,
		snapshot_path: &Path,
+		restoration_db_handler: Box<KeyValueDBHandler>,
		_ipc_path: &Path,
		miner: Arc<Miner>,
	) -> Result<ClientService, Error>
@@ -57,25 +56,13 @@ impl ClientService {
		info!("Configured for {} using {} engine", Colour::White.bold().paint(spec.name.clone()), Colour::Yellow.bold().paint(spec.engine.name()));

-		let mut db_config = DatabaseConfig::with_columns(db::NUM_COLUMNS);
-
-		db_config.memory_budget = config.db_cache_size;
-		db_config.compaction = config.db_compaction.compaction_profile(client_path);
-		db_config.wal = config.db_wal;
-
-		let db = Arc::new(Database::open(
-			&db_config,
-			&client_path.to_str().expect("DB path could not be converted to string.")
-		).map_err(client::Error::Database)?);
-
		let pruning = config.pruning;
-		let client = Client::new(config, &spec, db.clone(), miner, io_service.channel())?;
+		let client = Client::new(config, &spec, client_db.clone(), miner, io_service.channel())?;

		let snapshot_params = SnapServiceParams {
			engine: spec.engine.clone(),
			genesis_block: spec.genesis_block(),
-			db_config: db_config.clone(),
+			restoration_db_handler: restoration_db_handler,
			pruning: pruning,
			channel: io_service.channel(),
			snapshot_root: snapshot_path.into(),
@@ -97,7 +84,7 @@ impl ClientService {
			io_service: Arc::new(io_service),
			client: client,
			snapshot: snapshot,
-			database: db,
+			database: client_db,
			_stop_guard: stop_guard,
		})
	}
@@ -208,6 +195,9 @@ mod tests {
	use ethcore::client::ClientConfig;
	use ethcore::miner::Miner;
	use ethcore::spec::Spec;
+	use ethcore::db::NUM_COLUMNS;
+	use kvdb::Error;
+	use kvdb_rocksdb::{Database, DatabaseConfig, CompactionProfile};
	use super::*;

	#[test]
@@ -216,12 +206,39 @@ mod tests {
		let client_path = tempdir.path().join("client");
		let snapshot_path = tempdir.path().join("snapshot");

+		let client_config = ClientConfig::default();
+		let mut client_db_config = DatabaseConfig::with_columns(NUM_COLUMNS);
+
+		client_db_config.memory_budget = client_config.db_cache_size;
+		client_db_config.compaction = CompactionProfile::auto(&client_path);
+		client_db_config.wal = client_config.db_wal;
+
+		let client_db = Arc::new(Database::open(
+			&client_db_config,
+			&client_path.to_str().expect("DB path could not be converted to string.")
+		).unwrap());
+
+		struct RestorationDBHandler {
+			config: DatabaseConfig,
+		}
+
+		impl KeyValueDBHandler for RestorationDBHandler {
+			fn open(&self, db_path: &Path) -> Result<Arc<KeyValueDB>, Error> {
+				Ok(Arc::new(Database::open(&self.config, &db_path.to_string_lossy())?))
+			}
+		}
+
+		let restoration_db_handler = Box::new(RestorationDBHandler {
+			config: client_db_config,
+		});
+
		let spec = Spec::new_test();
		let service = ClientService::start(
			ClientConfig::default(),
			&spec,
-			&client_path,
+			client_db,
			&snapshot_path,
+			restoration_db_handler,
			tempdir.path(),
			Arc::new(Miner::with_spec(&spec)),
		);
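The net effect on callers: ClientService::start no longer takes a client path and opens RocksDB itself; it takes an already-opened client database plus a handler for restoration databases. A sketch of the production wiring, assuming the client_db_config, open_client_db and restoration_db_handler helpers added to parity/helpers.rs further down in this patch (the surrounding function and its inputs are illustrative, imports as in parity/run.rs):

// Sketch of the new ClientService::start contract, as parity/run.rs wires it below.
fn start_full_client(
	client_config: ClientConfig,
	spec: &Spec,
	client_path: &Path,
	snapshot_path: &Path,
	ipc_path: &Path,
	miner: Arc<Miner>,
) -> Result<ClientService, String> {
	// One DatabaseConfig serves both the client DB and later restoration DBs.
	let client_db_config = client_db_config(client_path, &client_config);
	// The CLI owns the concrete RocksDB handle...
	let client_db = open_client_db(client_path, &client_db_config)?;
	// ...and a factory the snapshot service can call for each restoration.
	let restoration_db_handler = restoration_db_handler(client_db_config);

	ClientService::start(
		client_config,
		spec,
		client_db,               // Arc<KeyValueDB>, already open
		snapshot_path,
		restoration_db_handler,  // Box<KeyValueDBHandler>
		ipc_path,
		miner,
	).map_err(|e| format!("Client service error: {:?}", e))
}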
diff --git a/ethcore/src/client/config.rs b/ethcore/src/client/config.rs
index b337bf4319e..9787f822a4d 100644
--- a/ethcore/src/client/config.rs
+++ b/ethcore/src/client/config.rs
@@ -15,13 +15,11 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::str::FromStr;
-use std::path::Path;
use std::fmt::{Display, Formatter, Error as FmtError};
use mode::Mode as IpcMode;
use verification::{VerifierType, QueueConfig};
use journaldb;
-use kvdb_rocksdb::CompactionProfile;

pub use std::time::Duration;
pub use blockchain::Config as BlockChainConfig;
@@ -45,17 +43,6 @@ impl Default for DatabaseCompactionProfile {
	}
}

-impl DatabaseCompactionProfile {
-	/// Returns corresponding compaction profile.
-	pub fn compaction_profile(&self, db_path: &Path) -> CompactionProfile {
-		match *self {
-			DatabaseCompactionProfile::Auto => CompactionProfile::auto(db_path),
-			DatabaseCompactionProfile::SSD => CompactionProfile::ssd(),
-			DatabaseCompactionProfile::HDD => CompactionProfile::hdd(),
-		}
-	}
-}
-
impl FromStr for DatabaseCompactionProfile {
	type Err = String;
diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs
index 43c3336f6f8..d822195d321 100644
--- a/ethcore/src/lib.rs
+++ b/ethcore/src/lib.rs
@@ -93,11 +93,9 @@ extern crate triehash;
extern crate ansi_term;
extern crate unexpected;
extern crate kvdb;
-extern crate kvdb_rocksdb;
extern crate kvdb_memorydb;
extern crate util_error;
extern crate snappy;
-extern crate migration;
extern crate ethabi;

#[macro_use]
@@ -130,6 +128,9 @@ extern crate trace_time;
#[cfg_attr(test, macro_use)]
extern crate evm;

+#[cfg(test)]
+extern crate kvdb_rocksdb;
+
pub extern crate ethstore;

pub mod account_provider;
diff --git a/ethcore/src/snapshot/service.rs b/ethcore/src/snapshot/service.rs
index 7def518f357..fad150645db 100644
--- a/ethcore/src/snapshot/service.rs
+++ b/ethcore/src/snapshot/service.rs
@@ -39,7 +39,7 @@ use parking_lot::{Mutex, RwLock, RwLockReadGuard};
use util_error::UtilError;
use bytes::Bytes;
use journaldb::Algorithm;
-use kvdb_rocksdb::{Database, DatabaseConfig};
+use kvdb::{KeyValueDB, KeyValueDBHandler};
use snappy;

/// Helper for removing directories in case of error.
@@ -79,14 +79,13 @@ struct Restoration {
	snappy_buffer: Bytes,
	final_state_root: H256,
	guard: Guard,
-	db: Arc<Database>,
+	db: Arc<KeyValueDB>,
}

struct RestorationParams<'a> {
	manifest: ManifestData, // manifest to base restoration on.
	pruning: Algorithm, // pruning algorithm for the database.
-	db_path: PathBuf, // database path
-	db_config: &'a DatabaseConfig, // configuration for the database.
+	db: Arc<KeyValueDB>, // database
	writer: Option<LooseWriter>, // writer for recovered snapshot.
	genesis: &'a [u8], // genesis block of the chain.
	guard: Guard, // guard for the restoration directory.
@@ -101,8 +100,7 @@ impl Restoration {
		let state_chunks = manifest.state_hashes.iter().cloned().collect();
		let block_chunks = manifest.block_hashes.iter().cloned().collect();

-		let raw_db = Arc::new(Database::open(params.db_config, &*params.db_path.to_string_lossy())
-			.map_err(UtilError::from)?);
+		let raw_db = params.db;

		let chain = BlockChain::new(Default::default(), params.genesis, raw_db.clone());
		let components = params.engine.snapshot_components()
@@ -211,10 +209,10 @@ pub struct ServiceParams {
	pub engine: Arc<EthEngine>,
	/// The chain's genesis block.
	pub genesis_block: Bytes,
-	/// Database configuration options.
-	pub db_config: DatabaseConfig,
	/// State pruning algorithm.
	pub pruning: Algorithm,
+	/// Handler for opening a restoration DB.
+	pub restoration_db_handler: Box<KeyValueDBHandler>,
	/// Async IO channel for sending messages.
	pub channel: Channel,
	/// The directory to put snapshots in.
@@ -228,8 +226,8 @@
/// This controls taking snapshots and restoring from them.
pub struct Service {
	restoration: Mutex<Option<Restoration>>,
+	restoration_db_handler: Box<KeyValueDBHandler>,
	snapshot_root: PathBuf,
-	db_config: DatabaseConfig,
	io_channel: Mutex<Channel>,
	pruning: Algorithm,
	status: Mutex<RestorationStatus>,
@@ -249,8 +247,8 @@ impl Service {
	pub fn new(params: ServiceParams) -> Result<Self, Error> {
		let mut service = Service {
			restoration: Mutex::new(None),
+			restoration_db_handler: params.restoration_db_handler,
			snapshot_root: params.snapshot_root,
-			db_config: params.db_config,
			io_channel: Mutex::new(params.channel),
			pruning: params.pruning,
			status: Mutex::new(RestorationStatus::Inactive),
@@ -437,8 +435,7 @@ impl Service {
		let params = RestorationParams {
			manifest: manifest,
			pruning: self.pruning,
-			db_path: self.restoration_db(),
-			db_config: &self.db_config,
+			db: self.restoration_db_handler.open(&self.restoration_db())?,
			writer: writer,
			genesis: &self.genesis_block,
			guard: Guard::new(rest_dir),
@@ -638,6 +635,7 @@ mod tests {
	use snapshot::{ManifestData, RestorationStatus, SnapshotService};
	use super::*;
	use tempdir::TempDir;
+	use tests::helpers::restoration_db_handler;

	struct NoopDBRestore;
	impl DatabaseRestore for NoopDBRestore {
@@ -657,7 +655,7 @@
	let snapshot_params = ServiceParams {
		engine: spec.engine.clone(),
		genesis_block: spec.genesis_block(),
-		db_config: Default::default(),
+		restoration_db_handler: restoration_db_handler(Default::default()),
		pruning: Algorithm::Archive,
		channel: service.channel(),
		snapshot_root: dir,
@@ -709,8 +707,7 @@
			block_hash: H256::default(),
		},
		pruning: Algorithm::Archive,
-		db_path: tempdir.path().to_owned(),
-		db_config: &db_config,
+		db: restoration_db_handler(db_config).open(&tempdir.path().to_owned()).unwrap(),
		writer: None,
		genesis: &gb,
		guard: Guard::benign(),
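On the snapshot side, ServiceParams now carries the handler instead of a DatabaseConfig, and the service calls open() on it each time a restoration begins (see the RestorationParams change above). A sketch of constructing the params, assuming the restoration_db_handler test helper introduced in ethcore/src/tests/helpers.rs below; spec, io_service, snapshot_dir and the remaining fields are set up as in the existing tests:

// Sketch; mirrors the updated tests in this file.
let snapshot_params = ServiceParams {
	engine: spec.engine.clone(),
	genesis_block: spec.genesis_block(),
	// Called by the service for every restoration attempt, so the snapshot
	// code never names a concrete database type.
	restoration_db_handler: restoration_db_handler(Default::default()),
	pruning: Algorithm::Archive,
	channel: io_service.channel(),
	snapshot_root: snapshot_dir.clone(),
	db_restore: db_restore.clone(),
};
let snapshot_service = Service::new(snapshot_params)?;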
diff --git a/ethcore/src/snapshot/tests/service.rs b/ethcore/src/snapshot/tests/service.rs
index 52b4b3cc979..4548741f399 100644
--- a/ethcore/src/snapshot/tests/service.rs
+++ b/ethcore/src/snapshot/tests/service.rs
@@ -24,7 +24,7 @@ use ids::BlockId;
use snapshot::service::{Service, ServiceParams};
use snapshot::{self, ManifestData, SnapshotService};
use spec::Spec;
-use tests::helpers::generate_dummy_client_with_spec_and_data;
+use tests::helpers::{generate_dummy_client_with_spec_and_data, restoration_db_handler};
use io::IoChannel;
use kvdb_rocksdb::{Database, DatabaseConfig};
@@ -65,7 +65,7 @@ fn restored_is_equivalent() {
	let service_params = ServiceParams {
		engine: spec.engine.clone(),
		genesis_block: spec.genesis_block(),
-		db_config: db_config,
+		restoration_db_handler: restoration_db_handler(db_config),
		pruning: ::journaldb::Algorithm::Archive,
		channel: IoChannel::disconnected(),
		snapshot_root: path,
@@ -107,7 +107,7 @@ fn guards_delete_folders() {
	let service_params = ServiceParams {
		engine: spec.engine.clone(),
		genesis_block: spec.genesis_block(),
-		db_config: DatabaseConfig::with_columns(::db::NUM_COLUMNS),
+		restoration_db_handler: restoration_db_handler(DatabaseConfig::with_columns(::db::NUM_COLUMNS)),
		pruning: ::journaldb::Algorithm::Archive,
		channel: IoChannel::disconnected(),
		snapshot_root: tempdir.path().to_owned(),
diff --git a/ethcore/src/tests/helpers.rs b/ethcore/src/tests/helpers.rs
index fd37164dcbb..14dcf3630b0 100644
--- a/ethcore/src/tests/helpers.rs
+++ b/ethcore/src/tests/helpers.rs
@@ -33,8 +33,11 @@ use spec::Spec;
use state_db::StateDB;
use state::*;
use std::sync::Arc;
+use std::path::Path;
use transaction::{Action, Transaction, SignedTransaction};
use views::BlockView;
+use kvdb::{KeyValueDB, KeyValueDBHandler};
+use kvdb_rocksdb::{Database, DatabaseConfig};

pub fn create_test_block(header: &Header) -> Bytes {
	let mut rlp = RlpStream::new_list(3);
@@ -349,3 +352,19 @@ impl ChainNotify for TestNotify {
		self.messages.write().push(data);
	}
}
+
+pub fn restoration_db_handler(config: DatabaseConfig) -> Box<KeyValueDBHandler> {
+	use kvdb::Error;
+
+	struct RestorationDBHandler {
+		config: DatabaseConfig,
+	}
+
+	impl KeyValueDBHandler for RestorationDBHandler {
+		fn open(&self, db_path: &Path) -> Result<Arc<KeyValueDB>, Error> {
+			Ok(Arc::new(Database::open(&self.config, &db_path.to_string_lossy())?))
+		}
+	}
+
+	Box::new(RestorationDBHandler { config })
+}
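Because kvdb-rocksdb is now only a dev-dependency of ethcore, tests that need an on-disk database go through this helper. A sketch of such a test, assuming tempdir and the column constants already used in this module (the test name and key are illustrative):

// Sketch of a test using the helper above.
#[test]
fn opens_a_restoration_db_in_a_temp_dir() {
	use tempdir::TempDir;
	use kvdb_rocksdb::DatabaseConfig;

	let tempdir = TempDir::new("").unwrap();
	let handler = restoration_db_handler(DatabaseConfig::with_columns(::db::NUM_COLUMNS));

	// The handler can open any number of databases, one per restoration attempt.
	let db = handler.open(&tempdir.path().join("restoration")).unwrap();
	assert!(db.get(None, b"absent-key").unwrap().is_none());
}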
diff --git a/parity/blockchain.rs b/parity/blockchain.rs
index 9dab9069d78..26eae459765 100644
--- a/parity/blockchain.rs
+++ b/parity/blockchain.rs
@@ -35,7 +35,7 @@ use cache::CacheConfig;
use informant::{Informant, FullNodeInformantData, MillisecondDuration};
use kvdb_rocksdb::{Database, DatabaseConfig};
use params::{SpecType, Pruning, Switch, tracing_switch_to_bool, fatdb_switch_to_bool};
-use helpers::{to_client_config, execute_upgrades};
+use helpers::{to_client_config, execute_upgrades, open_client_db, client_db_config, restoration_db_handler, compaction_profile};
use dir::Directories;
use user_defaults::UserDefaults;
use fdlimit;
@@ -186,7 +186,7 @@ fn execute_import_light(cmd: ImportBlockchain) -> Result<(), String> {
	let client_path = db_dirs.client_path(algorithm);

	// execute upgrades
-	let compaction = cmd.compaction.compaction_profile(db_dirs.db_root_path().as_path());
+	let compaction = compaction_profile(&cmd.compaction, db_dirs.db_root_path().as_path());
	execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, compaction)?;

	// create dirs used by parity
@@ -352,7 +352,7 @@ fn execute_import(cmd: ImportBlockchain) -> Result<(), String> {
	let snapshot_path = db_dirs.snapshot_path();

	// execute upgrades
-	execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.db_root_path().as_path()))?;
+	execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, compaction_profile(&cmd.compaction, db_dirs.db_root_path().as_path()))?;

	// create dirs used by parity
	cmd.dirs.create_dirs(false, false, false)?;
@@ -376,12 +376,17 @@ fn execute_import(cmd: ImportBlockchain) -> Result<(), String> {
	client_config.queue.verifier_settings = cmd.verifier_settings;

+	let client_db_config = client_db_config(&client_path, &client_config);
+	let client_db = open_client_db(&client_path, &client_db_config)?;
+	let restoration_db_handler = restoration_db_handler(client_db_config);
+
	// build client
	let service = ClientService::start(
		client_config,
		&spec,
-		&client_path,
+		client_db,
		&snapshot_path,
+		restoration_db_handler,
		&cmd.dirs.ipc_path(),
		Arc::new(Miner::with_spec(&spec)),
	).map_err(|e| format!("Client service error: {:?}", e))?;
@@ -537,7 +542,7 @@ fn start_client(
	let snapshot_path = db_dirs.snapshot_path();

	// execute upgrades
-	execute_upgrades(&dirs.base, &db_dirs, algorithm, compaction.compaction_profile(db_dirs.db_root_path().as_path()))?;
+	execute_upgrades(&dirs.base, &db_dirs, algorithm, compaction_profile(&compaction, db_dirs.db_root_path().as_path()))?;

	// create dirs used by parity
	dirs.create_dirs(false, false, false)?;
@@ -559,11 +564,16 @@ fn start_client(
		true,
	);

+	let client_db_config = client_db_config(&client_path, &client_config);
+	let client_db = open_client_db(&client_path, &client_db_config)?;
+	let restoration_db_handler = restoration_db_handler(client_db_config);
+
	let service = ClientService::start(
		client_config,
		&spec,
-		&client_path,
+		client_db,
		&snapshot_path,
+		restoration_db_handler,
		&dirs.ipc_path(),
		Arc::new(Miner::with_spec(&spec)),
	).map_err(|e| format!("Client service error: {:?}", e))?;
diff --git a/parity/export_hardcoded_sync.rs b/parity/export_hardcoded_sync.rs
index 7a48c0592e9..accb6159fa3 100644
--- a/parity/export_hardcoded_sync.rs
+++ b/parity/export_hardcoded_sync.rs
@@ -25,7 +25,7 @@ use light::client::fetch::Unavailable as UnavailableDataFetcher;
use light::Cache as LightDataCache;

use params::{SpecType, Pruning};
-use helpers::execute_upgrades;
+use helpers::{execute_upgrades, compaction_profile};
use dir::Directories;
use cache::CacheConfig;
use user_defaults::UserDefaults;
@@ -66,7 +66,7 @@ pub fn execute(cmd: ExportHsyncCmd) -> Result<String, String> {
	// select pruning algorithm
	let algorithm = cmd.pruning.to_algorithm(&user_defaults);

-	let compaction = cmd.compaction.compaction_profile(db_dirs.db_root_path().as_path());
+	let compaction = compaction_profile(&cmd.compaction, db_dirs.db_root_path().as_path());

	// execute upgrades
	execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, compaction.clone())?;
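With DatabaseCompactionProfile::compaction_profile removed from ethcore (see the ethcore/src/client/config.rs hunk earlier), the string-to-profile parsing stays in ethcore while the conversion to kvdb_rocksdb::CompactionProfile happens CLI-side via the new helper. A sketch of both halves together, assuming the compaction_profile helper defined in parity/helpers.rs below; the wrapper function is illustrative:

// Sketch: parse the CLI flag in ethcore's terms, convert to RocksDB terms in the CLI.
use std::path::Path;
use ethcore::client::DatabaseCompactionProfile;
use kvdb_rocksdb::CompactionProfile;
use helpers::compaction_profile;

fn compaction_for_flag(flag: &str, db_path: &Path) -> Result<CompactionProfile, String> {
	// e.g. "auto" | "ssd" | "hdd", as accepted by the FromStr impl kept in ethcore.
	let profile: DatabaseCompactionProfile = flag.parse()?;
	// The rocksdb-specific conversion now lives in parity/helpers.rs.
	Ok(compaction_profile(&profile, db_path))
}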
diff --git a/parity/helpers.rs b/parity/helpers.rs
index 959dddba92d..1c6c70cdae9 100644
--- a/parity/helpers.rs
+++ b/parity/helpers.rs
@@ -18,11 +18,13 @@ use std::io;
use std::io::{Write, BufReader, BufRead};
use std::time::Duration;
use std::fs::File;
+use std::sync::Arc;
+use std::path::Path;
use ethereum_types::{U256, clean_0x, Address};
-use kvdb_rocksdb::CompactionProfile;
use journaldb::Algorithm;
use ethcore::client::{Mode, BlockId, VMType, DatabaseCompactionProfile, ClientConfig, VerifierType};
use ethcore::miner::{PendingSet, GasLimit};
+use ethcore::db::NUM_COLUMNS;
use miner::transaction_queue::PrioritizationStrategy;
use cache::CacheConfig;
use dir::DatabaseDirectories;
@@ -30,6 +32,8 @@ use dir::helpers::replace_home;
use upgrade::{upgrade, upgrade_data_paths};
use migration::migrate;
use ethsync::{validate_node_url, self};
+use kvdb::{KeyValueDB, KeyValueDBHandler};
+use kvdb_rocksdb::{Database, DatabaseConfig, CompactionProfile};
use path;

pub fn to_duration(s: &str) -> Result<Duration, String> {
@@ -255,6 +259,52 @@ pub fn to_client_config(
	client_config
}

+// We assume the client DB and the restoration DB share a similar config.
+pub fn client_db_config(client_path: &Path, client_config: &ClientConfig) -> DatabaseConfig {
+	let mut client_db_config = DatabaseConfig::with_columns(NUM_COLUMNS);
+
+	client_db_config.memory_budget = client_config.db_cache_size;
+	client_db_config.compaction = compaction_profile(&client_config.db_compaction, &client_path);
+	client_db_config.wal = client_config.db_wal;
+
+	client_db_config
+}
+
+pub fn open_client_db(client_path: &Path, client_db_config: &DatabaseConfig) -> Result<Arc<KeyValueDB>, String> {
+	let client_db = Arc::new(Database::open(
+		&client_db_config,
+		&client_path.to_str().expect("DB path could not be converted to string.")
+	).map_err(|e| format!("Client service database error: {:?}", e))?);
+
+	Ok(client_db)
+}
+
+pub fn restoration_db_handler(client_db_config: DatabaseConfig) -> Box<KeyValueDBHandler> {
+	use kvdb::Error;
+
+	struct RestorationDBHandler {
+		config: DatabaseConfig,
+	}
+
+	impl KeyValueDBHandler for RestorationDBHandler {
+		fn open(&self, db_path: &Path) -> Result<Arc<KeyValueDB>, Error> {
+			Ok(Arc::new(Database::open(&self.config, &db_path.to_string_lossy())?))
+		}
+	}
+
+	Box::new(RestorationDBHandler {
+		config: client_db_config,
+	})
+}
+
+pub fn compaction_profile(profile: &DatabaseCompactionProfile, db_path: &Path) -> CompactionProfile {
+	match profile {
+		&DatabaseCompactionProfile::Auto => CompactionProfile::auto(db_path),
+		&DatabaseCompactionProfile::SSD => CompactionProfile::ssd(),
+		&DatabaseCompactionProfile::HDD => CompactionProfile::hdd(),
+	}
+}
+
pub fn execute_upgrades(
	base_path: &str,
	dirs: &DatabaseDirectories,
diff --git a/parity/run.rs b/parity/run.rs
index 7baeaaed340..816ddd4dd30 100644
--- a/parity/run.rs
+++ b/parity/run.rs
@@ -55,7 +55,7 @@ use params::{
	SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras, Switch,
	tracing_switch_to_bool, fatdb_switch_to_bool, mode_switch_to_bool
};
-use helpers::{to_client_config, execute_upgrades, passwords_from_files};
+use helpers::{to_client_config, execute_upgrades, passwords_from_files, client_db_config, open_client_db, restoration_db_handler, compaction_profile};
use upgrade::upgrade_key_location;
use dir::{Directories, DatabaseDirectories};
use cache::CacheConfig;
@@ -206,7 +206,7 @@ fn execute_light_impl(cmd: RunCmd, logger: Arc) -> Result(cmd: RunCmd, logger: Arc, on_client_rq:
	let snapshot_path = db_dirs.snapshot_path();

	// execute upgrades
-	execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.db_root_path().as_path()))?;
+	execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, compaction_profile(&cmd.compaction, db_dirs.db_root_path().as_path()))?;

	// create dirs used by parity
	cmd.dirs.create_dirs(cmd.dapps_conf.enabled, cmd.ui_conf.enabled, cmd.secretstore_conf.enabled)?;
@@ -609,12 +609,17 @@ fn execute_impl(cmd: RunCmd, logger: Arc, on_client_rq:
	// set network path.
	net_conf.net_config_path = Some(db_dirs.network_path().to_string_lossy().into_owned());

+	let client_db_config = client_db_config(&client_path, &client_config);
+	let client_db = open_client_db(&client_path, &client_db_config)?;
+	let restoration_db_handler = restoration_db_handler(client_db_config);
+
	// create client service.
	let service = ClientService::start(
		client_config,
		&spec,
-		&client_path,
+		client_db,
		&snapshot_path,
+		restoration_db_handler,
		&cmd.dirs.ipc_path(),
		miner.clone(),
	).map_err(|e| format!("Client service error: {:?}", e))?;
diff --git a/parity/snapshot.rs b/parity/snapshot.rs
index bda5059f7e5..ae7a0698b74 100644
--- a/parity/snapshot.rs
+++ b/parity/snapshot.rs
@@ -31,7 +31,7 @@ use ethcore_service::ClientService;
use cache::CacheConfig;
use params::{SpecType, Pruning, Switch, tracing_switch_to_bool, fatdb_switch_to_bool};
-use helpers::{to_client_config, execute_upgrades};
+use helpers::{to_client_config, execute_upgrades, client_db_config, open_client_db, restoration_db_handler, compaction_profile};
use dir::Directories;
use user_defaults::UserDefaults;
use fdlimit;
@@ -162,7 +162,7 @@ impl SnapshotCommand {
		let snapshot_path = db_dirs.snapshot_path();

		// execute upgrades
-		execute_upgrades(&self.dirs.base, &db_dirs, algorithm, self.compaction.compaction_profile(db_dirs.db_root_path().as_path()))?;
+		execute_upgrades(&self.dirs.base, &db_dirs, algorithm, compaction_profile(&self.compaction, db_dirs.db_root_path().as_path()))?;

		// prepare client config
		let client_config = to_client_config(
@@ -181,11 +181,16 @@ impl SnapshotCommand {
			true
		);

+		let client_db_config = client_db_config(&client_path, &client_config);
+		let client_db = open_client_db(&client_path, &client_db_config)?;
+		let restoration_db_handler = restoration_db_handler(client_db_config);
+
		let service = ClientService::start(
			client_config,
			&spec,
-			&client_path,
+			client_db,
			&snapshot_path,
+			restoration_db_handler,
			&self.dirs.ipc_path(),
			Arc::new(Miner::with_spec(&spec))
		).map_err(|e| format!("Client service error: {:?}", e))?;
diff --git a/util/kvdb/src/lib.rs b/util/kvdb/src/lib.rs
index 6a8412c0e8c..9ed1038bffd 100644
--- a/util/kvdb/src/lib.rs
+++ b/util/kvdb/src/lib.rs
@@ -22,6 +22,8 @@ extern crate elastic_array;
extern crate ethcore_bytes as bytes;

use std::io;
+use std::path::Path;
+use std::sync::Arc;
use elastic_array::{ElasticArray128, ElasticArray32};
use bytes::Bytes;

@@ -176,3 +178,10 @@ pub trait KeyValueDB: Sync + Send {
	/// Attempt to replace this database with a new one located at the given path.
	fn restore(&self, new_db: &str) -> Result<()>;
}
+
+/// Generic key-value database handler. This trait contains one function, `open`.
+/// When called, it opens a database with a predefined config.
+pub trait KeyValueDBHandler: Send + Sync {
+	/// Open the predefined key-value database.
+	fn open(&self, path: &Path) -> Result<Arc<KeyValueDB>>;
+}
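For consumers, the boxed handler plus the object-safe KeyValueDB trait is all that is needed; no RocksDB type crosses the boundary. A minimal sketch of driving an injected handler, assuming this revision's kvdb API (DBTransaction, optional column indices) and any KeyValueDBHandler implementation such as those above; the function and keys are illustrative:

// Sketch: generic use of an injected handler; nothing here names RocksDB.
use std::path::Path;
use std::sync::Arc;
use kvdb::{DBTransaction, Error, KeyValueDB, KeyValueDBHandler};

fn open_and_mark(handler: &KeyValueDBHandler, dir: &Path) -> Result<Arc<KeyValueDB>, Error> {
	// Each restoration attempt gets a fresh database under its own directory.
	let db = handler.open(&dir.join("restoration"))?;

	// Read and write through the object-safe KeyValueDB interface only.
	let mut tx = DBTransaction::new();
	tx.put(None, b"restoration-marker", b"in-progress");
	db.write(tx)?;
	assert!(db.get(None, b"restoration-marker")?.is_some());

	Ok(db)
}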