From d70c4485160ae33cb767e75e583271aae90a17f9 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Wed, 11 Apr 2018 17:48:40 +0800 Subject: [PATCH 01/13] secret_store: remove kvdb_rocksdb dependency --- parity/main.rs | 1 + parity/secretstore.rs | 5 ++- secret_store/Cargo.toml | 2 +- secret_store/src/key_storage.rs | 76 ++++++++++++--------------------- secret_store/src/lib.rs | 9 ++-- secret_store/src/types/all.rs | 2 - 6 files changed, 38 insertions(+), 57 deletions(-) diff --git a/parity/main.rs b/parity/main.rs index 7179996bf58..7c91297abe5 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -127,6 +127,7 @@ mod upgrade; mod url; mod user_defaults; mod whisper; +mod db; #[cfg(feature="stratum")] mod stratum; diff --git a/parity/secretstore.rs b/parity/secretstore.rs index 9c130a59a92..168a9b3fcf1 100644 --- a/parity/secretstore.rs +++ b/parity/secretstore.rs @@ -117,6 +117,7 @@ mod server { use ethcore_secretstore; use ethkey::KeyPair; use ansi_term::Colour::Red; + use db; use super::{Configuration, Dependencies, NodeSecretKey, ContractAddress}; fn into_service_contract_address(address: ContractAddress) -> ethcore_secretstore::ContractAddress { @@ -173,7 +174,6 @@ mod server { service_contract_srv_retr_address: conf.service_contract_srv_retr_address.map(into_service_contract_address), service_contract_doc_store_address: conf.service_contract_doc_store_address.map(into_service_contract_address), service_contract_doc_sretr_address: conf.service_contract_doc_sretr_address.map(into_service_contract_address), - data_path: conf.data_path.clone(), acl_check_enabled: conf.acl_check_enabled, cluster_config: ethcore_secretstore::ClusterConfiguration { threads: 4, @@ -193,7 +193,8 @@ mod server { cconf.cluster_config.nodes.insert(self_secret.public().clone(), cconf.cluster_config.listener_address.clone()); - let key_server = ethcore_secretstore::start(deps.client, deps.sync, deps.miner, self_secret, cconf) + let db = db::open_secretstore_db(&conf.data_path)?; + let key_server = ethcore_secretstore::start(deps.client, deps.sync, deps.miner, self_secret, cconf, db) .map_err(|e| format!("Error starting KeyServer {}: {}", key_server_name, e))?; Ok(KeyServer { diff --git a/secret_store/Cargo.toml b/secret_store/Cargo.toml index 91889d7012e..fee832d069c 100644 --- a/secret_store/Cargo.toml +++ b/secret_store/Cargo.toml @@ -31,7 +31,6 @@ ethcore-sync = { path = "../ethcore/sync" } ethcore-transaction = { path = "../ethcore/transaction" } ethereum-types = "0.3" kvdb = { path = "../util/kvdb" } -kvdb-rocksdb = { path = "../util/kvdb-rocksdb" } keccak-hash = { path = "../util/hash" } ethkey = { path = "../ethkey" } lazy_static = "1.0" @@ -41,3 +40,4 @@ ethabi-contract = "5.0" [dev-dependencies] tempdir = "0.3" +kvdb-rocksdb = { path = "../util/kvdb-rocksdb" } diff --git a/secret_store/src/key_storage.rs b/secret_store/src/key_storage.rs index 1d4b968c07c..fee73c2ae1a 100644 --- a/secret_store/src/key_storage.rs +++ b/secret_store/src/key_storage.rs @@ -14,14 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use std::path::PathBuf; use std::collections::BTreeMap; +use std::sync::Arc; use serde_json; use tiny_keccak::Keccak; use ethereum_types::{H256, Address}; use ethkey::{Secret, Public, public_to_address}; -use kvdb_rocksdb::{Database, DatabaseIterator}; -use types::all::{Error, ServiceConfiguration, ServerKeyId, NodeId}; +use kvdb::KeyValueDB; +use types::all::{Error, ServerKeyId, NodeId}; use serialization::{SerializablePublic, SerializableSecret, SerializableH256, SerializableAddress}; /// Key of version value. @@ -82,17 +82,17 @@ pub trait KeyStorage: Send + Sync { /// Persistent document encryption keys storage pub struct PersistentKeyStorage { - db: Database, + db: Arc, } /// Persistent document encryption keys storage iterator pub struct PersistentKeyStorageIterator<'a> { - iter: Option>, + iter: Box, Box<[u8]>)> + 'a>, } /// V0 of encrypted key share, as it is stored by key storage on the single key server. #[derive(Serialize, Deserialize)] -struct SerializableDocumentKeyShareV0 { +pub struct SerializableDocumentKeyShareV0 { /// Decryption threshold (at least threshold + 1 nodes are required to decrypt data). pub threshold: usize, /// Nodes ids numbers. @@ -172,12 +172,7 @@ type SerializableDocumentKeyShareVersionV3 = SerializableDocumentKeyShareVersion impl PersistentKeyStorage { /// Create new persistent document encryption keys storage - pub fn new(config: &ServiceConfiguration) -> Result { - let mut db_path = PathBuf::from(&config.data_path); - db_path.push("db"); - let db_path = db_path.to_str().ok_or_else(|| Error::Database("Invalid secretstore path".to_owned()))?; - - let db = Database::open_default(&db_path)?; + pub fn new(db: Arc) -> Result { let db = upgrade_db(db)?; Ok(PersistentKeyStorage { @@ -186,14 +181,14 @@ impl PersistentKeyStorage { } } -fn upgrade_db(db: Database) -> Result { +fn upgrade_db(db: Arc) -> Result, Error> { let version = db.get(None, DB_META_KEY_VERSION)?; let version = version.and_then(|v| v.get(0).cloned()).unwrap_or(0); match version { 0 => { let mut batch = db.transaction(); batch.put(None, DB_META_KEY_VERSION, &[CURRENT_VERSION]); - for (db_key, db_value) in db.iter(None).into_iter().flat_map(|inner| inner).filter(|&(ref k, _)| **k != *DB_META_KEY_VERSION) { + for (db_key, db_value) in db.iter(None).into_iter().filter(|&(ref k, _)| **k != *DB_META_KEY_VERSION) { let v0_key = serde_json::from_slice::(&db_value).map_err(|e| Error::Database(e.to_string()))?; let current_key = CurrentSerializableDocumentKeyShare { // author is used in separate generation + encrypt sessions. 
@@ -218,7 +213,7 @@ fn upgrade_db(db: Database) -> Result { 1 => { let mut batch = db.transaction(); batch.put(None, DB_META_KEY_VERSION, &[CURRENT_VERSION]); - for (db_key, db_value) in db.iter(None).into_iter().flat_map(|inner| inner).filter(|&(ref k, _)| **k != *DB_META_KEY_VERSION) { + for (db_key, db_value) in db.iter(None).into_iter().filter(|&(ref k, _)| **k != *DB_META_KEY_VERSION) { let v1_key = serde_json::from_slice::(&db_value).map_err(|e| Error::Database(e.to_string()))?; let current_key = CurrentSerializableDocumentKeyShare { author: public_to_address(&v1_key.author).into(), // added in v1 + changed in v3 @@ -241,7 +236,7 @@ fn upgrade_db(db: Database) -> Result { 2 => { let mut batch = db.transaction(); batch.put(None, DB_META_KEY_VERSION, &[CURRENT_VERSION]); - for (db_key, db_value) in db.iter(None).into_iter().flat_map(|inner| inner).filter(|&(ref k, _)| **k != *DB_META_KEY_VERSION) { + for (db_key, db_value) in db.iter(None).into_iter().filter(|&(ref k, _)| **k != *DB_META_KEY_VERSION) { let v2_key = serde_json::from_slice::(&db_value).map_err(|e| Error::Database(e.to_string()))?; let current_key = CurrentSerializableDocumentKeyShare { author: public_to_address(&v2_key.author).into(), // changed in v3 @@ -319,11 +314,10 @@ impl<'a> Iterator for PersistentKeyStorageIterator<'a> { type Item = (ServerKeyId, DocumentKeyShare); fn next(&mut self) -> Option<(ServerKeyId, DocumentKeyShare)> { - self.iter.as_mut() - .and_then(|iter| iter.next() - .and_then(|(db_key, db_val)| serde_json::from_slice::(&db_val) - .ok() - .map(|key| ((*db_key).into(), key.into())))) + self.iter.as_mut().next() + .and_then(|(db_key, db_val)| serde_json::from_slice::(&db_val) + .ok() + .map(|key| ((*db_key).into(), key.into()))) } } @@ -417,14 +411,15 @@ impl From for DocumentKeyShare { pub mod tests { extern crate tempdir; - use std::collections::{BTreeMap, HashMap}; + use std::collections::HashMap; + use std::sync::Arc; use parking_lot::RwLock; use serde_json; use self::tempdir::TempDir; use ethereum_types::{Address, H256}; use ethkey::{Random, Generator, Public, Secret, public_to_address}; use kvdb_rocksdb::Database; - use types::all::{Error, NodeAddress, ServiceConfiguration, ClusterConfiguration, ServerKeyId}; + use types::all::{Error, ServerKeyId}; use super::{DB_META_KEY_VERSION, CURRENT_VERSION, KeyStorage, PersistentKeyStorage, DocumentKeyShare, DocumentKeyShareVersion, CurrentSerializableDocumentKeyShare, upgrade_db, SerializableDocumentKeyShareV0, SerializableDocumentKeyShareV1, SerializableDocumentKeyShareV2, SerializableDocumentKeyShareVersionV2}; @@ -472,27 +467,6 @@ pub mod tests { #[test] fn persistent_key_storage() { let tempdir = TempDir::new("").unwrap(); - let config = ServiceConfiguration { - listener_address: None, - service_contract_address: None, - service_contract_srv_gen_address: None, - service_contract_srv_retr_address: None, - service_contract_doc_store_address: None, - service_contract_doc_sretr_address: None, - acl_check_enabled: true, - data_path: tempdir.path().display().to_string(), - cluster_config: ClusterConfiguration { - threads: 1, - listener_address: NodeAddress { - address: "0.0.0.0".to_owned(), - port: 8083, - }, - nodes: BTreeMap::new(), - allow_connecting_to_higher_nodes: false, - admin_public: None, - auto_migrate_enabled: false, - }, - }; let key1 = ServerKeyId::from(1); let value1 = DocumentKeyShare { @@ -526,7 +500,9 @@ pub mod tests { }; let key3 = ServerKeyId::from(3); - let key_storage = PersistentKeyStorage::new(&config).unwrap(); + let db = 
Database::open_default(&tempdir.path().display().to_string()).unwrap(); + + let key_storage = PersistentKeyStorage::new(Arc::new(db)).unwrap(); key_storage.insert(key1.clone(), value1.clone()).unwrap(); key_storage.insert(key2.clone(), value2.clone()).unwrap(); assert_eq!(key_storage.get(&key1), Ok(Some(value1.clone()))); @@ -534,7 +510,9 @@ pub mod tests { assert_eq!(key_storage.get(&key3), Ok(None)); drop(key_storage); - let key_storage = PersistentKeyStorage::new(&config).unwrap(); + let db = Database::open_default(&tempdir.path().display().to_string()).unwrap(); + + let key_storage = PersistentKeyStorage::new(Arc::new(db)).unwrap(); assert_eq!(key_storage.get(&key1), Ok(Some(value1))); assert_eq!(key_storage.get(&key2), Ok(Some(value2))); assert_eq!(key_storage.get(&key3), Ok(None)); @@ -563,7 +541,7 @@ pub mod tests { } // upgrade database - let db = upgrade_db(db).unwrap(); + let db = upgrade_db(Arc::new(db)).unwrap(); // check upgrade assert_eq!(db.get(None, DB_META_KEY_VERSION).unwrap().unwrap()[0], CURRENT_VERSION); @@ -606,7 +584,7 @@ pub mod tests { } // upgrade database - let db = upgrade_db(db).unwrap(); + let db = upgrade_db(Arc::new(db)).unwrap(); // check upgrade assert_eq!(db.get(None, DB_META_KEY_VERSION).unwrap().unwrap()[0], CURRENT_VERSION); @@ -654,7 +632,7 @@ pub mod tests { } // upgrade database - let db = upgrade_db(db).unwrap(); + let db = upgrade_db(Arc::new(db)).unwrap(); // check upgrade assert_eq!(db.get(None, DB_META_KEY_VERSION).unwrap().unwrap()[0], CURRENT_VERSION); diff --git a/secret_store/src/lib.rs b/secret_store/src/lib.rs index dc45f1af369..8e1278e425b 100644 --- a/secret_store/src/lib.rs +++ b/secret_store/src/lib.rs @@ -28,7 +28,6 @@ extern crate futures_cpupool; extern crate hyper; extern crate keccak_hash as hash; extern crate kvdb; -extern crate kvdb_rocksdb; extern crate parking_lot; extern crate rustc_hex; extern crate serde; @@ -54,6 +53,9 @@ extern crate lazy_static; #[macro_use] extern crate log; +#[cfg(test)] +extern crate kvdb_rocksdb; + mod key_server_cluster; mod types; mod helpers; @@ -69,6 +71,7 @@ mod listener; mod trusted_client; use std::sync::Arc; +use kvdb::KeyValueDB; use ethcore::client::Client; use ethcore::miner::Miner; use sync::SyncProvider; @@ -79,7 +82,7 @@ pub use traits::{NodeKeyPair, KeyServer}; pub use self::node_key_pair::{PlainNodeKeyPair, KeyStoreNodeKeyPair}; /// Start new key server instance -pub fn start(client: Arc, sync: Arc, miner: Arc, self_key_pair: Arc, config: ServiceConfiguration) -> Result, Error> { +pub fn start(client: Arc, sync: Arc, miner: Arc, self_key_pair: Arc, config: ServiceConfiguration, db: Arc) -> Result, Error> { let trusted_client = trusted_client::TrustedClient::new(self_key_pair.clone(), client.clone(), sync, miner); let acl_storage: Arc = if config.acl_check_enabled { acl_storage::OnChainAclStorage::new(trusted_client.clone())? 
@@ -89,7 +92,7 @@ pub fn start(client: Arc, sync: Arc, miner: Arc, se let key_server_set = key_server_set::OnChainKeyServerSet::new(trusted_client.clone(), self_key_pair.clone(), config.cluster_config.auto_migrate_enabled, config.cluster_config.nodes.clone())?; - let key_storage = Arc::new(key_storage::PersistentKeyStorage::new(&config)?); + let key_storage = Arc::new(key_storage::PersistentKeyStorage::new(db)?); let key_server = Arc::new(key_server::KeyServerImpl::new(&config.cluster_config, key_server_set.clone(), self_key_pair.clone(), acl_storage.clone(), key_storage.clone())?); let cluster = key_server.cluster(); let key_server: Arc = key_server; diff --git a/secret_store/src/types/all.rs b/secret_store/src/types/all.rs index 5e28415b188..b9940268144 100644 --- a/secret_store/src/types/all.rs +++ b/secret_store/src/types/all.rs @@ -91,8 +91,6 @@ pub struct ServiceConfiguration { pub service_contract_doc_sretr_address: Option, /// Is ACL check enabled. If false, everyone has access to all keys. Useful for tests only. pub acl_check_enabled: bool, - /// Data directory path for secret store - pub data_path: String, /// Cluster configuration. pub cluster_config: ClusterConfiguration, } From 99047a5d853898e87bc874bd904c9a4c6c568912 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Wed, 11 Apr 2018 17:56:20 +0800 Subject: [PATCH 02/13] cli: init db mod for open dispatch --- parity/db/mod.rs | 20 ++++++++++++++++++++ parity/db/rocksdb.rs | 27 +++++++++++++++++++++++++++ 2 files changed, 47 insertions(+) create mode 100644 parity/db/mod.rs create mode 100644 parity/db/rocksdb.rs diff --git a/parity/db/mod.rs b/parity/db/mod.rs new file mode 100644 index 00000000000..96a21f4b215 --- /dev/null +++ b/parity/db/mod.rs @@ -0,0 +1,20 @@ +// Copyright 2015-2018 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +#[path="rocksdb.rs"] +mod impls; + +pub use self::impls::open_secretstore_db; diff --git a/parity/db/rocksdb.rs b/parity/db/rocksdb.rs new file mode 100644 index 00000000000..af8666776ea --- /dev/null +++ b/parity/db/rocksdb.rs @@ -0,0 +1,27 @@ +// Copyright 2015-2018 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +use std::sync::Arc; +use std::path::PathBuf; +use kvdb::KeyValueDB; +use kvdb_rocksdb::Database; + +pub fn open_secretstore_db(data_path: &str) -> Result, String> { + let mut db_path = PathBuf::from(data_path); + db_path.push("db"); + let db_path = db_path.to_str().ok_or_else(|| "Invalid secretstore path".to_string())?; + Ok(Arc::new(Database::open_default(&db_path).map_err(|e| format!("Error opening database: {:?}", e))?)) +} From d3f5915fcc1b32c6be1f9c4375fa7e54fc7bfe7f Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Wed, 11 Apr 2018 19:08:43 +0800 Subject: [PATCH 03/13] cli: move db, client_db, restoration_db, secretstore_db to a separate mod --- parity/blockchain.rs | 37 ++++--------- parity/db/mod.rs | 4 +- parity/db/rocksdb.rs | 27 --------- parity/db/rocksdb/helpers.rs | 22 ++++++++ parity/{ => db/rocksdb}/migration.rs | 7 ++- parity/db/rocksdb/mod.rs | 83 ++++++++++++++++++++++++++++ parity/export_hardcoded_sync.rs | 24 ++------ parity/helpers.rs | 55 +----------------- parity/main.rs | 1 - parity/run.rs | 31 +++-------- parity/snapshot.rs | 10 ++-- 11 files changed, 144 insertions(+), 157 deletions(-) delete mode 100644 parity/db/rocksdb.rs create mode 100644 parity/db/rocksdb/helpers.rs rename parity/{ => db/rocksdb}/migration.rs (96%) create mode 100644 parity/db/rocksdb/mod.rs diff --git a/parity/blockchain.rs b/parity/blockchain.rs index c2c6f2aa5cf..a19dec1829b 100644 --- a/parity/blockchain.rs +++ b/parity/blockchain.rs @@ -27,20 +27,19 @@ use bytes::ToPretty; use rlp::PayloadInfo; use ethcore::account_provider::AccountProvider; use ethcore::client::{Mode, DatabaseCompactionProfile, VMType, BlockImportError, Nonce, Balance, BlockChainClient, BlockId, BlockInfo, ImportBlock}; -use ethcore::db::NUM_COLUMNS; use ethcore::error::ImportError; use ethcore::miner::Miner; use ethcore::verification::queue::VerifierSettings; use ethcore_service::ClientService; use cache::CacheConfig; use informant::{Informant, FullNodeInformantData, MillisecondDuration}; -use kvdb_rocksdb::{Database, DatabaseConfig}; use params::{SpecType, Pruning, Switch, tracing_switch_to_bool, fatdb_switch_to_bool}; -use helpers::{to_client_config, execute_upgrades, open_client_db, client_db_config, restoration_db_handler, compaction_profile}; +use helpers::{to_client_config, execute_upgrades}; use dir::Directories; use user_defaults::UserDefaults; use fdlimit; use ethcore_private_tx; +use db; #[derive(Debug, PartialEq)] pub enum DataFormat { @@ -188,8 +187,7 @@ fn execute_import_light(cmd: ImportBlockchain) -> Result<(), String> { let client_path = db_dirs.client_path(algorithm); // execute upgrades - let compaction = compaction_profile(&cmd.compaction, db_dirs.db_root_path().as_path()); - execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, compaction)?; + execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, &cmd.compaction)?; // create dirs used by parity cmd.dirs.create_dirs(false, false, false)?; @@ -210,19 +208,8 @@ fn execute_import_light(cmd: ImportBlockchain) -> Result<(), String> { config.queue.verifier_settings = cmd.verifier_settings; // initialize database. - let db = { - let db_config = DatabaseConfig { - memory_budget: Some(cmd.cache_config.blockchain() as usize * 1024 * 1024), - compaction: compaction, - wal: cmd.wal, - .. DatabaseConfig::with_columns(NUM_COLUMNS) - }; - - Arc::new(Database::open( - &db_config, - &client_path.to_str().expect("DB path could not be converted to string.") - ).map_err(|e| format!("Failed to open database: {}", e))?) 
- }; + let db = db::open_db(&client_path.to_str().expect("DB path could not be converted to string."), + &cmd.cache_config, &cmd.compaction, cmd.wal)?; // TODO: could epoch signals be avilable at the end of the file? let fetch = ::light::client::fetch::unavailable(); @@ -354,7 +341,7 @@ fn execute_import(cmd: ImportBlockchain) -> Result<(), String> { let snapshot_path = db_dirs.snapshot_path(); // execute upgrades - execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, compaction_profile(&cmd.compaction, db_dirs.db_root_path().as_path()))?; + execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, &cmd.compaction)?; // create dirs used by parity cmd.dirs.create_dirs(false, false, false)?; @@ -378,9 +365,8 @@ fn execute_import(cmd: ImportBlockchain) -> Result<(), String> { client_config.queue.verifier_settings = cmd.verifier_settings; - let client_db_config = client_db_config(&client_path, &client_config); - let client_db = open_client_db(&client_path, &client_db_config)?; - let restoration_db_handler = restoration_db_handler(client_db_config); + let client_db = db::open_client_db(&client_path, &client_config)?; + let restoration_db_handler = db::restoration_db_handler(&client_path, &client_config); // build client let service = ClientService::start( @@ -547,7 +533,7 @@ fn start_client( let snapshot_path = db_dirs.snapshot_path(); // execute upgrades - execute_upgrades(&dirs.base, &db_dirs, algorithm, compaction_profile(&compaction, db_dirs.db_root_path().as_path()))?; + execute_upgrades(&dirs.base, &db_dirs, algorithm, &compaction)?; // create dirs used by parity dirs.create_dirs(false, false, false)?; @@ -569,9 +555,8 @@ fn start_client( true, ); - let client_db_config = client_db_config(&client_path, &client_config); - let client_db = open_client_db(&client_path, &client_db_config)?; - let restoration_db_handler = restoration_db_handler(client_db_config); + let client_db = db::open_client_db(&client_path, &client_config)?; + let restoration_db_handler = db::restoration_db_handler(&client_path, &client_config); let service = ClientService::start( client_config, diff --git a/parity/db/mod.rs b/parity/db/mod.rs index 96a21f4b215..b2b6e10356c 100644 --- a/parity/db/mod.rs +++ b/parity/db/mod.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -#[path="rocksdb.rs"] +#[path="rocksdb/mod.rs"] mod impls; -pub use self::impls::open_secretstore_db; +pub use self::impls::{open_db, open_client_db, open_secretstore_db, restoration_db_handler, migrate}; diff --git a/parity/db/rocksdb.rs b/parity/db/rocksdb.rs deleted file mode 100644 index af8666776ea..00000000000 --- a/parity/db/rocksdb.rs +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . 
- -use std::sync::Arc; -use std::path::PathBuf; -use kvdb::KeyValueDB; -use kvdb_rocksdb::Database; - -pub fn open_secretstore_db(data_path: &str) -> Result, String> { - let mut db_path = PathBuf::from(data_path); - db_path.push("db"); - let db_path = db_path.to_str().ok_or_else(|| "Invalid secretstore path".to_string())?; - Ok(Arc::new(Database::open_default(&db_path).map_err(|e| format!("Error opening database: {:?}", e))?)) -} diff --git a/parity/db/rocksdb/helpers.rs b/parity/db/rocksdb/helpers.rs new file mode 100644 index 00000000000..e58cc830bd7 --- /dev/null +++ b/parity/db/rocksdb/helpers.rs @@ -0,0 +1,22 @@ +use std::path::Path; +use ethcore::db::NUM_COLUMNS; +use ethcore::client::{ClientConfig, DatabaseCompactionProfile}; +use kvdb_rocksdb::{CompactionProfile, DatabaseConfig}; + +pub fn compaction_profile(profile: &DatabaseCompactionProfile, db_path: &Path) -> CompactionProfile { + match profile { + &DatabaseCompactionProfile::Auto => CompactionProfile::auto(db_path), + &DatabaseCompactionProfile::SSD => CompactionProfile::ssd(), + &DatabaseCompactionProfile::HDD => CompactionProfile::hdd(), + } +} + +pub fn client_db_config(client_path: &Path, client_config: &ClientConfig) -> DatabaseConfig { + let mut client_db_config = DatabaseConfig::with_columns(NUM_COLUMNS); + + client_db_config.memory_budget = client_config.db_cache_size; + client_db_config.compaction = compaction_profile(&client_config.db_compaction, &client_path); + client_db_config.wal = client_config.db_wal; + + client_db_config +} diff --git a/parity/migration.rs b/parity/db/rocksdb/migration.rs similarity index 96% rename from parity/migration.rs rename to parity/db/rocksdb/migration.rs index bd659cba015..38dfc206e82 100644 --- a/parity/migration.rs +++ b/parity/db/rocksdb/migration.rs @@ -20,8 +20,11 @@ use std::path::{Path, PathBuf}; use std::fmt::{Display, Formatter, Error as FmtError}; use migr::{self, Manager as MigrationManager, Config as MigrationConfig}; use kvdb_rocksdb::CompactionProfile; +use ethcore::client::DatabaseCompactionProfile; use migrations; +use super::helpers; + /// Database is assumed to be at default version, when no version file is found. const DEFAULT_VERSION: u32 = 5; /// Current version of database models. @@ -176,7 +179,9 @@ fn exists(path: &Path) -> bool { } /// Migrates the database. -pub fn migrate(path: &Path, compaction_profile: CompactionProfile) -> Result<(), Error> { +pub fn migrate(path: &Path, compaction_profile: &DatabaseCompactionProfile) -> Result<(), Error> { + let compaction_profile = helpers::compaction_profile(&compaction_profile, path); + // read version file. let version = current_version(path)?; diff --git a/parity/db/rocksdb/mod.rs b/parity/db/rocksdb/mod.rs new file mode 100644 index 00000000000..97f8e2a5a7d --- /dev/null +++ b/parity/db/rocksdb/mod.rs @@ -0,0 +1,83 @@ +// Copyright 2015-2018 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +use std::sync::Arc; +use std::path::{Path, PathBuf}; +use ethcore::db::NUM_COLUMNS; +use ethcore::client::{ClientConfig, DatabaseCompactionProfile}; +use kvdb::{KeyValueDB, KeyValueDBHandler}; +use kvdb_rocksdb::{Database, DatabaseConfig}; + +use cache::CacheConfig; + +mod migration; +mod helpers; + +pub use self::migration::migrate; + +// This function will only be used when compiling with secretstore feature. +#[allow(dead_code)] +pub fn open_secretstore_db(data_path: &str) -> Result, String> { + let mut db_path = PathBuf::from(data_path); + db_path.push("db"); + let db_path = db_path.to_str().ok_or_else(|| "Invalid secretstore path".to_string())?; + Ok(Arc::new(Database::open_default(&db_path).map_err(|e| format!("Error opening database: {:?}", e))?)) +} + +pub fn open_client_db(client_path: &Path, client_config: &ClientConfig) -> Result, String> { + let client_db_config = helpers::client_db_config(client_path, client_config); + + let client_db = Arc::new(Database::open( + &client_db_config, + &client_path.to_str().expect("DB path could not be converted to string.") + ).map_err(|e| format!("Client service database error: {:?}", e))?); + + Ok(client_db) +} + +pub fn restoration_db_handler(client_path: &Path, client_config: &ClientConfig) -> Box { + use kvdb::Error; + + let client_db_config = helpers::client_db_config(client_path, client_config); + + struct RestorationDBHandler { + config: DatabaseConfig, + } + + impl KeyValueDBHandler for RestorationDBHandler { + fn open(&self, db_path: &Path) -> Result, Error> { + Ok(Arc::new(Database::open(&self.config, &db_path.to_string_lossy())?)) + } + } + + Box::new(RestorationDBHandler { + config: client_db_config, + }) +} + +pub fn open_db(client_path: &str, cache_config: &CacheConfig, compaction: &DatabaseCompactionProfile, wal: bool) -> Result, String> { + let db_config = DatabaseConfig { + memory_budget: Some(cache_config.blockchain() as usize * 1024 * 1024), + compaction: helpers::compaction_profile(&compaction, &Path::new(client_path)), + wal: wal, + .. DatabaseConfig::with_columns(NUM_COLUMNS) + }; + + Ok(Arc::new(Database::open( + &db_config, + client_path + ).map_err(|e| format!("Failed to open database: {}", e))?)) +} diff --git a/parity/export_hardcoded_sync.rs b/parity/export_hardcoded_sync.rs index accb6159fa3..652bada98f6 100644 --- a/parity/export_hardcoded_sync.rs +++ b/parity/export_hardcoded_sync.rs @@ -18,17 +18,16 @@ use std::sync::Arc; use std::time::Duration; use ethcore::client::DatabaseCompactionProfile; -use ethcore::db::NUM_COLUMNS; use ethcore::spec::{SpecParams, OptimizeFor}; -use kvdb_rocksdb::{Database, DatabaseConfig}; use light::client::fetch::Unavailable as UnavailableDataFetcher; use light::Cache as LightDataCache; use params::{SpecType, Pruning}; -use helpers::{execute_upgrades, compaction_profile}; +use helpers::execute_upgrades; use dir::Directories; use cache::CacheConfig; use user_defaults::UserDefaults; +use db; // Number of minutes before a given gas price corpus should expire. // Light client only. 
@@ -66,10 +65,8 @@ pub fn execute(cmd: ExportHsyncCmd) -> Result { // select pruning algorithm let algorithm = cmd.pruning.to_algorithm(&user_defaults); - let compaction = compaction_profile(&cmd.compaction, db_dirs.db_root_path().as_path()); - // execute upgrades - execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, compaction.clone())?; + execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, &cmd.compaction)?; // create dirs used by parity cmd.dirs.create_dirs(false, false, false)?; @@ -90,19 +87,8 @@ pub fn execute(cmd: ExportHsyncCmd) -> Result { config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024; // initialize database. - let db = { - let db_config = DatabaseConfig { - memory_budget: Some(cmd.cache_config.blockchain() as usize * 1024 * 1024), - compaction: compaction, - wal: cmd.wal, - .. DatabaseConfig::with_columns(NUM_COLUMNS) - }; - - Arc::new(Database::open( - &db_config, - &db_dirs.client_path(algorithm).to_str().expect("DB path could not be converted to string.") - ).map_err(|e| format!("Error opening database: {}", e))?) - }; + let db = db::open_db(&db_dirs.client_path(algorithm).to_str().expect("DB path could not be converted to string."), + &cmd.cache_config, &cmd.compaction, cmd.wal)?; let service = light_client::Service::start(config, &spec, UnavailableDataFetcher, db, cache) .map_err(|e| format!("Error starting light client: {}", e))?; diff --git a/parity/helpers.rs b/parity/helpers.rs index 0494be40d8e..8fe6bf26658 100644 --- a/parity/helpers.rs +++ b/parity/helpers.rs @@ -18,22 +18,17 @@ use std::io; use std::io::{Write, BufReader, BufRead}; use std::time::Duration; use std::fs::File; -use std::sync::Arc; -use std::path::Path; use ethereum_types::{U256, clean_0x, Address}; use journaldb::Algorithm; use ethcore::client::{Mode, BlockId, VMType, DatabaseCompactionProfile, ClientConfig, VerifierType}; use ethcore::miner::{PendingSet, GasLimit}; -use ethcore::db::NUM_COLUMNS; use miner::transaction_queue::PrioritizationStrategy; use cache::CacheConfig; use dir::DatabaseDirectories; use dir::helpers::replace_home; use upgrade::{upgrade, upgrade_data_paths}; -use migration::migrate; use sync::{validate_node_url, self}; -use kvdb::{KeyValueDB, KeyValueDBHandler}; -use kvdb_rocksdb::{Database, DatabaseConfig, CompactionProfile}; +use db::migrate; use path; pub fn to_duration(s: &str) -> Result { @@ -259,57 +254,11 @@ pub fn to_client_config( client_config } -// We assume client db has similar config as restoration db. 
-pub fn client_db_config(client_path: &Path, client_config: &ClientConfig) -> DatabaseConfig { - let mut client_db_config = DatabaseConfig::with_columns(NUM_COLUMNS); - - client_db_config.memory_budget = client_config.db_cache_size; - client_db_config.compaction = compaction_profile(&client_config.db_compaction, &client_path); - client_db_config.wal = client_config.db_wal; - - client_db_config -} - -pub fn open_client_db(client_path: &Path, client_db_config: &DatabaseConfig) -> Result, String> { - let client_db = Arc::new(Database::open( - &client_db_config, - &client_path.to_str().expect("DB path could not be converted to string.") - ).map_err(|e| format!("Client service database error: {:?}", e))?); - - Ok(client_db) -} - -pub fn restoration_db_handler(client_db_config: DatabaseConfig) -> Box { - use kvdb::Error; - - struct RestorationDBHandler { - config: DatabaseConfig, - } - - impl KeyValueDBHandler for RestorationDBHandler { - fn open(&self, db_path: &Path) -> Result, Error> { - Ok(Arc::new(Database::open(&self.config, &db_path.to_string_lossy())?)) - } - } - - Box::new(RestorationDBHandler { - config: client_db_config, - }) -} - -pub fn compaction_profile(profile: &DatabaseCompactionProfile, db_path: &Path) -> CompactionProfile { - match profile { - &DatabaseCompactionProfile::Auto => CompactionProfile::auto(db_path), - &DatabaseCompactionProfile::SSD => CompactionProfile::ssd(), - &DatabaseCompactionProfile::HDD => CompactionProfile::hdd(), - } -} - pub fn execute_upgrades( base_path: &str, dirs: &DatabaseDirectories, pruning: Algorithm, - compaction_profile: CompactionProfile + compaction_profile: &DatabaseCompactionProfile ) -> Result<(), String> { upgrade_data_paths(base_path, dirs, pruning); diff --git a/parity/main.rs b/parity/main.rs index 7c91297abe5..e395285ba4c 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -113,7 +113,6 @@ mod deprecated; mod helpers; mod informant; mod light_helpers; -mod migration; mod modules; mod params; mod presale; diff --git a/parity/run.rs b/parity/run.rs index 4cc7b29b04d..6d99e9e2f1d 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -25,7 +25,6 @@ use ansi_term::{Colour, Style}; use ctrlc::CtrlC; use ethcore::account_provider::{AccountProvider, AccountProviderSettings}; use ethcore::client::{Client, Mode, DatabaseCompactionProfile, VMType, BlockChainClient}; -use ethcore::db::NUM_COLUMNS; use ethcore::ethstore::ethkey; use ethcore::miner::{Miner, MinerService, MinerOptions}; use ethcore::miner::{StratumOptions, Stratum}; @@ -41,7 +40,6 @@ use futures_cpupool::CpuPool; use hash_fetch::{self, fetch}; use informant::{Informant, LightNodeInformantData, FullNodeInformantData}; use journaldb::Algorithm; -use kvdb_rocksdb::{Database, DatabaseConfig}; use light::Cache as LightDataCache; use miner::external::ExternalMiner; use node_filter::NodeFilter; @@ -56,7 +54,7 @@ use params::{ SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras, Switch, tracing_switch_to_bool, fatdb_switch_to_bool, mode_switch_to_bool }; -use helpers::{to_client_config, execute_upgrades, passwords_from_files, client_db_config, open_client_db, restoration_db_handler, compaction_profile}; +use helpers::{to_client_config, execute_upgrades, passwords_from_files}; use upgrade::upgrade_key_location; use dir::{Directories, DatabaseDirectories}; use cache::CacheConfig; @@ -69,6 +67,7 @@ use rpc_apis; use secretstore; use signer; use url; +use db; // how often to take periodic snapshots. 
const SNAPSHOT_PERIOD: u64 = 5000; @@ -211,10 +210,8 @@ fn execute_light_impl(cmd: RunCmd, logger: Arc) -> Result) -> Result(cmd: RunCmd, logger: Arc, on_client_rq: let snapshot_path = db_dirs.snapshot_path(); // execute upgrades - execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, compaction_profile(&cmd.compaction, db_dirs.db_root_path().as_path()))?; + execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, &cmd.compaction)?; // create dirs used by parity cmd.dirs.create_dirs(cmd.dapps_conf.enabled, cmd.ui_conf.enabled, cmd.secretstore_conf.enabled)?; @@ -622,9 +608,8 @@ fn execute_impl(cmd: RunCmd, logger: Arc, on_client_rq: // set network path. net_conf.net_config_path = Some(db_dirs.network_path().to_string_lossy().into_owned()); - let client_db_config = client_db_config(&client_path, &client_config); - let client_db = open_client_db(&client_path, &client_db_config)?; - let restoration_db_handler = restoration_db_handler(client_db_config); + let client_db = db::open_client_db(&client_path, &client_config)?; + let restoration_db_handler = db::restoration_db_handler(&client_path, &client_config); // create client service. let service = ClientService::start( diff --git a/parity/snapshot.rs b/parity/snapshot.rs index 90967328083..d4e27cc1c8d 100644 --- a/parity/snapshot.rs +++ b/parity/snapshot.rs @@ -32,11 +32,12 @@ use ethcore_service::ClientService; use cache::CacheConfig; use params::{SpecType, Pruning, Switch, tracing_switch_to_bool, fatdb_switch_to_bool}; -use helpers::{to_client_config, execute_upgrades, client_db_config, open_client_db, restoration_db_handler, compaction_profile}; +use helpers::{to_client_config, execute_upgrades}; use dir::Directories; use user_defaults::UserDefaults; use fdlimit; use ethcore_private_tx; +use db; /// Kinds of snapshot commands. 
#[derive(Debug, PartialEq, Clone, Copy)] @@ -164,7 +165,7 @@ impl SnapshotCommand { let snapshot_path = db_dirs.snapshot_path(); // execute upgrades - execute_upgrades(&self.dirs.base, &db_dirs, algorithm, compaction_profile(&self.compaction, db_dirs.db_root_path().as_path()))?; + execute_upgrades(&self.dirs.base, &db_dirs, algorithm, &self.compaction)?; // prepare client config let client_config = to_client_config( @@ -183,9 +184,8 @@ impl SnapshotCommand { true ); - let client_db_config = client_db_config(&client_path, &client_config); - let client_db = open_client_db(&client_path, &client_db_config)?; - let restoration_db_handler = restoration_db_handler(client_db_config); + let client_db = db::open_client_db(&client_path, &client_config)?; + let restoration_db_handler = db::restoration_db_handler(&client_path, &client_config); let service = ClientService::start( client_config, From 5ae790895e2e4f19f1ffd3a1922289386f641ac3 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Wed, 11 Apr 2018 19:18:21 +0800 Subject: [PATCH 04/13] migration: rename to migration-rocksdb and remove ethcore-migrations --- Cargo.lock | 12 +----- Cargo.toml | 3 +- ethcore/migrations/Cargo.toml | 7 ---- ethcore/migrations/src/lib.rs | 37 ------------------- parity/db/rocksdb/helpers.rs | 2 +- parity/db/rocksdb/migration.rs | 34 ++++++++++++----- parity/db/rocksdb/mod.rs | 5 ++- parity/main.rs | 3 -- .../Cargo.toml | 2 +- .../src/lib.rs | 0 .../tests/tests.rs | 0 11 files changed, 34 insertions(+), 71 deletions(-) delete mode 100644 ethcore/migrations/Cargo.toml delete mode 100644 ethcore/migrations/src/lib.rs rename util/{migration => migration-rocksdb}/Cargo.toml (91%) rename util/{migration => migration-rocksdb}/src/lib.rs (100%) rename util/{migration => migration-rocksdb}/tests/tests.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index dbc605dac5c..ad794e53cb6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -644,13 +644,6 @@ dependencies = [ "time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "ethcore-migrations" -version = "0.1.0" -dependencies = [ - "migration 0.1.0", -] - [[package]] name = "ethcore-miner" version = "1.11.0" @@ -1630,7 +1623,7 @@ dependencies = [ ] [[package]] -name = "migration" +name = "migration-rocksdb" version = "0.1.0" dependencies = [ "error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1932,7 +1925,6 @@ dependencies = [ "ethcore-io 1.11.0", "ethcore-light 1.11.0", "ethcore-logger 1.11.0", - "ethcore-migrations 0.1.0", "ethcore-miner 1.11.0", "ethcore-network 1.11.0", "ethcore-private-tx 1.0.0", @@ -1954,7 +1946,7 @@ dependencies = [ "kvdb 0.1.0", "kvdb-rocksdb 0.1.0", "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "migration 0.1.0", + "migration-rocksdb 0.1.0", "node-filter 1.11.0", "node-health 0.1.0", "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/Cargo.toml b/Cargo.toml index 09c00b3386b..dbd2d408c56 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -38,7 +38,6 @@ ethcore-bytes = { path = "util/bytes" } ethcore-io = { path = "util/io" } ethcore-light = { path = "ethcore/light" } ethcore-logger = { path = "logger" } -ethcore-migrations = { path = "ethcore/migrations" } ethcore-miner = { path = "miner" } ethcore-network = { path = "util/network" } ethcore-private-tx = { path = "ethcore/private-tx" } @@ -65,7 +64,7 @@ path = { path = "util/path" } dir = { path = "util/dir" } panic_hook = { path = "util/panic_hook" } keccak-hash = { path = "util/hash" } -migration 
= { path = "util/migration" } +migration-rocksdb = { path = "util/migration-rocksdb" } kvdb = { path = "util/kvdb" } kvdb-rocksdb = { path = "util/kvdb-rocksdb" } journaldb = { path = "util/journaldb" } diff --git a/ethcore/migrations/Cargo.toml b/ethcore/migrations/Cargo.toml deleted file mode 100644 index 561925be4c5..00000000000 --- a/ethcore/migrations/Cargo.toml +++ /dev/null @@ -1,7 +0,0 @@ -[package] -name = "ethcore-migrations" -version = "0.1.0" -authors = ["Parity Technologies "] - -[dependencies] -migration = { path = "../../util/migration" } diff --git a/ethcore/migrations/src/lib.rs b/ethcore/migrations/src/lib.rs deleted file mode 100644 index 429c39102cb..00000000000 --- a/ethcore/migrations/src/lib.rs +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Database migrations. - -extern crate migration; - -use migration::ChangeColumns; - -/// The migration from v10 to v11. -/// Adds a column for node info. -pub const TO_V11: ChangeColumns = ChangeColumns { - pre_columns: Some(6), - post_columns: Some(7), - version: 11, -}; - -/// The migration from v11 to v12. -/// Adds a column for light chain storage. -pub const TO_V12: ChangeColumns = ChangeColumns { - pre_columns: Some(7), - post_columns: Some(8), - version: 12, -}; diff --git a/parity/db/rocksdb/helpers.rs b/parity/db/rocksdb/helpers.rs index e58cc830bd7..a7bf1d2a634 100644 --- a/parity/db/rocksdb/helpers.rs +++ b/parity/db/rocksdb/helpers.rs @@ -1,7 +1,7 @@ use std::path::Path; use ethcore::db::NUM_COLUMNS; use ethcore::client::{ClientConfig, DatabaseCompactionProfile}; -use kvdb_rocksdb::{CompactionProfile, DatabaseConfig}; +use super::kvdb_rocksdb::{CompactionProfile, DatabaseConfig}; pub fn compaction_profile(profile: &DatabaseCompactionProfile, db_path: &Path) -> CompactionProfile { match profile { diff --git a/parity/db/rocksdb/migration.rs b/parity/db/rocksdb/migration.rs index 38dfc206e82..df6a4b5dc9c 100644 --- a/parity/db/rocksdb/migration.rs +++ b/parity/db/rocksdb/migration.rs @@ -18,13 +18,29 @@ use std::fs; use std::io::{Read, Write, Error as IoError, ErrorKind}; use std::path::{Path, PathBuf}; use std::fmt::{Display, Formatter, Error as FmtError}; -use migr::{self, Manager as MigrationManager, Config as MigrationConfig}; -use kvdb_rocksdb::CompactionProfile; +use super::migration_rocksdb::{self, Manager as MigrationManager, Config as MigrationConfig, ChangeColumns}; +use super::kvdb_rocksdb::CompactionProfile; use ethcore::client::DatabaseCompactionProfile; -use migrations; use super::helpers; +/// The migration from v10 to v11. +/// Adds a column for node info. +pub const TO_V11: ChangeColumns = ChangeColumns { + pre_columns: Some(6), + post_columns: Some(7), + version: 11, +}; + +/// The migration from v11 to v12. +/// Adds a column for light chain storage. 
+pub const TO_V12: ChangeColumns = ChangeColumns { + pre_columns: Some(7), + post_columns: Some(8), + version: 12, +}; + + /// Database is assumed to be at default version, when no version file is found. const DEFAULT_VERSION: u32 = 5; /// Current version of database models. @@ -46,7 +62,7 @@ pub enum Error { /// Migration is not possible. MigrationImpossible, /// Internal migration error. - Internal(migr::Error), + Internal(migration_rocksdb::Error), /// Migration was completed succesfully, /// but there was a problem with io. Io(IoError), @@ -72,10 +88,10 @@ impl From for Error { } } -impl From for Error { - fn from(err: migr::Error) -> Self { +impl From for Error { + fn from(err: migration_rocksdb::Error) -> Self { match err.into() { - migr::ErrorKind::Io(e) => Error::Io(e), + migration_rocksdb::ErrorKind::Io(e) => Error::Io(e), err => Error::Internal(err.into()), } } @@ -137,8 +153,8 @@ pub fn default_migration_settings(compaction_profile: &CompactionProfile) -> Mig /// Migrations on the consolidated database. fn consolidated_database_migrations(compaction_profile: &CompactionProfile) -> Result { let mut manager = MigrationManager::new(default_migration_settings(compaction_profile)); - manager.add_migration(migrations::TO_V11).map_err(|_| Error::MigrationImpossible)?; - manager.add_migration(migrations::TO_V12).map_err(|_| Error::MigrationImpossible)?; + manager.add_migration(TO_V11).map_err(|_| Error::MigrationImpossible)?; + manager.add_migration(TO_V12).map_err(|_| Error::MigrationImpossible)?; Ok(manager) } diff --git a/parity/db/rocksdb/mod.rs b/parity/db/rocksdb/mod.rs index 97f8e2a5a7d..aa230a71a4d 100644 --- a/parity/db/rocksdb/mod.rs +++ b/parity/db/rocksdb/mod.rs @@ -14,12 +14,15 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
+extern crate kvdb_rocksdb; +extern crate migration_rocksdb; + use std::sync::Arc; use std::path::{Path, PathBuf}; use ethcore::db::NUM_COLUMNS; use ethcore::client::{ClientConfig, DatabaseCompactionProfile}; use kvdb::{KeyValueDB, KeyValueDBHandler}; -use kvdb_rocksdb::{Database, DatabaseConfig}; +use self::kvdb_rocksdb::{Database, DatabaseConfig}; use cache::CacheConfig; diff --git a/parity/main.rs b/parity/main.rs index e395285ba4c..1a8ba688219 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -50,7 +50,6 @@ extern crate ethcore_bytes as bytes; extern crate ethcore_io as io; extern crate ethcore_light as light; extern crate ethcore_logger; -extern crate ethcore_migrations as migrations; extern crate ethcore_miner as miner; extern crate ethcore_network as network; extern crate ethcore_private_tx; @@ -60,8 +59,6 @@ extern crate ethcore_transaction as transaction; extern crate ethereum_types; extern crate ethkey; extern crate kvdb; -extern crate kvdb_rocksdb; -extern crate migration as migr; extern crate node_health; extern crate panic_hook; extern crate parity_hash_fetch as hash_fetch; diff --git a/util/migration/Cargo.toml b/util/migration-rocksdb/Cargo.toml similarity index 91% rename from util/migration/Cargo.toml rename to util/migration-rocksdb/Cargo.toml index d938822bacd..3f0b8e75204 100644 --- a/util/migration/Cargo.toml +++ b/util/migration-rocksdb/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "migration" +name = "migration-rocksdb" version = "0.1.0" authors = ["Parity Technologies "] diff --git a/util/migration/src/lib.rs b/util/migration-rocksdb/src/lib.rs similarity index 100% rename from util/migration/src/lib.rs rename to util/migration-rocksdb/src/lib.rs diff --git a/util/migration/tests/tests.rs b/util/migration-rocksdb/tests/tests.rs similarity index 100% rename from util/migration/tests/tests.rs rename to util/migration-rocksdb/tests/tests.rs From 19cca5327318e4e1549d8c75cffdccda41a1297f Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Wed, 11 Apr 2018 19:21:11 +0800 Subject: [PATCH 05/13] ethcore: re-move kvdb-rocksdb dep to test --- ethcore/Cargo.toml | 2 +- ethcore/src/lib.rs | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index 0ffb7034a35..5be67a4161a 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -53,7 +53,6 @@ rlp_compress = { path = "../util/rlp_compress" } rlp_derive = { path = "../util/rlp_derive" } kvdb = { path = "../util/kvdb" } kvdb-memorydb = { path = "../util/kvdb-memorydb" } -kvdb-rocksdb = { path = "../util/kvdb-rocksdb" } util-error = { path = "../util/error" } snappy = { git = "https://github.com/paritytech/rust-snappy" } stop-guard = { path = "../util/stop-guard" } @@ -74,6 +73,7 @@ journaldb = { path = "../util/journaldb" } [dev-dependencies] tempdir = "0.3" trie-standardmap = { path = "../util/trie-standardmap" } +kvdb-rocksdb = { path = "../util/kvdb-rocksdb" } [features] evm-debug = ["slow-blocks"] diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index c71a266f740..90fe5a15fcb 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -94,6 +94,7 @@ extern crate ansi_term; extern crate unexpected; extern crate kvdb; extern crate kvdb_memorydb; +#[cfg(test)] extern crate kvdb_rocksdb; extern crate util_error; extern crate snappy; From f486699b70b51fb8d65d6a618893f23516161d9c Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Wed, 11 Apr 2018 19:24:50 +0800 Subject: [PATCH 06/13] mark test_helpers as test only and fix migration mod naming --- ethcore/src/lib.rs | 1 + 
util/migration-rocksdb/tests/tests.rs | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index 90fe5a15fcb..2835539d080 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -151,6 +151,7 @@ pub mod spec; pub mod state; pub mod state_db; // Test helpers made public for usage outside ethcore +#[cfg(test)] pub mod test_helpers; pub mod trace; pub mod verification; diff --git a/util/migration-rocksdb/tests/tests.rs b/util/migration-rocksdb/tests/tests.rs index c1ff8228f87..85c48f12b67 100644 --- a/util/migration-rocksdb/tests/tests.rs +++ b/util/migration-rocksdb/tests/tests.rs @@ -22,7 +22,7 @@ extern crate macros; extern crate tempdir; extern crate kvdb_rocksdb; -extern crate migration; +extern crate migration_rocksdb as migration; use std::collections::BTreeMap; use std::path::{Path, PathBuf}; From 5d4b24b6a0a68ca920de1543d75e528f63a84bf9 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Wed, 11 Apr 2018 20:41:37 +0800 Subject: [PATCH 07/13] Move restoration_db_handler to test_helpers_internal --- ethcore/src/lib.rs | 3 ++- ethcore/src/snapshot/service.rs | 2 +- ethcore/src/snapshot/tests/service.rs | 3 ++- ethcore/src/test_helpers.rs | 20 -------------------- ethcore/src/test_helpers_internal.rs | 21 +++++++++++++++++++++ 5 files changed, 26 insertions(+), 23 deletions(-) create mode 100644 ethcore/src/test_helpers_internal.rs diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index 2835539d080..5817c380eba 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -151,7 +151,6 @@ pub mod spec; pub mod state; pub mod state_db; // Test helpers made public for usage outside ethcore -#[cfg(test)] pub mod test_helpers; pub mod trace; pub mod verification; @@ -172,6 +171,8 @@ mod tests; #[cfg(test)] #[cfg(feature="json-tests")] mod json_tests; +#[cfg(test)] +mod test_helpers_internal; pub use types::*; pub use executive::contract_address; diff --git a/ethcore/src/snapshot/service.rs b/ethcore/src/snapshot/service.rs index 7d5eea1eff4..9cfb2eb63f4 100644 --- a/ethcore/src/snapshot/service.rs +++ b/ethcore/src/snapshot/service.rs @@ -635,7 +635,7 @@ mod tests { use snapshot::{ManifestData, RestorationStatus, SnapshotService}; use super::*; use tempdir::TempDir; - use test_helpers::restoration_db_handler; + use test_helpers_internal::restoration_db_handler; struct NoopDBRestore; impl DatabaseRestore for NoopDBRestore { diff --git a/ethcore/src/snapshot/tests/service.rs b/ethcore/src/snapshot/tests/service.rs index 7e81b05796f..7fec1f74334 100644 --- a/ethcore/src/snapshot/tests/service.rs +++ b/ethcore/src/snapshot/tests/service.rs @@ -24,7 +24,8 @@ use ids::BlockId; use snapshot::service::{Service, ServiceParams}; use snapshot::{self, ManifestData, SnapshotService}; use spec::Spec; -use test_helpers::{generate_dummy_client_with_spec_and_data, restoration_db_handler}; +use test_helpers::generate_dummy_client_with_spec_and_data; +use test_helpers_internal::restoration_db_handler; use io::IoChannel; use kvdb_rocksdb::{Database, DatabaseConfig}; diff --git a/ethcore/src/test_helpers.rs b/ethcore/src/test_helpers.rs index 4c013dd251d..22a54909769 100644 --- a/ethcore/src/test_helpers.rs +++ b/ethcore/src/test_helpers.rs @@ -35,11 +35,8 @@ use spec::Spec; use state_db::StateDB; use state::*; use std::sync::Arc; -use std::path::Path; use transaction::{Action, Transaction, SignedTransaction}; use views::BlockView; -use kvdb::{KeyValueDB, KeyValueDBHandler}; -use kvdb_rocksdb::{Database, DatabaseConfig}; /// Creates test block with 
corresponding header pub fn create_test_block(header: &Header) -> Bytes { @@ -402,20 +399,3 @@ impl ChainNotify for TestNotify { self.messages.write().push(data); } } - -/// Creates new instance of KeyValueDBHandler -pub fn restoration_db_handler(config: DatabaseConfig) -> Box { - use kvdb::Error; - - struct RestorationDBHandler { - config: DatabaseConfig, - } - - impl KeyValueDBHandler for RestorationDBHandler { - fn open(&self, db_path: &Path) -> Result, Error> { - Ok(Arc::new(Database::open(&self.config, &db_path.to_string_lossy())?)) - } - } - - Box::new(RestorationDBHandler { config }) -} diff --git a/ethcore/src/test_helpers_internal.rs b/ethcore/src/test_helpers_internal.rs new file mode 100644 index 00000000000..130c40a5c55 --- /dev/null +++ b/ethcore/src/test_helpers_internal.rs @@ -0,0 +1,21 @@ +use std::path::Path; +use std::sync::Arc; +use kvdb::{KeyValueDB, KeyValueDBHandler}; +use kvdb_rocksdb::{Database, DatabaseConfig}; + +/// Creates new instance of KeyValueDBHandler +pub fn restoration_db_handler(config: DatabaseConfig) -> Box { + use kvdb::Error; + + struct RestorationDBHandler { + config: DatabaseConfig, + } + + impl KeyValueDBHandler for RestorationDBHandler { + fn open(&self, db_path: &Path) -> Result, Error> { + Ok(Arc::new(Database::open(&self.config, &db_path.to_string_lossy())?)) + } + } + + Box::new(RestorationDBHandler { config }) +} From 3221ab80a8cc71f1fdc85239954027376ae843b8 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Wed, 11 Apr 2018 22:22:12 +0800 Subject: [PATCH 08/13] Fix missing preambles in test_helpers_internal and rocksdb/helpers --- ethcore/src/test_helpers_internal.rs | 18 ++++++++++++++++++ parity/db/rocksdb/helpers.rs | 16 ++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/ethcore/src/test_helpers_internal.rs b/ethcore/src/test_helpers_internal.rs index 130c40a5c55..ef98c7c85b5 100644 --- a/ethcore/src/test_helpers_internal.rs +++ b/ethcore/src/test_helpers_internal.rs @@ -1,3 +1,21 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Internal helpers for client tests + use std::path::Path; use std::sync::Arc; use kvdb::{KeyValueDB, KeyValueDBHandler}; diff --git a/parity/db/rocksdb/helpers.rs b/parity/db/rocksdb/helpers.rs index a7bf1d2a634..ca685d3e86d 100644 --- a/parity/db/rocksdb/helpers.rs +++ b/parity/db/rocksdb/helpers.rs @@ -1,3 +1,19 @@ +// Copyright 2015-2018 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + use std::path::Path; use ethcore::db::NUM_COLUMNS; use ethcore::client::{ClientConfig, DatabaseCompactionProfile}; From 1ce5f04d288ac316b6e0983f57b1f85840d8737f Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Wed, 11 Apr 2018 22:23:40 +0800 Subject: [PATCH 09/13] Move test crates downward --- ethcore/src/lib.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index 5817c380eba..c130f7a87f3 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -94,8 +94,6 @@ extern crate ansi_term; extern crate unexpected; extern crate kvdb; extern crate kvdb_memorydb; -#[cfg(test)] -extern crate kvdb_rocksdb; extern crate util_error; extern crate snappy; @@ -132,6 +130,9 @@ extern crate evm; pub extern crate ethstore; +#[cfg(test)] +extern crate kvdb_rocksdb; + pub mod account_provider; pub mod block; pub mod client; From 8da60bfc5ae2bb301a01b81942121f63682e0376 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Wed, 11 Apr 2018 22:27:29 +0800 Subject: [PATCH 10/13] Fix missing docs --- parity/db/mod.rs | 2 ++ parity/db/rocksdb/mod.rs | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/parity/db/mod.rs b/parity/db/mod.rs index b2b6e10356c..8acb7d0b264 100644 --- a/parity/db/mod.rs +++ b/parity/db/mod.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +//! Database-related operations. + #[path="rocksdb/mod.rs"] mod impls; diff --git a/parity/db/rocksdb/mod.rs b/parity/db/rocksdb/mod.rs index aa230a71a4d..3280ed928ad 100644 --- a/parity/db/rocksdb/mod.rs +++ b/parity/db/rocksdb/mod.rs @@ -31,6 +31,7 @@ mod helpers; pub use self::migration::migrate; +/// Open a secret store DB using the given secret store data path. The DB path is one level beneath the data path. // This function will only be used when compiling with secretstore feature. #[allow(dead_code)] pub fn open_secretstore_db(data_path: &str) -> Result, String> { @@ -40,6 +41,7 @@ pub fn open_secretstore_db(data_path: &str) -> Result, String> { Ok(Arc::new(Database::open_default(&db_path).map_err(|e| format!("Error opening database: {:?}", e))?)) } +/// Open a new client DB. pub fn open_client_db(client_path: &Path, client_config: &ClientConfig) -> Result, String> { let client_db_config = helpers::client_db_config(client_path, client_config); @@ -51,6 +53,7 @@ pub fn open_client_db(client_path: &Path, client_config: &ClientConfig) -> Resul Ok(client_db) } +/// Create a restoration db handler using the config generated by `client_path` and `client_config`. pub fn restoration_db_handler(client_path: &Path, client_config: &ClientConfig) -> Box { use kvdb::Error; @@ -71,6 +74,7 @@ pub fn restoration_db_handler(client_path: &Path, client_config: &ClientConfig) }) } +/// Open a new main DB. 
pub fn open_db(client_path: &str, cache_config: &CacheConfig, compaction: &DatabaseCompactionProfile, wal: bool) -> Result, String> { let db_config = DatabaseConfig { memory_budget: Some(cache_config.blockchain() as usize * 1024 * 1024), From 64f176a9645db2c314aa2d27034af7ce7e1db1ec Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Wed, 11 Apr 2018 22:29:49 +0800 Subject: [PATCH 11/13] cli, db::open_db: move each argument to a separate line --- parity/blockchain.rs | 4 +++- parity/export_hardcoded_sync.rs | 4 +++- parity/run.rs | 4 +++- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/parity/blockchain.rs b/parity/blockchain.rs index a19dec1829b..a1681a89647 100644 --- a/parity/blockchain.rs +++ b/parity/blockchain.rs @@ -209,7 +209,9 @@ fn execute_import_light(cmd: ImportBlockchain) -> Result<(), String> { // initialize database. let db = db::open_db(&client_path.to_str().expect("DB path could not be converted to string."), - &cmd.cache_config, &cmd.compaction, cmd.wal)?; + &cmd.cache_config, + &cmd.compaction, + cmd.wal)?; // TODO: could epoch signals be avilable at the end of the file? let fetch = ::light::client::fetch::unavailable(); diff --git a/parity/export_hardcoded_sync.rs b/parity/export_hardcoded_sync.rs index 652bada98f6..3aa2b561490 100644 --- a/parity/export_hardcoded_sync.rs +++ b/parity/export_hardcoded_sync.rs @@ -88,7 +88,9 @@ pub fn execute(cmd: ExportHsyncCmd) -> Result { // initialize database. let db = db::open_db(&db_dirs.client_path(algorithm).to_str().expect("DB path could not be converted to string."), - &cmd.cache_config, &cmd.compaction, cmd.wal)?; + &cmd.cache_config, + &cmd.compaction, + cmd.wal)?; let service = light_client::Service::start(config, &spec, UnavailableDataFetcher, db, cache) .map_err(|e| format!("Error starting light client: {}", e))?; diff --git a/parity/run.rs b/parity/run.rs index 6d99e9e2f1d..2c123bca6d0 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -248,7 +248,9 @@ fn execute_light_impl(cmd: RunCmd, logger: Arc) -> Result Date: Wed, 11 Apr 2018 22:35:00 +0800 Subject: [PATCH 12/13] Use featuregate instead of dead code for `open_secretstore_db` --- parity/db/mod.rs | 5 ++++- parity/db/rocksdb/mod.rs | 3 +-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/parity/db/mod.rs b/parity/db/mod.rs index 8acb7d0b264..39f43fd145e 100644 --- a/parity/db/mod.rs +++ b/parity/db/mod.rs @@ -19,4 +19,7 @@ #[path="rocksdb/mod.rs"] mod impls; -pub use self::impls::{open_db, open_client_db, open_secretstore_db, restoration_db_handler, migrate}; +pub use self::impls::{open_db, open_client_db, restoration_db_handler, migrate}; + +#[cfg(feature = "secretstore")] +pub use self::impls::open_secretstore_db; diff --git a/parity/db/rocksdb/mod.rs b/parity/db/rocksdb/mod.rs index 3280ed928ad..5eaf263000c 100644 --- a/parity/db/rocksdb/mod.rs +++ b/parity/db/rocksdb/mod.rs @@ -32,8 +32,7 @@ mod helpers; pub use self::migration::migrate; /// Open a secret store DB using the given secret store data path. The DB path is one level beneath the data path. -// This function will only be used when compiling with secretstore feature. 
-#[allow(dead_code)]
+#[cfg(feature = "secretstore")]
 pub fn open_secretstore_db(data_path: &str) -> Result<Arc<KeyValueDB>, String> {
 	let mut db_path = PathBuf::from(data_path);
 	db_path.push("db");

From 17cf6441afde85211f4de27ccfefc6adfbb4bb50 Mon Sep 17 00:00:00 2001
From: Wei Tang
Date: Wed, 11 Apr 2018 22:38:53 +0800
Subject: [PATCH 13/13] Move pathbuf import to open_secretstore_db

Because it's only used there behind a feature gate

---
 parity/db/rocksdb/mod.rs | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/parity/db/rocksdb/mod.rs b/parity/db/rocksdb/mod.rs
index 5eaf263000c..7bfd28f6502 100644
--- a/parity/db/rocksdb/mod.rs
+++ b/parity/db/rocksdb/mod.rs
@@ -18,7 +18,7 @@ extern crate kvdb_rocksdb;
 extern crate migration_rocksdb;
 
 use std::sync::Arc;
-use std::path::{Path, PathBuf};
+use std::path::Path;
 use ethcore::db::NUM_COLUMNS;
 use ethcore::client::{ClientConfig, DatabaseCompactionProfile};
 use kvdb::{KeyValueDB, KeyValueDBHandler};
@@ -34,6 +34,8 @@ pub use self::migration::migrate;
 /// Open a secret store DB using the given secret store data path. The DB path is one level beneath the data path.
 #[cfg(feature = "secretstore")]
 pub fn open_secretstore_db(data_path: &str) -> Result<Arc<KeyValueDB>, String> {
+	use std::path::PathBuf;
+
 	let mut db_path = PathBuf::from(data_path);
 	db_path.push("db");
 	let db_path = db_path.to_str().ok_or_else(|| "Invalid secretstore path".to_string())?;
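
A note on the pattern this series applies throughout: library code (secret_store's `PersistentKeyStorage::new(db)`, `ethcore_secretstore::start(..., db)`, `ClientService::start(...)`) now receives either an already-opened `Arc<KeyValueDB>` or, where the database path is only known later (snapshot restoration), a `KeyValueDBHandler` factory, while the choice of RocksDB as the backend is made only in the binary's parity/db/rocksdb module. Below is a minimal, self-contained sketch of that inversion under those assumptions; the names `KeyValueStore`, `StoreHandler`, `MemoryStore` and `KeyStorage` are stand-ins made up for the example and are not the real `kvdb`/`kvdb-rocksdb` APIs.

use std::collections::HashMap;
use std::path::Path;
use std::sync::{Arc, Mutex};

/// Stand-in for `kvdb::KeyValueDB`: the object-safe storage interface consumers use.
trait KeyValueStore: Send + Sync {
    fn get(&self, key: &[u8]) -> Option<Vec<u8>>;
    fn put(&self, key: &[u8], value: &[u8]);
}

/// Stand-in for `kvdb::KeyValueDBHandler`: a factory that can open a store at a
/// path chosen later (e.g. a freshly created restoration directory).
trait StoreHandler: Send + Sync {
    fn open(&self, path: &Path) -> Result<Arc<dyn KeyValueStore>, String>;
}

/// Trivial in-memory backend so the example runs without RocksDB.
#[derive(Default)]
struct MemoryStore {
    map: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
}

impl KeyValueStore for MemoryStore {
    fn get(&self, key: &[u8]) -> Option<Vec<u8>> {
        self.map.lock().unwrap().get(key).cloned()
    }
    fn put(&self, key: &[u8], value: &[u8]) {
        self.map.lock().unwrap().insert(key.to_vec(), value.to_vec());
    }
}

struct MemoryStoreHandler;

impl StoreHandler for MemoryStoreHandler {
    fn open(&self, _path: &Path) -> Result<Arc<dyn KeyValueStore>, String> {
        // A RocksDB-backed handler would open the database at `_path` here.
        Ok(Arc::new(MemoryStore::default()))
    }
}

/// A consumer analogous to `PersistentKeyStorage::new(db)`: it only sees the trait object.
struct KeyStorage {
    db: Arc<dyn KeyValueStore>,
}

impl KeyStorage {
    fn new(db: Arc<dyn KeyValueStore>) -> KeyStorage {
        KeyStorage { db }
    }
    fn insert(&self, key: &[u8], value: &[u8]) {
        self.db.put(key, value);
    }
    fn get(&self, key: &[u8]) -> Option<Vec<u8>> {
        self.db.get(key)
    }
}

fn main() {
    // The binary picks the backend; the library code above never names it.
    let handler: Box<dyn StoreHandler> = Box::new(MemoryStoreHandler);
    let db = handler.open(Path::new("/tmp/secretstore/db")).expect("in-memory open cannot fail");

    let storage = KeyStorage::new(db);
    storage.insert(b"key1", b"share1");
    assert_eq!(storage.get(b"key1"), Some(b"share1".to_vec()));
    println!("stored and read back a key share through the trait object");
}

Swapping in an in-memory handler like this is what lets the library crates build and test without linking RocksDB, which is also why the series moves `kvdb-rocksdb` into the `[dev-dependencies]` of `ethcore` and `secret_store`.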