From 6e0364bf64a8d8c5f40c39a74ba4b431b2b139ff Mon Sep 17 00:00:00 2001 From: Eval EXEC Date: Sat, 25 May 2024 01:28:37 +0800 Subject: [PATCH] Reorder COLUMNs key by block_number Signed-off-by: Eval EXEC --- Cargo.lock | 4 + block-filter/src/filter.rs | 13 +- chain/src/chain.rs | 26 +- chain/src/tests/uncle.rs | 7 +- db-migration/src/lib.rs | 20 +- db-migration/src/tests.rs | 12 +- db-schema/Cargo.toml | 1 + db-schema/src/lib.rs | 304 +++++++++++++--- rpc/src/module/chain.rs | 14 +- shared/src/shared.rs | 21 +- shared/src/shared_builder.rs | 6 +- store/Cargo.toml | 1 + store/src/cell.rs | 6 +- store/src/db.rs | 13 +- store/src/snapshot.rs | 1 + store/src/store.rs | 343 ++++++++++++------ store/src/tests/db.rs | 7 +- store/src/transaction.rs | 223 ++++++++---- store/src/write_batch.rs | 82 +++-- .../get_block_filter_check_points_process.rs | 14 +- .../filter/get_block_filter_hashes_process.rs | 25 +- sync/src/filter/get_block_filters_process.rs | 5 +- sync/src/types/mod.rs | 8 +- util/app-config/src/lib.rs | 2 +- util/gen-types/schemas/extensions.mol | 1 + util/gen-types/src/extension/shortcut.rs | 9 +- util/gen-types/src/generated/extensions.rs | 69 ++-- util/indexer-sync/src/lib.rs | 14 +- .../src/components/get_blocks_proof.rs | 2 +- .../src/components/get_transactions_proof.rs | 5 +- util/migrate/src/migrate.rs | 2 +- .../src/migrations/add_block_filter_hash.rs | 20 +- .../src/migrations/add_extra_data_hash.rs | 10 +- .../src/migrations/add_number_hash_mapping.rs | 6 +- util/migrate/src/migrations/cell.rs | 25 +- .../migrations/set_2019_block_cycle_zero.rs | 5 +- .../migrate/src/migrations/table_to_struct.rs | 41 ++- util/migrate/src/tests.rs | 38 +- util/reward-calculator/src/lib.rs | 18 +- util/reward-calculator/src/tests.rs | 2 +- util/types/src/core/extras.rs | 1 + util/types/src/core/views.rs | 11 +- .../src/contextual_block_verifier.rs | 8 +- 43 files changed, 975 insertions(+), 470 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 128d1ef557a..2078341daca 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -862,6 +862,9 @@ dependencies = [ [[package]] name = "ckb-db-schema" version = "0.117.0-pre" +dependencies = [ + "ckb-types", +] [[package]] name = "ckb-error" @@ -1579,6 +1582,7 @@ dependencies = [ "ckb-traits", "ckb-types", "ckb-util", + "log", "lru", "tempfile", ] diff --git a/block-filter/src/filter.rs b/block-filter/src/filter.rs index 3c8a2175578..720c189a53d 100644 --- a/block-filter/src/filter.rs +++ b/block-filter/src/filter.rs @@ -124,7 +124,8 @@ impl BlockFilter { header.hash() ); let db = self.shared.store(); - if db.get_block_filter_hash(&header.hash()).is_some() { + let num_hash = header.num_hash(); + if db.get_block_filter_hash(num_hash.clone()).is_some() { debug!( "Filter data for block {:#x} already exists. 
Skip building.", header.hash() @@ -134,11 +135,11 @@ impl BlockFilter { let parent_block_filter_hash = if header.is_genesis() { Byte32::zero() } else { - db.get_block_filter_hash(&header.parent_hash()) + db.get_block_filter_hash(header.parent_num_hash()) .expect("parent block filter data stored") }; - let transactions = db.get_block_body(&header.hash()); + let transactions = db.get_block_body_by_num_hash(num_hash.clone()); let transactions_size: usize = transactions.iter().map(|tx| tx.data().total_size()).sum(); let provider = WrappedChainDB::new(db); let (filter_data, missing_out_points) = build_filter_data(provider, &transactions); @@ -151,11 +152,7 @@ impl BlockFilter { } let db_transaction = db.begin_transaction(); db_transaction - .insert_block_filter( - &header.hash(), - &filter_data.pack(), - &parent_block_filter_hash, - ) + .insert_block_filter(&num_hash, &filter_data.pack(), &parent_block_filter_hash) .expect("insert_block_filter should be ok"); db_transaction.commit().expect("commit should be ok"); debug!("Inserted filter data for block: {}, hash: {:#x}, filter data size: {}, transactions size: {}", header.number(), header.hash(), filter_data.len(), transactions_size); diff --git a/chain/src/chain.rs b/chain/src/chain.rs index c1915ed48e8..401676f5555 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -25,7 +25,7 @@ use ckb_types::{ }, packed::{Byte32, ProposalShortId}, utilities::merkle_mountain_range::ChainRootMMR, - U256, + BlockNumberAndHash, U256, }; use ckb_verification::cache::Completed; use ckb_verification::{BlockVerifier, InvalidParentError, NonContextualBlockTxsVerifier}; @@ -480,7 +480,7 @@ impl ChainService { } total_difficulty = cannon_total_difficulty.clone(); } else { - db_txn.insert_block_ext(&block.header().hash(), &ext)?; + db_txn.insert_block_ext(block.header().num_hash(), &ext)?; } db_txn.commit()?; @@ -804,7 +804,7 @@ impl ChainService { self.insert_ok_ext( &txn, - &b.header().hash(), + b.header().num_hash(), ext.clone(), Some(&cache_entries), Some(txs_sizes), @@ -822,24 +822,28 @@ impl ChainService { Err(err) => { self.print_error(b, &err); found_error = Some(err); - self.insert_failure_ext(&txn, &b.header().hash(), ext.clone())?; + self.insert_failure_ext( + &txn, + b.header().num_hash(), + ext.clone(), + )?; } } } Err(err) => { found_error = Some(err); - self.insert_failure_ext(&txn, &b.header().hash(), ext.clone())?; + self.insert_failure_ext(&txn, b.header().num_hash(), ext.clone())?; } } } else { - self.insert_failure_ext(&txn, &b.header().hash(), ext.clone())?; + self.insert_failure_ext(&txn, b.header().num_hash(), ext.clone())?; } } else { txn.attach_block(b)?; attach_block_cell(&txn, b)?; mmr.push(b.digest()) .map_err(|e| InternalErrorKind::MMR.other(e))?; - self.insert_ok_ext(&txn, &b.header().hash(), ext.clone(), None, None)?; + self.insert_ok_ext(&txn, b.header().num_hash(), ext.clone(), None, None)?; } } @@ -877,7 +881,7 @@ impl ChainService { fn insert_ok_ext( &self, txn: &StoreTransaction, - hash: &Byte32, + num_hash: BlockNumberAndHash, mut ext: BlockExt, cache_entries: Option<&[Completed]>, txs_sizes: Option>, @@ -892,17 +896,17 @@ impl ChainService { ext.cycles = Some(cycles); } ext.txs_sizes = txs_sizes; - txn.insert_block_ext(hash, &ext) + txn.insert_block_ext(num_hash, &ext) } fn insert_failure_ext( &self, txn: &StoreTransaction, - hash: &Byte32, + num_hash: BlockNumberAndHash, mut ext: BlockExt, ) -> Result<(), Error> { ext.verified = Some(false); - txn.insert_block_ext(hash, &ext) + txn.insert_block_ext(num_hash, &ext) } fn 
monitor_block_txs_verified( diff --git a/chain/src/tests/uncle.rs b/chain/src/tests/uncle.rs index 3d8d4da0a01..da6c30c97aa 100644 --- a/chain/src/tests/uncle.rs +++ b/chain/src/tests/uncle.rs @@ -29,7 +29,10 @@ fn test_get_block_body_after_inserting() { chain_service .process_block(Arc::new(blk.clone()), Switch::DISABLE_ALL) .unwrap(); - let len = shared.snapshot().get_block_body(&blk.hash()).len(); + let len = shared + .snapshot() + .get_block_body(blk.number(), &blk.hash()) + .len(); assert_eq!(len, 1, "[fork1] snapshot.get_block_body({})", blk.hash(),); } for blk in fork2.blocks() { @@ -40,7 +43,7 @@ fn test_get_block_body_after_inserting() { assert!(snapshot.get_block_header(&blk.hash()).is_some()); assert!(snapshot.get_block_uncles(&blk.hash()).is_some()); assert!(snapshot.get_block_proposal_txs_ids(&blk.hash()).is_some()); - let len = snapshot.get_block_body(&blk.hash()).len(); + let len = snapshot.get_block_body(blk.number(), &blk.hash()).len(); assert_eq!(len, 1, "[fork2] snapshot.get_block_body({})", blk.hash(),); } } diff --git a/db-migration/src/lib.rs b/db-migration/src/lib.rs index fa2ba321dc1..ad65dd99fb0 100644 --- a/db-migration/src/lib.rs +++ b/db-migration/src/lib.rs @@ -3,7 +3,7 @@ use ckb_channel::select; use ckb_channel::unbounded; use ckb_channel::Receiver; use ckb_db::{ReadOnlyDB, RocksDB}; -use ckb_db_schema::{COLUMN_META, META_TIP_HEADER_KEY, MIGRATION_VERSION_KEY}; +use ckb_db_schema::COLUMN_META; use ckb_error::{Error, InternalErrorKind}; use ckb_logger::{debug, error, info}; use ckb_stop_handler::register_thread; @@ -79,7 +79,7 @@ impl MigrationWorker { pb }; if let Ok(db) = task.migrate(self.db.clone(), Arc::new(pb)) { - db.put_default(MIGRATION_VERSION_KEY, task.version()) + db.put_default(COLUMN_META::MIGRATION_VERSION_KEY, task.version()) .map_err(|err| { internal_error(format!("failed to migrate the database: {err}")) }) @@ -117,7 +117,7 @@ impl Migrations { /// Requires upgrade the executable binary. pub fn check(&self, db: &ReadOnlyDB, include_background: bool) -> Ordering { let db_version = match db - .get_pinned_default(MIGRATION_VERSION_KEY) + .get_pinned_default(COLUMN_META::MIGRATION_VERSION_KEY) .expect("get the version of database") { Some(version_bytes) => { @@ -152,7 +152,7 @@ impl Migrations { /// Check if the migrations will consume a lot of time. pub fn expensive(&self, db: &ReadOnlyDB, include_background: bool) -> bool { let db_version = match db - .get_pinned_default(MIGRATION_VERSION_KEY) + .get_pinned_default(COLUMN_META::MIGRATION_VERSION_KEY) .expect("get the version of database") { Some(version_bytes) => { @@ -178,7 +178,7 @@ impl Migrations { /// Check if all the pending migrations will be executed in background. 
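// A minimal sketch (not from this patch) of why the version check above is
// sound: migration versions are fixed-width `YYYYMMDDHHMMSS` digit strings,
// so an ordered byte-wise comparison coincides with chronological order.
fn version_ordering(db_version: &[u8], latest_migration: &[u8]) -> std::cmp::Ordering {
    // Both inputs are 14 ASCII digits, e.g. b"20191116225943"; `cmp` on byte
    // slices is lexicographic, which for equal-length digit strings is numeric.
    db_version.cmp(latest_migration)
}
// version_ordering(b"20191116225943", b"20191127101121") == Ordering::Less:
// the database is behind the newest migration and a migration must run.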
pub fn can_run_in_background(&self, db: &ReadOnlyDB) -> bool { let db_version = match db - .get_pinned_default(MIGRATION_VERSION_KEY) + .get_pinned_default(COLUMN_META::MIGRATION_VERSION_KEY) .expect("get the version of database") { Some(version_bytes) => { @@ -198,7 +198,7 @@ impl Migrations { } fn is_non_empty_rdb(&self, db: &ReadOnlyDB) -> bool { - if let Ok(v) = db.get_pinned(COLUMN_META, META_TIP_HEADER_KEY) { + if let Ok(v) = db.get_pinned(COLUMN_META::NAME, COLUMN_META::META_TIP_HEADER_KEY) { if v.is_some() { return true; } @@ -207,7 +207,7 @@ impl Migrations { } fn is_non_empty_db(&self, db: &RocksDB) -> bool { - if let Ok(v) = db.get_pinned(COLUMN_META, META_TIP_HEADER_KEY) { + if let Ok(v) = db.get_pinned(COLUMN_META::NAME, COLUMN_META::META_TIP_HEADER_KEY) { if v.is_some() { return true; } @@ -232,7 +232,7 @@ impl Migrations { pb }; db = m.migrate(db, Arc::new(pb))?; - db.put_default(MIGRATION_VERSION_KEY, m.version()) + db.put_default(COLUMN_META::MIGRATION_VERSION_KEY, m.version()) .map_err(|err| internal_error(format!("failed to migrate the database: {err}")))?; } mpb.join_and_clear().expect("MultiProgress join"); @@ -273,7 +273,7 @@ impl Migrations { fn get_migration_version(&self, db: &RocksDB) -> Result, Error> { let raw = db - .get_pinned_default(MIGRATION_VERSION_KEY) + .get_pinned_default(COLUMN_META::MIGRATION_VERSION_KEY) .map_err(|err| { internal_error(format!("failed to get the version of database: {err}")) })?; @@ -289,7 +289,7 @@ impl Migrations { if db_version.is_none() { if let Some(m) = self.migrations.values().last() { info!("Init database version {}", m.version()); - db.put_default(MIGRATION_VERSION_KEY, m.version()) + db.put_default(COLUMN_META::MIGRATION_VERSION_KEY, m.version()) .map_err(|err| { internal_error(format!("failed to migrate the database: {err}")) })?; diff --git a/db-migration/src/tests.rs b/db-migration/src/tests.rs index b3a6fa18c35..2d3a295ab14 100644 --- a/db-migration/src/tests.rs +++ b/db-migration/src/tests.rs @@ -1,7 +1,7 @@ use ckb_app_config::DBConfig; use ckb_db::ReadOnlyDB; use ckb_db::RocksDB; -use ckb_db_schema::MIGRATION_VERSION_KEY; +use ckb_db_schema::COLUMN_META; use ckb_error::Error; use indicatif::ProgressBar; use std::sync::Arc; @@ -26,7 +26,7 @@ fn test_default_migration() { let r = migrations.migrate(db, false).unwrap(); assert_eq!( b"20191116225943".to_vec(), - r.get_pinned_default(MIGRATION_VERSION_KEY) + r.get_pinned_default(COLUMN_META::MIGRATION_VERSION_KEY) .unwrap() .unwrap() .to_vec() @@ -41,7 +41,7 @@ fn test_default_migration() { .unwrap(); assert_eq!( b"20191127101121".to_vec(), - r.get_pinned_default(MIGRATION_VERSION_KEY) + r.get_pinned_default(COLUMN_META::MIGRATION_VERSION_KEY) .unwrap() .unwrap() .to_vec() @@ -117,7 +117,7 @@ fn test_customized_migration() { ); assert_eq!( VERSION.as_bytes(), - db.get_pinned_default(MIGRATION_VERSION_KEY) + db.get_pinned_default(COLUMN_META::MIGRATION_VERSION_KEY) .unwrap() .unwrap() .to_vec() @@ -209,7 +209,7 @@ fn test_background_migration() { let r = migrations.migrate(db, false).unwrap(); assert_eq!( b"20191116225943".to_vec(), - r.get_pinned_default(MIGRATION_VERSION_KEY) + r.get_pinned_default(COLUMN_META::MIGRATION_VERSION_KEY) .unwrap() .unwrap() .to_vec() @@ -248,7 +248,7 @@ fn test_background_migration() { std::thread::sleep(std::time::Duration::from_millis(1000)); assert_eq!( b"20241127101122".to_vec(), - db.get_pinned_default(MIGRATION_VERSION_KEY) + db.get_pinned_default(COLUMN_META::MIGRATION_VERSION_KEY) .unwrap() .unwrap() .to_vec() diff --git 
a/db-schema/Cargo.toml b/db-schema/Cargo.toml index 229ed2ed087..7085964351c 100644 --- a/db-schema/Cargo.toml +++ b/db-schema/Cargo.toml @@ -11,3 +11,4 @@ repository = "https://github.com/nervosnetwork/ckb" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +ckb-types = { version = "0.117.0-pre", path = "../util/types" } diff --git a/db-schema/src/lib.rs b/db-schema/src/lib.rs index 5d6d91e4a98..526b2df0c77 100644 --- a/db-schema/src/lib.rs +++ b/db-schema/src/lib.rs @@ -1,58 +1,256 @@ +#![allow(non_snake_case)] //! The schema include constants define the low level database column families. +use ckb_types::core::BlockNumber; +use ckb_types::packed::Byte32; +use ckb_types::packed::NumberHash; +use ckb_types::packed::OutPoint; +use ckb_types::packed::TransactionKey; +use ckb_types::prelude::*; +use ckb_types::BlockNumberAndHash; + /// Column families alias type pub type Col = &'static str; + /// Total column number -pub const COLUMNS: u32 = 19; -/// Column store chain index -pub const COLUMN_INDEX: Col = "0"; -/// Column store block's header -pub const COLUMN_BLOCK_HEADER: Col = "1"; -/// Column store block's body -pub const COLUMN_BLOCK_BODY: Col = "2"; -/// Column store block's uncle and uncles’ proposal zones -pub const COLUMN_BLOCK_UNCLE: Col = "3"; -/// Column store meta data -pub const COLUMN_META: Col = "4"; -/// Column store transaction extra information -pub const COLUMN_TRANSACTION_INFO: Col = "5"; -/// Column store block extra information -pub const COLUMN_BLOCK_EXT: Col = "6"; -/// Column store block's proposal ids -pub const COLUMN_BLOCK_PROPOSAL_IDS: Col = "7"; -/// Column store indicates track block epoch -pub const COLUMN_BLOCK_EPOCH: Col = "8"; -/// Column store indicates track block epoch -pub const COLUMN_EPOCH: Col = "9"; -/// Column store cell -pub const COLUMN_CELL: Col = "10"; -/// Column store main chain consensus include uncles -/// -/// -pub const COLUMN_UNCLES: Col = "11"; -/// Column store cell data -pub const COLUMN_CELL_DATA: Col = "12"; -/// Column store block number-hash pair -pub const COLUMN_NUMBER_HASH: Col = "13"; -/// Column store cell data hash -pub const COLUMN_CELL_DATA_HASH: Col = "14"; -/// Column store block extension data -pub const COLUMN_BLOCK_EXTENSION: Col = "15"; -/// Column store chain root MMR data -pub const COLUMN_CHAIN_ROOT_MMR: Col = "16"; -/// Column store filter data for client-side filtering -pub const COLUMN_BLOCK_FILTER: Col = "17"; -/// Column store filter data hash for client-side filtering -pub const COLUMN_BLOCK_FILTER_HASH: Col = "18"; - -/// META_TIP_HEADER_KEY tracks the latest known best block header -pub const META_TIP_HEADER_KEY: &[u8] = b"TIP_HEADER"; -/// META_CURRENT_EPOCH_KEY tracks the latest known epoch -pub const META_CURRENT_EPOCH_KEY: &[u8] = b"CURRENT_EPOCH"; -/// META_FILTER_DATA_KEY tracks the latest built filter data block hash -pub const META_LATEST_BUILT_FILTER_DATA_KEY: &[u8] = b"LATEST_BUILT_FILTER_DATA"; - -/// CHAIN_SPEC_HASH_KEY tracks the hash of chain spec which created current database -pub const CHAIN_SPEC_HASH_KEY: &[u8] = b"chain-spec-hash"; -/// MIGRATION_VERSION_KEY tracks the current database version. 
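// Illustrative sketch (not from this patch): the constants being removed here
// keyed most columns by hash alone, so RocksDB iteration order bore no
// relation to chain order. The modules added below prefix every key with the
// big-endian block number, so on-disk order follows block height.
fn old_header_key(hash: &[u8; 32]) -> Vec<u8> {
    hash.to_vec() // 32 bytes: effectively random placement in the keyspace
}
fn new_header_key(number: u64, hash: &[u8; 32]) -> Vec<u8> {
    let mut key = Vec::with_capacity(40);
    key.extend_from_slice(&number.to_be_bytes()); // big-endian: numeric order == byte order
    key.extend_from_slice(hash);
    key // 40 bytes: number || hash, mirroring num_hash_key below
}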
-pub const MIGRATION_VERSION_KEY: &[u8] = b"db-version"; +pub const COLUMNS: u32 = 20; + +pub mod COLUMN_INDEX { + use super::*; + + pub const NAME: Col = "0"; + + pub fn key_number(number: BlockNumber) -> impl AsRef<[u8]> { + number.to_be_bytes() + } + + pub fn key_hash(hash: Byte32) -> impl AsRef<[u8]> { + hash.as_slice().to_vec() + } +} + +pub mod COLUMN_BLOCK_HEADER { + use super::*; + + pub const NAME: Col = "1"; + + pub fn key(num_hash: BlockNumberAndHash) -> Vec { + let mut key = Vec::with_capacity(40); + key.extend(num_hash.number().to_be_bytes()); + key.extend(num_hash.hash().as_slice()); + key + } +} + +pub mod COLUMN_BLOCK_BODY { + use super::*; + + pub const NAME: Col = "2"; + + pub fn key(num_hash: BlockNumberAndHash, tx_index: usize) -> Vec { + TransactionKey::new_builder() + .block_number(num_hash.number().pack()) + .block_hash(num_hash.hash()) + .index(tx_index.pack()) + .build() + .as_slice() + .to_vec() + } + + pub fn prefix_key(num_hash: BlockNumberAndHash) -> Vec { + TransactionKey::new_builder() + .block_number(num_hash.number.pack()) + .block_hash(num_hash.hash) + .build() + .as_slice()[..40] + .to_vec() + } +} + +fn num_hash_key(num_hash: BlockNumberAndHash) -> impl AsRef<[u8]> { + let mut key = Vec::with_capacity(40); + key.extend(num_hash.number.to_be_bytes()); + key.extend(num_hash.hash.as_slice()); + key +} + +pub mod COLUMN_BLOCK_UNCLE { + use super::*; + + /// Column store block's uncle and uncles’ proposal zones + pub const NAME: Col = "3"; + + pub fn key(num_hash: BlockNumberAndHash) -> impl AsRef<[u8]> { + num_hash_key(num_hash) + } +} + +pub mod COLUMN_META { + use super::*; + + /// Column store meta data + pub const NAME: Col = "4"; + + /// META_TIP_HEADER_KEY tracks the latest known best block header + pub const META_TIP_HEADER_KEY: &[u8] = b"TIP_HEADER"; + /// META_CURRENT_EPOCH_KEY tracks the latest known epoch + pub const META_CURRENT_EPOCH_KEY: &[u8] = b"CURRENT_EPOCH"; + /// META_FILTER_DATA_KEY tracks the latest built filter data block hash + pub const META_LATEST_BUILT_FILTER_DATA_KEY: &[u8] = b"LATEST_BUILT_FILTER_DATA"; + + /// CHAIN_SPEC_HASH_KEY tracks the hash of chain spec which created current database + pub const CHAIN_SPEC_HASH_KEY: &[u8] = b"chain-spec-hash"; + /// MIGRATION_VERSION_KEY tracks the current database version. 
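// Illustrative sketch (not from this patch) of the range scan that the
// 40-byte prefix_key above enables; a BTreeMap stands in for a RocksDB column
// family, and the real store code uses get_iter with IteratorMode::From plus
// take_while instead.
use std::collections::BTreeMap;

fn block_txs(body_col: &BTreeMap<Vec<u8>, Vec<u8>>, prefix: &[u8]) -> Vec<Vec<u8>> {
    body_col
        .range(prefix.to_vec()..)                   // seek: first key >= number || hash
        .take_while(|(k, _)| k.starts_with(prefix)) // stop once the 40-byte prefix changes
        .map(|(_, tx_bytes)| tx_bytes.clone())      // values are packed transactions
        .collect()
}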
+ pub const MIGRATION_VERSION_KEY: &[u8] = b"db-version"; +} + +pub mod COLUMN_TRANSACTION_INFO { + use super::*; + + /// Column store transaction extra information + pub const NAME: Col = "5"; +} + +pub mod COLUMN_BLOCK_EXT { + use super::*; + + /// Column store block extra information + pub const NAME: Col = "6"; + + pub fn key(num_hash: BlockNumberAndHash) -> impl AsRef<[u8]> { + num_hash_key(num_hash) + } +} + +pub mod COLUMN_BLOCK_PROPOSAL_IDS { + use super::*; + + /// Column store block's proposal ids + pub const NAME: Col = "7"; + + pub fn key(num_hash: BlockNumberAndHash) -> impl AsRef<[u8]> { + num_hash_key(num_hash) + } +} + +pub mod COLUMN_BLOCK_EPOCH { + use super::*; + + /// Column store indicates track block epoch + pub const NAME: Col = "8"; +} + +pub mod COLUMN_EPOCH { + use super::*; + + /// Column store indicates track block epoch + pub const NAME: Col = "9"; +} + +pub mod COLUMN_CELL { + use super::*; + + /// Column store cell + pub const NAME: Col = "10"; + + pub fn key(block_number: BlockNumber, cell: &OutPoint) -> impl AsRef<[u8]> { + cell.to_cell_key(block_number) + } +} + +pub mod COLUMN_UNCLES { + use super::*; + + /// Column store main chain consensus include uncles + /// + pub const NAME: Col = "11"; +} + +pub mod COLUMN_CELL_DATA { + use super::*; + + /// Column store cell data + pub const NAME: Col = "12"; + + pub fn key(block_number: BlockNumber, cell: &OutPoint) -> impl AsRef<[u8]> { + cell.to_cell_key(block_number) + } +} + +pub mod COLUMN_NUMBER_HASH { + use super::*; + + /// Column store block number-hash pair + pub const NAME: Col = "13"; + + pub fn key(num_hash: BlockNumberAndHash) -> impl AsRef<[u8]> { + num_hash_key(num_hash) + } +} + +pub mod COLUMN_CELL_DATA_HASH { + use super::*; + use ckb_types::packed::OutPoint; + + /// Column store cell data hash + pub const NAME: Col = "14"; + + pub fn key(block_number: BlockNumber, cell: &OutPoint) -> impl AsRef<[u8]> { + cell.to_cell_key(block_number) + } +} + +pub mod COLUMN_BLOCK_EXTENSION { + use super::*; + + /// Column store block extension data + pub const NAME: Col = "15"; +} + +pub mod COLUMN_CHAIN_ROOT_MMR { + use super::*; + use ckb_types::packed; + + /// Column store chain root MMR data + pub const NAME: Col = "16"; + + /// Build COLUMN_CHAIN_ROOT_MMR's key + pub fn key(pos: u64) -> packed::BeUint64 { + let key: packed::BeUint64 = pos.pack(); + key + } +} + +pub mod COLUMN_BLOCK_FILTER { + use super::*; + + /// Column store filter data for client-side filtering + pub const NAME: Col = "17"; + + /// Build COLUMN_BLOCK_FILTER's key + pub fn key(num_hash: BlockNumberAndHash) -> impl AsRef<[u8]> { + num_hash_key(num_hash) + } +} + +pub mod COLUMN_BLOCK_FILTER_HASH { + use super::*; + + /// Column store filter data hash for client-side filtering + pub const NAME: Col = "18"; + + /// Build COLUMN_BLOCK_FILTER_HASH's key + pub fn key(num_hash: BlockNumberAndHash) -> impl AsRef<[u8]> { + num_hash_key(num_hash) + } +} + +pub mod COLUMN_BLOCK_HEADER_NUM { + use super::*; + + pub const NAME: Col = "19"; + + pub fn key(hash: Byte32) -> Vec { + hash.as_slice().to_vec() + } +} diff --git a/rpc/src/module/chain.rs b/rpc/src/module/chain.rs index a7d2c861fba..a8f63da823e 100644 --- a/rpc/src/module/chain.rs +++ b/rpc/src/module/chain.rs @@ -22,7 +22,7 @@ use ckb_types::{ packed, prelude::*, utilities::{merkle_root, MerkleProof, CBMT}, - H256, + BlockNumberAndHash, H256, }; use ckb_verification::ScriptVerifier; use ckb_verification::TxVerifyEnv; @@ -1730,12 +1730,14 @@ impl ChainRpc for ChainRpcImpl { fn 
get_block_filter(&self, block_hash: H256) -> Result> { let store = self.shared.store(); let block_hash = block_hash.pack(); - if !store.is_main_chain(&block_hash) { - return Ok(None); - } - Ok(store.get_block_filter(&block_hash).map(|data| { + let block_number = match store.get_block_number(&block_hash) { + Some(block_number) => block_number, + _ => return Ok(None), + }; + let num_hash = BlockNumberAndHash::new(block_number, block_hash.clone()); + Ok(store.get_block_filter(&num_hash).map(|data| { let hash = store - .get_block_filter_hash(&block_hash) + .get_block_filter_hash(num_hash) .expect("stored filter hash"); BlockFilter { data: data.into(), diff --git a/shared/src/shared.rs b/shared/src/shared.rs index fc3e9fea04c..17b5f9dddc3 100644 --- a/shared/src/shared.rs +++ b/shared/src/shared.rs @@ -18,7 +18,7 @@ use ckb_types::{ core::{BlockNumber, EpochExt, EpochNumber, HeaderView, Version}, packed::{self, Byte32}, prelude::*, - U256, + BlockNumberAndHash, U256, }; use ckb_verification::cache::TxVerificationCache; use std::cmp; @@ -182,7 +182,8 @@ impl Shared { if !frozen.is_empty() { // remain header for (hash, (number, txs)) in &frozen { - batch.delete_block_body(*number, hash, *txs).map_err(|e| { + let num_hash = BlockNumberAndHash::new(number.to_owned(), hash.to_owned()); + batch.delete_block_body(num_hash, *txs).map_err(|e| { ckb_logger::error!("Freezer delete_block_body failed {}", e); e })?; @@ -191,7 +192,7 @@ impl Shared { let prefix = pack_number.as_slice(); for (key, value) in snapshot .get_iter( - COLUMN_NUMBER_HASH, + COLUMN_NUMBER_HASH::NAME, IteratorMode::From(prefix, Direction::Forward), ) .take_while(|(key, _)| key.starts_with(prefix)) @@ -221,12 +222,12 @@ impl Shared { if !side.is_empty() { // Wipe out side chain for (hash, (number, txs)) in &side { - batch - .delete_block(number.unpack(), hash, *txs) - .map_err(|e| { - ckb_logger::error!("Freezer delete_block_body failed {}", e); - e - })?; + let number: u64 = number.unpack(); + let num_hash = BlockNumberAndHash::new(number, hash.to_owned()); + batch.delete_block(num_hash, *txs).map_err(|e| { + ckb_logger::error!("Freezer delete_block_body failed {}", e); + e + })?; } self.store.write(&batch).map_err(|e| { @@ -255,7 +256,7 @@ impl Shared { .build(); if let Err(e) = self.store.compact_range( - COLUMN_BLOCK_BODY, + COLUMN_BLOCK_BODY::NAME, Some(start_t.as_slice()), Some(end_t.as_slice()), ) { diff --git a/shared/src/shared_builder.rs b/shared/src/shared_builder.rs index 985add3ba0f..414ad433231 100644 --- a/shared/src/shared_builder.rs +++ b/shared/src/shared_builder.rs @@ -28,6 +28,7 @@ use ckb_types::core::service::PoolTransactionEntry; use ckb_types::core::tx_pool::Reject; use ckb_types::core::EpochExt; use ckb_types::core::HeaderView; +use ckb_types::BlockNumberAndHash; use ckb_verification::cache::init_cache; use std::collections::HashSet; use std::path::{Path, PathBuf}; @@ -244,12 +245,13 @@ impl SharedBuilder { let proposal_start = tip_number.saturating_sub(proposal_window.farthest()); for bn in proposal_start..=tip_number { if let Some(hash) = store.get_block_hash(bn) { + let num_hash = BlockNumberAndHash::new(bn, hash); let mut ids_set = HashSet::new(); - if let Some(ids) = store.get_block_proposal_txs_ids(&hash) { + if let Some(ids) = store.get_block_proposal_txs_ids(num_hash.clone()) { ids_set.extend(ids) } - if let Some(us) = store.get_block_uncles(&hash) { + if let Some(us) = store.get_block_uncles(num_hash) { for u in us.data().into_iter() { ids_set.extend(u.proposals().into_iter()); } diff --git 
a/store/Cargo.toml b/store/Cargo.toml index f4c5121fa52..f87c71bd870 100644 --- a/store/Cargo.toml +++ b/store/Cargo.toml @@ -20,6 +20,7 @@ ckb-app-config = { path = "../util/app-config", version = "= 0.117.0-pre" } ckb-db-schema = { path = "../db-schema", version = "= 0.117.0-pre" } ckb-freezer = { path = "../freezer", version = "= 0.117.0-pre" } ckb-merkle-mountain-range = "0.5.2" +log = "0.4.21" [dev-dependencies] tempfile.workspace = true diff --git a/store/src/cell.rs b/store/src/cell.rs index 88c6e49e17c..5e2c18e77da 100644 --- a/store/src/cell.rs +++ b/store/src/cell.rs @@ -25,6 +25,7 @@ use std::collections::HashMap; // Apply the effects of this block on the live cell set. pub fn attach_block_cell(txn: &StoreTransaction, block: &BlockView) -> Result<(), Error> { let transactions = block.transactions(); + let block_number = block.header().number(); // add new live cells let new_cells = transactions @@ -76,7 +77,7 @@ pub fn attach_block_cell(txn: &StoreTransaction, block: &BlockView) -> Result<() .iter() .skip(1) .flat_map(|tx| tx.input_pts_iter()); - txn.delete_cells(deads)?; + txn.delete_cells(block_number, deads)?; Ok(()) } @@ -94,7 +95,6 @@ pub fn detach_block_cell(txn: &StoreTransaction, block: &BlockView) -> Result<() indexes.push(index); } } - // restore inputs // skip cellbase let undo_deads = input_pts @@ -145,7 +145,7 @@ pub fn detach_block_cell(txn: &StoreTransaction, block: &BlockView) -> Result<() // undo live cells let undo_cells = transactions.iter().flat_map(|tx| tx.output_pts_iter()); - txn.delete_cells(undo_cells)?; + txn.delete_cells(block.number(), undo_cells)?; Ok(()) } diff --git a/store/src/db.rs b/store/src/db.rs index 88b1da32fcc..279aab2bea3 100644 --- a/store/src/db.rs +++ b/store/src/db.rs @@ -10,7 +10,7 @@ use ckb_db::{ iter::{DBIter, DBIterator, IteratorMode}, DBPinnableSlice, RocksDB, }; -use ckb_db_schema::{Col, CHAIN_SPEC_HASH_KEY, MIGRATION_VERSION_KEY}; +use ckb_db_schema::{Col, COLUMN_META}; use ckb_error::{Error, InternalErrorKind}; use ckb_freezer::Freezer; use ckb_types::{ @@ -18,6 +18,7 @@ use ckb_types::{ packed, prelude::*, utilities::merkle_mountain_range::ChainRootMMR, + BlockNumberAndHash, }; use std::sync::Arc; @@ -100,13 +101,14 @@ impl ChainDB { /// Store the chain spec hash pub fn put_chain_spec_hash(&self, hash: &packed::Byte32) -> Result<(), Error> { - self.db.put_default(CHAIN_SPEC_HASH_KEY, hash.as_slice()) + self.db + .put_default(COLUMN_META::CHAIN_SPEC_HASH_KEY, hash.as_slice()) } /// Return the chain spec hash pub fn get_chain_spec_hash(&self) -> Option { self.db - .get_pinned_default(CHAIN_SPEC_HASH_KEY) + .get_pinned_default(COLUMN_META::CHAIN_SPEC_HASH_KEY) .expect("db operation should be ok") .map(|raw| packed::Byte32Reader::from_slice_should_be_ok(raw.as_ref()).to_entity()) } @@ -114,7 +116,7 @@ impl ChainDB { /// Return the chain spec hash pub fn get_migration_version(&self) -> Option { self.db - .get_pinned_default(MIGRATION_VERSION_KEY) + .get_pinned_default(COLUMN_META::MIGRATION_VERSION_KEY) .expect("db operation should be ok") } @@ -173,6 +175,7 @@ impl ChainDB { let epoch = consensus.genesis_epoch_ext(); let db_txn = self.begin_transaction(); let genesis_hash = genesis.hash(); + let genesis_num_hash = BlockNumberAndHash::new(0, genesis_hash.clone()); let ext = BlockExt { received_at: genesis.timestamp(), total_difficulty: genesis.difficulty(), @@ -187,7 +190,7 @@ impl ChainDB { let last_block_hash_in_previous_epoch = epoch.last_block_hash_in_previous_epoch(); db_txn.insert_block(genesis)?; - 
db_txn.insert_block_ext(&genesis_hash, &ext)?; + db_txn.insert_block_ext(genesis_num_hash, &ext)?; db_txn.insert_tip_header(&genesis.header())?; db_txn.insert_current_epoch_ext(epoch)?; db_txn.insert_block_epoch_index(&genesis_hash, &last_block_hash_in_previous_epoch)?; diff --git a/store/src/snapshot.rs b/store/src/snapshot.rs index 57579abc416..086fb0e266e 100644 --- a/store/src/snapshot.rs +++ b/store/src/snapshot.rs @@ -25,6 +25,7 @@ impl ChainStore for StoreSnapshot { } fn get(&self, col: Col, key: &[u8]) -> Option { + // println!("get col={:?} key={}", col, hex(key)); self.inner .get_pinned(col, key) .expect("db operation should be ok") diff --git a/store/src/store.rs b/store/src/store.rs index f6866207010..f8427d22765 100644 --- a/store/src/store.rs +++ b/store/src/store.rs @@ -6,10 +6,10 @@ use ckb_db::{ }; use ckb_db_schema::{ Col, COLUMN_BLOCK_BODY, COLUMN_BLOCK_EPOCH, COLUMN_BLOCK_EXT, COLUMN_BLOCK_EXTENSION, - COLUMN_BLOCK_FILTER, COLUMN_BLOCK_FILTER_HASH, COLUMN_BLOCK_HEADER, COLUMN_BLOCK_PROPOSAL_IDS, - COLUMN_BLOCK_UNCLE, COLUMN_CELL, COLUMN_CELL_DATA, COLUMN_CELL_DATA_HASH, - COLUMN_CHAIN_ROOT_MMR, COLUMN_EPOCH, COLUMN_INDEX, COLUMN_META, COLUMN_TRANSACTION_INFO, - COLUMN_UNCLES, META_CURRENT_EPOCH_KEY, META_LATEST_BUILT_FILTER_DATA_KEY, META_TIP_HEADER_KEY, + COLUMN_BLOCK_FILTER, COLUMN_BLOCK_FILTER_HASH, COLUMN_BLOCK_HEADER, COLUMN_BLOCK_HEADER_NUM, + COLUMN_BLOCK_PROPOSAL_IDS, COLUMN_BLOCK_UNCLE, COLUMN_CELL, COLUMN_CELL_DATA, + COLUMN_CELL_DATA_HASH, COLUMN_CHAIN_ROOT_MMR, COLUMN_EPOCH, COLUMN_INDEX, COLUMN_META, + COLUMN_TRANSACTION_INFO, COLUMN_UNCLES, }; use ckb_freezer::Freezer; use ckb_types::{ @@ -20,6 +20,7 @@ use ckb_types::{ }, packed::{self, OutPoint}, prelude::*, + BlockNumberAndHash, }; /// The `ChainStore` trait provides chain data store interface @@ -40,6 +41,7 @@ pub trait ChainStore: Send + Sync + Sized { /// Get block by block header hash fn get_block(&self, h: &packed::Byte32) -> Option { let header = self.get_block_header(h)?; + let num_hash = header.num_hash(); if let Some(freezer) = self.freezer() { if header.number() > 0 && header.number() < freezer.number() { let raw_block = freezer.retrieve(header.number()).expect("block frozen")?; @@ -49,12 +51,12 @@ pub trait ChainStore: Send + Sync + Sized { return Some(raw_block.into_view()); } } - let body = self.get_block_body(h); + let body = self.get_block_body_by_num_hash(num_hash.clone()); let uncles = self - .get_block_uncles(h) + .get_block_uncles(num_hash.clone()) .expect("block uncles must be stored"); let proposals = self - .get_block_proposal_txs_ids(h) + .get_block_proposal_txs_ids(num_hash) .expect("block proposal_ids must be stored"); let extension_opt = self.get_block_extension(h); @@ -73,10 +75,9 @@ pub trait ChainStore: Send + Sync + Sized { return Some(header.clone()); } }; - let ret = self.get(COLUMN_BLOCK_HEADER, hash.as_slice()).map(|slice| { - let reader = packed::HeaderViewReader::from_slice_should_be_ok(slice.as_ref()); - Unpack::::unpack(&reader) - }); + let ret = self + .get_packed_block_header(hash) + .map(|header| header.into_view()); if let Some(cache) = self.cache() { ret.map(|header| { @@ -89,13 +90,33 @@ pub trait ChainStore: Send + Sync + Sized { } /// Get block body by block header hash - fn get_block_body(&self, hash: &packed::Byte32) -> Vec { - let prefix = hash.as_slice(); + fn get_block_body(&self, num_hash: BlockNumberAndHash) -> Vec { + // let num_hash = BlockNumberAndHash::new(number, hash.clone()); + // let num_hash = num_hash.to_db_key(); + // let prefix: &[u8] = 
num_hash.as_slice(); + // let prefix = column_block_body_prefix_key(number, hash).as_ref(); + let prefix = COLUMN_BLOCK_BODY::prefix_key(num_hash); + + self.get_iter( + COLUMN_BLOCK_BODY::NAME, + IteratorMode::From(prefix.as_ref(), Direction::Forward), + ) + .take_while(|(key, _)| key.starts_with(prefix.as_ref())) + .map(|(_key, value)| { + let reader = packed::TransactionViewReader::from_slice_should_be_ok(value.as_ref()); + Unpack::::unpack(&reader) + }) + .collect() + } + + /// Get block body by number and hash + fn get_block_body_by_num_hash(&self, num_hash: BlockNumberAndHash) -> Vec { + let prefix = COLUMN_BLOCK_BODY::prefix_key(num_hash); self.get_iter( - COLUMN_BLOCK_BODY, - IteratorMode::From(prefix, Direction::Forward), + COLUMN_BLOCK_BODY::NAME, + IteratorMode::From(prefix.as_ref(), Direction::Forward), ) - .take_while(|(key, _)| key.starts_with(prefix)) + .take_while(|(key, _)| key.starts_with(prefix.as_ref())) .map(|(_key, value)| { let reader = packed::TransactionViewReader::from_slice_should_be_ok(value.as_ref()); Unpack::::unpack(&reader) @@ -106,16 +127,20 @@ pub trait ChainStore: Send + Sync + Sized { /// Get unfrozen block from ky-store with given hash fn get_unfrozen_block(&self, hash: &packed::Byte32) -> Option { let header = self - .get(COLUMN_BLOCK_HEADER, hash.as_slice()) + .get(COLUMN_BLOCK_HEADER::NAME, hash.as_slice()) .map(|slice| { let reader = packed::HeaderViewReader::from_slice_should_be_ok(slice.as_ref()); Unpack::::unpack(&reader) })?; + let num_hash = header.num_hash(); - let body = self.get_block_body(hash); + let body = self.get_block_body(num_hash.clone()); let uncles = self - .get(COLUMN_BLOCK_UNCLE, hash.as_slice()) + .get( + COLUMN_BLOCK_UNCLE::NAME, + COLUMN_BLOCK_UNCLE::key(num_hash.clone()).as_ref(), + ) .map(|slice| { let reader = packed::UncleBlockVecViewReader::from_slice_should_be_ok(slice.as_ref()); @@ -124,7 +149,10 @@ pub trait ChainStore: Send + Sync + Sized { .expect("block uncles must be stored"); let proposals = self - .get(COLUMN_BLOCK_PROPOSAL_IDS, hash.as_slice()) + .get( + COLUMN_BLOCK_PROPOSAL_IDS::NAME, + COLUMN_BLOCK_PROPOSAL_IDS::key(num_hash.clone()).as_ref(), + ) .map(|slice| { packed::ProposalShortIdVecReader::from_slice_should_be_ok(slice.as_ref()) .to_entity() @@ -132,7 +160,7 @@ pub trait ChainStore: Send + Sync + Sized { .expect("block proposal_ids must be stored"); let extension_opt = self - .get(COLUMN_BLOCK_EXTENSION, hash.as_slice()) + .get(COLUMN_BLOCK_EXTENSION::NAME, hash.as_slice()) .map(|slice| packed::BytesReader::from_slice_should_be_ok(slice.as_ref()).to_entity()); let block = if let Some(extension) = extension_opt { @@ -151,14 +179,17 @@ pub trait ChainStore: Send + Sync + Sized { return hashes.clone(); } }; + let block_number = self.get_block_number(hash).expect("block number"); + let num_hash = BlockNumberAndHash::new(block_number, hash.to_owned()); + + let prefix = COLUMN_BLOCK_BODY::prefix_key(num_hash); - let prefix = hash.as_slice(); let ret: Vec<_> = self .get_iter( - COLUMN_BLOCK_BODY, - IteratorMode::From(prefix, Direction::Forward), + COLUMN_BLOCK_BODY::NAME, + IteratorMode::From(prefix.as_ref(), Direction::Forward), ) - .take_while(|(key, _)| key.starts_with(prefix)) + .take_while(|(key, _)| key.starts_with(prefix.as_ref())) .map(|(_key, value)| { let reader = packed::TransactionViewReader::from_slice_should_be_ok(value.as_ref()); reader.hash().to_entity() @@ -175,16 +206,19 @@ pub trait ChainStore: Send + Sync + Sized { /// Get proposal short id by block header hash fn get_block_proposal_txs_ids( 
&self, - hash: &packed::Byte32, + num_hash: BlockNumberAndHash, ) -> Option { if let Some(cache) = self.cache() { - if let Some(data) = cache.block_proposals.lock().get(hash) { + if let Some(data) = cache.block_proposals.lock().get(&num_hash.hash()) { return Some(data.clone()); } }; let ret = self - .get(COLUMN_BLOCK_PROPOSAL_IDS, hash.as_slice()) + .get( + COLUMN_BLOCK_PROPOSAL_IDS::NAME, + COLUMN_BLOCK_PROPOSAL_IDS::key(num_hash.clone()).as_ref(), + ) .map(|slice| { packed::ProposalShortIdVecReader::from_slice_should_be_ok(slice.as_ref()) .to_entity() @@ -192,7 +226,10 @@ pub trait ChainStore: Send + Sync + Sized { if let Some(cache) = self.cache() { ret.map(|data| { - cache.block_proposals.lock().put(hash.clone(), data.clone()); + cache + .block_proposals + .lock() + .put(num_hash.hash().clone(), data.clone()); data }) } else { @@ -201,21 +238,27 @@ pub trait ChainStore: Send + Sync + Sized { } /// Get block uncles by block header hash - fn get_block_uncles(&self, hash: &packed::Byte32) -> Option { + fn get_block_uncles(&self, num_hash: BlockNumberAndHash) -> Option { if let Some(cache) = self.cache() { - if let Some(data) = cache.block_uncles.lock().get(hash) { + if let Some(data) = cache.block_uncles.lock().get(&num_hash.hash()) { return Some(data.clone()); } }; - let ret = self.get(COLUMN_BLOCK_UNCLE, hash.as_slice()).map(|slice| { - let reader = packed::UncleBlockVecViewReader::from_slice_should_be_ok(slice.as_ref()); - Unpack::::unpack(&reader) - }); + let ret = self + .get( + COLUMN_BLOCK_UNCLE::NAME, + COLUMN_BLOCK_UNCLE::key(num_hash.clone()).as_ref(), + ) + .map(|slice| { + let reader = + packed::UncleBlockVecViewReader::from_slice_should_be_ok(slice.as_ref()); + Unpack::::unpack(&reader) + }); if let Some(cache) = self.cache() { ret.map(|uncles| { - cache.block_uncles.lock().put(hash.clone(), uncles.clone()); + cache.block_uncles.lock().put(num_hash.hash, uncles.clone()); uncles }) } else { @@ -232,7 +275,7 @@ pub trait ChainStore: Send + Sync + Sized { }; let ret = self - .get(COLUMN_BLOCK_EXTENSION, hash.as_slice()) + .get(COLUMN_BLOCK_EXTENSION::NAME, hash.as_slice()) .map(|slice| packed::BytesReader::from_slice_should_be_ok(slice.as_ref()).to_entity()); if let Some(cache) = self.cache() { @@ -245,44 +288,48 @@ pub trait ChainStore: Send + Sync + Sized { /// /// Since v0.106, `BlockExt` added two option fields, so we have to use compatibility mode to read fn get_block_ext(&self, block_hash: &packed::Byte32) -> Option { - self.get(COLUMN_BLOCK_EXT, block_hash.as_slice()) - .map(|slice| { - let reader = - packed::BlockExtReader::from_compatible_slice_should_be_ok(slice.as_ref()); - match reader.count_extra_fields() { - 0 => reader.unpack(), - 2 => packed::BlockExtV1Reader::from_slice_should_be_ok(slice.as_ref()).unpack(), - _ => { - panic!( - "BlockExt storage field count doesn't match, expect 7 or 5, actual {}", - reader.field_count() - ) - } + let block_number = self.get_block_number(block_hash)?; + let num_hash = BlockNumberAndHash::new(block_number, block_hash.to_owned()); + self.get( + COLUMN_BLOCK_EXT::NAME, + COLUMN_BLOCK_EXT::key(num_hash).as_ref(), + ) + .map(|slice| { + let reader = packed::BlockExtReader::from_compatible_slice_should_be_ok(slice.as_ref()); + match reader.count_extra_fields() { + 0 => reader.unpack(), + 2 => packed::BlockExtV1Reader::from_slice_should_be_ok(slice.as_ref()).unpack(), + _ => { + panic!( + "BlockExt storage field count doesn't match, expect 7 or 5, actual {}", + reader.field_count() + ) } - }) + } + }) } /// Get block header hash by 
block number fn get_block_hash(&self, number: BlockNumber) -> Option { let block_number: packed::Uint64 = number.pack(); - self.get(COLUMN_INDEX, block_number.as_slice()) + self.get(COLUMN_INDEX::NAME, block_number.as_slice()) .map(|raw| packed::Byte32Reader::from_slice_should_be_ok(raw.as_ref()).to_entity()) } /// Get block number by block header hash fn get_block_number(&self, hash: &packed::Byte32) -> Option { - self.get(COLUMN_INDEX, hash.as_slice()) + self.get(COLUMN_BLOCK_HEADER_NUM::NAME, hash.as_slice()) .map(|raw| packed::Uint64Reader::from_slice_should_be_ok(raw.as_ref()).unpack()) } /// TODO(doc): @quake fn is_main_chain(&self, hash: &packed::Byte32) -> bool { - self.get(COLUMN_INDEX, hash.as_slice()).is_some() + self.get(COLUMN_INDEX::NAME, hash.as_slice()).is_some() } /// TODO(doc): @quake fn get_tip_header(&self) -> Option { - self.get(COLUMN_META, META_TIP_HEADER_KEY) + self.get(COLUMN_META::NAME, COLUMN_META::META_TIP_HEADER_KEY) .and_then(|raw| { self.get_block_header( &packed::Byte32Reader::from_slice_should_be_ok(raw.as_ref()).to_entity(), @@ -296,7 +343,8 @@ pub trait ChainStore: Send + Sync + Sized { /// This function is base on transaction index `COLUMN_TRANSACTION_INFO`. /// Current release maintains a full index of historical transaction by default, this may be changed in future fn transaction_exists(&self, hash: &packed::Byte32) -> bool { - self.get(COLUMN_TRANSACTION_INFO, hash.as_slice()).is_some() + self.get(COLUMN_TRANSACTION_INFO::NAME, hash.as_slice()) + .is_some() } /// Get commit transaction and block hash by its hash @@ -307,13 +355,18 @@ pub trait ChainStore: Send + Sync + Sized { /// TODO(doc): @quake fn get_transaction_info(&self, hash: &packed::Byte32) -> Option { - self.get(COLUMN_TRANSACTION_INFO, hash.as_slice()) + self.get(COLUMN_TRANSACTION_INFO::NAME, hash.as_slice()) .map(|slice| { let reader = packed::TransactionInfoReader::from_slice_should_be_ok(slice.as_ref()); Unpack::::unpack(&reader) }) } + fn get_transaction_block_number(&self, hash: &packed::Byte32) -> Option { + self.get_transaction_info(hash) + .map(|tx_info| tx_info.block_number) + } + /// Gets transaction and associated info with correspond hash fn get_transaction_with_info( &self, @@ -331,7 +384,7 @@ pub trait ChainStore: Send + Sync + Sized { return Some((tx_reader.to_entity().into_view(), tx_info)); } } - self.get(COLUMN_BLOCK_BODY, tx_info.key().as_slice()) + self.get(COLUMN_BLOCK_BODY::NAME, tx_info.key().as_slice()) .map(|slice| { let reader = packed::TransactionViewReader::from_slice_should_be_ok(slice.as_ref()); (reader.unpack(), tx_info) @@ -340,14 +393,25 @@ pub trait ChainStore: Send + Sync + Sized { /// Return whether cell is live fn have_cell(&self, out_point: &OutPoint) -> bool { - let key = out_point.to_cell_key(); - self.get(COLUMN_CELL, &key).is_some() + if let Some(block_number) = self.get_transaction_block_number(&out_point.tx_hash()) { + self.get( + COLUMN_CELL::NAME, + COLUMN_CELL::key(block_number, out_point).as_ref(), + ) + .is_some() + } else { + false + } } /// Gets cell meta data with out_point fn get_cell(&self, out_point: &OutPoint) -> Option { - let key = out_point.to_cell_key(); - self.get(COLUMN_CELL, &key).map(|slice| { + let block_number = self.get_transaction_block_number(&out_point.tx_hash())?; + self.get( + COLUMN_CELL::NAME, + COLUMN_CELL::key(block_number, out_point).as_ref(), + ) + .map(|slice| { let reader = packed::CellEntryReader::from_slice_should_be_ok(slice.as_ref()); build_cell_meta_from_reader(out_point.clone(), reader) }) @@ -355,14 
+419,15 @@ pub trait ChainStore: Send + Sync + Sized { /// TODO(doc): @quake fn get_cell_data(&self, out_point: &OutPoint) -> Option<(Bytes, packed::Byte32)> { - let key = out_point.to_cell_key(); + let block_number = self.get_transaction_block_number(&out_point.tx_hash())?; + let key = COLUMN_CELL_DATA::key(block_number, out_point); if let Some(cache) = self.cache() { - if let Some(cached) = cache.cell_data.lock().get(&key) { + if let Some(cached) = cache.cell_data.lock().get(key.as_ref()) { return Some(cached.clone()); } }; - let ret = self.get(COLUMN_CELL_DATA, &key).map(|slice| { + let ret = self.get(COLUMN_CELL_DATA::NAME, key.as_ref()).map(|slice| { if !slice.as_ref().is_empty() { let reader = packed::CellDataEntryReader::from_slice_should_be_ok(slice.as_ref()); let data = reader.output_data().unpack(); @@ -384,7 +449,10 @@ pub trait ChainStore: Send + Sync + Sized { if let Some(cache) = self.cache() { ret.map(|cached| { - cache.cell_data.lock().put(key, cached.clone()); + cache + .cell_data + .lock() + .put(key.as_ref().to_vec(), cached.clone()); cached }) } else { @@ -394,33 +462,39 @@ pub trait ChainStore: Send + Sync + Sized { /// TODO(doc): @quake fn get_cell_data_hash(&self, out_point: &OutPoint) -> Option { - let key = out_point.to_cell_key(); + let block_number = self.get_transaction_block_number(&out_point.tx_hash())?; + let key = COLUMN_CELL_DATA::key(block_number, out_point); if let Some(cache) = self.cache() { - if let Some(cached) = cache.cell_data_hash.lock().get(&key) { + if let Some(cached) = cache.cell_data_hash.lock().get(key.as_ref()) { return Some(cached.clone()); } }; - let ret = self.get(COLUMN_CELL_DATA_HASH, &key).map(|raw| { - if !raw.as_ref().is_empty() { - packed::Byte32Reader::from_slice_should_be_ok(raw.as_ref()).to_entity() - } else { - // impl packed::CellOutput { - // pub fn calc_data_hash(data: &[u8]) -> packed::Byte32 { - // if data.is_empty() { - // packed::Byte32::zero() - // } else { - // blake2b_256(data).pack() - // } - // } - // } - packed::Byte32::zero() - } - }); + let ret = self + .get(COLUMN_CELL_DATA_HASH::NAME, key.as_ref()) + .map(|raw| { + if !raw.as_ref().is_empty() { + packed::Byte32Reader::from_slice_should_be_ok(raw.as_ref()).to_entity() + } else { + // impl packed::CellOutput { + // pub fn calc_data_hash(data: &[u8]) -> packed::Byte32 { + // if data.is_empty() { + // packed::Byte32::zero() + // } else { + // blake2b_256(data).pack() + // } + // } + // } + packed::Byte32::zero() + } + }); if let Some(cache) = self.cache() { ret.map(|cached| { - cache.cell_data_hash.lock().put(key, cached.clone()); + cache + .cell_data_hash + .lock() + .put(key.as_ref().to_vec(), cached.clone()); cached }) } else { @@ -430,26 +504,26 @@ pub trait ChainStore: Send + Sync + Sized { /// Gets current epoch ext fn get_current_epoch_ext(&self) -> Option { - self.get(COLUMN_META, META_CURRENT_EPOCH_KEY) + self.get(COLUMN_META::NAME, COLUMN_META::META_CURRENT_EPOCH_KEY) .map(|slice| packed::EpochExtReader::from_slice_should_be_ok(slice.as_ref()).unpack()) } /// Gets epoch ext by epoch index fn get_epoch_ext(&self, hash: &packed::Byte32) -> Option { - self.get(COLUMN_EPOCH, hash.as_slice()) + self.get(COLUMN_EPOCH::NAME, hash.as_slice()) .map(|slice| packed::EpochExtReader::from_slice_should_be_ok(slice.as_ref()).unpack()) } /// Gets epoch index by epoch number fn get_epoch_index(&self, number: EpochNumber) -> Option { let epoch_number: packed::Uint64 = number.pack(); - self.get(COLUMN_EPOCH, epoch_number.as_slice()) + self.get(COLUMN_EPOCH::NAME, 
epoch_number.as_slice()) .map(|raw| packed::Byte32Reader::from_slice_should_be_ok(raw.as_ref()).to_entity()) } /// Gets epoch index by block hash fn get_block_epoch_index(&self, block_hash: &packed::Byte32) -> Option { - self.get(COLUMN_BLOCK_EPOCH, block_hash.as_slice()) + self.get(COLUMN_BLOCK_EPOCH::NAME, block_hash.as_slice()) .map(|raw| packed::Byte32Reader::from_slice_should_be_ok(raw.as_ref()).to_entity()) } @@ -461,12 +535,12 @@ pub trait ChainStore: Send + Sync + Sized { /// TODO(doc): @quake fn is_uncle(&self, hash: &packed::Byte32) -> bool { - self.get(COLUMN_UNCLES, hash.as_slice()).is_some() + self.get(COLUMN_UNCLES::NAME, hash.as_slice()).is_some() } /// Gets header by uncle header hash fn get_uncle_header(&self, hash: &packed::Byte32) -> Option { - self.get(COLUMN_UNCLES, hash.as_slice()).map(|slice| { + self.get(COLUMN_UNCLES::NAME, hash.as_slice()).map(|slice| { let reader = packed::HeaderViewReader::from_slice_should_be_ok(slice.as_ref()); Unpack::::unpack(&reader) }) @@ -479,62 +553,79 @@ pub trait ChainStore: Send + Sync + Sized { return true; } }; - self.get(COLUMN_BLOCK_HEADER, hash.as_slice()).is_some() + self.get(COLUMN_BLOCK_HEADER::NAME, hash.as_slice()) + .is_some() } /// Gets cellbase by block hash fn get_cellbase(&self, hash: &packed::Byte32) -> Option { - let key = packed::TransactionKey::new_builder() - .block_hash(hash.to_owned()) - .build(); - self.get(COLUMN_BLOCK_BODY, key.as_slice()).map(|slice| { - let reader = packed::TransactionViewReader::from_slice_should_be_ok(slice.as_ref()); - Unpack::::unpack(&reader) - }) + let number = self.get_block_number(hash).expect("block number"); + let num_hash = BlockNumberAndHash::new(number, hash.to_owned()); + + let prefix = COLUMN_BLOCK_BODY::key(num_hash, 0); + + self.get(COLUMN_BLOCK_BODY::NAME, prefix.as_ref()) + .map(|slice| { + let reader = packed::TransactionViewReader::from_slice_should_be_ok(slice.as_ref()); + Unpack::::unpack(&reader) + }) } /// Gets latest built filter data block hash fn get_latest_built_filter_data_block_hash(&self) -> Option { - self.get(COLUMN_META, META_LATEST_BUILT_FILTER_DATA_KEY) - .map(|raw| packed::Byte32Reader::from_slice_should_be_ok(raw.as_ref()).to_entity()) + self.get( + COLUMN_META::NAME, + COLUMN_META::META_LATEST_BUILT_FILTER_DATA_KEY, + ) + .map(|raw| packed::Byte32Reader::from_slice_should_be_ok(raw.as_ref()).to_entity()) } /// Gets block filter data by block hash - fn get_block_filter(&self, hash: &packed::Byte32) -> Option { - self.get(COLUMN_BLOCK_FILTER, hash.as_slice()) - .map(|slice| packed::BytesReader::from_slice_should_be_ok(slice.as_ref()).to_entity()) + fn get_block_filter(&self, num_hash: &BlockNumberAndHash) -> Option { + self.get( + COLUMN_BLOCK_FILTER::NAME, + COLUMN_BLOCK_FILTER::key(num_hash.to_owned()).as_ref(), + ) + .map(|slice| packed::BytesReader::from_slice_should_be_ok(slice.as_ref()).to_entity()) } /// Gets block filter hash by block hash - fn get_block_filter_hash(&self, hash: &packed::Byte32) -> Option { - self.get(COLUMN_BLOCK_FILTER_HASH, hash.as_slice()) - .map(|slice| packed::Byte32Reader::from_slice_should_be_ok(slice.as_ref()).to_entity()) + fn get_block_filter_hash(&self, num_hash: BlockNumberAndHash) -> Option { + self.get( + COLUMN_BLOCK_FILTER_HASH::NAME, + COLUMN_BLOCK_FILTER_HASH::key(num_hash).as_ref(), + ) + .map(|slice| packed::Byte32Reader::from_slice_should_be_ok(slice.as_ref()).to_entity()) } /// Gets block bytes by block hash fn get_packed_block(&self, hash: &packed::Byte32) -> Option { let header = self - 
.get(COLUMN_BLOCK_HEADER, hash.as_slice()) + .get(COLUMN_BLOCK_HEADER::NAME, hash.as_slice()) .map(|slice| { let reader = packed::HeaderViewReader::from_slice_should_be_ok(slice.as_ref()); reader.data().to_entity() })?; - let prefix = hash.as_slice(); + let number: u64 = header.raw().number().unpack(); + let num_hash = BlockNumberAndHash::new(number, hash.to_owned()); + + let prefix = COLUMN_BLOCK_BODY::prefix_key(num_hash.clone()); + let transactions: packed::TransactionVec = self .get_iter( - COLUMN_BLOCK_BODY, - IteratorMode::From(prefix, Direction::Forward), + COLUMN_BLOCK_BODY::NAME, + IteratorMode::From(prefix.as_ref(), Direction::Forward), ) - .take_while(|(key, _)| key.starts_with(prefix)) + .take_while(|(key, _)| key.starts_with(prefix.as_ref())) .map(|(_key, value)| { let reader = packed::TransactionViewReader::from_slice_should_be_ok(value.as_ref()); reader.data().to_entity() }) .pack(); - let uncles = self.get_block_uncles(hash)?; - let proposals = self.get_block_proposal_txs_ids(hash)?; + let uncles = self.get_block_uncles(num_hash.clone())?; + let proposals = self.get_block_proposal_txs_ids(num_hash)?; let extension_opt = self.get_block_extension(hash); let block = if let Some(extension) = extension_opt { @@ -560,7 +651,15 @@ pub trait ChainStore: Send + Sync + Sized { /// Gets block header bytes by block hash fn get_packed_block_header(&self, hash: &packed::Byte32) -> Option { - self.get(COLUMN_BLOCK_HEADER, hash.as_slice()).map(|slice| { + let block_number: BlockNumber = self + .get(COLUMN_BLOCK_HEADER_NUM::NAME, hash.as_slice()) + .map(|slice| packed::Uint64Reader::from_slice_should_be_ok(&slice).unpack())?; + let num_hash = BlockNumberAndHash::new(block_number, hash.to_owned()); + self.get( + COLUMN_BLOCK_HEADER::NAME, + COLUMN_BLOCK_HEADER::key(num_hash).as_slice(), + ) + .map(|slice| { let reader = packed::HeaderViewReader::from_slice_should_be_ok(slice.as_ref()); reader.data().to_entity() }) @@ -568,12 +667,14 @@ pub trait ChainStore: Send + Sync + Sized { /// Gets a header digest. 
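// A minimal sketch (simplified types, not the trait code) of the lookup shape
// that get_packed_block_header above now has: because header keys are
// number-prefixed, a hash-keyed query costs one extra read through the new
// COLUMN_BLOCK_HEADER_NUM index before the header itself can be fetched.
use std::collections::{BTreeMap, HashMap};

fn header_by_hash(
    header_num: &HashMap<[u8; 32], u64>,  // COLUMN_BLOCK_HEADER_NUM: hash -> number
    headers: &BTreeMap<Vec<u8>, Vec<u8>>, // COLUMN_BLOCK_HEADER: number || hash -> header
    hash: &[u8; 32],
) -> Option<Vec<u8>> {
    let number = *header_num.get(hash)?; // read 1: resolve the block number
    let mut key = Vec::with_capacity(40);
    key.extend_from_slice(&number.to_be_bytes());
    key.extend_from_slice(hash);
    headers.get(&key).cloned()           // read 2: fetch by number || hash
}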
fn get_header_digest(&self, position_u64: u64) -> Option { - let position: packed::Uint64 = position_u64.pack(); - self.get(COLUMN_CHAIN_ROOT_MMR, position.as_slice()) - .map(|slice| { - let reader = packed::HeaderDigestReader::from_slice_should_be_ok(slice.as_ref()); - reader.to_entity() - }) + self.get( + COLUMN_CHAIN_ROOT_MMR::NAME, + COLUMN_CHAIN_ROOT_MMR::key(position_u64).as_slice(), + ) + .map(|slice| { + let reader = packed::HeaderDigestReader::from_slice_should_be_ok(slice.as_ref()); + reader.to_entity() + }) } /// Gets ancestor block header by a base block hash and number diff --git a/store/src/tests/db.rs b/store/src/tests/db.rs index 39e1d24d616..c4fcd3e4df3 100644 --- a/store/src/tests/db.rs +++ b/store/src/tests/db.rs @@ -63,8 +63,9 @@ fn save_and_get_block_ext() { }; let hash = block.hash(); + let number = block.number(); let txn = store.begin_transaction(); - txn.insert_block_ext(&hash, &ext).unwrap(); + txn.insert_block_ext(number, &hash, &ext).unwrap(); txn.commit().unwrap(); assert_eq!(ext, store.get_block_ext(&hash).unwrap()); } @@ -109,7 +110,7 @@ fn freeze_blockv0() { let txn = store.begin_transaction(); txn.insert_raw( - COLUMN_BLOCK_HEADER, + COLUMN_BLOCK_HEADER::NAME, block_hash.as_slice(), header.pack().as_slice(), ) @@ -145,7 +146,7 @@ fn freeze_blockv1_with_extension() { let txn = store.begin_transaction(); txn.insert_raw( - COLUMN_BLOCK_HEADER, + COLUMN_BLOCK_HEADER::NAME, block_hash.as_slice(), header.pack().as_slice(), ) diff --git a/store/src/transaction.rs b/store/src/transaction.rs index 62ba110b0f8..d3c6dc5c21c 100644 --- a/store/src/transaction.rs +++ b/store/src/transaction.rs @@ -7,15 +7,15 @@ use ckb_db::{ }; use ckb_db_schema::{ Col, COLUMN_BLOCK_BODY, COLUMN_BLOCK_EPOCH, COLUMN_BLOCK_EXT, COLUMN_BLOCK_EXTENSION, - COLUMN_BLOCK_FILTER, COLUMN_BLOCK_FILTER_HASH, COLUMN_BLOCK_HEADER, COLUMN_BLOCK_PROPOSAL_IDS, - COLUMN_BLOCK_UNCLE, COLUMN_CELL, COLUMN_CELL_DATA, COLUMN_CELL_DATA_HASH, - COLUMN_CHAIN_ROOT_MMR, COLUMN_EPOCH, COLUMN_INDEX, COLUMN_META, COLUMN_NUMBER_HASH, - COLUMN_TRANSACTION_INFO, COLUMN_UNCLES, META_CURRENT_EPOCH_KEY, - META_LATEST_BUILT_FILTER_DATA_KEY, META_TIP_HEADER_KEY, + COLUMN_BLOCK_FILTER, COLUMN_BLOCK_FILTER_HASH, COLUMN_BLOCK_HEADER, COLUMN_BLOCK_HEADER_NUM, + COLUMN_BLOCK_PROPOSAL_IDS, COLUMN_BLOCK_UNCLE, COLUMN_CELL, COLUMN_CELL_DATA, + COLUMN_CELL_DATA_HASH, COLUMN_CHAIN_ROOT_MMR, COLUMN_EPOCH, COLUMN_INDEX, COLUMN_META, + COLUMN_NUMBER_HASH, COLUMN_TRANSACTION_INFO, COLUMN_UNCLES, }; use ckb_error::Error; use ckb_freezer::Freezer; use ckb_merkle_mountain_range::{Error as MMRError, MMRStore, Result as MMRResult}; +use ckb_types::core::BlockNumber; use ckb_types::{ core::{ cell::{CellChecker, CellProvider, CellStatus}, @@ -24,6 +24,7 @@ use ckb_types::{ packed::{self, Byte32, OutPoint}, prelude::*, utilities::calc_filter_hash, + BlockNumberAndHash, }; use std::sync::Arc; @@ -44,6 +45,7 @@ impl ChainStore for StoreTransaction { } fn get(&self, col: Col, key: &[u8]) -> Option> { + // println!("get col={:?} key={}", col, hex(key)); self.inner .get_pinned(col, key) .expect("db operation should be ok") @@ -117,6 +119,7 @@ impl<'a> ChainStore for StoreTransactionSnapshot<'a> { } fn get(&self, col: Col, key: &[u8]) -> Option { + // println!("get col={:?} key={}", col, hex(key)); self.inner .get_pinned(col, key) .expect("db operation should be ok") @@ -132,6 +135,12 @@ impl<'a> ChainStore for StoreTransactionSnapshot<'a> { impl StoreTransaction { /// TODO(doc): @quake pub fn insert_raw(&self, col: Col, key: &[u8], value: &[u8]) -> 
Result<(), Error> { + // println!( + // "insert_raw col={:?} key={} value={}", + // col, + // hex(key), + // hex(value) + // ); self.inner.put(col, key, value) } @@ -160,53 +169,71 @@ impl StoreTransaction { snapshot: &StoreTransactionSnapshot<'_>, ) -> Option { self.inner - .get_for_update(COLUMN_META, META_TIP_HEADER_KEY, &snapshot.inner) + .get_for_update( + COLUMN_META::NAME, + COLUMN_META::META_TIP_HEADER_KEY, + &snapshot.inner, + ) .expect("db operation should be ok") .map(|slice| packed::Byte32Reader::from_slice_should_be_ok(slice.as_ref()).to_entity()) } /// TODO(doc): @quake pub fn insert_tip_header(&self, h: &HeaderView) -> Result<(), Error> { - self.insert_raw(COLUMN_META, META_TIP_HEADER_KEY, h.hash().as_slice()) + self.insert_raw( + COLUMN_META::NAME, + COLUMN_META::META_TIP_HEADER_KEY, + h.hash().as_slice(), + ) } /// TODO(doc): @quake pub fn insert_block(&self, block: &BlockView) -> Result<(), Error> { let hash = block.hash(); let header = block.header().pack(); + let number = block.number(); + let num_hash = BlockNumberAndHash::new(number, hash.clone()); let uncles = block.uncles().pack(); let proposals = block.data().proposals(); let txs_len: packed::Uint32 = (block.transactions().len() as u32).pack(); - self.insert_raw(COLUMN_BLOCK_HEADER, hash.as_slice(), header.as_slice())?; - self.insert_raw(COLUMN_BLOCK_UNCLE, hash.as_slice(), uncles.as_slice())?; + let block_number: packed::Uint64 = number.pack(); + self.insert_raw( + COLUMN_BLOCK_HEADER_NUM::NAME, + hash.as_slice(), + block_number.as_slice(), + )?; + self.insert_raw( + COLUMN_BLOCK_HEADER::NAME, + COLUMN_BLOCK_HEADER::key(num_hash.clone()).as_slice(), + header.as_slice(), + )?; + + self.insert_raw( + COLUMN_BLOCK_UNCLE::NAME, + COLUMN_BLOCK_UNCLE::key(num_hash.clone()).as_ref(), + uncles.as_slice(), + )?; if let Some(extension) = block.extension() { self.insert_raw( - COLUMN_BLOCK_EXTENSION, + COLUMN_BLOCK_EXTENSION::NAME, hash.as_slice(), extension.as_slice(), )?; } self.insert_raw( - COLUMN_NUMBER_HASH, - packed::NumberHash::new_builder() - .number(block.number().pack()) - .block_hash(hash.clone()) - .build() - .as_slice(), + COLUMN_NUMBER_HASH::NAME, + COLUMN_NUMBER_HASH::key(num_hash.clone()).as_ref(), txs_len.as_slice(), )?; self.insert_raw( - COLUMN_BLOCK_PROPOSAL_IDS, - hash.as_slice(), + COLUMN_BLOCK_PROPOSAL_IDS::NAME, + COLUMN_BLOCK_PROPOSAL_IDS::key(num_hash.clone()).as_ref(), proposals.as_slice(), )?; for (index, tx) in block.transactions().into_iter().enumerate() { - let key = packed::TransactionKey::new_builder() - .block_hash(hash.clone()) - .index(index.pack()) - .build(); + let key = COLUMN_BLOCK_BODY::key(num_hash.clone(), index); let tx_data = tx.pack(); - self.insert_raw(COLUMN_BLOCK_BODY, key.as_slice(), tx_data.as_slice())?; + self.insert_raw(COLUMN_BLOCK_BODY::NAME, key.as_ref(), tx_data.as_slice())?; } Ok(()) } @@ -214,13 +241,24 @@ impl StoreTransaction { /// TODO(doc): @quake pub fn delete_block(&self, block: &BlockView) -> Result<(), Error> { let hash = block.hash(); + let number = block.number(); + let num_hash = BlockNumberAndHash::new(number, hash.clone()); let txs_len = block.transactions().len(); - self.delete(COLUMN_BLOCK_HEADER, hash.as_slice())?; - self.delete(COLUMN_BLOCK_UNCLE, hash.as_slice())?; - self.delete(COLUMN_BLOCK_EXTENSION, hash.as_slice())?; - self.delete(COLUMN_BLOCK_PROPOSAL_IDS, hash.as_slice())?; self.delete( - COLUMN_NUMBER_HASH, + COLUMN_BLOCK_HEADER::NAME, + COLUMN_BLOCK_HEADER::key(num_hash.clone()).as_slice(), + )?; + self.delete( + COLUMN_BLOCK_UNCLE::NAME, + 
+ self.delete( + COLUMN_BLOCK_UNCLE::NAME, + COLUMN_BLOCK_UNCLE::key(num_hash.clone()).as_ref(), + )?; + self.delete(COLUMN_BLOCK_EXTENSION::NAME, hash.as_slice())?; + self.delete(COLUMN_BLOCK_HEADER_NUM::NAME, hash.as_slice())?; + self.delete( + COLUMN_BLOCK_PROPOSAL_IDS::NAME, + COLUMN_BLOCK_PROPOSAL_IDS::key(num_hash.clone()).as_ref(), + )?; + self.delete( + COLUMN_NUMBER_HASH::NAME, + COLUMN_NUMBER_HASH::key(num_hash.clone()).as_ref(), + )?; @@ -230,11 +268,8 @@ impl StoreTransaction { // currently rocksdb transaction do not support `DeleteRange` // https://github.com/facebook/rocksdb/issues/4812 for index in 0..txs_len { - let key = packed::TransactionKey::new_builder() - .block_hash(hash.clone()) - .index(index.pack()) - .build(); - self.delete(COLUMN_BLOCK_BODY, key.as_slice())?; + let key = COLUMN_BLOCK_BODY::key(num_hash.clone(), index); + self.delete(COLUMN_BLOCK_BODY::NAME, key.as_ref())?; } Ok(()) } @@ -242,13 +277,13 @@ impl StoreTransaction { /// TODO(doc): @quake pub fn insert_block_ext( &self, - block_hash: &packed::Byte32, + num_hash: BlockNumberAndHash, ext: &BlockExt, ) -> Result<(), Error> { let packed_ext: packed::BlockExtV1 = ext.pack(); self.insert_raw( - COLUMN_BLOCK_EXT, - block_hash.as_slice(), + COLUMN_BLOCK_EXT::NAME, + COLUMN_BLOCK_EXT::key(num_hash).as_ref(), packed_ext.as_slice(), ) } @@ -257,8 +292,10 @@ impl StoreTransaction { pub fn attach_block(&self, block: &BlockView) -> Result<(), Error> { let header = block.data().header(); let block_hash = block.hash(); + let number = block.number(); for (index, tx_hash) in block.tx_hashes().iter().enumerate() { let key = packed::TransactionKey::new_builder() + .block_number(number.pack()) .block_hash(block_hash.clone()) .index(index.pack()) .build(); @@ -267,31 +304,43 @@ impl StoreTransaction { .block_number(header.raw().number()) .block_epoch(header.raw().epoch()) .build(); - self.insert_raw(COLUMN_TRANSACTION_INFO, tx_hash.as_slice(), info.as_slice())?; + self.insert_raw( + COLUMN_TRANSACTION_INFO::NAME, + tx_hash.as_slice(), + info.as_slice(), + )?; } let block_number: packed::Uint64 = block.number().pack(); - self.insert_raw(COLUMN_INDEX, block_number.as_slice(), block_hash.as_slice())?; + self.insert_raw( + COLUMN_INDEX::NAME, + block_number.as_slice(), + block_hash.as_slice(), + )?; for uncle in block.uncles().into_iter() { self.insert_raw( - COLUMN_UNCLES, + COLUMN_UNCLES::NAME, uncle.hash().as_slice(), uncle.header().pack().as_slice(), )?; } - self.insert_raw(COLUMN_INDEX, block_hash.as_slice(), block_number.as_slice()) + self.insert_raw( + COLUMN_INDEX::NAME, + block_hash.as_slice(), + block_number.as_slice(), + ) } /// TODO(doc): @quake pub fn detach_block(&self, block: &BlockView) -> Result<(), Error> { for tx_hash in block.tx_hashes().iter() { - self.delete(COLUMN_TRANSACTION_INFO, tx_hash.as_slice())?; + self.delete(COLUMN_TRANSACTION_INFO::NAME, tx_hash.as_slice())?; } for uncle in block.uncles().into_iter() { - self.delete(COLUMN_UNCLES, uncle.hash().as_slice())?; + self.delete(COLUMN_UNCLES::NAME, uncle.hash().as_slice())?; } let block_number = block.data().header().raw().number(); - self.delete(COLUMN_INDEX, block_number.as_slice())?; - self.delete(COLUMN_INDEX, block.hash().as_slice()) + self.delete(COLUMN_INDEX::NAME, block_number.as_slice())?; + self.delete(COLUMN_INDEX::NAME, block.hash().as_slice()) } /// TODO(doc): @quake @@ -301,7 +350,7 @@ impl StoreTransaction { epoch_hash: &packed::Byte32, ) -> Result<(), Error> { self.insert_raw( - COLUMN_BLOCK_EPOCH, + COLUMN_BLOCK_EPOCH::NAME, block_hash.as_slice(), epoch_hash.as_slice(), ) } @@ -309,14 +358,18 @@ impl 
StoreTransaction { /// TODO(doc): @quake pub fn insert_epoch_ext(&self, hash: &packed::Byte32, epoch: &EpochExt) -> Result<(), Error> { - self.insert_raw(COLUMN_EPOCH, hash.as_slice(), epoch.pack().as_slice())?; + self.insert_raw(COLUMN_EPOCH::NAME, hash.as_slice(), epoch.pack().as_slice())?; let epoch_number: packed::Uint64 = epoch.number().pack(); - self.insert_raw(COLUMN_EPOCH, epoch_number.as_slice(), hash.as_slice()) + self.insert_raw(COLUMN_EPOCH::NAME, epoch_number.as_slice(), hash.as_slice()) } /// TODO(doc): @quake pub fn insert_current_epoch_ext(&self, epoch: &EpochExt) -> Result<(), Error> { - self.insert_raw(COLUMN_META, META_CURRENT_EPOCH_KEY, epoch.pack().as_slice()) + self.insert_raw( + COLUMN_META::NAME, + COLUMN_META::META_CURRENT_EPOCH_KEY, + epoch.pack().as_slice(), + ) } /// TODO(doc): @quake @@ -331,18 +384,34 @@ impl StoreTransaction { >, ) -> Result<(), Error> { for (out_point, cell, cell_data) in cells { - let key = out_point.to_cell_key(); - self.insert_raw(COLUMN_CELL, &key, cell.as_slice())?; + let block_number: BlockNumber = cell.block_number().unpack(); + self.insert_raw( + COLUMN_CELL::NAME, + COLUMN_CELL::key(block_number, &out_point).as_ref(), + cell.as_slice(), + )?; if let Some(data) = cell_data { - self.insert_raw(COLUMN_CELL_DATA, &key, data.as_slice())?; self.insert_raw( - COLUMN_CELL_DATA_HASH, - &key, + COLUMN_CELL_DATA::NAME, + COLUMN_CELL_DATA::key(block_number, &out_point).as_ref(), + data.as_slice(), + )?; + self.insert_raw( + COLUMN_CELL_DATA_HASH::NAME, + COLUMN_CELL_DATA_HASH::key(block_number, &out_point).as_ref(), data.output_data_hash().as_slice(), )?; } else { - self.insert_raw(COLUMN_CELL_DATA, &key, &[])?; - self.insert_raw(COLUMN_CELL_DATA_HASH, &key, &[])?; + self.insert_raw( + COLUMN_CELL_DATA::NAME, + COLUMN_CELL_DATA::key(block_number, &out_point).as_ref(), + &[], + )?; + self.insert_raw( + COLUMN_CELL_DATA_HASH::NAME, + COLUMN_CELL_DATA_HASH::key(block_number, &out_point).as_ref(), + &[], + )?; } } Ok(()) } @@ -351,13 +420,22 @@ impl StoreTransaction { /// TODO(doc): @quake pub fn delete_cells( &self, + block_number: BlockNumber, out_points: impl Iterator<Item = packed::OutPoint>, ) -> Result<(), Error> { for out_point in out_points { - let key = out_point.to_cell_key(); - self.delete(COLUMN_CELL, &key)?; - self.delete(COLUMN_CELL_DATA, &key)?; - self.delete(COLUMN_CELL_DATA_HASH, &key)?; + self.delete( + COLUMN_CELL::NAME, + COLUMN_CELL::key(block_number, &out_point).as_ref(), + )?; + self.delete( + COLUMN_CELL_DATA::NAME, + COLUMN_CELL_DATA::key(block_number, &out_point).as_ref(), + )?; + self.delete( + COLUMN_CELL_DATA_HASH::NAME, + COLUMN_CELL_DATA_HASH::key(block_number, &out_point).as_ref(), + )?; } Ok(()) } @@ -368,42 +446,43 @@ impl StoreTransaction { position_u64: u64, header_digest: &packed::HeaderDigest, ) -> Result<(), Error> { - let position: packed::Uint64 = position_u64.pack(); self.insert_raw( - COLUMN_CHAIN_ROOT_MMR, - position.as_slice(), + COLUMN_CHAIN_ROOT_MMR::NAME, + COLUMN_CHAIN_ROOT_MMR::key(position_u64).as_slice(), header_digest.as_slice(), ) } 
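Why every reordered column keys on an integer encoded big-endian: RocksDB compares keys as raw bytes, so big-endian encoding makes byte order coincide with numeric order. A self-contained sketch (plain Rust, not patch code; the real encoding lives in the db-schema key helpers):

fn be_key(position: u64) -> [u8; 8] {
    position.to_be_bytes() // most significant byte first
}

#[test]
fn big_endian_sorts_numerically() {
    // Bytewise comparison agrees with numeric comparison under big-endian...
    assert!(be_key(255) < be_key(256));
    // ...but not under little-endian, where 255 = [0xff, 0, ...] sorts after 256 = [0x00, 0x01, ...].
    assert!(255u64.to_le_bytes() > 256u64.to_le_bytes());
}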
/// Deletes a header digest. pub fn delete_header_digest(&self, position_u64: u64) -> Result<(), Error> { - let position: packed::Uint64 = position_u64.pack(); - self.delete(COLUMN_CHAIN_ROOT_MMR, position.as_slice()) + self.delete( + COLUMN_CHAIN_ROOT_MMR::NAME, + COLUMN_CHAIN_ROOT_MMR::key(position_u64).as_slice(), + ) } /// insert block filter data pub fn insert_block_filter( &self, - block_hash: &packed::Byte32, + num_hash: &BlockNumberAndHash, filter_data: &packed::Bytes, parent_block_filter_hash: &packed::Byte32, ) -> Result<(), Error> { self.insert_raw( - COLUMN_BLOCK_FILTER, - block_hash.as_slice(), + COLUMN_BLOCK_FILTER::NAME, + COLUMN_BLOCK_FILTER::key(num_hash.clone()).as_ref(), filter_data.as_slice(), )?; let current_block_filter_hash = calc_filter_hash(parent_block_filter_hash, filter_data); self.insert_raw( - COLUMN_BLOCK_FILTER_HASH, - block_hash.as_slice(), + COLUMN_BLOCK_FILTER_HASH::NAME, + COLUMN_BLOCK_FILTER_HASH::key(num_hash.clone()).as_ref(), current_block_filter_hash.as_slice(), )?; self.insert_raw( - COLUMN_META, - META_LATEST_BUILT_FILTER_DATA_KEY, - block_hash.as_slice(), + COLUMN_META::NAME, + COLUMN_META::META_LATEST_BUILT_FILTER_DATA_KEY, + num_hash.hash().as_slice(), ) } } diff --git a/store/src/write_batch.rs b/store/src/write_batch.rs index 65d74e2dc13..8ae40a300fa 100644 --- a/store/src/write_batch.rs +++ b/store/src/write_batch.rs @@ -4,7 +4,8 @@ use ckb_db_schema::{ COLUMN_BLOCK_UNCLE, COLUMN_CELL, COLUMN_CELL_DATA, COLUMN_CELL_DATA_HASH, COLUMN_NUMBER_HASH, }; use ckb_error::Error; -use ckb_types::{core::BlockNumber, packed, prelude::*}; +use ckb_types::core::BlockNumber; +use ckb_types::{packed, prelude::*, BlockNumberAndHash}; /// Wrapper of `RocksDBWriteBatch`, provides atomic batch of write operations. pub struct StoreWriteBatch { @@ -54,18 +55,34 @@ impl StoreWriteBatch { >, ) -> Result<(), Error> { for (out_point, cell, cell_data) in cells { - let key = out_point.to_cell_key(); - self.put(COLUMN_CELL, &key, cell.as_slice())?; + let block_number: BlockNumber = cell.block_number().unpack(); + self.put( + COLUMN_CELL::NAME, + COLUMN_CELL::key(block_number, &out_point).as_ref(), + cell.as_slice(), + )?; if let Some(data) = cell_data { - self.put(COLUMN_CELL_DATA, &key, data.as_slice())?; self.put( - COLUMN_CELL_DATA_HASH, - &key, + COLUMN_CELL_DATA::NAME, + COLUMN_CELL_DATA::key(block_number, &out_point).as_ref(), + data.as_slice(), + )?; + self.put( + COLUMN_CELL_DATA_HASH::NAME, + COLUMN_CELL_DATA_HASH::key(block_number, &out_point).as_ref(), data.output_data_hash().as_slice(), )?; } else { - self.put(COLUMN_CELL_DATA, &key, &[])?; - self.put(COLUMN_CELL_DATA_HASH, &key, &[])?; + self.put( + COLUMN_CELL_DATA::NAME, + COLUMN_CELL_DATA::key(block_number, &out_point).as_ref(), + &[], + )?; + self.put( + COLUMN_CELL_DATA_HASH::NAME, + COLUMN_CELL_DATA_HASH::key(block_number, &out_point).as_ref(), + &[], + )?; } } Ok(()) } @@ -74,13 +91,14 @@ impl StoreWriteBatch { /// Remove cells from this write batch pub fn delete_cells( &mut self, + block_number: BlockNumber, out_points: impl Iterator<Item = packed::OutPoint>, ) -> Result<(), Error> { for out_point in out_points { - let key = out_point.to_cell_key(); - self.delete(COLUMN_CELL, &key)?; - self.delete(COLUMN_CELL_DATA, &key)?; - self.delete(COLUMN_CELL_DATA_HASH, &key)?; + let key = out_point.to_cell_key(block_number); + self.delete(COLUMN_CELL::NAME, &key)?; + self.delete(COLUMN_CELL_DATA::NAME, &key)?; + self.delete(COLUMN_CELL_DATA_HASH::NAME, &key)?; } Ok(()) } @@ -89,42 +107,44 @@ impl StoreWriteBatch { /// Removes the block body from database with 
corresponding hash, number and txs number pub fn delete_block_body( &mut self, - number: BlockNumber, - hash: &packed::Byte32, + num_hash: BlockNumberAndHash, txs_len: u32, ) -> Result<(), Error> { - self.inner.delete(COLUMN_BLOCK_UNCLE, hash.as_slice())?; - self.inner.delete(COLUMN_BLOCK_EXTENSION, hash.as_slice())?; + self.inner.delete( + COLUMN_BLOCK_UNCLE::NAME, + COLUMN_BLOCK_UNCLE::key(num_hash.clone()).as_ref(), + )?; + self.inner + .delete(COLUMN_BLOCK_EXTENSION::NAME, num_hash.hash().as_slice())?; self.inner - .delete(COLUMN_BLOCK_PROPOSAL_IDS, hash.as_slice())?; + .delete( + COLUMN_BLOCK_PROPOSAL_IDS::NAME, + COLUMN_BLOCK_PROPOSAL_IDS::key(num_hash.clone()).as_ref(), + )?; self.inner.delete( - COLUMN_NUMBER_HASH, - packed::NumberHash::new_builder() - .number(number.pack()) - .block_hash(hash.clone()) - .build() - .as_slice(), + COLUMN_NUMBER_HASH::NAME, + COLUMN_NUMBER_HASH::key(num_hash.clone()).as_ref(), )?; - let key_range = (0u32..txs_len).map(|i| { - packed::TransactionKey::new_builder() - .block_hash(hash.clone()) - .index(i.pack()) - .build() - }); + let key_range = + (0u32..txs_len).map(|i| COLUMN_BLOCK_BODY::key(num_hash.clone(), i as usize)); - self.inner.delete_range(COLUMN_BLOCK_BODY, key_range)?; + self.inner + .delete_range(COLUMN_BLOCK_BODY::NAME, key_range)?; Ok(()) } /// Removes the entire block from database with corresponding hash, number and txs number pub fn delete_block( &mut self, - number: BlockNumber, - hash: &packed::Byte32, + num_hash: BlockNumberAndHash, txs_len: u32, ) -> Result<(), Error> { - self.inner.delete(COLUMN_BLOCK_HEADER, hash.as_slice())?; - self.delete_block_body(number, hash, txs_len) + self.inner.delete( + COLUMN_BLOCK_HEADER::NAME, + COLUMN_BLOCK_HEADER::key(num_hash.clone()).as_slice(), + )?; + self.delete_block_body(num_hash, txs_len) } } diff --git a/sync/src/filter/get_block_filter_check_points_process.rs b/sync/src/filter/get_block_filter_check_points_process.rs index a1945250526..a82d14cd76e 100644 --- a/sync/src/filter/get_block_filter_check_points_process.rs +++ b/sync/src/filter/get_block_filter_check_points_process.rs @@ -3,7 +3,7 @@ use crate::utils::send_message_to; use crate::{attempt, Status}; use ckb_network::{CKBProtocolContext, PeerIndex}; use ckb_types::core::BlockNumber; -use ckb_types::{packed, prelude::*}; +use ckb_types::{packed, prelude::*, BlockNumberAndHash}; use std::sync::Arc; const BATCH_SIZE: BlockNumber = 2000; @@ -42,9 +42,15 @@ impl<'a> GetBlockFilterCheckPointsProcess<'a> { for block_number in (start_number..start_number + BATCH_SIZE * CHECK_POINT_INTERVAL) .step_by(CHECK_POINT_INTERVAL as usize) { - if let Some(block_filter_hash) = active_chain - .get_block_hash(block_number) - .and_then(|block_hash| active_chain.get_block_filter_hash(&block_hash)) + if let Some(block_filter_hash) = + active_chain + .get_block_hash(block_number) + .and_then(|block_hash| { + active_chain.get_block_filter_hash(BlockNumberAndHash::new( + block_number, + block_hash, + )) + }) { block_filter_hashes.push(block_filter_hash); } else { diff --git a/sync/src/filter/get_block_filter_hashes_process.rs b/sync/src/filter/get_block_filter_hashes_process.rs index c1a3f5a6e08..5106ba210a5 100644 --- a/sync/src/filter/get_block_filter_hashes_process.rs +++ b/sync/src/filter/get_block_filter_hashes_process.rs @@ -2,7 +2,7 @@ use crate::filter::BlockFilter; use crate::utils::send_message_to; use crate::{attempt, Status}; use ckb_network::{CKBProtocolContext, PeerIndex}; -use ckb_types::{core::BlockNumber, packed, prelude::*}; +use 
ckb_types::{core::BlockNumber, packed, prelude::*, BlockNumberAndHash}; use std::sync::Arc; const BATCH_SIZE: BlockNumber = 2000; @@ -38,10 +38,15 @@ impl<'a> GetBlockFilterHashesProcess<'a> { if latest >= start_number { let parent_block_filter_hash = if start_number > 0 { + let block_number = start_number - 1; match active_chain - .get_block_hash(start_number - 1) - .and_then(|block_hash| active_chain.get_block_filter_hash(&block_hash)) - { + .get_block_hash(block_number) + .and_then(|block_hash| { + active_chain.get_block_filter_hash(BlockNumberAndHash::new( + block_number, + block_hash, + )) + }) { Some(parent_block_filter_hash) => parent_block_filter_hash, None => return Status::ignored(), } @@ -50,9 +55,15 @@ impl<'a> GetBlockFilterHashesProcess<'a> { }; for block_number in start_number..start_number + BATCH_SIZE { - if let Some(block_filter_hash) = active_chain - .get_block_hash(block_number) - .and_then(|block_hash| active_chain.get_block_filter_hash(&block_hash)) + if let Some(block_filter_hash) = + active_chain + .get_block_hash(block_number) + .and_then(|block_hash| { + active_chain.get_block_filter_hash(BlockNumberAndHash::new( + block_number, + block_hash, + )) + }) { block_filter_hashes.push(block_filter_hash); } else { diff --git a/sync/src/filter/get_block_filters_process.rs b/sync/src/filter/get_block_filters_process.rs index 22e527f0e5b..27bd43e556f 100644 --- a/sync/src/filter/get_block_filters_process.rs +++ b/sync/src/filter/get_block_filters_process.rs @@ -3,7 +3,7 @@ use crate::utils::send_message_to; use crate::{attempt, Status}; use ckb_network::{CKBProtocolContext, PeerIndex}; use ckb_types::core::BlockNumber; -use ckb_types::{packed, prelude::*}; +use ckb_types::{packed, prelude::*, BlockNumberAndHash}; use std::sync::Arc; const BATCH_SIZE: BlockNumber = 1000; @@ -40,7 +40,8 @@ impl<'a> GetBlockFiltersProcess<'a> { let mut filters = Vec::new(); for block_number in start_number..start_number + BATCH_SIZE { if let Some(block_hash) = active_chain.get_block_hash(block_number) { - if let Some(block_filter) = active_chain.get_block_filter(&block_hash) { + let num_hash = BlockNumberAndHash::new(block_number, block_hash.clone()); + if let Some(block_filter) = active_chain.get_block_filter(&num_hash) { block_hashes.push(block_hash); filters.push(block_filter); } else { diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index e634c5f8387..f66ec41e152 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -2027,12 +2027,12 @@ impl ActiveChain { self.snapshot().get_block_ext(h) } - pub fn get_block_filter(&self, hash: &packed::Byte32) -> Option<packed::Bytes> { - self.store().get_block_filter(hash) + pub fn get_block_filter(&self, num_hash: &BlockNumberAndHash) -> Option<packed::Bytes> { + self.store().get_block_filter(num_hash) } - pub fn get_block_filter_hash(&self, hash: &packed::Byte32) -> Option<packed::Byte32> { - self.store().get_block_filter_hash(hash) + pub fn get_block_filter_hash(&self, num_hash: BlockNumberAndHash) -> Option<packed::Byte32> { + self.store().get_block_filter_hash(num_hash) } pub fn get_latest_built_filter_block_number(&self) -> BlockNumber { diff --git a/util/app-config/src/lib.rs b/util/app-config/src/lib.rs index 02110816aa6..bf45368a536 100644 --- a/util/app-config/src/lib.rs +++ b/util/app-config/src/lib.rs @@ -32,7 +32,7 @@ use clap::ArgMatches; use std::{path::PathBuf, str::FromStr}; // 500_000 total difficulty -const MIN_CHAIN_WORK_500K: U256 = u256!("0x3314412053c82802a7"); +const MIN_CHAIN_WORK_500K: U256 = u256!("0x0"); /// A struct including all the information to start the ckb 
process. pub struct Setup { diff --git a/util/gen-types/schemas/extensions.mol b/util/gen-types/schemas/extensions.mol index 13169dd99f4..49be1ed8f20 100644 --- a/util/gen-types/schemas/extensions.mol +++ b/util/gen-types/schemas/extensions.mol @@ -93,6 +93,7 @@ struct EpochExt { } struct TransactionKey { + block_number: BeUint64, block_hash: Byte32, index: BeUint32, } diff --git a/util/gen-types/src/extension/shortcut.rs b/util/gen-types/src/extension/shortcut.rs index 499145ee904..aac500918c8 100644 --- a/util/gen-types/src/extension/shortcut.rs +++ b/util/gen-types/src/extension/shortcut.rs @@ -71,18 +71,19 @@ impl packed::OutPoint { /// The difference between [`Self::as_slice()`](../prelude/trait.Entity.html#tymethod.as_slice) /// and [`Self::to_cell_key()`](#method.to_cell_key) is the byteorder of the field `index`. /// - /// - Uses little endian for the field `index` in serialization. + /// - Uses little endian for the fields `block_number` and `index` in serialization. /// /// Because in the real world, the little endian machines make up the majority, we can cast /// it as a number without re-order the bytes. /// - /// - Uses big endian for the field `index` to index cells in storage. + /// - Uses big endian for the fields `block_number` and `index` to index cells in storage. /// /// So we can use `tx_hash` as key prefix to seek the cells from storage in the forward /// order, so as to traverse cells in the forward order too. - pub fn to_cell_key(&self) -> Vec<u8> { - let mut key = Vec::with_capacity(36); + pub fn to_cell_key(&self, block_number: BlockNumber) -> Vec<u8> { + let mut key = Vec::with_capacity(44); let index: u32 = self.index().unpack(); + key.extend_from_slice(block_number.to_be_bytes().as_ref()); key.extend_from_slice(self.tx_hash().as_slice()); key.extend_from_slice(&index.to_be_bytes()); key diff --git a/util/gen-types/src/generated/extensions.rs b/util/gen-types/src/generated/extensions.rs index c9066a2cc12..ab7f780fb20 100644 --- a/util/gen-types/src/generated/extensions.rs +++ b/util/gen-types/src/generated/extensions.rs @@ -4724,7 +4724,8 @@ impl ::core::fmt::Debug for TransactionKey { impl ::core::fmt::Display for TransactionKey { fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { write!(f, "{} {{ ", Self::NAME)?; - write!(f, "{}: {}", "block_hash", self.block_hash())?; + write!(f, "{}: {}", "block_number", self.block_number())?; + write!(f, ", {}: {}", "block_hash", self.block_hash())?; write!(f, ", {}: {}", "index", self.index())?; write!(f, " }}") } } @@ -4736,18 +4737,21 @@ impl ::core::default::Default for TransactionKey { } } impl TransactionKey { - const DEFAULT_VALUE: [u8; 36] = [ + const DEFAULT_VALUE: [u8; 44] = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]; - pub const TOTAL_SIZE: usize = 36; - pub const FIELD_SIZES: [usize; 2] = [32, 4]; - pub const FIELD_COUNT: usize = 2; + pub const TOTAL_SIZE: usize = 44; + pub const FIELD_SIZES: [usize; 3] = [8, 32, 4]; + pub const FIELD_COUNT: usize = 3; + pub fn block_number(&self) -> BeUint64 { + BeUint64::new_unchecked(self.0.slice(0..8)) + } pub fn block_hash(&self) -> Byte32 { - Byte32::new_unchecked(self.0.slice(0..32)) + Byte32::new_unchecked(self.0.slice(8..40)) } pub fn index(&self) -> BeUint32 { - BeUint32::new_unchecked(self.0.slice(32..36)) + BeUint32::new_unchecked(self.0.slice(40..44)) } 
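Taken together, `to_cell_key` and the regenerated `TransactionKey` encode the same 44-byte layout: an 8-byte big-endian block number, the 32-byte tx hash, then a 4-byte big-endian index. A standalone sketch with plain types in place of the `packed::` wrappers:

fn cell_key(block_number: u64, tx_hash: &[u8; 32], index: u32) -> Vec<u8> {
    let mut key = Vec::with_capacity(44);
    key.extend_from_slice(&block_number.to_be_bytes()); // 8 bytes: groups keys by height
    key.extend_from_slice(tx_hash);                     // 32 bytes
    key.extend_from_slice(&index.to_be_bytes());        // 4 bytes: orders outputs within a tx
    debug_assert_eq!(key.len(), 44);
    key
}

Because every key of one block shares the same 8-byte prefix, per-block range deletes and forward scans become simple prefix operations.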
pub fn as_reader<'r>(&'r self) -> TransactionKeyReader<'r> { TransactionKeyReader::new_unchecked(self.as_slice()) @@ -4776,6 +4780,7 @@ impl molecule::prelude::Entity for TransactionKey { } fn as_builder(self) -> Self::Builder { Self::new_builder() + .block_number(self.block_number()) .block_hash(self.block_hash()) .index(self.index()) } @@ -4799,20 +4804,24 @@ impl<'r> ::core::fmt::Debug for TransactionKeyReader<'r> { impl<'r> ::core::fmt::Display for TransactionKeyReader<'r> { fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { write!(f, "{} {{ ", Self::NAME)?; - write!(f, "{}: {}", "block_hash", self.block_hash())?; + write!(f, "{}: {}", "block_number", self.block_number())?; + write!(f, ", {}: {}", "block_hash", self.block_hash())?; write!(f, ", {}: {}", "index", self.index())?; write!(f, " }}") } } impl<'r> TransactionKeyReader<'r> { - pub const TOTAL_SIZE: usize = 36; - pub const FIELD_SIZES: [usize; 2] = [32, 4]; - pub const FIELD_COUNT: usize = 2; + pub const TOTAL_SIZE: usize = 44; + pub const FIELD_SIZES: [usize; 3] = [8, 32, 4]; + pub const FIELD_COUNT: usize = 3; + pub fn block_number(&self) -> BeUint64Reader<'r> { + BeUint64Reader::new_unchecked(&self.as_slice()[0..8]) + } pub fn block_hash(&self) -> Byte32Reader<'r> { - Byte32Reader::new_unchecked(&self.as_slice()[0..32]) + Byte32Reader::new_unchecked(&self.as_slice()[8..40]) } pub fn index(&self) -> BeUint32Reader<'r> { - BeUint32Reader::new_unchecked(&self.as_slice()[32..36]) + BeUint32Reader::new_unchecked(&self.as_slice()[40..44]) } } impl<'r> molecule::prelude::Reader<'r> for TransactionKeyReader<'r> { @@ -4838,13 +4847,18 @@ impl<'r> molecule::prelude::Reader<'r> for TransactionKeyReader<'r> { } #[derive(Debug, Default)] pub struct TransactionKeyBuilder { + pub(crate) block_number: BeUint64, pub(crate) block_hash: Byte32, pub(crate) index: BeUint32, } impl TransactionKeyBuilder { - pub const TOTAL_SIZE: usize = 36; - pub const FIELD_SIZES: [usize; 2] = [32, 4]; - pub const FIELD_COUNT: usize = 2; + pub const TOTAL_SIZE: usize = 44; + pub const FIELD_SIZES: [usize; 3] = [8, 32, 4]; + pub const FIELD_COUNT: usize = 3; + pub fn block_number(mut self, v: BeUint64) -> Self { + self.block_number = v; + self + } pub fn block_hash(mut self, v: Byte32) -> Self { self.block_hash = v; self @@ -4861,6 +4875,7 @@ impl molecule::prelude::Builder for TransactionKeyBuilder { Self::TOTAL_SIZE } fn write<W: molecule::io::Write>(&self, writer: &mut W) -> molecule::io::Result<()> { + writer.write_all(self.block_number.as_slice())?; writer.write_all(self.block_hash.as_slice())?; writer.write_all(self.index.as_slice())?; Ok(()) } @@ -5071,12 +5086,12 @@ impl ::core::default::Default for TransactionInfo { } } impl TransactionInfo { - const DEFAULT_VALUE: [u8; 52] = [ + const DEFAULT_VALUE: [u8; 60] = [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]; - pub const TOTAL_SIZE: usize = 52; - pub const FIELD_SIZES: [usize; 3] = [8, 8, 36]; + pub const TOTAL_SIZE: usize = 60; + pub const FIELD_SIZES: [usize; 3] = [8, 8, 44]; pub const FIELD_COUNT: usize = 3; pub fn block_number(&self) -> Uint64 { Uint64::new_unchecked(self.0.slice(0..8)) @@ -5085,7 +5100,7 @@ impl TransactionInfo { Uint64::new_unchecked(self.0.slice(8..16)) } pub fn key(&self) -> TransactionKey { - TransactionKey::new_unchecked(self.0.slice(16..52)) + TransactionKey::new_unchecked(self.0.slice(16..60)) } pub fn as_reader<'r>(&'r self) -> 
TransactionInfoReader<'r> { TransactionInfoReader::new_unchecked(self.as_slice()) @@ -5145,8 +5160,8 @@ impl<'r> ::core::fmt::Display for TransactionInfoReader<'r> { } } impl<'r> TransactionInfoReader<'r> { - pub const TOTAL_SIZE: usize = 52; - pub const FIELD_SIZES: [usize; 3] = [8, 8, 36]; + pub const TOTAL_SIZE: usize = 60; + pub const FIELD_SIZES: [usize; 3] = [8, 8, 44]; pub const FIELD_COUNT: usize = 3; pub fn block_number(&self) -> Uint64Reader<'r> { Uint64Reader::new_unchecked(&self.as_slice()[0..8]) @@ -5155,7 +5170,7 @@ impl<'r> TransactionInfoReader<'r> { Uint64Reader::new_unchecked(&self.as_slice()[8..16]) } pub fn key(&self) -> TransactionKeyReader<'r> { - TransactionKeyReader::new_unchecked(&self.as_slice()[16..52]) + TransactionKeyReader::new_unchecked(&self.as_slice()[16..60]) } } impl<'r> molecule::prelude::Reader<'r> for TransactionInfoReader<'r> { @@ -5186,8 +5201,8 @@ pub struct TransactionInfoBuilder { pub(crate) key: TransactionKey, } impl TransactionInfoBuilder { - pub const TOTAL_SIZE: usize = 52; - pub const FIELD_SIZES: [usize; 3] = [8, 8, 36]; + pub const TOTAL_SIZE: usize = 60; + pub const FIELD_SIZES: [usize; 3] = [8, 8, 44]; pub const FIELD_COUNT: usize = 3; pub fn block_number(mut self, v: Uint64) -> Self { self.block_number = v; diff --git a/util/indexer-sync/src/lib.rs b/util/indexer-sync/src/lib.rs index 135d516e173..992d532e664 100644 --- a/util/indexer-sync/src/lib.rs +++ b/util/indexer-sync/src/lib.rs @@ -57,13 +57,13 @@ pub trait IndexerSync { /// Construct new secondary db instance pub fn new_secondary_db(ckb_db_config: &DBConfig, config: &IndexerSyncConfig) -> SecondaryDB { let cf_names = vec![ - COLUMN_INDEX, - COLUMN_META, - COLUMN_BLOCK_HEADER, - COLUMN_BLOCK_BODY, - COLUMN_BLOCK_UNCLE, - COLUMN_BLOCK_PROPOSAL_IDS, - COLUMN_BLOCK_EXTENSION, + COLUMN_INDEX::NAME, + COLUMN_META::NAME, + COLUMN_BLOCK_HEADER::NAME, + COLUMN_BLOCK_BODY::NAME, + COLUMN_BLOCK_UNCLE::NAME, + COLUMN_BLOCK_PROPOSAL_IDS::NAME, + COLUMN_BLOCK_EXTENSION::NAME, ]; let secondary_opts = indexer_secondary_options(config); SecondaryDB::open_cf( diff --git a/util/light-client-protocol-server/src/components/get_blocks_proof.rs b/util/light-client-protocol-server/src/components/get_blocks_proof.rs index 991852309c4..e73ce9c5cf5 100644 --- a/util/light-client-protocol-server/src/components/get_blocks_proof.rs +++ b/util/light-client-protocol-server/src/components/get_blocks_proof.rs @@ -85,7 +85,7 @@ impl<'a> GetBlocksProofProcess<'a> { block_headers.push(header.data()); if ckb2023 { let uncles = snapshot - .get_block_uncles(&block_hash) + .get_block_uncles(header.num_hash()) .expect("block uncles must be stored"); let extension = snapshot.get_block_extension(&block_hash); diff --git a/util/light-client-protocol-server/src/components/get_transactions_proof.rs b/util/light-client-protocol-server/src/components/get_transactions_proof.rs index 1dd5700b538..f326f19a309 100644 --- a/util/light-client-protocol-server/src/components/get_transactions_proof.rs +++ b/util/light-client-protocol-server/src/components/get_transactions_proof.rs @@ -3,7 +3,7 @@ use std::collections::HashMap; use ckb_merkle_mountain_range::leaf_index_to_pos; use ckb_network::{CKBProtocolContext, PeerIndex}; use ckb_store::ChainStore; -use ckb_types::{packed, prelude::*, utilities::CBMT}; +use ckb_types::{packed, prelude::*, utilities::CBMT, BlockNumberAndHash}; use crate::{constant, LightClientProtocol, Status, StatusCode}; @@ -116,8 +116,9 @@ impl<'a> GetTransactionsProofProcess<'a> { 
positions.push(leaf_index_to_pos(block.number())); filtered_blocks.push(filtered_block); if ckb2023 { + let num_hash = BlockNumberAndHash::new(block.number(), block_hash.clone()); let uncles = snapshot - .get_block_uncles(&block_hash) + .get_block_uncles(num_hash) .expect("block uncles must be stored"); let extension = snapshot.get_block_extension(&block_hash); diff --git a/util/migrate/src/migrate.rs b/util/migrate/src/migrate.rs index 6446d4b37d9..ddf0554d036 100644 --- a/util/migrate/src/migrate.rs +++ b/util/migrate/src/migrate.rs @@ -42,7 +42,7 @@ impl Migrate { /// Open read only db pub fn open_read_only_db(&self) -> Result<Option<ReadOnlyDB>, Error> { // open cf meta column for empty check - ReadOnlyDB::open_cf(&self.path, vec![COLUMN_META]) + ReadOnlyDB::open_cf(&self.path, vec![COLUMN_META::NAME]) } /// Check if database's version is matched with the executable binary version. diff --git a/util/migrate/src/migrations/add_block_filter_hash.rs b/util/migrate/src/migrations/add_block_filter_hash.rs index ec73d3506a7..ca6d0610afa 100644 --- a/util/migrate/src/migrations/add_block_filter_hash.rs +++ b/util/migrate/src/migrations/add_block_filter_hash.rs @@ -6,6 +6,7 @@ use ckb_error::Error; use ckb_hash::blake2b_256; use ckb_store::{ChainDB, ChainStore}; use ckb_types::prelude::Entity; +use ckb_types::BlockNumberAndHash; use std::sync::Arc; pub struct AddBlockFilterHash; @@ -40,12 +41,12 @@ impl Migration for AddBlockFilterHash { let pb = ::std::sync::Arc::clone(&pb); let pbi = pb(latest_built_filter_data_block_number + 1); pbi.set_style( - ProgressStyle::default_bar() - .template( - "{prefix:.bold.dim} {spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {pos}/{len} ({eta}) {msg}", - ) - .progress_chars("#>-"), - ); + ProgressStyle::default_bar() + .template( + "{prefix:.bold.dim} {spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {pos}/{len} ({eta}) {msg}", + ) + .progress_chars("#>-"), + ); pbi.set_position(0); pbi.enable_steady_tick(5000); @@ -58,8 +59,9 @@ impl Migration for AddBlockFilterHash { break; } let block_hash = chain_db.get_block_hash(block_number).expect("index stored"); + let num_hash = BlockNumberAndHash::new(block_number, block_hash.clone()); let filter_data = chain_db - .get_block_filter(&block_hash) + .get_block_filter(&num_hash) .expect("filter data stored"); parent_block_filter_hash = blake2b_256( [ @@ -70,8 +72,8 @@ impl Migration for AddBlockFilterHash { ); db_txn .put( - COLUMN_BLOCK_FILTER_HASH, - block_hash.as_slice(), + COLUMN_BLOCK_FILTER_HASH::NAME, + COLUMN_BLOCK_FILTER_HASH::key(num_hash).as_ref(), parent_block_filter_hash.as_slice(), ) .expect("db transaction put should be ok"); diff --git a/util/migrate/src/migrations/add_extra_data_hash.rs b/util/migrate/src/migrations/add_extra_data_hash.rs index afb711424c8..0c8590bb09c 100644 --- a/util/migrate/src/migrations/add_extra_data_hash.rs +++ b/util/migrate/src/migrations/add_extra_data_hash.rs @@ -41,14 +41,18 @@ impl Migration for AddExtraDataHash { } else { &[] }; - wb.put(COLUMN_CELL_DATA_HASH, key, data_hash)?; + wb.put(COLUMN_CELL_DATA_HASH::NAME, key, data_hash)?; Ok(()) }; let mode = self.mode(&next_key); - let (_count, nk) = - db.traverse(COLUMN_CELL_DATA, &mut cell_data_migration, mode, LIMIT)?; + let (_count, nk) = db.traverse( + COLUMN_CELL_DATA::NAME, + &mut cell_data_migration, + mode, + LIMIT, + )?; next_key = nk; if !wb.is_empty() { diff --git a/util/migrate/src/migrations/add_number_hash_mapping.rs b/util/migrate/src/migrations/add_number_hash_mapping.rs index cf07ead605f..65e4bba0411 100644 
--- a/util/migrate/src/migrations/add_number_hash_mapping.rs +++ b/util/migrate/src/migrations/add_number_hash_mapping.rs @@ -21,9 +21,9 @@ impl Migration for AddNumberHashMapping { { for number in i * chunk_size..end { let block_number: packed::Uint64 = number.pack(); - let raw_hash = chain_db.get(COLUMN_INDEX, block_number.as_slice()).expect("DB data integrity"); + let raw_hash = chain_db.get(COLUMN_INDEX::NAME, block_number.as_slice()).expect("DB data integrity"); let txs_len = chain_db.get_iter( - COLUMN_BLOCK_BODY, + COLUMN_BLOCK_BODY::NAME, IteratorMode::From(&raw_hash, Direction::Forward), ) .take_while(|(key, _)| key.starts_with(&raw_hash)) @@ -37,7 +37,7 @@ impl Migration for AddNumberHashMapping { let key = packed::NumberHash::new_unchecked(raw_key.into()); wb.put( - COLUMN_NUMBER_HASH, + COLUMN_NUMBER_HASH::NAME, key.as_slice(), raw_txs_len.as_slice(), ) diff --git a/util/migrate/src/migrations/cell.rs b/util/migrate/src/migrations/cell.rs index 7dee3fc9914..45f3eb1ec07 100644 --- a/util/migrate/src/migrations/cell.rs +++ b/util/migrate/src/migrations/cell.rs @@ -5,10 +5,12 @@ use ckb_db_schema::COLUMN_CELL; use ckb_error::Error; use ckb_migration_template::multi_thread_migration; use ckb_store::{ChainDB, ChainStore, StoreWriteBatch}; +use ckb_types::core::BlockNumber; use ckb_types::{ core::{BlockView, TransactionView}, packed, prelude::*, + BlockNumberAndHash, }; use std::sync::Arc; @@ -34,7 +36,8 @@ impl Migration for CellMigration { .and_then(|hash| chain_db.get_block(&hash)).expect("DB data integrity"); if block.transactions().len() > 1 { - hashes.push(block.hash()); + let num_hash = BlockNumberAndHash::new(block.number(), block.hash()); + hashes.push(num_hash); } insert_block_cell(&mut wb, &block); @@ -55,10 +58,12 @@ impl Migration for CellMigration { pbi.set_length(size + hashes.len() as u64); - for hash in hashes { - let txs = chain_db.get_block_body(&hash); + for num_hash in hashes { - delete_consumed_cell(&mut wb, &txs); + let block_number = num_hash.number(); + let txs = chain_db.get_block_body(num_hash); + + delete_consumed_cell(&mut wb, block_number, &txs); if wb.size_in_bytes() > MAX_DELETE_BATCH_SIZE { chain_db.write(&wb).unwrap(); wb.clear().unwrap(); @@ -76,8 +81,8 @@ impl Migration for CellMigration { // https://github.com/facebook/rocksdb/issues/1295 fn clean_cell_column(db: &mut RocksDB) -> Result<(), Error> { - db.drop_cf(COLUMN_CELL)?; - db.create_cf(COLUMN_CELL)?; + db.drop_cf(COLUMN_CELL::NAME)?; + db.create_cf(COLUMN_CELL::NAME)?; Ok(()) } @@ -129,12 +134,16 @@ fn insert_block_cell(batch: &mut StoreWriteBatch, block: &BlockView) { batch.insert_cells(new_cells).unwrap(); } -fn delete_consumed_cell(batch: &mut StoreWriteBatch, transactions: &[TransactionView]) { +fn delete_consumed_cell( + batch: &mut StoreWriteBatch, + block_number: BlockNumber, + transactions: &[TransactionView], +) { // mark inputs dead // skip cellbase let deads = transactions .iter() .skip(1) .flat_map(|tx| tx.input_pts_iter()); - batch.delete_cells(deads).unwrap(); + batch.delete_cells(block_number, deads).unwrap(); } diff --git a/util/migrate/src/migrations/set_2019_block_cycle_zero.rs b/util/migrate/src/migrations/set_2019_block_cycle_zero.rs index e45f6fd2535..5cdeea82ccd 100644 --- a/util/migrate/src/migrations/set_2019_block_cycle_zero.rs +++ b/util/migrate/src/migrations/set_2019_block_cycle_zero.rs @@ -49,7 +49,7 @@ impl Migration for BlockExt2019ToZero { let header = if tip_epoch_number < hard_fork_epoch_number { Some(tip_header) } else if let Some(epoch_hash) = - 
chain_db.get(COLUMN_EPOCH, hard_fork_epoch_number.as_slice()) + chain_db.get(COLUMN_EPOCH::NAME, hard_fork_epoch_number.as_slice()) { let epoch_ext = chain_db .get_epoch_ext( @@ -84,9 +84,10 @@ impl Migration for BlockExt2019ToZero { } for _ in 0..10000 { let hash = header.hash(); + let num_hash = header.num_hash(); let mut old_block_ext = db_txn.get_block_ext(&hash).unwrap(); old_block_ext.cycles = None; - db_txn.insert_block_ext(&hash, &old_block_ext)?; + db_txn.insert_block_ext(num_hash, &old_block_ext)?; if header.is_genesis() { break; diff --git a/util/migrate/src/migrations/table_to_struct.rs b/util/migrate/src/migrations/table_to_struct.rs index 76fef76901c..4fa8efa4c48 100644 --- a/util/migrate/src/migrations/table_to_struct.rs +++ b/util/migrate/src/migrations/table_to_struct.rs @@ -2,7 +2,6 @@ use ckb_db::{Direction, IteratorMode, Result, RocksDB}; use ckb_db_migration::{Migration, ProgressBar, ProgressStyle}; use ckb_db_schema::{ COLUMN_BLOCK_HEADER, COLUMN_EPOCH, COLUMN_META, COLUMN_TRANSACTION_INFO, COLUMN_UNCLES, - META_CURRENT_EPOCH_KEY, }; use std::sync::Arc; @@ -28,7 +27,7 @@ impl ChangeMoleculeTableToStruct { let mut header_view_migration = |key: &[u8], value: &[u8]| -> Result<()> { // (1 total size field + 2 fields) * 4 byte per field if value.len() != HEADER_SIZE { - wb.put(COLUMN_BLOCK_HEADER, key, &value[12..])?; + wb.put(COLUMN_BLOCK_HEADER::NAME, key, &value[12..])?; } Ok(()) @@ -36,8 +35,12 @@ impl ChangeMoleculeTableToStruct { let mode = self.mode(&next_key); - let (_count, nk) = - db.traverse(COLUMN_BLOCK_HEADER, &mut header_view_migration, mode, LIMIT)?; + let (_count, nk) = db.traverse( + COLUMN_BLOCK_HEADER::NAME, + &mut header_view_migration, + mode, + LIMIT, + )?; next_key = nk; if !wb.is_empty() { @@ -57,13 +60,14 @@ impl ChangeMoleculeTableToStruct { let mut uncles_migration = |key: &[u8], value: &[u8]| -> Result<()> { // (1 total size field + 2 fields) * 4 byte per field if value.len() != HEADER_SIZE { - wb.put(COLUMN_UNCLES, key, &value[12..])?; + wb.put(COLUMN_UNCLES::NAME, key, &value[12..])?; } Ok(()) }; let mode = self.mode(&next_key); - let (_count, nk) = db.traverse(COLUMN_UNCLES, &mut uncles_migration, mode, LIMIT)?; + let (_count, nk) = + db.traverse(COLUMN_UNCLES::NAME, &mut uncles_migration, mode, LIMIT)?; next_key = nk; if !wb.is_empty() { @@ -82,15 +86,19 @@ impl ChangeMoleculeTableToStruct { let mut transaction_info_migration = |key: &[u8], value: &[u8]| -> Result<()> { // (1 total size field + 3 fields) * 4 byte per field if value.len() != TRANSACTION_INFO_SIZE { - wb.put(COLUMN_TRANSACTION_INFO, key, &value[16..])?; + wb.put(COLUMN_TRANSACTION_INFO::NAME, key, &value[16..])?; } Ok(()) }; let mode = self.mode(&next_key); - let (_count, nk) = - db.traverse(COLUMN_UNCLES, &mut transaction_info_migration, mode, LIMIT)?; + let (_count, nk) = db.traverse( + COLUMN_UNCLES::NAME, + &mut transaction_info_migration, + mode, + LIMIT, + )?; next_key = nk; if !wb.is_empty() { @@ -111,13 +119,14 @@ impl ChangeMoleculeTableToStruct { // only migrates epoch_ext if key.len() == 32 && value.len() != EPOCH_SIZE { // (1 total size field + 8 fields) * 4 byte per field - wb.put(COLUMN_EPOCH, key, &value[36..])?; + wb.put(COLUMN_EPOCH::NAME, key, &value[36..])?; } Ok(()) }; let mode = self.mode(&next_key); - let (_count, nk) = db.traverse(COLUMN_EPOCH, &mut epoch_ext_migration, mode, LIMIT)?; + let (_count, nk) = + db.traverse(COLUMN_EPOCH::NAME, &mut epoch_ext_migration, mode, LIMIT)?; next_key = nk; if !wb.is_empty() { @@ -166,9 +175,15 @@ impl Migration for 
@@ -166,9 +175,15 @@ impl Migration for ChangeMoleculeTableToStruct { pb.inc(1); let mut wb = db.new_write_batch(); - if let Some(current_epoch) = db.get_pinned(COLUMN_META, META_CURRENT_EPOCH_KEY)? { + if let Some(current_epoch) = + db.get_pinned(COLUMN_META::NAME, COLUMN_META::META_CURRENT_EPOCH_KEY)? + { if current_epoch.len() != 108 { - wb.put(COLUMN_META, META_CURRENT_EPOCH_KEY, &current_epoch[36..])?; + wb.put( + COLUMN_META::NAME, + COLUMN_META::META_CURRENT_EPOCH_KEY, + &current_epoch[36..], + )?; } } db.write(&wb)?; diff --git a/util/migrate/src/tests.rs b/util/migrate/src/tests.rs index f8335c6fc0c..ee1d51c5d91 100644 --- a/util/migrate/src/tests.rs +++ b/util/migrate/src/tests.rs @@ -5,7 +5,6 @@ use ckb_db::RocksDB; use ckb_db_schema::{ COLUMN_BLOCK_BODY, COLUMN_BLOCK_EPOCH, COLUMN_BLOCK_EXT, COLUMN_BLOCK_HEADER, COLUMN_BLOCK_PROPOSAL_IDS, COLUMN_BLOCK_UNCLE, COLUMN_EPOCH, COLUMN_INDEX, COLUMN_META, - META_CURRENT_EPOCH_KEY, META_TIP_HEADER_KEY, }; use ckb_systemtime::unix_time_as_millis; use ckb_types::{ @@ -47,29 +46,30 @@ fn test_mock_migration() { let uncles = genesis.uncles().pack(); let proposals = genesis.data().proposals(); db_txn - .put(COLUMN_INDEX, number.as_slice(), hash.as_slice()) + .put(COLUMN_INDEX::NAME, number.as_slice(), hash.as_slice()) .unwrap(); db_txn - .put(COLUMN_BLOCK_HEADER, hash.as_slice(), header.as_slice()) + .put( + COLUMN_BLOCK_HEADER::NAME, + hash.as_slice(), + header.as_slice(), + ) .unwrap(); db_txn - .put(COLUMN_BLOCK_UNCLE, hash.as_slice(), uncles.as_slice()) + .put(COLUMN_BLOCK_UNCLE::NAME, hash.as_slice(), uncles.as_slice()) .unwrap(); db_txn .put( - COLUMN_BLOCK_PROPOSAL_IDS, + COLUMN_BLOCK_PROPOSAL_IDS::NAME, hash.as_slice(), proposals.as_slice(), ) .unwrap(); for (index, tx) in genesis.transactions().into_iter().enumerate() { - let key = packed::TransactionKey::new_builder() - .block_hash(hash.clone()) - .index(index.pack()) - .build(); + let key = COLUMN_BLOCK_BODY::key( + BlockNumberAndHash::new(number.unpack(), hash.to_owned()), + index, + ); let tx_data = tx.pack(); db_txn - .put(COLUMN_BLOCK_BODY, key.as_slice(), tx_data.as_slice()) + .put(COLUMN_BLOCK_BODY::NAME, key.as_ref(), tx_data.as_slice()) .unwrap(); } } @@ -88,7 +88,7 @@ fn test_mock_migration() { { db_txn .put( - COLUMN_BLOCK_EPOCH, + COLUMN_BLOCK_EPOCH::NAME, genesis.header().hash().as_slice(), epoch_ext.last_block_hash_in_previous_epoch().as_slice(), ) .unwrap() } { db_txn .put( - COLUMN_EPOCH, + COLUMN_EPOCH::NAME, epoch_ext.last_block_hash_in_previous_epoch().as_slice(), epoch_ext.pack().as_slice(), ) .unwrap(); let epoch_number: packed::Uint64 = epoch_ext.number().pack(); db_txn .put( - COLUMN_EPOCH, + COLUMN_EPOCH::NAME, epoch_number.as_slice(), epoch_ext.last_block_hash_in_previous_epoch().as_slice(), ) .unwrap() } { db_txn .put( - COLUMN_META, - META_TIP_HEADER_KEY, + COLUMN_META::NAME, + COLUMN_META::META_TIP_HEADER_KEY, genesis.header().hash().as_slice(), ) .unwrap() } { db_txn .put( - COLUMN_BLOCK_EXT, - genesis.header().hash().as_slice(), + COLUMN_BLOCK_EXT::NAME, + COLUMN_BLOCK_EXT::key(genesis.header().num_hash()).as_ref(), ext.pack().as_slice(), ) .unwrap() } { db_txn .put( - COLUMN_META, - META_CURRENT_EPOCH_KEY, + COLUMN_META::NAME, + COLUMN_META::META_CURRENT_EPOCH_KEY, epoch_ext.pack().as_slice(), ) .unwrap() }
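The reward-calculator diff below folds the target block's own proposal ids and those of its uncles into one set, now looked up by (number, hash). A std-only sketch of that fold, with `String` standing in for `ProposalShortId`:

use std::collections::HashSet;

fn union_proposal_ids(block: &[String], uncles: &[Vec<String>]) -> HashSet<String> {
    let mut ids: HashSet<String> = block.iter().cloned().collect();
    for uncle in uncles {
        ids.extend(uncle.iter().cloned()); // duplicate short ids collapse in the set
    }
    ids
}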
diff --git a/util/reward-calculator/src/lib.rs b/util/reward-calculator/src/lib.rs index e1975b383bd..3b0f02240ba 100644 --- a/util/reward-calculator/src/lib.rs +++ b/util/reward-calculator/src/lib.rs @@ -9,6 +9,7 @@ use ckb_types::{ core::{BlockReward, Capacity, CapacityResult, HeaderView}, packed::{Byte32, CellbaseWitness, ProposalShortId, Script}, prelude::*, + BlockNumberAndHash, }; use std::cmp; use std::collections::HashSet; @@ -88,7 +89,7 @@ impl<'a, CS: ChainStore> RewardCalculator<'a, CS> { &self .store .get_cellbase(&target.hash()) - .expect("target cellbase exist") + .unwrap_or_else(|| panic!("target cellbase {} exist", target.hash())) .witnesses() .get(0) .expect("target witness exist") @@ -172,7 +173,7 @@ impl<'a, CS: ChainStore> RewardCalculator<'a, CS> { parent: &HeaderView, target: &HeaderView, ) -> CapacityResult<Capacity> { - let mut target_proposals = self.get_proposal_ids_by_hash(&target.hash()); + let mut target_proposals = self.get_proposal_ids_by_hash(target.num_hash()); let proposal_window = self.consensus.tx_proposal_window(); let proposer_ratio = self.consensus.proposer_reward_ratio(); @@ -237,7 +238,12 @@ impl<'a, CS: ChainStore> RewardCalculator<'a, CS> { let previous_ids = store .get_block_hash(competing_proposal_start) - .map(|hash| self.get_proposal_ids_by_hash(&hash)) + .map(|hash| { + self.get_proposal_ids_by_hash(BlockNumberAndHash::new( + competing_proposal_start, + hash, + )) + }) .expect("finalize target exist"); proposed.extend(previous_ids); @@ -271,12 +277,12 @@ impl<'a, CS: ChainStore> RewardCalculator<'a, CS> { Ok((primary_block_reward, secondary_block_reward)) } - fn get_proposal_ids_by_hash(&self, hash: &Byte32) -> HashSet<ProposalShortId> { + fn get_proposal_ids_by_hash(&self, num_hash: BlockNumberAndHash) -> HashSet<ProposalShortId> { let mut ids_set = HashSet::new(); - if let Some(ids) = self.store.get_block_proposal_txs_ids(hash) { + if let Some(ids) = self.store.get_block_proposal_txs_ids(num_hash.clone()) { ids_set.extend(ids) } - if let Some(us) = self.store.get_block_uncles(hash) { + if let Some(us) = self.store.get_block_uncles(num_hash) { for u in us.data().into_iter() { ids_set.extend(u.proposals().into_iter()); } diff --git a/util/reward-calculator/src/tests.rs b/util/reward-calculator/src/tests.rs index 6ab899fbf9e..35a62ba190b 100644 --- a/util/reward-calculator/src/tests.rs +++ b/util/reward-calculator/src/tests.rs @@ -53,7 +53,7 @@ fn get_proposal_ids_by_hash() { let consensus = Consensus::default(); let reward_calculator = RewardCalculator::new(&consensus, &store); - let ids = reward_calculator.get_proposal_ids_by_hash(&block.hash()); + let ids = reward_calculator.get_proposal_ids_by_hash(BlockNumberAndHash::new(block.number(), block.hash())); assert_eq!(ids, expected); } diff --git a/util/types/src/core/extras.rs b/util/types/src/core/extras.rs index d42eb66ce65..e27ae89cc8a 100644 --- a/util/types/src/core/extras.rs +++ b/util/types/src/core/extras.rs @@ -48,6 +48,7 @@ impl TransactionInfo { /// TODO(doc): @quake pub fn key(&self) -> packed::TransactionKey { packed::TransactionKey::new_builder() + .block_number(self.block_number.pack()) .block_hash(self.block_hash.clone()) .index(self.index.pack()) .build() } diff --git a/util/types/src/core/views.rs b/util/types/src/core/views.rs index 6e7a4730877..35b69f21a3e 100644 --- a/util/types/src/core/views.rs +++ b/util/types/src/core/views.rs @@ -11,7 +11,7 @@ use crate::{ packed, prelude::*, utilities::merkle_root, - U256, + BlockNumberAndHash, U256, }; /* @@ -498,6 +498,15 @@ impl HeaderView { self.hash = hash; self } + + /// Get Header's BlockNumberAndHash + pub fn num_hash(&self) -> BlockNumberAndHash { + BlockNumberAndHash::new(self.number(), self.hash()) + } + + /// Get Header's 
Parent BlockNumberAndHash + pub fn parent_num_hash(&self) -> BlockNumberAndHash { + BlockNumberAndHash::new(self.number().saturating_sub(1), self.parent_hash()) + } } impl UncleBlockView { diff --git a/verification/contextual/src/contextual_block_verifier.rs b/verification/contextual/src/contextual_block_verifier.rs index af0c32a8734..006c1834656 100644 --- a/verification/contextual/src/contextual_block_verifier.rs +++ b/verification/contextual/src/contextual_block_verifier.rs @@ -170,10 +170,14 @@ impl<'a, CS: ChainStore + VersionbitsIndexer> TwoPhaseCommitVerifier<'a, CS> { break; } - if let Some(ids) = self.context.store.get_block_proposal_txs_ids(&block_hash) { + if let Some(ids) = self + .context + .store + .get_block_proposal_txs_ids(header.num_hash()) + { proposal_txs_ids.extend(ids); } - if let Some(uncles) = self.context.store.get_block_uncles(&block_hash) { + if let Some(uncles) = self.context.store.get_block_uncles(header.num_hash()) { uncles .data() .into_iter()
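A closing note on `parent_num_hash` above: `saturating_sub` keeps the helper total at genesis, where `number()` is 0 and `parent_hash()` is the all-zero hash; callers that must not dereference a genesis "parent" (such as the block-filter builder) still check `is_genesis()` first. The arithmetic, as a tiny test:

#[test]
fn parent_number_saturates_at_genesis() {
    assert_eq!(0u64.saturating_sub(1), 0); // genesis: clamps instead of underflowing
    assert_eq!(5u64.saturating_sub(1), 4); // every other block: plain decrement
}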