diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 8695d655b3..79a86531c8 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -40,6 +40,7 @@ jobs: test-name: - tests::neon_integrations::miner_submit_twice - tests::neon_integrations::microblock_integration_test + - tests::neon_integrations::microblock_fork_poison_integration_test - tests::neon_integrations::size_check_integration_test - tests::neon_integrations::cost_voting_integration - tests::integrations::integration_test_get_info @@ -59,7 +60,6 @@ jobs: - tests::neon_integrations::antientropy_integration_test - tests::neon_integrations::filter_low_fee_tx_integration_test - tests::neon_integrations::filter_long_runtime_tx_integration_test - - tests::neon_integrations::mining_transactions_is_fair - tests::neon_integrations::microblock_large_tx_integration_test_FLAKY - tests::neon_integrations::block_large_tx_integration_test - tests::neon_integrations::microblock_limit_hit_integration_test diff --git a/CHANGELOG.md b/CHANGELOG.md index 008320906b..1222ff4f16 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,31 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). +## [2.05.0.5.0] + +### Changed + +- The act of walking the mempool will now cache address nonces in RAM and to a + temporary mempool table used for the purpose, instead of unconditionally +querying them from the chainstate MARF. This builds upon improvements to mempool +goodput over 2.05.0.4.0 (#3337). 
+- The node and miner implementation has been refactored to remove write-lock + contention that can arise when the node's chains-coordinator thread attempts to store and +process newly-discovered (or newly-mined) blocks, and when the node's relayer +thread attempts to mine a new block. In addition, the miner logic has been +moved to a separate thread in order to avoid starving the relayer thread (which +must handle block and transaction propagation, as well as block-processing). +The refactored miner thread will be preemptively terminated and restarted +by the arrival of new Stacks blocks or burnchain blocks, which further +prevents the miner from holding open write-locks in the underlying +chainstate databases when there is new chain data to discover (which would +invalidate the miner's work anyway). (#3335). + +### Fixed + +- Fixed `pow` documentation in Clarity (#3338). +- Backported unit tests that were omitted in the 2.05.0.3.0 release (#3348). + ## [2.05.0.4.0] ### Fixed diff --git a/src/chainstate/burn/db/sortdb.rs b/src/chainstate/burn/db/sortdb.rs index 260048af4e..31fa05907f 100644 --- a/src/chainstate/burn/db/sortdb.rs +++ b/src/chainstate/burn/db/sortdb.rs @@ -89,7 +89,7 @@ use crate::chainstate::stacks::index::{ClarityMarfTrieId, MARFValue}; use stacks_common::types::chainstate::StacksAddress; use stacks_common::types::chainstate::TrieHash; use stacks_common::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, PoxId, SortitionId, VRFSeed, + BlockHeaderHash, BurnchainHeaderHash, PoxId, SortitionId, StacksBlockId, VRFSeed, }; const BLOCK_HEIGHT_MAX: u64 = ((1 as u64) << 63) - 1; @@ -1434,7 +1434,7 @@ impl<'a> SortitionHandleTx<'a> { )?; } else { // see if this block builds off of a Stacks block mined on this burnchain fork - let height_opt = match SortitionDB::get_accepted_stacks_block_pointer( + let parent_height_opt = match SortitionDB::get_accepted_stacks_block_pointer( self, &burn_tip.consensus_hash, parent_stacks_block_hash, @@ -1452,10 
+1452,11 @@ impl<'a> SortitionHandleTx<'a> { } } }; - match height_opt { - Some(height) => { + match parent_height_opt { + Some(parent_height) => { if stacks_block_height > burn_tip.canonical_stacks_tip_height { - assert!(stacks_block_height > height, "BUG: DB corruption -- block height {} <= {} means we accepted a block out-of-order", stacks_block_height, height); + assert!(stacks_block_height > parent_height, "BUG: DB corruption -- block height {} <= {} means we accepted a block out-of-order", stacks_block_height, parent_height); + // This block builds off of a parent that is _concurrent_ with the memoized canonical stacks chain pointer. // i.e. this block will reorg the Stacks chain on the canonical burnchain fork. // Memoize this new stacks chain tip to the canonical burn chain snapshot. @@ -1463,7 +1464,7 @@ impl<'a> SortitionHandleTx<'a> { // are guaranteed by the Stacks chain state code that Stacks blocks in a given // Stacks fork will be marked as accepted in sequential order (i.e. at height h, h+1, // h+2, etc., without any gaps). - debug!("Accepted Stacks block {}/{} builds on a previous canonical Stacks tip on this burnchain fork ({})", consensus_hash, stacks_block_hash, &burn_tip.burn_header_hash); + debug!("Accepted Stacks block {}/{} ({}) builds on a previous canonical Stacks tip on this burnchain fork ({})", consensus_hash, stacks_block_hash, stacks_block_height, &burn_tip.burn_header_hash); let args: &[&dyn ToSql] = &[ consensus_hash, stacks_block_hash, @@ -1477,7 +1478,7 @@ impl<'a> SortitionHandleTx<'a> { // This block was mined on this fork, but it's acceptance doesn't overtake // the current stacks chain tip. Remember it so that we can process its children, // which might do so later. 
- debug!("Accepted Stacks block {}/{} builds on a non-canonical Stacks tip in this burnchain fork ({})", consensus_hash, stacks_block_hash, &burn_tip.burn_header_hash); + debug!("Accepted Stacks block {}/{} ({}) builds on a non-canonical Stacks tip in this burnchain fork ({} height {})", consensus_hash, stacks_block_hash, stacks_block_height, &burn_tip.burn_header_hash, burn_tip.canonical_stacks_tip_height); } SortitionDB::insert_accepted_stacks_block_pointer( self, @@ -2475,8 +2476,8 @@ impl SortitionDB { pub fn is_db_version_supported_in_epoch(epoch: StacksEpochId, version: &str) -> bool { match epoch { StacksEpochId::Epoch10 => false, - StacksEpochId::Epoch20 => (version == "1" || version == "2" || version == "3"), - StacksEpochId::Epoch2_05 => (version == "2" || version == "3" || version == "4"), + StacksEpochId::Epoch20 => version == "1" || version == "2" || version == "3", + StacksEpochId::Epoch2_05 => version == "2" || version == "3" || version == "4", } } diff --git a/src/chainstate/coordinator/mod.rs b/src/chainstate/coordinator/mod.rs index aa9eae266b..d1c16d6e8e 100644 --- a/src/chainstate/coordinator/mod.rs +++ b/src/chainstate/coordinator/mod.rs @@ -19,6 +19,8 @@ use std::convert::{TryFrom, TryInto}; use std::fs; use std::path::PathBuf; use std::sync::mpsc::SyncSender; +use std::sync::Arc; +use std::sync::Mutex; use std::time::Duration; use crate::burnchains::{ @@ -39,6 +41,7 @@ use crate::chainstate::stacks::{ StacksHeaderInfo, }, events::{StacksTransactionEvent, StacksTransactionReceipt, TransactionOrigin}, + miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}, Error as ChainstateError, StacksBlock, TransactionPayload, }; use crate::core::StacksEpoch; @@ -272,6 +275,7 @@ impl<'a, T: BlockEventDispatcher, CE: CostEstimator + ?Sized, FE: FeeEstimator + atlas_config: AtlasConfig, cost_estimator: Option<&mut CE>, fee_estimator: Option<&mut FE>, + miner_status: Arc>, ) where T: BlockEventDispatcher, { @@ -311,18 +315,23 @@ impl<'a, T: 
BlockEventDispatcher, CE: CostEstimator + ?Sized, FE: FeeEstimator + // timeout so that we handle Ctrl-C a little gracefully match comms.wait_on() { CoordinatorEvents::NEW_STACKS_BLOCK => { + signal_mining_blocked(miner_status.clone()); debug!("Received new stacks block notice"); if let Err(e) = inst.handle_new_stacks_block() { warn!("Error processing new stacks block: {:?}", e); } + signal_mining_ready(miner_status.clone()); } CoordinatorEvents::NEW_BURN_BLOCK => { + signal_mining_blocked(miner_status.clone()); debug!("Received new burn block notice"); if let Err(e) = inst.handle_new_burnchain_block() { warn!("Error processing new burn block: {:?}", e); } + signal_mining_ready(miner_status.clone()); } CoordinatorEvents::STOP => { + signal_mining_blocked(miner_status.clone()); debug!("Received stop notice"); return; } diff --git a/src/chainstate/stacks/db/blocks.rs b/src/chainstate/stacks/db/blocks.rs index c36e9bf514..544e2da4ce 100644 --- a/src/chainstate/stacks/db/blocks.rs +++ b/src/chainstate/stacks/db/blocks.rs @@ -1501,7 +1501,7 @@ impl StacksChainState { } } - test_debug!( + debug!( "Loaded microblock {}/{}-{} (parent={}, expect_seq={})", &parent_consensus_hash, &parent_anchored_block_hash, @@ -1533,6 +1533,16 @@ impl StacksChainState { } } ret.reverse(); + + if ret.len() > 0 { + // should start with 0 + if ret[0].header.sequence != 0 { + warn!("Invalid microblock stream from {}/{} to {}: sequence does not start with 0, but with {}", + parent_consensus_hash, parent_anchored_block_hash, tip_microblock_hash, ret[0].header.sequence); + + return Ok(None); + } + } Ok(Some(ret)) } @@ -1617,10 +1627,11 @@ impl StacksChainState { return Ok(None); } - let mut ret = vec![]; + let mut ret: Vec = vec![]; let mut tip: Option = None; let mut fork_poison = None; let mut expected_sequence = start_seq; + let mut parents: HashMap = HashMap::new(); // load associated staging microblock data, but best-effort. // Stop loading once we find a fork juncture. 
@@ -1657,6 +1668,22 @@ impl StacksChainState { break; } + if let Some(idx) = parents.get(&mblock.header.prev_block) { + let conflict = ret[*idx].clone(); + warn!( + "Microblock fork found: microblocks {} and {} share parent {}", + mblock.block_hash(), + conflict.block_hash(), + &mblock.header.prev_block + ); + fork_poison = Some(TransactionPayload::PoisonMicroblock( + mblock.header, + conflict.header, + )); + ret.pop(); // last microblock pushed (i.e. the tip) conflicts with mblock + break; + } + // expect forks, so expected_sequence may not always increase expected_sequence = cmp::min(mblock.header.sequence, expected_sequence).saturating_add(1); @@ -1677,6 +1704,10 @@ impl StacksChainState { } tip = Some(mblock.clone()); + + let prev_block = mblock.header.prev_block.clone(); + parents.insert(prev_block, ret.len()); + ret.push(mblock); } if fork_poison.is_none() && ret.len() == 0 { @@ -3453,6 +3484,20 @@ impl StacksChainState { Ok(count - to_write) } + /// Check whether or not there exists a Stacks block at or higher than a given height that is + /// unprocessed. This is used by miners to determine whether or not the block-commit they're + /// about to send is about to be invalidated + pub fn has_higher_unprocessed_blocks(conn: &DBConn, height: u64) -> Result { + let sql = + "SELECT 1 FROM staging_blocks WHERE orphaned = 0 AND processed = 0 AND height >= ?1"; + let args: &[&dyn ToSql] = &[&u64_to_sql(height)?]; + let res = conn + .query_row(sql, args, |_r| Ok(())) + .optional() + .map(|x| x.is_some())?; + Ok(res) + } + fn extract_signed_microblocks( parent_anchored_block_header: &StacksBlockHeader, microblocks: &Vec, @@ -3793,6 +3838,49 @@ impl StacksChainState { Ok(Some((block_commit.burn_fee, sortition_burns))) } + /// Do we already have an anchored block? 
+ pub fn has_anchored_block( + conn: &DBConn, + blocks_path: &str, + consensus_hash: &ConsensusHash, + block: &StacksBlock, + ) -> Result { + let index_block_hash = + StacksBlockHeader::make_index_block_hash(consensus_hash, &block.block_hash()); + if StacksChainState::has_stored_block( + &conn, + blocks_path, + consensus_hash, + &block.block_hash(), + )? { + debug!( + "Block already stored and processed: {}/{} ({})", + consensus_hash, + &block.block_hash(), + &index_block_hash + ); + return Ok(true); + } else if StacksChainState::has_staging_block(conn, consensus_hash, &block.block_hash())? { + debug!( + "Block already stored (but not processed): {}/{} ({})", + consensus_hash, + &block.block_hash(), + &index_block_hash + ); + return Ok(true); + } else if StacksChainState::has_block_indexed(&blocks_path, &index_block_hash)? { + debug!( + "Block already stored to chunk store: {}/{} ({})", + consensus_hash, + &block.block_hash(), + &index_block_hash + ); + return Ok(true); + } + + Ok(false) + } + /// Pre-process and store an anchored block to staging, queuing it up for /// subsequent processing once all of its ancestors have been processed. /// @@ -3828,43 +3916,21 @@ impl StacksChainState { let mainnet = self.mainnet; let chain_id = self.chain_id; let blocks_path = self.blocks_path.clone(); - let mut block_tx = self.db_tx_begin()?; - // already in queue or already processed? - let index_block_hash = - StacksBlockHeader::make_index_block_hash(consensus_hash, &block.block_hash()); - if StacksChainState::has_stored_block( - &block_tx, - &blocks_path, + // optimistic check (before opening a tx): already in queue or already processed? + if StacksChainState::has_anchored_block( + self.db(), + &self.blocks_path, consensus_hash, - &block.block_hash(), - )? 
{ - debug!( - "Block already stored and processed: {}/{} ({})", - consensus_hash, - &block.block_hash(), - &index_block_hash - ); - return Ok(false); - } else if StacksChainState::has_staging_block( - &block_tx, - consensus_hash, - &block.block_hash(), + block, )? { - debug!( - "Block already stored (but not processed): {}/{} ({})", - consensus_hash, - &block.block_hash(), - &index_block_hash - ); return Ok(false); - } else if StacksChainState::has_block_indexed(&blocks_path, &index_block_hash)? { - debug!( - "Block already stored to chunk store: {}/{} ({})", - consensus_hash, - &block.block_hash(), - &index_block_hash - ); + } + + let mut block_tx = self.db_tx_begin()?; + + // already in queue or already processed (within the tx; things might have changed) + if StacksChainState::has_anchored_block(&block_tx, &blocks_path, consensus_hash, block)? { return Ok(false); } diff --git a/src/chainstate/stacks/db/mod.rs b/src/chainstate/stacks/db/mod.rs index c8ab50d545..81efdc9291 100644 --- a/src/chainstate/stacks/db/mod.rs +++ b/src/chainstate/stacks/db/mod.rs @@ -192,7 +192,7 @@ impl DBConfig { pub fn supports_epoch(&self, epoch_id: StacksEpochId) -> bool { match epoch_id { StacksEpochId::Epoch10 => false, - StacksEpochId::Epoch20 => (self.version == "1" || self.version == "2"), + StacksEpochId::Epoch20 => self.version == "1" || self.version == "2", StacksEpochId::Epoch2_05 => self.version == "2", } } diff --git a/src/chainstate/stacks/db/unconfirmed.rs b/src/chainstate/stacks/db/unconfirmed.rs index b973152d1f..1c1efe7993 100644 --- a/src/chainstate/stacks/db/unconfirmed.rs +++ b/src/chainstate/stacks/db/unconfirmed.rs @@ -24,6 +24,7 @@ use crate::chainstate::stacks::db::accounts::*; use crate::chainstate::stacks::db::blocks::*; use crate::chainstate::stacks::db::*; use crate::chainstate::stacks::events::*; +use crate::chainstate::stacks::index::marf::MARFOpenOpts; use crate::chainstate::stacks::Error; use crate::chainstate::stacks::*; use 
crate::clarity_vm::clarity::{ClarityInstance, Error as clarity_error}; @@ -84,6 +85,10 @@ pub struct UnconfirmedState { num_mblocks_added: u64, have_state: bool, + mainnet: bool, + clarity_state_index_root: String, + marf_opts: Option<MARFOpenOpts>, + // fault injection for testing pub disable_cost_check: bool, pub disable_bytes_check: bool, @@ -120,11 +125,51 @@ impl UnconfirmedState { num_mblocks_added: 0, have_state: false, + mainnet: chainstate.mainnet, + clarity_state_index_root: chainstate.clarity_state_index_root.clone(), + marf_opts: chainstate.marf_opts.clone(), + disable_cost_check: check_fault_injection(FAULT_DISABLE_MICROBLOCKS_COST_CHECK), disable_bytes_check: check_fault_injection(FAULT_DISABLE_MICROBLOCKS_BYTES_CHECK), }) } + /// Make a read-only copy of this unconfirmed state. The resulting unconfirmed state cannot + /// be refreshed, but it will represent a snapshot of the existing unconfirmed state. + pub fn make_readonly_owned(&self) -> Result<UnconfirmedState, Error> { + let marf = MarfedKV::open_unconfirmed( + &self.clarity_state_index_root, + None, + self.marf_opts.clone(), + )?; + + let clarity_instance = ClarityInstance::new(self.mainnet, marf); + + Ok(UnconfirmedState { + confirmed_chain_tip: self.confirmed_chain_tip.clone(), + unconfirmed_chain_tip: self.unconfirmed_chain_tip.clone(), + clarity_inst: clarity_instance, + mined_txs: self.mined_txs.clone(), + cost_so_far: self.cost_so_far.clone(), + bytes_so_far: self.bytes_so_far, + + last_mblock: self.last_mblock.clone(), + last_mblock_seq: self.last_mblock_seq, + + readonly: true, + dirty: false, + num_mblocks_added: self.num_mblocks_added, + have_state: self.have_state, + + mainnet: self.mainnet, + clarity_state_index_root: self.clarity_state_index_root.clone(), + marf_opts: self.marf_opts.clone(), + + disable_cost_check: self.disable_cost_check, + disable_bytes_check: self.disable_bytes_check, + }) + } + /// Make a new unconfirmed state, but don't do anything with it yet, and deny refreshes.
fn new_readonly( chainstate: &StacksChainState, @@ -157,6 +202,10 @@ impl UnconfirmedState { num_mblocks_added: 0, have_state: false, + mainnet: chainstate.mainnet, + clarity_state_index_root: chainstate.clarity_state_index_root.clone(), + marf_opts: chainstate.marf_opts.clone(), + disable_cost_check: check_fault_injection(FAULT_DISABLE_MICROBLOCKS_COST_CHECK), disable_bytes_check: check_fault_injection(FAULT_DISABLE_MICROBLOCKS_BYTES_CHECK), }) diff --git a/src/chainstate/stacks/index/storage.rs b/src/chainstate/stacks/index/storage.rs index c38ca09ada..0b8bf37444 100644 --- a/src/chainstate/stacks/index/storage.rs +++ b/src/chainstate/stacks/index/storage.rs @@ -1827,7 +1827,7 @@ impl<'a, T: MarfTrieId> TrieStorageTransaction<'a, T> { let size_hint = match self.data.uncommitted_writes { Some((_, ref trie_storage)) => 2 * trie_storage.size_hint(), - None => (1024), // don't try to guess _byte_ allocation here. + None => 1024, // don't try to guess _byte_ allocation here. }; let trie_buf = TrieRAM::new(bhh, size_hint, &self.data.cur_block); @@ -1869,7 +1869,7 @@ impl<'a, T: MarfTrieId> TrieStorageTransaction<'a, T> { // new trie let size_hint = match self.data.uncommitted_writes { Some((_, ref trie_storage)) => 2 * trie_storage.size_hint(), - None => (1024), // don't try to guess _byte_ allocation here. + None => 1024, // don't try to guess _byte_ allocation here. }; ( diff --git a/src/chainstate/stacks/miner.rs b/src/chainstate/stacks/miner.rs index 1a3f2da999..302b33d31c 100644 --- a/src/chainstate/stacks/miner.rs +++ b/src/chainstate/stacks/miner.rs @@ -14,11 +14,17 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use std::cmp; use std::collections::HashMap; use std::collections::HashSet; use std::convert::From; use std::fs; use std::mem; +use std::sync::atomic::AtomicBool; +use std::sync::atomic::Ordering; +use std::sync::Arc; +use std::sync::Mutex; +use std::thread::ThreadId; use crate::burnchains::PrivateKey; use crate::burnchains::PublicKey; @@ -68,10 +74,72 @@ use clarity::vm::clarity::TransactionConnection; use clarity::vm::errors::Error as InterpreterError; use clarity::vm::types::TypeSignature; +/// System status for mining. +/// The miner can be Ready, in which case a miner is allowed to run +/// The miner can be Blocked, in which case the miner *should not start* and/or *should terminate* +/// if running. +/// The inner u64 is a per-thread ID that lets threads querying the miner status identify whether +/// or not they or another thread were the last to modify the state. +#[derive(Debug, Clone, PartialEq)] +pub struct MinerStatus { + blockers: HashSet, +} + +impl MinerStatus { + pub fn make_ready() -> MinerStatus { + MinerStatus { + blockers: HashSet::new(), + } + } + + pub fn add_blocked(&mut self) { + self.blockers.insert(std::thread::current().id()); + } + + pub fn remove_blocked(&mut self) { + self.blockers.remove(&std::thread::current().id()); + } + + pub fn is_blocked(&self) -> bool { + self.blockers.len() > 0 + } +} + +impl std::fmt::Display for MinerStatus { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", &self) + } +} + +/// halt mining +pub fn signal_mining_blocked(miner_status: Arc>) { + match miner_status.lock() { + Ok(mut status) => { + status.add_blocked(); + } + Err(_e) => { + panic!("FATAL: mutex poisoned"); + } + } +} + +/// resume mining if we blocked it earlier +pub fn signal_mining_ready(miner_status: Arc>) { + match miner_status.lock() { + Ok(mut status) => { + status.remove_blocked(); + } + Err(_e) => { + panic!("FATAL: mutex poisoned"); + } + } +} + #[derive(Debug, Clone)] pub struct 
BlockBuilderSettings { pub max_miner_time_ms: u64, pub mempool_settings: MemPoolWalkSettings, + pub miner_status: Arc>, } impl BlockBuilderSettings { @@ -79,6 +147,7 @@ impl BlockBuilderSettings { BlockBuilderSettings { max_miner_time_ms: u64::max_value(), mempool_settings: MemPoolWalkSettings::default(), + miner_status: Arc::new(Mutex::new(MinerStatus::make_ready())), } } @@ -86,6 +155,7 @@ impl BlockBuilderSettings { BlockBuilderSettings { max_miner_time_ms: u64::max_value(), mempool_settings: MemPoolWalkSettings::zero(), + miner_status: Arc::new(Mutex::new(MinerStatus::make_ready())), } } } @@ -682,7 +752,10 @@ impl<'a> StacksMicroblockBuilder<'a> { }; if ast_rules != ASTRules::Typical { - next_microblock_header.version = STACKS_BLOCK_VERSION_AST_PRECHECK_SIZE; + next_microblock_header.version = cmp::max( + STACKS_BLOCK_VERSION_AST_PRECHECK_SIZE, + next_microblock_header.version, + ); } next_microblock_header.sign(miner_key).unwrap(); @@ -1021,6 +1094,8 @@ impl<'a> StacksMicroblockBuilder<'a> { "Microblock transaction selection begins (child of {}), bytes so far: {}", &self.anchor_block, bytes_so_far ); + let mut blocked = false; + let result = { let mut intermediate_result; loop { @@ -1042,6 +1117,12 @@ impl<'a> StacksMicroblockBuilder<'a> { return Ok(None); } + blocked = (*self.settings.miner_status.lock().expect("FATAL: mutex poisoned")).is_blocked(); + if blocked { + debug!("Microblock miner stopping due to preemption"); + return Ok(None); + } + if considered.contains(&mempool_tx.tx.txid()) { return Ok(Some(TransactionResult::skipped( &mempool_tx.tx, "Transaction already considered.".to_string()).convert_to_event())); @@ -1154,8 +1235,9 @@ impl<'a> StacksMicroblockBuilder<'a> { } intermediate_result }; + debug!( - "Microblock transaction selection finished (child of {}); {} transactions selected", + "Miner: Microblock transaction selection finished (child of {}); {} transactions selected", &self.anchor_block, num_selected ); @@ -1178,6 +1260,14 @@ impl<'a> 
StacksMicroblockBuilder<'a> { event_dispatcher.mempool_txs_dropped(invalidated_txs, MemPoolDropReason::TOO_EXPENSIVE); event_dispatcher.mempool_txs_dropped(to_drop_and_blacklist, MemPoolDropReason::PROBLEMATIC); + if blocked { + debug!( + "Miner: Microblock transaction selection aborted (child of {}); {} transactions selected", + &self.anchor_block, num_selected + ); + return Err(Error::MinerAborted); + } + match result { Ok(_) => {} Err(e) => { @@ -1794,7 +1884,6 @@ impl StacksBlockBuilder { chainstate: &mut StacksChainState, parent_consensus_hash: &ConsensusHash, parent_header_hash: &BlockHeaderHash, - parent_index_hash: &StacksBlockId, ) -> Result, Error> { if let Some(microblock_parent_hash) = self.parent_microblock_hash.as_ref() { // load up a microblock fork @@ -1806,9 +1895,20 @@ impl StacksBlockBuilder { )? .ok_or(Error::NoSuchBlockError)?; + debug!( + "Loaded {} microblocks made by {}/{} tipped at {}", + microblocks.len(), + &parent_consensus_hash, + &parent_header_hash, + µblock_parent_hash + ); Ok(microblocks) } else { // apply all known parent microblocks before beginning our tenure + let parent_index_hash = StacksBlockHeader::make_index_block_hash( + &self.parent_consensus_hash, + &self.parent_header_hash, + ); let (parent_microblocks, _) = match StacksChainState::load_descendant_staging_microblock_stream_with_poison( &chainstate.db(), @@ -1819,6 +1919,13 @@ impl StacksBlockBuilder { Some(x) => x, None => (vec![], None), }; + + debug!( + "Loaded {} microblocks made by {}/{}", + parent_microblocks.len(), + &parent_consensus_hash, + &parent_header_hash + ); Ok(parent_microblocks) } } @@ -1873,7 +1980,6 @@ impl StacksBlockBuilder { chainstate, &self.parent_consensus_hash.clone(), &self.parent_header_hash.clone(), - &parent_index_hash, ) { Ok(x) => x, Err(e) => { @@ -1908,6 +2014,7 @@ impl StacksBlockBuilder { let mainnet = chainstate.config().mainnet; + // data won't be committed, so do a concurrent transaction let (chainstate_tx, clarity_instance) = 
chainstate.chainstate_tx_begin()?; let ast_rules = @@ -2184,7 +2291,10 @@ impl StacksBlockBuilder { let mut miner_epoch_info = builder.pre_epoch_begin(&mut chainstate, burn_dbconn)?; let ast_rules = miner_epoch_info.ast_rules; if ast_rules != ASTRules::Typical { - builder.header.version = STACKS_BLOCK_VERSION_AST_PRECHECK_SIZE; + builder.header.version = cmp::max( + STACKS_BLOCK_VERSION_AST_PRECHECK_SIZE, + builder.header.version, + ); } let (mut epoch_tx, confirmed_mblock_cost) = @@ -2215,6 +2325,7 @@ impl StacksBlockBuilder { let mut block_limit_hit = BlockLimitFunction::NO_LIMIT_HIT; let deadline = ts_start + (max_miner_time_ms as u128); let mut num_txs = 0; + let mut blocked = false; debug!( "Anchored block transaction selection begins (child of {})", @@ -2230,6 +2341,14 @@ impl StacksBlockBuilder { tip_height, mempool_settings.clone(), |epoch_tx, to_consider, estimator| { + // first, have we been preempted? + blocked = (*settings.miner_status.lock().expect("FATAL: mutex poisoned")) + .is_blocked(); + if blocked { + debug!("Miner stopping due to preemption"); + return Ok(None); + } + let txinfo = &to_consider.tx; let update_estimator = to_consider.update_estimate; @@ -2404,6 +2523,14 @@ impl StacksBlockBuilder { } } + if blocked { + debug!( + "Miner: Anchored block transaction selection aborted (child of {})", + &parent_stacks_header.anchored_header.block_hash() + ); + return Err(Error::MinerAborted); + } + // the prior do_rebuild logic wasn't necessary // a transaction that caused a budget exception is rolled back in process_transaction diff --git a/src/chainstate/stacks/mod.rs b/src/chainstate/stacks/mod.rs index 291edd9306..629098a80d 100644 --- a/src/chainstate/stacks/mod.rs +++ b/src/chainstate/stacks/mod.rs @@ -79,9 +79,8 @@ pub use stacks_common::address::{ C32_ADDRESS_VERSION_TESTNET_MULTISIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, }; -pub const STACKS_BLOCK_VERSION: u8 = 0; +pub const STACKS_BLOCK_VERSION: u8 = 2; pub const 
STACKS_BLOCK_VERSION_AST_PRECHECK_SIZE: u8 = 1; -pub const STACKS_MICROBLOCK_VERSION: u8 = 0; pub const MAX_BLOCK_LEN: u32 = 2 * 1024 * 1024; pub const MAX_TRANSACTION_LEN: u32 = MAX_BLOCK_LEN; @@ -120,6 +119,8 @@ pub enum Error { PoxInsufficientBalance, PoxNoRewardCycle, ProblematicTransaction(Txid), + MinerAborted, + ChannelClosed(String), } impl From for Error { @@ -195,6 +196,8 @@ impl fmt::Display for Error { "Transaction {} is problematic and will not be mined again", txid ), + Error::MinerAborted => write!(f, "Mining attempt aborted by signal"), + Error::ChannelClosed(ref s) => write!(f, "Channel '{}' closed", s), } } } @@ -229,6 +232,8 @@ impl error::Error for Error { Error::PoxNoRewardCycle => None, Error::StacksTransactionSkipped(ref _r) => None, Error::ProblematicTransaction(ref _txid) => None, + Error::MinerAborted => None, + Error::ChannelClosed(ref _s) => None, } } } @@ -263,6 +268,8 @@ impl Error { Error::PoxNoRewardCycle => "PoxNoRewardCycle", Error::StacksTransactionSkipped(ref _r) => "StacksTransactionSkipped", Error::ProblematicTransaction(ref _txid) => "ProblematicTransaction", + Error::MinerAborted => "MinerAborted", + Error::ChannelClosed(ref _s) => "ChannelClosed", } } diff --git a/src/core/mempool.rs b/src/core/mempool.rs index 456e16db9d..ab5ccbf9f4 100644 --- a/src/core/mempool.rs +++ b/src/core/mempool.rs @@ -797,7 +797,21 @@ impl NonceCache { } } - fn get(&mut self, address: &StacksAddress, clarity_tx: &mut C, mempool_db: &DBConn) -> u64 + /// Get a nonce from the cache. + /// First, the RAM cache will be checked for this address. + /// If absent, then the `nonces` table will be queried for this address. + /// If absent, then the MARF will be queried for this address. + /// + /// If not in RAM, the nonce will be opportunistically stored to the `nonces` table. If that + /// fails due to lock contention, then the method will return `true` for its second tuple argument. + /// + /// Returns (nonce, should-try-store-again?) 
+ fn get( + &mut self, + address: &StacksAddress, + clarity_tx: &mut C, + mempool_db: &DBConn, + ) -> (u64, bool) where C: ClarityConnection, { @@ -806,7 +820,7 @@ impl NonceCache { // Check in-memory cache match self.cache.get(address) { - Some(nonce) => *nonce, + Some(nonce) => (*nonce, false), None => { // Check sqlite cache let opt_nonce = match db_get_nonce(mempool_db, address) { @@ -822,33 +836,42 @@ impl NonceCache { if self.cache.len() < self.max_cache_size { self.cache.insert(address.clone(), nonce); } - nonce + (nonce, false) } None => { let nonce = StacksChainState::get_nonce(clarity_tx, &address.clone().into()); - match db_set_nonce(mempool_db, address, nonce) { - Ok(_) => (), - Err(e) => warn!("error caching nonce to sqlite: {}", e), - } + let should_store_again = match db_set_nonce(mempool_db, address, nonce) { + Ok(_) => false, + Err(e) => { + warn!("error caching nonce to sqlite: {}", e); + true + } + }; if self.cache.len() < self.max_cache_size { self.cache.insert(address.clone(), nonce); } - nonce + (nonce, should_store_again) } } } } } - fn update(&mut self, address: StacksAddress, value: u64, mempool_db: &DBConn) { + /// Store the (address, nonce) pair to the `nonces` table. + /// If storage fails, return false. + /// Otherwise return true. 
+ fn update(&mut self, address: StacksAddress, value: u64, mempool_db: &DBConn) -> bool { // Sqlite cache - match db_set_nonce(mempool_db, &address, value) { - Ok(_) => (), - Err(e) => warn!("error caching nonce to sqlite: {}", e), - } + let success = match db_set_nonce(mempool_db, &address, value) { + Ok(_) => true, + Err(e) => { + warn!("error caching nonce to sqlite: {}", e); + false + } + }; // In-memory cache match self.cache.get_mut(&address) { @@ -857,6 +880,8 @@ impl NonceCache { } None => (), } + + success } } @@ -876,6 +901,22 @@ fn db_get_nonce(conn: &DBConn, address: &StacksAddress) -> Result, d query_row(conn, sql, rusqlite::params![&addr_str]) } +#[cfg(test)] +pub fn db_get_all_nonces(conn: &DBConn) -> Result, db_error> { + let sql = "SELECT * FROM nonces"; + let mut stmt = conn.prepare(&sql).map_err(|e| db_error::SqliteError(e))?; + let mut iter = stmt + .query(NO_PARAMS) + .map_err(|e| db_error::SqliteError(e))?; + let mut ret = vec![]; + while let Ok(Some(row)) = iter.next() { + let addr = StacksAddress::from_column(row, "address")?; + let nonce = u64::from_column(row, "nonce")?; + ret.push((addr, nonce)); + } + Ok(ret) +} + /// Cache potential candidate transactions for subsequent iterations. /// While walking the mempool, transactions that have nonces that are too high /// to process yet (but could be processed in the future) are added to `next`. @@ -1260,6 +1301,25 @@ impl MemPoolDB { Ok(updated) } + /// Helper method to record nonces to a retry-buffer. + /// This is needed for when we try to write-through a new (address, nonce) pair to the on-disk + /// `nonces` cache, but the write fails due to lock contention from another thread. The + /// retry-buffer will be used to later store this data in a single transaction. 
+ fn save_nonce_for_retry( + retry_store: &mut HashMap, + max_size: u64, + addr: StacksAddress, + new_nonce: u64, + ) { + if (retry_store.len() as u64) < max_size { + if let Some(nonce) = retry_store.get_mut(&addr) { + *nonce = cmp::max(new_nonce, *nonce); + } else { + retry_store.insert(addr, new_nonce); + } + } + } + /// Iterate over candidates in the mempool /// `todo` will be called once for each transaction that is a valid /// candidate for inclusion in the next block, meaning its origin and @@ -1312,16 +1372,20 @@ impl MemPoolDB { let mut candidate_cache = CandidateCache::new(settings.candidate_retry_cache_size); let mut nonce_cache = NonceCache::new(settings.nonce_cache_size); + // set of (address, nonce) to store after the inner loop completes. This will be done in a + // single transaction. This cannot grow to more than `settings.nonce_cache_size` entries. + let mut retry_store = HashMap::new(); + let sql = " SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate FROM mempool WHERE fee_rate IS NULL "; - let mut query_stmt = self + let mut query_stmt_null = self .db .prepare(&sql) .map_err(|err| Error::SqliteError(err))?; - let mut null_iterator = query_stmt + let mut null_iterator = query_stmt_null .query(NO_PARAMS) .map_err(|err| Error::SqliteError(err))?; @@ -1331,11 +1395,11 @@ impl MemPoolDB { WHERE fee_rate IS NOT NULL ORDER BY fee_rate DESC "; - let mut query_stmt = self + let mut query_stmt_fee = self .db .prepare(&sql) .map_err(|err| Error::SqliteError(err))?; - let mut fee_iterator = query_stmt + let mut fee_iterator = query_stmt_fee .query(NO_PARAMS) .map_err(|err| Error::SqliteError(err))?; @@ -1392,10 +1456,30 @@ impl MemPoolDB { }; // Check the nonces. 
- let expected_origin_nonce = + let (expected_origin_nonce, retry_store_origin_nonce) = nonce_cache.get(&candidate.origin_address, clarity_tx, self.conn()); - let expected_sponsor_nonce = + let (expected_sponsor_nonce, retry_store_sponsor_nonce) = nonce_cache.get(&candidate.sponsor_address, clarity_tx, self.conn()); + + // Try storing these nonces later if we failed to do so here, e.g. due to some other + // thread holding the write-lock on the mempool DB. + if retry_store_origin_nonce { + Self::save_nonce_for_retry( + &mut retry_store, + settings.nonce_cache_size, + candidate.origin_address.clone(), + expected_origin_nonce, + ); + } + if retry_store_sponsor_nonce { + Self::save_nonce_for_retry( + &mut retry_store, + settings.nonce_cache_size, + candidate.sponsor_address.clone(), + expected_sponsor_nonce, + ); + } + match order_nonces( candidate.origin_nonce, expected_origin_nonce, @@ -1461,17 +1545,34 @@ impl MemPoolDB { match tx_event { TransactionEvent::Success(_) => { // Bump nonces in the cache for the executed transaction - nonce_cache.update( + let stored = nonce_cache.update( consider.tx.metadata.origin_address, expected_origin_nonce + 1, self.conn(), ); + if !stored { + Self::save_nonce_for_retry( + &mut retry_store, + settings.nonce_cache_size, + consider.tx.metadata.origin_address, + expected_origin_nonce + 1, + ); + } + if consider.tx.tx.auth.is_sponsored() { - nonce_cache.update( + let stored = nonce_cache.update( consider.tx.metadata.sponsor_address, expected_sponsor_nonce + 1, self.conn(), ); + if !stored { + Self::save_nonce_for_retry( + &mut retry_store, + settings.nonce_cache_size, + consider.tx.metadata.sponsor_address, + expected_sponsor_nonce + 1, + ); + } } output_events.push(tx_event); } @@ -1497,6 +1598,22 @@ impl MemPoolDB { candidate_cache.reset(); } + // drop these rusqlite statements and queries, since their existence as immutable borrows on the + // connection prevents us from beginning a transaction below (which requires a mutable + // 
borrow). + drop(null_iterator); + drop(fee_iterator); + drop(query_stmt_null); + drop(query_stmt_fee); + + if retry_store.len() > 0 { + let tx = self.tx_begin()?; + for (address, nonce) in retry_store.into_iter() { + nonce_cache.update(address, nonce, &tx); + } + tx.commit()?; + } + debug!( "Mempool iteration finished"; "considered_txs" => total_considered, @@ -1987,6 +2104,34 @@ impl MemPoolDB { Ok(()) } + /// Miner-driven submit (e.g. for poison microblocks), where no checks are performed + pub fn miner_submit( + &mut self, + chainstate: &mut StacksChainState, + consensus_hash: &ConsensusHash, + block_hash: &BlockHeaderHash, + tx: &StacksTransaction, + event_observer: Option<&dyn MemPoolEventDispatcher>, + miner_estimate: f64, + ) -> Result<(), MemPoolRejection> { + let mut mempool_tx = self.tx_begin().map_err(MemPoolRejection::DBError)?; + + let fee_estimate = Some(miner_estimate); + + MemPoolDB::tx_submit( + &mut mempool_tx, + chainstate, + consensus_hash, + block_hash, + tx, + false, + event_observer, + fee_estimate, + )?; + mempool_tx.commit().map_err(MemPoolRejection::DBError)?; + Ok(()) + } + /// Directly submit to the mempool, and don't do any admissions checks. /// This method is only used during testing, but because it is used by the /// integration tests, it cannot be marked #[cfg(test)]. diff --git a/src/core/tests/mod.rs b/src/core/tests/mod.rs index 8e565202cf..7a11bf483c 100644 --- a/src/core/tests/mod.rs +++ b/src/core/tests/mod.rs @@ -15,6 +15,7 @@ // along with this program. If not, see . 
use std::cmp; +use std::collections::HashMap; use std::collections::HashSet; use std::io; @@ -39,6 +40,7 @@ use crate::chainstate::stacks::{ use crate::chainstate::stacks::{ C32_ADDRESS_VERSION_MAINNET_SINGLESIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, }; +use crate::core::mempool::db_get_all_nonces; use crate::core::mempool::MemPoolWalkSettings; use crate::core::mempool::TxTag; use crate::core::mempool::{BLOOM_COUNTER_DEPTH, BLOOM_COUNTER_ERROR_RATE, MAX_BLOOM_COUNTER_TXS}; @@ -63,6 +65,7 @@ use stacks_common::address::AddressHashMode; use stacks_common::types::chainstate::TrieHash; use stacks_common::util::hash::Hash160; use stacks_common::util::secp256k1::MessageSignature; +use stacks_common::util::sleep_ms; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs}; use stacks_common::util::{hash::hex_bytes, hash::to_hex, hash::*, log, secp256k1::*}; @@ -791,7 +794,7 @@ fn test_iterate_candidates_consider_no_estimate_tx_prob() { #[test] /// This test verifies that when a transaction is skipped, other transactions -/// from the same address with higher nonces are not included in a block. +/// from the same address with higher nonces are not considered for inclusion in a block. fn test_iterate_candidates_skipped_transaction() { let mut chainstate = instantiate_chainstate_with_balances( false, @@ -908,7 +911,7 @@ fn test_iterate_candidates_skipped_transaction() { #[test] /// This test verifies that when a transaction reports a processing error, other transactions -/// from the same address with higher nonces are not included in a block. +/// from the same address with higher nonces are not considered for inclusion in a block. 
fn test_iterate_candidates_processing_error_transaction() { let mut chainstate = instantiate_chainstate_with_balances( false, @@ -1027,7 +1030,7 @@ fn test_iterate_candidates_processing_error_transaction() { #[test] /// This test verifies that when a transaction is skipped, other transactions -/// from the same address with higher nonces are not included in a block. +/// from the same address with higher nonces are not considered for inclusion in a block. fn test_iterate_candidates_problematic_transaction() { let mut chainstate = instantiate_chainstate_with_balances( false, @@ -1144,6 +1147,168 @@ fn test_iterate_candidates_problematic_transaction() { ); } +#[test] +/// This test verifies that all transactions are visited, and nonce cache on disk updated, even if +/// there's a concurrent write-lock on the mempool DB. +fn test_iterate_candidates_concurrent_write_lock() { + let mut chainstate = instantiate_chainstate_with_balances( + false, + 0x80000000, + "test_iterate_candidates_concurrent_write_lock", + vec![], + ); + let chainstate_path = chainstate_path("test_iterate_candidates_concurrent_write_lock"); + let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); + let b_1 = make_block( + &mut chainstate, + ConsensusHash([0x1; 20]), + &( + FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), + FIRST_STACKS_BLOCK_HASH.clone(), + ), + 1, + 1, + ); + let b_2 = make_block(&mut chainstate, ConsensusHash([0x2; 20]), &b_1, 2, 2); + + let mut mempool_settings = MemPoolWalkSettings::default(); + mempool_settings.min_tx_fee = 10; + let mut tx_events = Vec::new(); + + let mut txs = codec_all_transactions( + &TransactionVersion::Testnet, + 0x80000000, + &TransactionAnchorMode::Any, + &TransactionPostConditionMode::Allow, + ); + + let mut expected_addr_nonces = HashMap::new(); + + // Load 24 transactions into the mempool, alternating whether or not they have a fee-rate. 
+ for nonce in 0..24 { + let mut tx = txs.pop().unwrap(); + let mut mempool_tx = mempool.tx_begin().unwrap(); + + let origin_address = tx.origin_address(); + let origin_nonce = tx.get_origin_nonce(); + let sponsor_address = tx.sponsor_address().unwrap_or(origin_address); + let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); + + if let Some(nonce) = expected_addr_nonces.get_mut(&origin_address) { + *nonce = cmp::max(*nonce, origin_nonce); + } else { + expected_addr_nonces.insert(origin_address.clone(), origin_nonce); + } + + if let Some(nonce) = expected_addr_nonces.get_mut(&sponsor_address) { + *nonce = cmp::max(*nonce, sponsor_nonce); + } else { + expected_addr_nonces.insert(sponsor_address.clone(), sponsor_nonce); + } + + tx.set_tx_fee(100); + let txid = tx.txid(); + let tx_bytes = tx.serialize_to_vec(); + let tx_fee = tx.get_tx_fee(); + let height = 100; + + MemPoolDB::try_add_tx( + &mut mempool_tx, + &mut chainstate, + &b_1.0, + &b_1.1, + txid, + tx_bytes, + tx_fee, + height, + &origin_address, + nonce, + &sponsor_address, + nonce, + None, + ) + .unwrap(); + + if nonce & 1 == 0 { + mempool_tx + .execute( + "UPDATE mempool SET fee_rate = ? WHERE txid = ?", + rusqlite::params![Some(123.0), &txid], + ) + .unwrap(); + } else { + let none: Option = None; + mempool_tx + .execute( + "UPDATE mempool SET fee_rate = ? 
WHERE txid = ?", + rusqlite::params![none, &txid], + ) + .unwrap(); + } + + mempool_tx.commit().unwrap(); + } + assert!(expected_addr_nonces.len() > 0); + + let all_addr_nonces = db_get_all_nonces(mempool.conn()).unwrap(); + assert_eq!(all_addr_nonces.len(), 0); + + // start a thread that holds a write-lock on the mempool + let write_thread = std::thread::spawn(move || { + let mut thread_mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); + let mempool_tx = thread_mempool.tx_begin().unwrap(); + sleep_ms(10_000); + }); + + sleep_ms(1_000); + + // 50% chance of considering a transaction with unknown fee estimate + mempool_settings.consider_no_estimate_tx_prob = 50; + chainstate.with_read_only_clarity_tx( + &TEST_BURN_STATE_DB, + &StacksBlockHeader::make_index_block_hash(&b_2.0, &b_2.1), + |clarity_conn| { + let mut count_txs = 0; + mempool + .iterate_candidates::<_, ChainstateError, _>( + clarity_conn, + &mut tx_events, + 2, + mempool_settings.clone(), + |_, available_tx, _| { + count_txs += 1; + Ok(Some( + // Generate any success result + TransactionResult::success( + &available_tx.tx.tx, + available_tx.tx.metadata.tx_fee, + StacksTransactionReceipt::from_stx_transfer( + available_tx.tx.tx.clone(), + vec![], + Value::okay(Value::Bool(true)).unwrap(), + ExecutionCost::zero(), + ), + ) + .convert_to_event(), + )) + }, + ) + .unwrap(); + assert_eq!(count_txs, 24, "Mempool should find all 24 transactions"); + }, + ); + + write_thread.join().unwrap(); + + let all_addr_nonces = db_get_all_nonces(mempool.conn()).unwrap(); + assert_eq!(all_addr_nonces.len(), expected_addr_nonces.len()); + + for (addr, nonce) in all_addr_nonces { + assert!(expected_addr_nonces.get(&addr).is_some()); + assert_eq!(nonce, 24); + } +} + #[test] fn mempool_do_not_replace_tx() { let mut chainstate = instantiate_chainstate_with_balances( diff --git a/src/net/mod.rs b/src/net/mod.rs index e9b1e5d099..24df9a6aba 100644 --- a/src/net/mod.rs +++ b/src/net/mod.rs @@ -1901,6 
+1901,7 @@ pub struct NetworkResult { pub num_state_machine_passes: u64, pub num_inv_sync_passes: u64, pub num_download_passes: u64, + pub burn_height: u64, } impl NetworkResult { @@ -1908,6 +1909,7 @@ impl NetworkResult { num_state_machine_passes: u64, num_inv_sync_passes: u64, num_download_passes: u64, + burn_height: u64, ) -> NetworkResult { NetworkResult { unhandled_messages: HashMap::new(), @@ -1925,6 +1927,7 @@ impl NetworkResult { num_state_machine_passes: num_state_machine_passes, num_inv_sync_passes: num_inv_sync_passes, num_download_passes: num_download_passes, + burn_height, } } diff --git a/src/net/p2p.rs b/src/net/p2p.rs index 9bd5873891..c35c91e5eb 100644 --- a/src/net/p2p.rs +++ b/src/net/p2p.rs @@ -5282,12 +5282,6 @@ impl PeerNetwork { .remove(&self.http_network_handle) .expect("BUG: no poll state for http network handle"); - let mut network_result = NetworkResult::new( - self.num_state_machine_passes, - self.num_inv_sync_passes, - self.num_downloader_passes, - ); - // update local-peer state self.refresh_local_peer() .expect("FATAL: failed to read local peer from the peer DB"); @@ -5297,6 +5291,13 @@ impl PeerNetwork { .refresh_burnchain_view(sortdb, chainstate, ibd) .expect("FATAL: failed to refresh burnchain view"); + let mut network_result = NetworkResult::new( + self.num_state_machine_passes, + self.num_inv_sync_passes, + self.num_downloader_passes, + self.chain_view.burn_block_height, + ); + network_result.consume_unsolicited(unsolicited_buffered_messages); // update PoX view, before handling any HTTP connections diff --git a/src/net/relay.rs b/src/net/relay.rs index 927052a6ab..82f7e25842 100644 --- a/src/net/relay.rs +++ b/src/net/relay.rs @@ -96,6 +96,9 @@ pub struct RelayerStats { pub struct ProcessedNetReceipts { pub mempool_txs_added: Vec, pub processed_unconfirmed_state: ProcessedUnconfirmedState, + pub num_new_blocks: u64, + pub num_new_confirmed_microblocks: u64, + pub num_new_unconfirmed_microblocks: u64, } /// Private trait for 
keeping track of messages that can be relayed, so we can identify the peers @@ -581,13 +584,21 @@ impl Relayer { return Ok(false); } - chainstate.preprocess_anchored_block( + let res = chainstate.preprocess_anchored_block( sort_ic, consensus_hash, block, &parent_block_snapshot.consensus_hash, download_time, - ) + )?; + if res { + debug!( + "Stored incoming block {}/{}", + consensus_hash, + &block.block_hash() + ); + } + Ok(res) } /// Coalesce a set of microblocks into relayer hints and MicroblocksData messages, as calculated by @@ -1600,8 +1611,16 @@ impl Relayer { coord_comms: Option<&CoordinatorChannels>, event_observer: Option<&dyn MemPoolEventDispatcher>, ) -> Result { + let mut num_new_blocks = 0; + let mut num_new_confirmed_microblocks = 0; + let mut num_new_unconfirmed_microblocks = 0; match Relayer::process_new_blocks(network_result, sortdb, chainstate, coord_comms) { Ok((new_blocks, new_confirmed_microblocks, new_microblocks, bad_block_neighbors)) => { + // report quantities of new data in the receipts + num_new_blocks = new_blocks.len() as u64; + num_new_confirmed_microblocks = new_confirmed_microblocks.len() as u64; + num_new_unconfirmed_microblocks = new_microblocks.len() as u64; + // attempt to relay messages (note that this is all best-effort). 
// punish bad peers if bad_block_neighbors.len() > 0 { @@ -1722,6 +1741,9 @@ impl Relayer { let receipts = ProcessedNetReceipts { mempool_txs_added, processed_unconfirmed_state, + num_new_blocks, + num_new_confirmed_microblocks, + num_new_unconfirmed_microblocks, }; Ok(receipts) @@ -5116,7 +5138,7 @@ pub mod test { let mut unsolicited = HashMap::new(); unsolicited.insert(nk.clone(), bad_msgs.clone()); - let mut network_result = NetworkResult::new(0, 0, 0); + let mut network_result = NetworkResult::new(0, 0, 0, 0); network_result.consume_unsolicited(unsolicited); assert!(network_result.has_blocks()); diff --git a/src/net/rpc.rs b/src/net/rpc.rs index 9b2bfcdf28..133b30aa6c 100644 --- a/src/net/rpc.rs +++ b/src/net/rpc.rs @@ -131,7 +131,7 @@ pub const STREAM_CHUNK_SIZE: u64 = 4096; #[derive(Default)] pub struct RPCHandlerArgs<'a> { - pub exit_at_block_height: Option<&'a u64>, + pub exit_at_block_height: Option, pub genesis_chainstate_hash: Sha256Sum, pub event_observer: Option<&'a dyn MemPoolEventDispatcher>, pub cost_estimator: Option<&'a dyn CostEstimator>, @@ -207,7 +207,7 @@ impl RPCPeerInfoData { pub fn from_network( network: &PeerNetwork, chainstate: &StacksChainState, - exit_at_block_height: &Option<&u64>, + exit_at_block_height: Option, genesis_chainstate_hash: &Sha256Sum, ) -> RPCPeerInfoData { let server_version = version_string( @@ -251,7 +251,7 @@ impl RPCPeerInfoData { .clone(), unanchored_tip: unconfirmed_tip, unanchored_seq: unconfirmed_seq, - exit_at_block_height: exit_at_block_height.cloned(), + exit_at_block_height: exit_at_block_height, genesis_chainstate_hash: genesis_chainstate_hash.clone(), node_public_key: Some(public_key_buf), node_public_key_hash: Some(public_key_hash), @@ -636,7 +636,7 @@ impl ConversationHttp { let pi = RPCPeerInfoData::from_network( network, chainstate, - &handler_args.exit_at_block_height, + handler_args.exit_at_block_height.clone(), &handler_args.genesis_chainstate_hash, ); let response = 
HttpResponseType::PeerInfo(response_metadata, pi); @@ -4088,7 +4088,7 @@ mod test { let peer_info = RPCPeerInfoData::from_network( &peer_server.network, &peer_server.stacks_node.as_ref().unwrap().chainstate, - &None, + None, &Sha256Sum::zero(), ); diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index ee25723459..7ddf20cab5 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -1,9 +1,10 @@ [package] name = "stacks-node" version = "0.1.0" -authors = ["Ludo Galabru "] +authors = ["Jude Nelson ", "Aaron Blankstein ", "Ludo Galabru "] edition = "2021" resolver = "2" +rust-version = "1.61" [dependencies] lazy_static = "1.4.0" diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 6a181505a6..226c86a449 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -80,7 +80,8 @@ pub struct BitcoinRegtestController { should_keep_running: Option>, } -struct OngoingBlockCommit { +#[derive(Clone)] +pub struct OngoingBlockCommit { payload: LeaderBlockCommitOp, utxos: UTXOSet, fees: LeaderBlockCommitFees, @@ -309,6 +310,23 @@ impl BitcoinRegtestController { } } + /// Creates a dummy bitcoin regtest controller, with the given ongoing block-commits + pub fn new_ongoing_dummy(config: Config, ongoing: Option) -> Self { + let mut ret = Self::new_dummy(config); + ret.ongoing_block_commit = ongoing; + ret + } + + /// Get an owned copy of the ongoing block commit state + pub fn get_ongoing_commit(&self) -> Option { + self.ongoing_block_commit.clone() + } + + /// Set the ongoing block commit state + pub fn set_ongoing_commit(&mut self, ongoing: Option) { + self.ongoing_block_commit = ongoing; + } + fn default_burnchain(&self) -> Burnchain { let (network_name, _network_type) = self.config.burnchain.get_bitcoin_network(); match 
&self.burnchain_config { @@ -1446,6 +1464,18 @@ impl BitcoinRegtestController { } } } + + #[cfg(test)] + pub fn get_mining_pubkey(&self) -> Option { + self.config.burnchain.local_mining_public_key.clone() + } + + #[cfg(test)] + pub fn set_mining_pubkey(&mut self, pubkey: String) -> Option { + let old_key = self.config.burnchain.local_mining_public_key.take(); + self.config.burnchain.local_mining_public_key = Some(pubkey); + old_key + } } impl BurnchainController for BitcoinRegtestController { @@ -1654,6 +1684,7 @@ impl SerializedTx { #[derive(Debug, Clone, Deserialize)] #[serde(rename_all = "camelCase")] +#[allow(dead_code)] pub struct ParsedUTXO { txid: String, vout: u32, diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index b101cacd1f..8d83e62e77 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -2,6 +2,8 @@ use std::convert::TryInto; use std::fs; use std::net::{SocketAddr, ToSocketAddrs}; use std::path::PathBuf; +use std::sync::Arc; +use std::sync::Mutex; use rand::RngCore; @@ -10,6 +12,7 @@ use stacks::burnchains::{MagicBytes, BLOCKSTACK_MAGIC_MAINNET}; use stacks::chainstate::stacks::index::marf::MARFOpenOpts; use stacks::chainstate::stacks::index::storage::TrieHashCalculationMode; use stacks::chainstate::stacks::miner::BlockBuilderSettings; +use stacks::chainstate::stacks::miner::MinerStatus; use stacks::chainstate::stacks::MAX_BLOCK_LEN; use stacks::core::mempool::MemPoolWalkSettings; use stacks::core::StacksEpoch; @@ -401,7 +404,7 @@ impl Config { name: node.name.unwrap_or(default_node_config.name), seed: match node.seed { Some(seed) => hex_bytes(&seed) - .map_err(|e| format!("node.seed should be a hex encoded string"))?, + .map_err(|_e| format!("node.seed should be a hex encoded string"))?, None => default_node_config.seed, }, working_dir: node.working_dir.unwrap_or(default_node_config.working_dir), @@ -415,7 +418,7 @@ impl Config { None => format!("http://{}", rpc_bind), }, 
local_peer_seed: match node.local_peer_seed { - Some(seed) => hex_bytes(&seed).map_err(|e| { + Some(seed) => hex_bytes(&seed).map_err(|_e| { format!("node.local_peer_seed should be a hex encoded string") })?, None => default_node_config.local_peer_seed, @@ -434,6 +437,9 @@ impl Config { wait_time_for_microblocks: node .wait_time_for_microblocks .unwrap_or(default_node_config.wait_time_for_microblocks), + wait_time_for_blocks: node + .wait_time_for_blocks + .unwrap_or(default_node_config.wait_time_for_blocks), prometheus_bind: node.prometheus_bind, marf_cache_strategy: node.marf_cache_strategy, marf_defer_hashing: node @@ -595,6 +601,7 @@ impl Config { probability_pick_no_estimate_tx: miner .probability_pick_no_estimate_tx .unwrap_or(miner_default_config.probability_pick_no_estimate_tx), + wait_for_block_download: miner_default_config.wait_for_block_download, nonce_cache_size: miner .nonce_cache_size .unwrap_or(miner_default_config.nonce_cache_size), @@ -953,6 +960,7 @@ impl Config { &self, attempt: u64, microblocks: bool, + miner_status: Arc>, ) -> BlockBuilderSettings { BlockBuilderSettings { max_miner_time_ms: if microblocks { @@ -979,6 +987,7 @@ impl Config { nonce_cache_size: self.miner.nonce_cache_size, candidate_retry_cache_size: self.miner.candidate_retry_cache_size, }, + miner_status, } } } @@ -1141,6 +1150,7 @@ pub struct NodeConfig { pub microblock_frequency: u64, pub max_microblocks: u64, pub wait_time_for_microblocks: u64, + pub wait_time_for_blocks: u64, pub prometheus_bind: Option, pub marf_cache_strategy: Option, pub marf_defer_hashing: bool, @@ -1335,7 +1345,7 @@ impl FeeEstimationConfig { } } - pub fn make_scalar_fee_estimator( + pub fn make_scalar_fee_estimator( &self, mut estimates_path: PathBuf, metric: CM, @@ -1353,7 +1363,7 @@ impl FeeEstimationConfig { // Creates a fuzzed WeightedMedianFeeRateEstimator with window_size 5. The fuzz // is uniform with bounds [+/- 0.5]. 
- pub fn make_fuzzed_weighted_median_fee_estimator( + pub fn make_fuzzed_weighted_median_fee_estimator( &self, mut estimates_path: PathBuf, metric: CM, @@ -1414,6 +1424,7 @@ impl NodeConfig { microblock_frequency: 30_000, max_microblocks: u16::MAX as u64, wait_time_for_microblocks: 30_000, + wait_time_for_blocks: 30_000, prometheus_bind: None, marf_cache_strategy: None, marf_defer_hashing: true, @@ -1458,6 +1469,7 @@ impl NodeConfig { let (pubkey_str, hostport) = (parts[0], parts[1]); let pubkey = Secp256k1PublicKey::from_hex(pubkey_str) .expect(&format!("Invalid public key '{}'", pubkey_str)); + debug!("Resolve '{}'", &hostport); let sockaddr = hostport.to_socket_addrs().unwrap().next().unwrap(); let neighbor = NodeConfig::default_neighbor(sockaddr, pubkey, chain_id, peer_version); self.bootstrap_node.push(neighbor); @@ -1522,6 +1534,9 @@ pub struct MinerConfig { pub subsequent_attempt_time_ms: u64, pub microblock_attempt_time_ms: u64, pub probability_pick_no_estimate_tx: u8, + /// Wait for a downloader pass before mining. + /// This can only be disabled in testing; it can't be changed in the config file. 
+ pub wait_for_block_download: bool, pub nonce_cache_size: u64, pub candidate_retry_cache_size: u64, } @@ -1534,6 +1549,7 @@ impl MinerConfig { subsequent_attempt_time_ms: 30_000, microblock_attempt_time_ms: 30_000, probability_pick_no_estimate_tx: 5, + wait_for_block_download: true, nonce_cache_size: 10_000, candidate_retry_cache_size: 10_000, } @@ -1601,6 +1617,7 @@ pub struct NodeConfigFile { pub microblock_frequency: Option, pub max_microblocks: Option, pub wait_time_for_microblocks: Option, + pub wait_time_for_blocks: Option, pub prometheus_bind: Option, pub marf_cache_strategy: Option, pub marf_defer_hashing: Option, diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 615bbb0c28..e290fb201d 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -38,6 +38,7 @@ use super::config::{EventKeyType, EventObserverConfig}; use stacks::chainstate::burn::ConsensusHash; use stacks::chainstate::stacks::db::unconfirmed::ProcessedUnconfirmedState; use stacks::chainstate::stacks::miner::TransactionEvent; +use stacks::chainstate::stacks::TransactionPayload; #[derive(Debug, Clone)] struct EventObserver { @@ -205,7 +206,17 @@ impl EventObserver { } } (true, Value::Response(_)) => STATUS_RESP_POST_CONDITION, - _ => unreachable!(), // Transaction results should always be a Value::Response type + _ => { + if let TransactionOrigin::Stacks(inner_tx) = &tx { + if let TransactionPayload::PoisonMicroblock(..) 
= &inner_tx.payload { + STATUS_RESP_TRUE + } else { + unreachable!() // Transaction results should otherwise always be a Value::Response type + } + } else { + unreachable!() // Transaction results should always be a Value::Response type + } + } }; let (txid, raw_tx) = match tx { diff --git a/testnet/stacks-node/src/keychain.rs b/testnet/stacks-node/src/keychain.rs index bc98f73163..98506e308f 100644 --- a/testnet/stacks-node/src/keychain.rs +++ b/testnet/stacks-node/src/keychain.rs @@ -20,7 +20,6 @@ pub struct Keychain { microblocks_secret_keys: Vec, vrf_secret_keys: Vec, vrf_map: HashMap, - rotations: u64, } impl Keychain { @@ -46,7 +45,6 @@ impl Keychain { microblocks_secret_keys: vec![], secret_keys, threshold, - rotations: 0, vrf_secret_keys: vec![], vrf_map: HashMap::new(), } diff --git a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs index 220972425c..b7f20e5bb1 100644 --- a/testnet/stacks-node/src/main.rs +++ b/testnet/stacks-node/src/main.rs @@ -120,8 +120,8 @@ fn main() { process::exit(1); } }; - let conf = match Config::from_config_file(config_file) { - Ok(conf) => { + match Config::from_config_file(config_file) { + Ok(_) => { info!("Loaded config!"); process::exit(0); } diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 156c05be93..fcef28f8ad 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -1,14 +1,150 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +/// Main body of code for the Stacks node and miner. +/// +/// System schematic. +/// Legend: +/// |------| Thread +/// /------\ Shared memory +/// @------@ Database +/// .------. Code module +/// +/// +/// |------------------| +/// | RunLoop thread | [1,7] +/// | .----------. |--------------------------------------. +/// | .StacksNode. | | +/// |---.----------.---| | +/// [1] | | | [1] | +/// .----------------* | *---------------. | +/// | [3] | | | +/// V | V V +/// |----------------| | [9,10] |---------------| [11] |--------------------------| +/// .--- | Relayer thread | <-----------|-----------> | P2P Thread | <--- | ChainsCoordinator thread | <--. 
+/// | |----------------| V |---------------| |--------------------------| | +/// | | | /-------------\ [2,3] | | | | | +/// | [1] | *--------> / Globals \ <-----------*----|--------------* | [4] | +/// | | [2,3,7] /-------------\ | | | +/// | V V [5] V | +/// | |----------------| @--------------@ @------------------@ | +/// | | Miner thread | <------------------------------ @ Mempool DB @ @ Chainstate DBs @ | +/// | |----------------| [6] @--------------@ @------------------@ | +/// | ^ | +/// | [8] | | +/// *----------------------------------------------------------------------------------------* | +/// | [7] | +/// *--------------------------------------------------------------------------------------------------------* +/// +/// [1] Spawns +/// [2] Synchronize unconfirmed state +/// [3] Enable/disable miner +/// [4] Processes block data +/// [5] Stores unconfirmed transactions +/// [6] Reads unconfirmed transactions +/// [7] Signals block arrival +/// [8] Store blocks and microblocks +/// [9] Pushes retrieved blocks and microblocks +/// [10] Broadcasts new blocks, microblocks, and transactions +/// [11] Notifies about new transaction attachment events +/// +/// When the node is running, there are 4-5 active threads at once. They are: +/// +/// * **RunLoop Thread**: This is the main thread, whose code body lives in src/run_loop/neon.rs. +/// This thread is responsible for: +/// * Bootup +/// * Running the burnchain indexer +/// * Notifying the ChainsCoordinator thread when there are new burnchain blocks to process +/// +/// * **Relayer Thread**: This is the thread that stores and relays blocks and microblocks. Both +/// it and the ChainsCoordinator thread are very I/O-heavy threads, and care has been taken to +/// ensure that neither one attempts to acquire a write-lock in the underlying databases. 
+/// Specifically, this thread directs the ChainsCoordinator thread when to process new Stacks
+/// blocks, and it directs the miner thread (if running) to stop when either it or the
+/// ChainsCoordinator thread needs to acquire the write-lock.
+/// This thread is responsible for:
+/// * Receiving new blocks and microblocks from the P2P thread via a shared channel
+/// * (Synchronously) requesting the CoordinatorThread to process newly-stored Stacks blocks and
+/// microblocks
+/// * Building up the node's unconfirmed microblock stream state, and sharing it with the P2P
+/// thread so it can answer queries about the unconfirmed microblock chain
+/// * Pushing newly-discovered blocks and microblocks to the P2P thread for broadcast
+/// * Registering the VRF public key for the miner
+/// * Spawning the block and microblock miner threads, and stopping them if their continued
+/// execution would inhibit block or microblock storage or processing.
+/// * Submitting the burnchain operation to commit to a freshly-mined block
+///
+/// * **Miner thread**: This is the thread that actually produces new blocks and microblocks. It
+/// is spawned only by the Relayer thread to carry out mining activity when the underlying
+/// chainstate is not needed by either the Relayer or ChainsCoordinator threads.
+/// This thread does the following:
+/// * Walk the mempool DB to build a new block or microblock
+/// * Return the block or microblock to the Relayer thread
+///
+/// * **P2P Thread**: This is the thread that communicates with the rest of the p2p network, and
+/// handles RPC requests. It is meant to do as little storage-write I/O as possible to avoid lock
+/// contention with the Miner, Relayer, and ChainsCoordinator threads. In particular, it forwards
+/// data it receives from the p2p thread to the Relayer thread for I/O-bound processing. At the
+/// time of this writing, it still requires holding a write-lock to handle some RPC requests, but
+/// future work will remove this so that this thread's execution will not interfere with the
+/// others. This is the only thread that does socket I/O.
+/// This thread runs the PeerNetwork state machines, which include the following:
+/// * Learning the node's public IP address
+/// * Discovering neighbor nodes
+/// * Forwarding newly-discovered blocks, microblocks, and transactions from the Relayer thread to
+/// other neighbors
+/// * Synchronizing block and microblock inventory state with other neighbors
+/// * Downloading blocks and microblocks, and passing them to the Relayer for storage and processing
+/// * Downloading transaction attachments as their hashes are discovered during block processing
+/// * Synchronizing the local mempool database with other neighbors
+/// (notifications for new attachments come from a shared channel in the ChainsCoordinator thread)
+/// * Handling HTTP requests
+///
+/// * **ChainsCoordinator Thread**: This thread processes sortitions and Stacks blocks and
+/// microblocks, and handles PoX reorgs should they occur (this mainly happens in boot-up). It,
+/// like the Relayer thread, is a very I/O-heavy thread, and it will hold a write-lock on the
+/// chainstate DBs while it works. Its actions are controlled by a CoordinatorComms structure in
+/// the Globals shared state, which the Relayer thread and RunLoop thread both drive (the former
+/// drives Stacks blocks processing, the latter sortitions). 
+/// This thread is responsible for: +/// * Responding to requests from other threads to process sortitions +/// * Responding to requests from other threads to process Stacks blocks and microblocks +/// * Processing PoX chain reorgs, should they ever happen +/// * Detecting attachment creation events, and informing the P2P thread of them so it can go +/// and download them +/// +/// In addition to the mempool and chainstate databases, these threads share access to a Globals +/// singleton that contains soft state shared between threads. Mainly, the Globals struct is meant +/// to store inter-thread shared singleton communication media all in one convenient struct. Each +/// thread has a handle to the struct's shared state handles. Global state includes: +/// * The global flag as to whether or not the miner thread can be running +/// * The global shutdown flag that, when set, causes all threads to terminate +/// * Sender channel endpoints that can be shared between threads +/// * Metrics about the node's behavior (e.g. number of blocks processed, etc.) +/// +/// This file may be refactored in the future into a full-fledged module. 
use std::cmp; use std::collections::HashMap; use std::collections::{HashSet, VecDeque}; use std::convert::{TryFrom, TryInto}; use std::default::Default; -use std::fs; -use std::io::Write; +use std::mem; use std::net::SocketAddr; -use std::path::Path; -use std::sync::mpsc::{sync_channel, Receiver, SyncSender, TrySendError}; -use std::sync::{atomic::Ordering, Arc, Mutex}; +use std::sync::mpsc::{Receiver, SyncSender, TrySendError}; +use std::sync::{atomic::AtomicBool, atomic::Ordering, Arc, Mutex}; use std::time::Duration; use std::{thread, thread::JoinHandle}; @@ -27,8 +163,8 @@ use stacks::chainstate::stacks::db::{StacksChainState, MINER_REWARD_MATURITY}; use stacks::chainstate::stacks::Error as ChainstateError; use stacks::chainstate::stacks::StacksPublicKey; use stacks::chainstate::stacks::{ - miner::BlockBuilderSettings, miner::StacksMicroblockBuilder, StacksBlockBuilder, - StacksBlockHeader, + miner::signal_mining_blocked, miner::signal_mining_ready, miner::BlockBuilderSettings, + miner::MinerStatus, miner::StacksMicroblockBuilder, StacksBlockBuilder, StacksBlockHeader, }; use stacks::chainstate::stacks::{ CoinbasePayload, StacksBlock, StacksMicroblock, StacksTransaction, StacksTransactionSigner, @@ -38,12 +174,15 @@ use stacks::codec::StacksMessageCodec; use stacks::core::mempool::MemPoolDB; use stacks::core::FIRST_BURNCHAIN_CONSENSUS_HASH; use stacks::core::STACKS_EPOCH_2_05_MARKER; +use stacks::cost_estimates::metrics::CostMetric; use stacks::cost_estimates::metrics::UnitMetric; use stacks::cost_estimates::UnitEstimator; +use stacks::cost_estimates::{CostEstimator, FeeEstimator}; use stacks::monitoring::{increment_stx_blocks_mined_counter, update_active_miners_count_gauge}; use stacks::net::{ atlas::{AtlasConfig, AtlasDB, AttachmentInstance}, db::{LocalPeer, PeerDB}, + dns::DNSClient, dns::DNSResolver, p2p::PeerNetwork, relay::Relayer, @@ -63,58 +202,263 @@ use stacks::vm::costs::ExecutionCost; use stacks::{burnchains::BurnchainSigner, 
chainstate::stacks::db::StacksHeaderInfo}; use crate::burnchains::bitcoin_regtest_controller::BitcoinRegtestController; +use crate::burnchains::bitcoin_regtest_controller::OngoingBlockCommit; use crate::run_loop::neon::Counters; use crate::run_loop::neon::RunLoop; use crate::run_loop::RegisteredKey; use crate::ChainTip; -use super::{BurnchainController, BurnchainTip, Config, EventDispatcher, Keychain}; -use crate::stacks::vm::database::BurnStateDB; +use super::{BurnchainController, Config, EventDispatcher, Keychain}; +use crate::syncctl::PoxSyncWatchdogComms; use stacks::monitoring; +use stacks_common::types::chainstate::StacksPrivateKey; +use stacks_common::util::vrf::VRFProof; + use clarity::vm::ast::ASTRules; pub const RELAYER_MAX_BUFFER: usize = 100; +const VRF_MOCK_MINER_KEY: u64 = 1; + +type MinedBlocks = HashMap; + +/// Result of running the miner thread. It could produce a Stacks block or a microblock. +enum MinerThreadResult { + Block( + AssembledAnchorBlock, + Keychain, + Secp256k1PrivateKey, + Option, + ), + Microblock( + Result, NetError>, + MinerTip, + ), +} +/// Fully-assembled Stacks anchored block, as well as some extra metadata pertaining to how it was +/// linked to the burnchain and what view(s) the miner had of the burnchain before and after +/// completing the block. +#[derive(Clone)] struct AssembledAnchorBlock { + /// Consensus hash of the parent Stacks block parent_consensus_hash: ConsensusHash, + /// Burnchain tip's block hash when we finished mining my_burn_hash: BurnchainHeaderHash, + /// Burnchain tip's block height when we finished mining + my_block_height: u64, + /// Burnchain tip's block hash when we started mining (could be different) + orig_burn_hash: BurnchainHeaderHash, + /// The block we produced anchored_block: StacksBlock, + /// The attempt count of this block (multiple blocks will be attempted per burnchain block) attempt: u64, + /// Epoch timestamp in milliseconds when we started producing the block. 
+ tenure_begin: u128, } -struct MicroblockMinerState { - parent_consensus_hash: ConsensusHash, - parent_block_hash: BlockHeaderHash, - miner_key: Secp256k1PrivateKey, - frequency: u64, - last_mined: u128, - quantity: u64, - cost_so_far: ExecutionCost, - settings: BlockBuilderSettings, -} - -enum RelayerDirective { +/// Command types for the relayer thread, issued to it by other threads +pub enum RelayerDirective { + /// Handle some new data that arrived on the network (such as blocks, transactions, and + /// microblocks) HandleNetResult(NetworkResult), + /// Announce a new sortition. Process and broadcast the block if we won. ProcessTenure(ConsensusHash, BurnchainHeaderHash, BlockHeaderHash), + /// Try to mine a block RunTenure(RegisteredKey, BlockSnapshot, u128), // (vrf key, chain tip, time of issuance in ms) + /// Try to register a VRF public key RegisterKey(BlockSnapshot), - RunMicroblockTenure(BlockSnapshot, u128), // time of issuance in ms + /// Stop the relayer thread Exit, } +/// Inter-thread communication structure, shared between threads +#[derive(Clone)] +pub struct Globals { + /// Last sortition processed + last_sortition: Arc>>, + /// Status of the miner + miner_status: Arc>, + /// Communication link to the coordinator thread + coord_comms: CoordinatorChannels, + /// Unconfirmed transactions (shared between the relayer and p2p threads) + unconfirmed_txs: Arc>, + /// Writer endpoint to the relayer thread + relay_send: SyncSender, + /// Counter state in the main thread + counters: Counters, + /// Connection to the PoX sync watchdog + sync_comms: PoxSyncWatchdogComms, + /// Global flag to see if we should keep running + pub should_keep_running: Arc, +} + +/// Miner chain tip, on top of which to build microblocks +#[derive(Debug, Clone, PartialEq)] +pub struct MinerTip { + /// tip's consensus hash + consensus_hash: ConsensusHash, + /// tip's Stacks block header hash + block_hash: BlockHeaderHash, + /// Microblock private key to use to sign microblocks + 
microblock_privkey: Secp256k1PrivateKey, + /// Stacks height + stacks_height: u64, + /// burnchain height + burn_height: u64, +} + +impl MinerTip { + pub fn new( + ch: ConsensusHash, + bh: BlockHeaderHash, + pk: Secp256k1PrivateKey, + stacks_height: u64, + burn_height: u64, + ) -> MinerTip { + MinerTip { + consensus_hash: ch, + block_hash: bh, + microblock_privkey: pk, + stacks_height, + burn_height, + } + } +} + +impl Globals { + pub fn new( + coord_comms: CoordinatorChannels, + miner_status: Arc>, + relay_send: SyncSender, + counters: Counters, + sync_comms: PoxSyncWatchdogComms, + should_keep_running: Arc, + ) -> Globals { + Globals { + last_sortition: Arc::new(Mutex::new(None)), + miner_status, + coord_comms, + unconfirmed_txs: Arc::new(Mutex::new(UnconfirmedTxMap::new())), + relay_send, + counters, + sync_comms, + should_keep_running, + } + } + + /// Get the last sortition processed by the relayer thread + pub fn get_last_sortition(&self) -> Option { + match self.last_sortition.lock() { + Ok(sort_opt) => sort_opt.clone(), + Err(_) => { + error!("Sortition mutex poisoned!"); + panic!(); + } + } + } + + /// Set the last sortition processed + pub fn set_last_sortition(&self, block_snapshot: BlockSnapshot) { + match self.last_sortition.lock() { + Ok(mut sortition_opt) => { + sortition_opt.replace(block_snapshot); + } + Err(_) => { + error!("Sortition mutex poisoned!"); + panic!(); + } + }; + } + + /// Get the status of the miner (blocked or ready) + pub fn get_miner_status(&self) -> Arc> { + self.miner_status.clone() + } + + /// Get the main thread's counters + pub fn get_counters(&self) -> Counters { + self.counters.clone() + } + + /// Called by the relayer to pass unconfirmed txs to the p2p thread, so the p2p thread doesn't + /// need to do the disk I/O needed to instantiate the unconfirmed state trie they represent. + /// Clears the unconfirmed transactions, and replaces them with the chainstate's. 
+ pub fn send_unconfirmed_txs(&self, chainstate: &StacksChainState) { + if let Some(ref unconfirmed) = chainstate.unconfirmed_state { + match self.unconfirmed_txs.lock() { + Ok(mut txs) => { + txs.clear(); + txs.extend(unconfirmed.mined_txs.clone()); + } + Err(e) => { + // can only happen due to a thread panic in the relayer + error!("FATAL: unconfirmed tx arc mutex is poisoned: {:?}", &e); + panic!(); + } + }; + } + } + + /// Called by the p2p thread to accept the unconfirmed tx state processed by the relayer. + /// Puts the shared unconfirmed transactions to chainstate. + pub fn recv_unconfirmed_txs(&self, chainstate: &mut StacksChainState) { + if let Some(ref mut unconfirmed) = chainstate.unconfirmed_state { + match self.unconfirmed_txs.lock() { + Ok(txs) => { + unconfirmed.mined_txs.clear(); + unconfirmed.mined_txs.extend(txs.clone()); + } + Err(e) => { + // can only happen due to a thread panic in the relayer + error!("FATAL: unconfirmed arc mutex is poisoned: {:?}", &e); + panic!(); + } + }; + } + } + + /// Signal system-wide stop + pub fn signal_stop(&self) { + self.should_keep_running.store(false, Ordering::SeqCst); + } + + /// Should we keep running? + pub fn keep_running(&self) -> bool { + self.should_keep_running.load(Ordering::SeqCst) + } + + /// Get the handle to the coordinator + pub fn coord(&self) -> &CoordinatorChannels { + &self.coord_comms + } +} + +/// Node implementation for both miners and followers. +/// This struct is used to set up the node proper and launch the p2p thread and relayer thread. +/// It is further used by the main thread to communicate with these two threads. 
pub struct StacksNode { + /// Node configuration config: Config, - relay_channel: SyncSender, - last_sortition: Arc>>, + /// Atlas network configuration + pub atlas_config: AtlasConfig, + /// Global inter-thread communication handle + pub globals: Globals, + /// Stringy representation of our keychain (the authoritative keychain is stored in the + /// subordinate RelayerThread instance) burnchain_signer: BurnchainSigner, + /// True if we're a miner is_miner: bool, - pub atlas_config: AtlasConfig, + /// VRF public key registration state machine leader_key_registration_state: LeaderKeyRegistrationState, + /// handle to the p2p thread pub p2p_thread_handle: JoinHandle<()>, + /// handle to the relayer thread pub relayer_thread_handle: JoinHandle<()>, } +/// Fault injection logic to artificially increase the length of a tenure. +/// Only used in testing #[cfg(test)] fn fault_injection_long_tenure() { // simulated slow block @@ -139,14 +483,23 @@ fn fault_injection_long_tenure() { #[cfg(not(test))] fn fault_injection_long_tenure() {} +/// Types of errors that can arise during mining enum Error { + /// Can't find the header record for the chain tip HeaderNotFoundForChainTip, + /// Can't find the stacks block's offset in the burnchain block WinningVtxNotFoundForChainTip, + /// Can't find the block sortition snapshot for the chain tip SnapshotNotFoundForChainTip, + /// The burnchain tip changed while this operation was in progress BurnchainTipChanged, + /// The coordinator channel closed + CoordinatorClosed, } -struct MiningTenureInformation { +/// Metadata required for beginning a new tenure +struct ParentStacksBlockInfo { + /// Header metadata for the Stacks block we're going to build on top of stacks_parent_header: StacksHeaderInfo, /// the consensus hash of the sortition that selected the Stacks block parent parent_consensus_hash: ConsensusHash, @@ -154,2164 +507,3485 @@ struct MiningTenureInformation { parent_block_burn_height: u64, /// the total amount burned in the 
sortition that selected the Stacks block parent parent_block_total_burn: u64, + /// offset in the burnchain block where the parent's block-commit was parent_winning_vtxindex: u16, + /// nonce to use for this new block's coinbase transaction coinbase_nonce: u64, } -/// Process artifacts from the tenure. -/// At this point, we're modifying the chainstate, and merging the artifacts from the previous tenure. -fn inner_process_tenure( - anchored_block: &StacksBlock, - consensus_hash: &ConsensusHash, - parent_consensus_hash: &ConsensusHash, - burn_db: &mut SortitionDB, - chain_state: &mut StacksChainState, - coord_comms: &CoordinatorChannels, -) -> Result { - let stacks_blocks_processed = coord_comms.get_stacks_blocks_processed(); - - if StacksChainState::has_stored_block( - &chain_state.db(), - &chain_state.blocks_path, - consensus_hash, - &anchored_block.block_hash(), - )? { - // already processed my tenure - return Ok(true); - } - let burn_height = SortitionDB::get_block_snapshot_consensus(burn_db.conn(), consensus_hash) - .map_err(|e| { - error!("Failed to find block snapshot for mined block: {}", e); - e - })? - .ok_or_else(|| { - error!("Failed to find block snapshot for mined block"); - ChainstateError::NoSuchBlockError - })? - .block_height; - - let ast_rules = SortitionDB::get_ast_rules(burn_db.conn(), burn_height)?; - - // failsafe - if !Relayer::static_check_problematic_relayed_block( - chain_state.mainnet, - &anchored_block, - ASTRules::PrecheckSize, - ) { - // nope! 
- warn!( - "Our mined block {} was problematic", - &anchored_block.block_hash() - ); - #[cfg(any(test, feature = "testing"))] - { - if let Ok(path) = std::env::var("STACKS_BAD_BLOCKS_DIR") { - // record this block somewhere - if !fs::metadata(&path).is_ok() { - fs::create_dir_all(&path) - .expect(&format!("FATAL: could not create '{}'", &path)); - } - - let mut path = Path::new(&path); - let path = path.join(Path::new(&format!("{}", &anchored_block.block_hash()))); - let mut file = fs::File::create(&path) - .expect(&format!("FATAL: could not create '{:?}'", &path)); - - let block_bits = anchored_block.serialize_to_vec(); - let block_bits_hex = to_hex(&block_bits); - let block_json = format!( - r#"{{"block":"{}","consensus":"{}"}}"#, - &block_bits_hex, &consensus_hash - ); - file.write_all(&block_json.as_bytes()).expect(&format!( - "FATAL: failed to write block bits to '{:?}'", - &path - )); - info!( - "Fault injection: bad block {} saved to {}", - &anchored_block.block_hash(), - &path.to_str().unwrap() - ); - } - } - if !Relayer::process_mined_problematic_blocks(ast_rules, ASTRules::PrecheckSize) { - // don't process it - warn!( - "Will NOT process our problematic mined block {}", - &anchored_block.block_hash() - ); - return Err(ChainstateError::NoTransactionsToMine); - } else { - warn!( - "Will process our problematic mined block {}", - &anchored_block.block_hash() - ) - } - } - - // Preprocess the anchored block - let ic = burn_db.index_conn(); - chain_state.preprocess_anchored_block( - &ic, - consensus_hash, - &anchored_block, - &parent_consensus_hash, - 0, - )?; - - if !coord_comms.announce_new_stacks_block() { - return Ok(false); - } - if !coord_comms.wait_for_stacks_blocks_processed(stacks_blocks_processed, 15000) { - warn!("ChainsCoordinator timed out while waiting for new stacks block to be processed"); - } - - Ok(true) -} - -fn inner_generate_coinbase_tx( - keychain: &mut Keychain, - nonce: u64, - is_mainnet: bool, - chain_id: u32, -) -> StacksTransaction 
{ - let mut tx_auth = keychain.get_transaction_auth().unwrap(); - tx_auth.set_origin_nonce(nonce); - - let version = if is_mainnet { - TransactionVersion::Mainnet - } else { - TransactionVersion::Testnet - }; - let mut tx = StacksTransaction::new( - version, - tx_auth, - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32])), - ); - tx.chain_id = chain_id; - tx.anchor_mode = TransactionAnchorMode::OnChainOnly; - let mut tx_signer = StacksTransactionSigner::new(&tx); - keychain.sign_as_origin(&mut tx_signer); - - tx_signer.get_tx().unwrap() -} - -fn inner_generate_poison_microblock_tx( - keychain: &mut Keychain, - nonce: u64, - poison_payload: TransactionPayload, - is_mainnet: bool, - chain_id: u32, -) -> StacksTransaction { - let mut tx_auth = keychain.get_transaction_auth().unwrap(); - tx_auth.set_origin_nonce(nonce); - - let version = if is_mainnet { - TransactionVersion::Mainnet - } else { - TransactionVersion::Testnet - }; - let mut tx = StacksTransaction::new(version, tx_auth, poison_payload); - tx.chain_id = chain_id; - tx.anchor_mode = TransactionAnchorMode::OnChainOnly; - let mut tx_signer = StacksTransactionSigner::new(&tx); - keychain.sign_as_origin(&mut tx_signer); - - tx_signer.get_tx().unwrap() +/// States we can be in when registering a leader VRF key +enum LeaderKeyRegistrationState { + /// Not started yet + Inactive, + /// Waiting for burnchain confirmation + Pending, + /// Ready to go! 
+ Active(RegisteredKey), } -/// Constructs and returns a LeaderKeyRegisterOp out of the provided params -fn inner_generate_leader_key_register_op( - address: StacksAddress, - vrf_public_key: VRFPublicKey, - consensus_hash: &ConsensusHash, -) -> BlockstackOperationType { - BlockstackOperationType::LeaderKeyRegister(LeaderKeyRegisterOp { - public_key: vrf_public_key, - memo: vec![], - address, - consensus_hash: consensus_hash.clone(), - vtxindex: 0, - txid: Txid([0u8; 32]), - block_height: 0, - burn_header_hash: BurnchainHeaderHash::zero(), - }) +/// Relayer thread +/// * accepts network results and stores blocks and microblocks +/// * forwards new blocks, microblocks, and transactions to the p2p thread +/// * processes burnchain state +/// * if mining, runs the miner and broadcasts blocks (via a subordinate MinerThread) +pub struct RelayerThread { + /// Node config + config: Config, + /// Handle to the sortition DB (optional so we can take/replace it) + sortdb: Option, + /// Handle to the chainstate DB (optional so we can take/replace it) + chainstate: Option, + /// Handle to the mempool DB (optional so we can take/replace it) + mempool: Option, + /// Handle to global state and inter-thread communication channels + globals: Globals, + /// Authoritative copy of the keychain state + keychain: Keychain, + /// Burnchain configuration + burnchain: Burnchain, + /// Set of blocks that we have mined, but are still potentially-broadcastable + last_mined_blocks: MinedBlocks, + /// client to the burnchain (used only for sending block-commits) + bitcoin_controller: BitcoinRegtestController, + /// client to the event dispatcher + event_dispatcher: EventDispatcher, + + /// copy of the local peer state + local_peer: LocalPeer, + /// last time we tried to mine a block (in millis) + last_tenure_issue_time: u128, + /// last observed burnchain block height from the p2p thread (obtained from network results) + last_network_block_height: u64, + /// time at which we observed a change in 
the network block height (epoch time in millis) + last_network_block_height_ts: u128, + /// last observed number of downloader state-machine passes from the p2p thread (obtained from + /// network results) + last_network_download_passes: u64, + /// last observed number of inventory state-machine passes from the p2p thread (obtained from + /// network results) + last_network_inv_passes: u64, + /// minimum number of downloader state-machine passes that must take place before mining (this + /// is used to ensure that the p2p thread attempts to download new Stacks block data before + /// this thread tries to mine a block) + min_network_download_passes: u64, + /// minimum number of inventory state-machine passes that must take place before mining (this + /// is used to ensure that the p2p thread attempts to download new Stacks block data before + /// this thread tries to mine a block) + min_network_inv_passes: u64, + + /// consensus hash of the last sortition we saw, even if we weren't the winner + last_tenure_consensus_hash: Option, + /// tip of last tenure we won (used for mining microblocks) + miner_tip: Option, + /// last time we mined a microblock, in millis + last_microblock_tenure_time: u128, + /// when should we run the next microblock tenure, in millis + microblock_deadline: u128, + /// cost of the last-produced microblock stream + microblock_stream_cost: ExecutionCost, + + /// Inner relayer instance for forwarding broadcasted data back to the p2p thread for dispatch + /// to neighbors + relayer: Relayer, + + /// handle to the subordinate miner thread + miner_thread: Option>>, + /// if true, then the last time the miner thread was launched, it was used to mine a Stacks + /// block (used to alternate between mining microblocks and Stacks blocks that confirm them) + mined_stacks_block: bool, } -fn rotate_vrf_and_register( - is_mainnet: bool, - keychain: &mut Keychain, - burn_block: &BlockSnapshot, - btc_controller: &mut BitcoinRegtestController, -) -> bool { - 
let vrf_pk = keychain.rotate_vrf_keypair(burn_block.block_height); - let burnchain_tip_consensus_hash = &burn_block.consensus_hash; - let op = inner_generate_leader_key_register_op( - keychain.get_address(is_mainnet), - vrf_pk, - burnchain_tip_consensus_hash, - ); - - let mut one_off_signer = keychain.generate_op_signer(); - btc_controller.submit_operation(op, &mut one_off_signer, 1) +struct BlockMinerThread { + /// node config struct + config: Config, + /// handle to global state + globals: Globals, + /// copy of the node's keychain + keychain: Keychain, + /// burnchain configuration + burnchain: Burnchain, + /// Set of blocks that we have mined, but are still potentially-broadcastable + /// (copied from RelayerThread since we need the info to determine the strategy for mining the + /// next block during this tenure). + last_mined_blocks: MinedBlocks, + /// Copy of the node's last ongoing block commit from the last time this thread was run + ongoing_commit: Option, + /// Copy of the node's registered VRF key + registered_key: RegisteredKey, + /// Burnchain block snapshot at the time this thread was initialized + burn_block: BlockSnapshot, + /// Handle to the node's event dispatcher + event_dispatcher: EventDispatcher, } -/// Constructs and returns a LeaderBlockCommitOp out of the provided params -fn inner_generate_block_commit_op( - sender: BurnchainSigner, - block_header_hash: BlockHeaderHash, - burn_fee: u64, - key: &RegisteredKey, - parent_burnchain_height: u32, - parent_winning_vtx: u16, - vrf_seed: VRFSeed, - commit_outs: Vec, - sunset_burn: u64, - current_burn_height: u64, -) -> BlockstackOperationType { - let (parent_block_ptr, parent_vtxindex) = (parent_burnchain_height, parent_winning_vtx); - let burn_parent_modulus = (current_burn_height % BURN_BLOCK_MINED_AT_MODULUS) as u8; - - BlockstackOperationType::LeaderBlockCommit(LeaderBlockCommitOp { - sunset_burn, - block_header_hash, - burn_fee, - input: (Txid([0; 32]), 0), - apparent_sender: sender, - 
key_block_ptr: key.block_height as u32, - key_vtxindex: key.op_vtxindex as u16, - memo: vec![STACKS_EPOCH_2_05_MARKER], - new_seed: vrf_seed, - parent_block_ptr, - parent_vtxindex, - vtxindex: 0, - txid: Txid([0u8; 32]), - block_height: 0, - burn_header_hash: BurnchainHeaderHash::zero(), - burn_parent_modulus, - commit_outs, - }) +/// State representing the microblock miner. +struct MicroblockMinerThread { + /// handle to global state + globals: Globals, + /// handle to chainstate DB (optional so we can take/replace it) + chainstate: Option, + /// handle to sortition DB (optional so we can take/replace it) + sortdb: Option, + /// handle to mempool DB (optional so we can take/replace it) + mempool: Option, + /// Handle to the node's event dispatcher + event_dispatcher: EventDispatcher, + /// Parent Stacks block's sortition's consensus hash + parent_consensus_hash: ConsensusHash, + /// Parent Stacks block's hash + parent_block_hash: BlockHeaderHash, + /// Microblock signing key + miner_key: Secp256k1PrivateKey, + /// How often to make microblocks, in milliseconds + frequency: u64, + /// Epoch timestamp, in milliseconds, when the last microblock was produced + last_mined: u128, + /// How many microblocks produced so far + quantity: u64, + /// Block budget consumed so far by this tenure (initialized to the cost of the Stacks block + /// itself; microblocks fill up the remaining budget) + cost_so_far: ExecutionCost, + /// Block builder settings for the microblock miner. + settings: BlockBuilderSettings, } -/// Mine and broadcast a single microblock, unconditionally. 
-fn mine_one_microblock( - microblock_state: &mut MicroblockMinerState, - sortdb: &SortitionDB, - chainstate: &mut StacksChainState, - mempool: &mut MemPoolDB, - event_dispatcher: &EventDispatcher, -) -> Result { - debug!( - "Try to mine one microblock off of {}/{} (total: {})", - µblock_state.parent_consensus_hash, - µblock_state.parent_block_hash, - chainstate - .unconfirmed_state - .as_ref() - .map(|us| us.num_microblocks()) - .unwrap_or(0) - ); - - let burn_height = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - µblock_state.parent_consensus_hash, - ) - .map_err(|e| { - error!("Failed to find block snapshot for mined block: {}", e); - e - })? - .ok_or_else(|| { - error!("Failed to find block snapshot for mined block"); - ChainstateError::NoSuchBlockError - })? - .block_height; - - let ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), burn_height).map_err(|e| { - error!("Failed to get AST rules for microblock: {}", e); - e - })?; - - let mint_result = { - let ic = sortdb.index_conn(); - let mut microblock_miner = match StacksMicroblockBuilder::resume_unconfirmed( - chainstate, - &ic, - µblock_state.cost_so_far, - microblock_state.settings.clone(), - ) { - Ok(x) => x, - Err(e) => { - let msg = format!( - "Failed to create a microblock miner at chaintip {}/{}: {:?}", - µblock_state.parent_consensus_hash, - µblock_state.parent_block_hash, - &e - ); - error!("{}", msg); - return Err(e); +impl MicroblockMinerThread { + /// Instantiate the miner thread state from the relayer thread. + /// May fail if: + /// * we didn't win the last sortition + /// * we couldn't open or read the DBs for some reason + /// * we couldn't find the anchored block (i.e. 
it's not processed yet) + pub fn from_relayer_thread(relayer_thread: &RelayerThread) -> Option { + let globals = relayer_thread.globals.clone(); + let config = relayer_thread.config.clone(); + let miner_tip = match relayer_thread.miner_tip.clone() { + Some(tip) => tip, + None => { + debug!("Relayer: cannot instantiate microblock miner: did not win Stacks tip sortition"); + return None; } }; - let t1 = get_epoch_time_ms(); - - let mblock = microblock_miner.mine_next_microblock( - mempool, - µblock_state.miner_key, - event_dispatcher, - )?; - let new_cost_so_far = microblock_miner.get_cost_so_far().expect("BUG: cannot read cost so far from miner -- indicates that the underlying Clarity Tx is somehow in use still."); - let t2 = get_epoch_time_ms(); - - info!( - "Mined microblock {} ({}) with {} transactions in {}ms", - mblock.block_hash(), - mblock.header.sequence, - mblock.txs.len(), - t2.saturating_sub(t1) - ); - - Ok((mblock, new_cost_so_far)) - }; - - let (mined_microblock, new_cost) = match mint_result { - Ok(x) => x, - Err(e) => { - warn!("Failed to mine microblock: {}", e); - return Err(e); - } - }; - - // failsafe - if !Relayer::static_check_problematic_relayed_microblock( - chainstate.mainnet, - &mined_microblock, - ASTRules::PrecheckSize, - ) { - // nope! 
- warn!( - "Our mined microblock {} was problematic", - &mined_microblock.block_hash() - ); - - #[cfg(any(test, feature = "testing"))] - { - if let Ok(path) = std::env::var("STACKS_BAD_BLOCKS_DIR") { - // record this microblock somewhere - if !fs::metadata(&path).is_ok() { - fs::create_dir_all(&path) - .expect(&format!("FATAL: could not create '{}'", &path)); - } - - let mut path = Path::new(&path); - let path = path.join(Path::new(&format!("{}", &mined_microblock.block_hash()))); - let mut file = fs::File::create(&path) - .expect(&format!("FATAL: could not create '{:?}'", &path)); - - let mblock_bits = mined_microblock.serialize_to_vec(); - let mblock_bits_hex = to_hex(&mblock_bits); + let stacks_chainstate_path = config.get_chainstate_path_str(); + let burn_db_path = config.get_burn_db_file_path(); + let cost_estimator = config + .make_cost_estimator() + .unwrap_or_else(|| Box::new(UnitEstimator)); + let metric = config + .make_cost_metric() + .unwrap_or_else(|| Box::new(UnitMetric)); - let mblock_json = format!( - r#"{{"microblock":"{}","parent_consensus":"{}","parent_block":"{}"}}"#, - &mblock_bits_hex, - µblock_state.parent_consensus_hash, - µblock_state.parent_block_hash - ); - file.write_all(&mblock_json.as_bytes()).expect(&format!( - "FATAL: failed to write microblock bits to '{:?}'", - &path - )); - info!( - "Fault injection: bad microblock {} saved to {}", - &mined_microblock.block_hash(), - &path.to_str().unwrap() + // NOTE: read-write access is needed in order to be able to query the recipient set. 
+ // This is an artifact of the way the MARF is built (see #1449) + let sortdb = SortitionDB::open(&burn_db_path, true) + .map_err(|e| { + error!( + "Relayer: Could not open sortdb '{}' ({:?}); skipping tenure", + &burn_db_path, &e ); - } - } - if !Relayer::process_mined_problematic_blocks(ast_rules, ASTRules::PrecheckSize) { - // don't process it - warn!( - "Will NOT process our problematic mined microblock {}", - &mined_microblock.block_hash() + e + }) + .ok()?; + + let (mut chainstate, _) = StacksChainState::open( + config.is_mainnet(), + config.burnchain.chain_id, + &stacks_chainstate_path, + Some(config.node.get_marf_opts()), + ) + .map_err(|e| { + error!( + "Relayer: Could not open chainstate '{}' ({:?}); skipping microblock tenure", + &stacks_chainstate_path, &e ); - return Err(ChainstateError::NoTransactionsToMine); - } else { - warn!( - "Will process our problematic mined microblock {}", - &mined_microblock.block_hash() - ) - } - } + e + }) + .ok()?; - // preprocess the microblock locally - chainstate.preprocess_streamed_microblock( - µblock_state.parent_consensus_hash, - µblock_state.parent_block_hash, - &mined_microblock, - )?; + let mempool = MemPoolDB::open( + config.is_mainnet(), + config.burnchain.chain_id, + &stacks_chainstate_path, + cost_estimator, + metric, + ) + .expect("Database failure opening mempool"); - // update unconfirmed state cost - microblock_state.cost_so_far = new_cost; - microblock_state.quantity += 1; - return Ok(mined_microblock); -} + let MinerTip { + consensus_hash: ch, + block_hash: bhh, + microblock_privkey: miner_key, + .. 
+ } = miner_tip; -fn try_mine_microblock( - config: &Config, - microblock_miner_state: &mut Option, - chainstate: &mut StacksChainState, - sortdb: &SortitionDB, - mem_pool: &mut MemPoolDB, - winning_tip: (ConsensusHash, BlockHeaderHash, Secp256k1PrivateKey), - event_dispatcher: &EventDispatcher, -) -> Result, NetError> { - let ch = winning_tip.0; - let bhh = winning_tip.1; - let microblock_privkey = winning_tip.2; - - let mut next_microblock = None; - if microblock_miner_state.is_none() { debug!( - "Instantiate microblock mining state off of {}/{}", + "Relayer: Instantiate microblock mining state off of {}/{}", &ch, &bhh ); + // we won a block! proceed to build a microblock tail if we've stored it match StacksChainState::get_anchored_block_header_info(chainstate.db(), &ch, &bhh) { Ok(Some(_)) => { let parent_index_hash = StacksBlockHeader::make_index_block_hash(&ch, &bhh); - let cost_so_far = StacksChainState::get_stacks_block_anchored_cost( - chainstate.db(), - &parent_index_hash, - )? - .ok_or(NetError::NotFoundError)?; - microblock_miner_state.replace(MicroblockMinerState { + let cost_so_far = if relayer_thread.microblock_stream_cost == ExecutionCost::zero() + { + // unknown cost, or this is idempotent. + StacksChainState::get_stacks_block_anchored_cost( + chainstate.db(), + &parent_index_hash, + ) + .expect("FATAL: failed to get anchored block cost") + .expect("FATAL: no anchored block cost stored for processed anchored block") + } else { + relayer_thread.microblock_stream_cost.clone() + }; + + let frequency = config.node.microblock_frequency; + let settings = + config.make_block_builder_settings(0, true, globals.get_miner_status()); + + // port over unconfirmed state to this thread + chainstate.unconfirmed_state = if let Some(unconfirmed_state) = + relayer_thread.chainstate_ref().unconfirmed_state.as_ref() + { + Some(unconfirmed_state.make_readonly_owned().ok()?) 
+ } else { + None + }; + + Some(MicroblockMinerThread { + globals, + chainstate: Some(chainstate), + sortdb: Some(sortdb), + mempool: Some(mempool), + event_dispatcher: relayer_thread.event_dispatcher.clone(), parent_consensus_hash: ch.clone(), parent_block_hash: bhh.clone(), - miner_key: microblock_privkey.clone(), - frequency: config.node.microblock_frequency, + miner_key, + frequency, last_mined: 0, quantity: 0, cost_so_far: cost_so_far, - settings: config.make_block_builder_settings(0, true), - }); + settings, + }) } Ok(None) => { warn!( - "No such anchored block: {}/{}. Cannot mine microblocks", + "Relayer: No such anchored block: {}/{}. Cannot mine microblocks", ch, bhh ); + None } Err(e) => { warn!( - "Failed to get anchored block cost for {}/{}: {:?}", + "Relayer: Failed to get anchored block cost for {}/{}: {:?}", ch, bhh, &e ); + None } } } - if let Some(mut microblock_miner) = microblock_miner_state.take() { - if microblock_miner.parent_consensus_hash == ch && microblock_miner.parent_block_hash == bhh - { - if microblock_miner.last_mined + (microblock_miner.frequency as u128) - < get_epoch_time_ms() - { - // opportunistically try and mine, but only if there are no attachable blocks in - // recent history (i.e. 
in the last 10 minutes) - let num_attachable = StacksChainState::count_attachable_staging_blocks( - chainstate.db(), - 1, - get_epoch_time_secs() - 600, - )?; - if num_attachable == 0 { - match mine_one_microblock( - &mut microblock_miner, - sortdb, - chainstate, - mem_pool, - event_dispatcher, - ) { - Ok(microblock) => { - // will need to relay this - next_microblock = Some(microblock); - } - Err(ChainstateError::NoTransactionsToMine) => { - info!("Will keep polling mempool for transactions to include in a microblock"); - } - Err(e) => { - warn!("Failed to mine one microblock: {:?}", &e); - } - } - } else { - debug!("Will not mine microblocks yet -- have {} attachable blocks that arrived in the last 10 minutes", num_attachable); - } - } - microblock_miner.last_mined = get_epoch_time_ms(); - microblock_miner_state.replace(microblock_miner); - } - // otherwise, we're not the sortition winner, and the microblock miner state can be - // discarded. + /// Do something with the inner chainstate DBs (borrowed mutably). + /// Used to fool the borrow-checker. + /// NOT COMPOSIBLE - WILL PANIC IF CALLED FROM WITHIN ITSELF. + fn with_chainstate(&mut self, func: F) -> R + where + F: FnOnce(&mut Self, &mut SortitionDB, &mut StacksChainState, &mut MemPoolDB) -> R, + { + let mut sortdb = self.sortdb.take().expect("FATAL: already took sortdb"); + let mut chainstate = self + .chainstate + .take() + .expect("FATAL: already took chainstate"); + let mut mempool = self.mempool.take().expect("FATAL: already took mempool"); + + let res = func(self, &mut sortdb, &mut chainstate, &mut mempool); + + self.sortdb = Some(sortdb); + self.chainstate = Some(chainstate); + self.mempool = Some(mempool); + + res } - Ok(next_microblock) -} + /// Unconditionally mine one microblock. + /// Can fail if the miner thread gets cancelled (most likely cause), or if there's some kind of + /// DB error. 
+ fn inner_mine_one_microblock( + &mut self, + sortdb: &SortitionDB, + chainstate: &mut StacksChainState, + mempool: &mut MemPoolDB, + ) -> Result { + debug!( + "Try to mine one microblock off of {}/{} (total: {})", + &self.parent_consensus_hash, + &self.parent_block_hash, + chainstate + .unconfirmed_state + .as_ref() + .map(|us| us.num_microblocks()) + .unwrap_or(0) + ); -fn run_microblock_tenure( - config: &Config, - microblock_miner_state: &mut Option, - chainstate: &mut StacksChainState, - sortdb: &mut SortitionDB, - mem_pool: &mut MemPoolDB, - relayer: &mut Relayer, - miner_tip: (ConsensusHash, BlockHeaderHash, Secp256k1PrivateKey), - counters: &Counters, - event_dispatcher: &EventDispatcher, -) { - // TODO: this is sensitive to poll latency -- can we call this on a fixed - // schedule, regardless of network activity? - let parent_consensus_hash = &miner_tip.0; - let parent_block_hash = &miner_tip.1; - - debug!( - "Run microblock tenure for {}/{}", - parent_consensus_hash, parent_block_hash - ); - - // Mine microblocks, if we're active - let next_microblock_opt = match try_mine_microblock( - &config, - microblock_miner_state, - chainstate, - sortdb, - mem_pool, - miner_tip.clone(), - event_dispatcher, - ) { - Ok(x) => x, - Err(e) => { - warn!("Failed to mine next microblock: {:?}", &e); - None - } - }; - - // did we mine anything? - if let Some(next_microblock) = next_microblock_opt { - // apply it - let microblock_hash = next_microblock.block_hash(); - - let processed_unconfirmed_state = Relayer::refresh_unconfirmed(chainstate, sortdb); - let num_mblocks = chainstate - .unconfirmed_state - .as_ref() - .map(|ref unconfirmed| unconfirmed.num_microblocks()) - .unwrap_or(0); + let burn_height = + SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &self.parent_consensus_hash) + .map_err(|e| { + error!("Failed to find block snapshot for mined block: {}", e); + e + })? 
+ .ok_or_else(|| { + error!("Failed to find block snapshot for mined block"); + ChainstateError::NoSuchBlockError + })? + .block_height; - info!( - "Mined one microblock: {} seq {} (total processed: {})", - µblock_hash, next_microblock.header.sequence, num_mblocks - ); - counters.set_microblocks_processed(num_mblocks); + let ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), burn_height).map_err(|e| { + error!("Failed to get AST rules for microblock: {}", e); + e + })?; + + let mint_result = { + let ic = sortdb.index_conn(); + let mut microblock_miner = match StacksMicroblockBuilder::resume_unconfirmed( + chainstate, + &ic, + &self.cost_so_far, + self.settings.clone(), + ) { + Ok(x) => x, + Err(e) => { + let msg = format!( + "Failed to create a microblock miner at chaintip {}/{}: {:?}", + &self.parent_consensus_hash, &self.parent_block_hash, &e + ); + error!("{}", msg); + return Err(e); + } + }; - let parent_index_block_hash = - StacksBlockHeader::make_index_block_hash(parent_consensus_hash, parent_block_hash); - event_dispatcher - .process_new_microblocks(parent_index_block_hash, processed_unconfirmed_state); + let t1 = get_epoch_time_ms(); - // send it off - if let Err(e) = - relayer.broadcast_microblock(parent_consensus_hash, parent_block_hash, next_microblock) - { - error!( - "Failure trying to broadcast microblock {}: {}", - microblock_hash, e + let mblock = microblock_miner.mine_next_microblock( + mempool, + &self.miner_key, + &self.event_dispatcher, + )?; + let new_cost_so_far = microblock_miner.get_cost_so_far().expect("BUG: cannot read cost so far from miner -- indicates that the underlying Clarity Tx is somehow in use still."); + let t2 = get_epoch_time_ms(); + + info!( + "Mined microblock {} ({}) with {} transactions in {}ms", + mblock.block_hash(), + mblock.header.sequence, + mblock.txs.len(), + t2.saturating_sub(t1) ); - } - } -} -/// Grant the p2p thread a copy of the unconfirmed microblock transaction list, so it can serve it -/// out via the 
unconfirmed transaction API. -/// Not the prettiest way to do this, but the least disruptive way to do this. -fn send_unconfirmed_txs( - chainstate: &StacksChainState, - unconfirmed_txs: Arc>, -) { - if let Some(ref unconfirmed) = chainstate.unconfirmed_state { - match unconfirmed_txs.lock() { - Ok(mut txs) => { - txs.clear(); - txs.extend(unconfirmed.mined_txs.clone()); - } - Err(e) => { - // can only happen due to a thread panic in the relayer - error!("FATAL: unconfirmed tx arc mutex is poisoned: {:?}", &e); - panic!(); - } + Ok((mblock, new_cost_so_far)) }; - } -} -/// Have the p2p thread receive unconfirmed txs -fn recv_unconfirmed_txs( - chainstate: &mut StacksChainState, - unconfirmed_txs: Arc>, -) { - if let Some(ref mut unconfirmed) = chainstate.unconfirmed_state { - match unconfirmed_txs.lock() { - Ok(txs) => { - unconfirmed.mined_txs.clear(); - unconfirmed.mined_txs.extend(txs.clone()); - } + let (mined_microblock, new_cost) = match mint_result { + Ok(x) => x, Err(e) => { - // can only happen due to a thread panic in the relayer - error!("FATAL: unconfirmed arc mutex is poisoned: {:?}", &e); - panic!(); + warn!("Failed to mine microblock: {}", e); + return Err(e); } }; - } -} -fn spawn_peer( - runloop: &RunLoop, - mut this: PeerNetwork, - p2p_sock: &SocketAddr, - rpc_sock: &SocketAddr, - poll_timeout: u64, - relay_channel: SyncSender, - attachments_rx: Receiver>, - unconfirmed_txs: Arc>, -) -> Result, NetError> { - let config = runloop.config().clone(); - let mut sync_comms = runloop.get_pox_sync_comms(); - let event_dispatcher = runloop.get_event_dispatcher(); - let should_keep_running = runloop.get_termination_switch(); - - let is_mainnet = config.is_mainnet(); - let burn_db_path = config.get_burn_db_file_path(); - let stacks_chainstate_path = config.get_chainstate_path_str(); - let exit_at_block_height = config.burnchain.process_exit_at_block_height; - - this.bind(p2p_sock, rpc_sock).unwrap(); - let (mut dns_resolver, mut dns_client) = 
DNSResolver::new(10); - let sortdb = SortitionDB::open(&burn_db_path, false).map_err(NetError::DBError)?; - - let (mut chainstate, _) = StacksChainState::open( - is_mainnet, - config.burnchain.chain_id, - &stacks_chainstate_path, - Some(config.node.get_marf_opts()), - ) - .map_err(|e| NetError::ChainstateError(e.to_string()))?; - - // buffer up blocks to store without stalling the p2p thread - let mut results_with_data = VecDeque::new(); - - let server_thread = thread::Builder::new() - .name("p2p".to_string()) - .spawn(move || { - // create estimators, metric instances for RPC handler - let cost_estimator = config - .make_cost_estimator() - .unwrap_or_else(|| Box::new(UnitEstimator)); - let metric = config - .make_cost_metric() - .unwrap_or_else(|| Box::new(UnitMetric)); - let fee_estimator = config.make_fee_estimator(); - - let mut mem_pool = MemPoolDB::open( - is_mainnet, - config.burnchain.chain_id, - &stacks_chainstate_path, - cost_estimator, - metric, - ) - .expect("Database failure opening mempool"); + // failsafe + if !Relayer::static_check_problematic_relayed_microblock( + chainstate.mainnet, + &mined_microblock, + ASTRules::PrecheckSize, + ) { + // nope! 
+ warn!( + "Our mined microblock {} was problematic", + &mined_microblock.block_hash() + ); - let cost_estimator = config - .make_cost_estimator() - .unwrap_or_else(|| Box::new(UnitEstimator)); - let metric = config - .make_cost_metric() - .unwrap_or_else(|| Box::new(UnitMetric)); + #[cfg(any(test, feature = "testing"))] + { + use std::fs; + use std::io::Write; + use std::path::Path; + if let Ok(path) = std::env::var("STACKS_BAD_BLOCKS_DIR") { + // record this microblock somewhere + if !fs::metadata(&path).is_ok() { + fs::create_dir_all(&path) + .expect(&format!("FATAL: could not create '{}'", &path)); + } - let handler_args = RPCHandlerArgs { - exit_at_block_height: exit_at_block_height.as_ref(), - genesis_chainstate_hash: Sha256Sum::from_hex(stx_genesis::GENESIS_CHAINSTATE_HASH) - .unwrap(), - event_observer: Some(&event_dispatcher), - cost_estimator: Some(cost_estimator.as_ref()), - cost_metric: Some(metric.as_ref()), - fee_estimator: fee_estimator.as_ref().map(|x| x.as_ref()), - ..RPCHandlerArgs::default() - }; + let path = Path::new(&path); + let path = path.join(Path::new(&format!("{}", &mined_microblock.block_hash()))); + let mut file = fs::File::create(&path) + .expect(&format!("FATAL: could not create '{:?}'", &path)); - let mut num_p2p_state_machine_passes = 0; - let mut num_inv_sync_passes = 0; - let mut num_download_passes = 0; - let mut mblock_deadline = 0; - - while should_keep_running.load(Ordering::SeqCst) { - // initial block download? 
- let ibd = sync_comms.get_ibd(); - let download_backpressure = results_with_data.len() > 0; - let poll_ms = if !download_backpressure && this.has_more_downloads() { - // keep getting those blocks -- drive the downloader state-machine - debug!( - "P2P: backpressure: {}, more downloads: {}", - download_backpressure, - this.has_more_downloads() - ); - 1 - } else { - cmp::min(poll_timeout, config.node.microblock_frequency) - }; + let mblock_bits = mined_microblock.serialize_to_vec(); + let mblock_bits_hex = to_hex(&mblock_bits); - let mut expected_attachments = match attachments_rx.try_recv() { - Ok(expected_attachments) => { - debug!("Atlas: received attachments: {:?}", &expected_attachments); - expected_attachments - } - _ => { - debug!("Atlas: attachment channel is empty"); - HashSet::new() - } - }; + let mblock_json = format!( + r#"{{"microblock":"{}","parent_consensus":"{}","parent_block":"{}"}}"#, + &mblock_bits_hex, &self.parent_consensus_hash, &self.parent_block_hash + ); + file.write_all(&mblock_json.as_bytes()).expect(&format!( + "FATAL: failed to write microblock bits to '{:?}'", + &path + )); + info!( + "Fault injection: bad microblock {} saved to {}", + &mined_microblock.block_hash(), + &path.to_str().unwrap() + ); + } + } + if !Relayer::process_mined_problematic_blocks(ast_rules, ASTRules::PrecheckSize) { + // don't process it + warn!( + "Will NOT process our problematic mined microblock {}", + &mined_microblock.block_hash() + ); + return Err(ChainstateError::NoTransactionsToMine); + } else { + warn!( + "Will process our problematic mined microblock {}", + &mined_microblock.block_hash() + ) + } + } - let _ = Relayer::setup_unconfirmed_state_readonly(&mut chainstate, &sortdb); - recv_unconfirmed_txs(&mut chainstate, unconfirmed_txs.clone()); + // cancelled? 
+ let is_miner_blocked = self + .globals + .get_miner_status() + .lock() + .expect("FATAL: mutex poisoned") + .is_blocked(); + if is_miner_blocked { + return Err(ChainstateError::MinerAborted); + } - match this.run( - &sortdb, - &mut chainstate, - &mut mem_pool, - Some(&mut dns_client), - download_backpressure, - ibd, - poll_ms, - &handler_args, - &mut expected_attachments, - ) { - Ok(network_result) => { - if num_p2p_state_machine_passes < network_result.num_state_machine_passes { - // p2p state-machine did a full pass. Notify anyone listening. - sync_comms.notify_p2p_state_pass(); - num_p2p_state_machine_passes = network_result.num_state_machine_passes; - } + // preprocess the microblock locally + chainstate.preprocess_streamed_microblock( + &self.parent_consensus_hash, + &self.parent_block_hash, + &mined_microblock, + )?; - if num_inv_sync_passes < network_result.num_inv_sync_passes { - // inv-sync state-machine did a full pass. Notify anyone listening. - sync_comms.notify_inv_sync_pass(); - num_inv_sync_passes = network_result.num_inv_sync_passes; - } + // update unconfirmed state cost + self.cost_so_far = new_cost; + self.quantity += 1; + return Ok(mined_microblock); + } - if num_download_passes < network_result.num_download_passes { - // download state-machine did a full pass. Notify anyone listening. - sync_comms.notify_download_pass(); - num_download_passes = network_result.num_download_passes; - } + /// Can this microblock miner mine off of this given tip? 
+ pub fn can_mine_on_tip( + &self, + consensus_hash: &ConsensusHash, + block_hash: &BlockHeaderHash, + ) -> bool { + self.parent_consensus_hash == *consensus_hash && self.parent_block_hash == *block_hash + } - if network_result.has_data_to_store() { - results_with_data - .push_back(RelayerDirective::HandleNetResult(network_result)); - } + /// Body of try_mine_microblock() + fn inner_try_mine_microblock( + &mut self, + miner_tip: MinerTip, + sortdb: &SortitionDB, + chainstate: &mut StacksChainState, + mem_pool: &mut MemPoolDB, + ) -> Result, NetError> { + if !self.can_mine_on_tip(&self.parent_consensus_hash, &self.parent_block_hash) { + // not configured to mine on this tip + return Ok(None); + } + if !self.can_mine_on_tip(&miner_tip.consensus_hash, &miner_tip.block_hash) { + // this tip isn't what this miner is meant to mine on + return Ok(None); + } - // only do this on the Ok() path, even if we're mining, because an error in - // network dispatching is likely due to resource exhaustion - if mblock_deadline < get_epoch_time_ms() { - debug!("P2P: schedule microblock tenure"); - results_with_data.push_back(RelayerDirective::RunMicroblockTenure( - this.burnchain_tip.clone(), - get_epoch_time_ms(), - )); - mblock_deadline = - get_epoch_time_ms() + (config.node.microblock_frequency as u128); - } - } - Err(e) => { - // this is only reachable if the network is not instantiated correctly -- - // i.e. 
you didn't connect it - panic!("P2P: Failed to process network dispatch: {:?}", &e); - } - }; + if self.last_mined + (self.frequency as u128) >= get_epoch_time_ms() { + // too soon to mine + return Ok(None); + } - while let Some(next_result) = results_with_data.pop_front() { - // have blocks, microblocks, and/or transactions (don't care about anything else), - // or a directive to mine microblocks - if let Err(e) = relay_channel.try_send(next_result) { - debug!( - "P2P: {:?}: download backpressure detected", - &this.local_peer - ); - match e { - TrySendError::Full(directive) => { - if let RelayerDirective::RunMicroblockTenure(..) = directive { - // can drop this - } else if let RelayerDirective::RunTenure(..) = directive { - // can drop this - } else { - // don't lose this data -- just try it again - results_with_data.push_front(directive); - } - break; - } - TrySendError::Disconnected(_) => { - info!("P2P: Relayer hang up with p2p channel"); - should_keep_running.store(false, Ordering::SeqCst); - break; - } - } - } else { - debug!("P2P: Dispatched result to Relayer!"); - } + let mut next_microblock_and_runtime = None; + + // opportunistically try and mine, but only if there are no attachable blocks in + // recent history (i.e. 
in the last 10 minutes) + let num_attachable = StacksChainState::count_attachable_staging_blocks( + chainstate.db(), + 1, + get_epoch_time_secs() - 600, + )?; + if num_attachable == 0 { + match self.inner_mine_one_microblock(sortdb, chainstate, mem_pool) { + Ok(microblock) => { + // will need to relay this + next_microblock_and_runtime = Some((microblock, self.cost_so_far.clone())); + } + Err(ChainstateError::NoTransactionsToMine) => { + info!("Will keep polling mempool for transactions to include in a microblock"); + } + Err(e) => { + warn!("Failed to mine one microblock: {:?}", &e); } } + } else { + debug!("Will not mine microblocks yet -- have {} attachable blocks that arrived in the last 10 minutes", num_attachable); + } - while let Err(TrySendError::Full(_)) = relay_channel.try_send(RelayerDirective::Exit) { - warn!("Failed to direct relayer thread to exit, sleeping and trying again"); - thread::sleep(Duration::from_secs(5)); - } - info!("P2P thread exit!"); - }) - .unwrap(); + self.last_mined = get_epoch_time_ms(); - let _jh = thread::Builder::new() - .name("dns-resolver".to_string()) - .spawn(move || { - dns_resolver.thread_main(); - }) - .unwrap(); + Ok(next_microblock_and_runtime) + } - Ok(server_thread) + /// Try to mine one microblock, given the current chain tip and access to the chain state DBs. + /// If we succeed, return the microblock and log the tx events to the given event dispatcher. 
+ /// May return None if any of the following are true: + /// * `miner_tip` does not match this miner's miner tip + /// * it's been too soon (less than microblock_frequency milliseconds) since we tried this call + /// * there are simply no transactions to mine + /// * there are still stacks blocks to be processed in the staging db + /// * the miner thread got cancelled + pub fn try_mine_microblock( + &mut self, + cur_tip: MinerTip, + ) -> Result, NetError> { + self.with_chainstate(|mblock_miner, sortdb, chainstate, mempool| { + mblock_miner.inner_try_mine_microblock(cur_tip, sortdb, chainstate, mempool) + }) + } } -fn get_last_sortition(last_sortition: &Arc>>) -> Option { - match last_sortition.lock() { - Ok(sort_opt) => sort_opt.clone(), - Err(_) => { - error!("Sortition mutex poisoned!"); - panic!(); +impl BlockMinerThread { + /// Instantiate the miner thread from its parent RelayerThread + pub fn from_relayer_thread( + rt: &RelayerThread, + registered_key: RegisteredKey, + burn_block: BlockSnapshot, + ) -> BlockMinerThread { + BlockMinerThread { + config: rt.config.clone(), + globals: rt.globals.clone(), + keychain: rt.keychain.clone(), + burnchain: rt.burnchain.clone(), + last_mined_blocks: rt.last_mined_blocks.clone(), + ongoing_commit: rt.bitcoin_controller.get_ongoing_commit(), + registered_key, + burn_block, + event_dispatcher: rt.event_dispatcher.clone(), } } -} -fn set_last_sortition( - last_sortition: &mut Arc>>, - block_snapshot: BlockSnapshot, -) { - match last_sortition.lock() { - Ok(mut sortition_opt) => { - sortition_opt.replace(block_snapshot); - } - Err(_) => { - error!("Sortition mutex poisoned!"); - panic!(); - } - }; -} + /// Create a coinbase transaction. 
+ fn inner_generate_coinbase_tx(&mut self, nonce: u64) -> StacksTransaction { + let is_mainnet = self.config.is_mainnet(); + let chain_id = self.config.burnchain.chain_id; + let mut tx_auth = self.keychain.get_transaction_auth().unwrap(); + tx_auth.set_origin_nonce(nonce); -fn spawn_miner_relayer( - runloop: &RunLoop, - mut relayer: Relayer, - local_peer: LocalPeer, - mut keychain: Keychain, - relay_channel: Receiver, - last_sortition: Arc>>, - coord_comms: CoordinatorChannels, - unconfirmed_txs: Arc>, -) -> Result, NetError> { - let config = runloop.config().clone(); - let event_dispatcher = runloop.get_event_dispatcher(); - let counters = runloop.get_counters(); - let sync_comms = runloop.get_pox_sync_comms(); - let burnchain = runloop.get_burnchain(); - - let is_mainnet = config.is_mainnet(); - let chain_id = config.burnchain.chain_id; - let burn_db_path = config.get_burn_db_file_path(); - let stacks_chainstate_path = config.get_chainstate_path_str(); - - // Note: the chainstate coordinator is *the* block processor, it is responsible for writes to - // the chainstate -- eventually, no other codepaths should be writing to it. - // - // the relayer _should not_ be modifying the sortdb, - // however, it needs a mut reference to create read TXs. 
- // should address via #1449 - let mut sortdb = SortitionDB::open(&burn_db_path, true).map_err(NetError::DBError)?; - - let (mut chainstate, _) = StacksChainState::open( - is_mainnet, - chain_id, - &stacks_chainstate_path, - Some(config.node.get_marf_opts()), - ) - .map_err(|e| NetError::ChainstateError(e.to_string()))?; - - let mut last_mined_blocks: HashMap< - BurnchainHeaderHash, - Vec<(AssembledAnchorBlock, Secp256k1PrivateKey)>, - > = HashMap::new(); - let burn_fee_cap = config.burnchain.burn_fee_cap; - - let mut bitcoin_controller = BitcoinRegtestController::new_dummy(config.clone()); - let mut microblock_miner_state: Option = None; - let mut miner_tip = None; // only set if we won the last sortition - let mut last_microblock_tenure_time = 0; - let mut last_tenure_issue_time = 0; - - let relayer_handle = thread::Builder::new().name("relayer".to_string()).spawn(move || { - let cost_estimator = config.make_cost_estimator() - .unwrap_or_else(|| Box::new(UnitEstimator)); - let metric = config.make_cost_metric() - .unwrap_or_else(|| Box::new(UnitMetric)); + let version = if is_mainnet { + TransactionVersion::Mainnet + } else { + TransactionVersion::Testnet + }; + let mut tx = StacksTransaction::new( + version, + tx_auth, + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32])), + ); + tx.chain_id = chain_id; + tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + let mut tx_signer = StacksTransactionSigner::new(&tx); + self.keychain.sign_as_origin(&mut tx_signer); - let mut mem_pool = MemPoolDB::open(is_mainnet, chain_id, &stacks_chainstate_path, cost_estimator, metric) - .expect("Database failure opening mempool"); - - while let Ok(mut directive) = relay_channel.recv() { - match directive { - RelayerDirective::HandleNetResult(ref mut net_result) => { - debug!("Relayer: Handle network result"); - let net_receipts = relayer - .process_network_result( - &local_peer, - net_result, - &mut sortdb, - &mut chainstate, - &mut mem_pool, - sync_comms.get_ibd(), - 
Some(&coord_comms), - Some(&event_dispatcher), - ) - .expect("BUG: failure processing network results"); + tx_signer.get_tx().unwrap() + } - let mempool_txs_added = net_receipts.mempool_txs_added.len(); - if mempool_txs_added > 0 { - event_dispatcher.process_new_mempool_txs(net_receipts.mempool_txs_added); - } + /// Create a poison microblock transaction. + fn inner_generate_poison_microblock_tx( + &mut self, + nonce: u64, + poison_payload: TransactionPayload, + ) -> StacksTransaction { + let is_mainnet = self.config.is_mainnet(); + let chain_id = self.config.burnchain.chain_id; + let mut tx_auth = self.keychain.get_transaction_auth().unwrap(); + tx_auth.set_origin_nonce(nonce); + + let version = if is_mainnet { + TransactionVersion::Mainnet + } else { + TransactionVersion::Testnet + }; + let mut tx = StacksTransaction::new(version, tx_auth, poison_payload); + tx.chain_id = chain_id; + tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + let mut tx_signer = StacksTransactionSigner::new(&tx); + self.keychain.sign_as_origin(&mut tx_signer); - let num_unconfirmed_microblock_tx_receipts = net_receipts.processed_unconfirmed_state.receipts.len(); - if num_unconfirmed_microblock_tx_receipts > 0 { - if let Some(unconfirmed_state) = chainstate.unconfirmed_state.as_ref() { - let canonical_tip = unconfirmed_state.confirmed_chain_tip.clone(); - event_dispatcher.process_new_microblocks(canonical_tip, net_receipts.processed_unconfirmed_state); - } else { - warn!("Relayer: oops, unconfirmed state is uninitialized but there are microblock events"); - } - } + tx_signer.get_tx().unwrap() + } - // Dispatch retrieved attachments, if any. - if net_result.has_attachments() { - event_dispatcher.process_new_attachments(&net_result.attachments); - } + /// Constructs and returns a LeaderBlockCommitOp out of the provided params. 
+ fn inner_generate_block_commit_op( + &self, + block_header_hash: BlockHeaderHash, + burn_fee: u64, + key: &RegisteredKey, + parent_burnchain_height: u32, + parent_winning_vtx: u16, + vrf_seed: VRFSeed, + commit_outs: Vec, + sunset_burn: u64, + current_burn_height: u64, + ) -> BlockstackOperationType { + let (parent_block_ptr, parent_vtxindex) = (parent_burnchain_height, parent_winning_vtx); + let burn_parent_modulus = (current_burn_height % BURN_BLOCK_MINED_AT_MODULUS) as u8; + let sender = self.keychain.get_burnchain_signer(); + BlockstackOperationType::LeaderBlockCommit(LeaderBlockCommitOp { + sunset_burn, + block_header_hash, + burn_fee, + input: (Txid([0; 32]), 0), + apparent_sender: sender, + key_block_ptr: key.block_height as u32, + key_vtxindex: key.op_vtxindex as u16, + memo: vec![STACKS_EPOCH_2_05_MARKER], + new_seed: vrf_seed, + parent_block_ptr, + parent_vtxindex, + vtxindex: 0, + txid: Txid([0u8; 32]), + block_height: 0, + burn_header_hash: BurnchainHeaderHash::zero(), + burn_parent_modulus, + commit_outs, + }) + } - // synchronize unconfirmed tx index to p2p thread - send_unconfirmed_txs(&chainstate, unconfirmed_txs.clone()); - } - RelayerDirective::ProcessTenure(consensus_hash, burn_hash, block_header_hash) => { - debug!( - "Relayer: Process tenure {}/{} in {}", - &consensus_hash, &block_header_hash, &burn_hash - ); - if let Some(last_mined_blocks_at_burn_hash) = - last_mined_blocks.remove(&burn_hash) - { - for (last_mined_block, microblock_privkey) in - last_mined_blocks_at_burn_hash.into_iter() - { - let AssembledAnchorBlock { - parent_consensus_hash, - anchored_block: mined_block, - my_burn_hash: mined_burn_hash, - attempt: _, - } = last_mined_block; - if mined_block.block_hash() == block_header_hash - && burn_hash == mined_burn_hash - { - // we won! - let reward_block_height = mined_block.header.total_work.work + MINER_REWARD_MATURITY; - info!("Won sortition! 
Mining reward will be received in {} blocks (block #{})", MINER_REWARD_MATURITY, reward_block_height); - debug!("Won sortition!"; - "stacks_header" => %block_header_hash, - "burn_hash" => %mined_burn_hash, - ); + /// Get references to the inner assembled anchor block data we've produced for a given burnchain block height + fn find_inflight_mined_blocks( + burn_height: u64, + last_mined_blocks: &MinedBlocks, + ) -> Vec<&AssembledAnchorBlock> { + let mut ret = vec![]; + for (_, (assembled_block, _)) in last_mined_blocks.iter() { + if assembled_block.my_block_height >= burn_height { + ret.push(assembled_block); + } + } + ret + } - increment_stx_blocks_mined_counter(); - match inner_process_tenure( - &mined_block, - &consensus_hash, - &parent_consensus_hash, - &mut sortdb, - &mut chainstate, - &coord_comms, - ) { - Ok(coordinator_running) => { - if !coordinator_running { - warn!( - "Coordinator stopped, stopping relayer thread..." - ); - return; - } - } - Err(e) => { - warn!( - "Error processing my tenure, bad block produced: {}", - e - ); - warn!( - "Bad block"; - "stacks_header" => %block_header_hash, - "data" => %to_hex(&mined_block.serialize_to_vec()), - ); - continue; - } - }; - - // advertize _and_ push blocks for now - let blocks_available = Relayer::load_blocks_available_data( - &sortdb, - vec![consensus_hash.clone()], - ) - .expect("Failed to obtain block information for a block we mined."); - - let block_data = { - let mut bd = HashMap::new(); - bd.insert(consensus_hash.clone(), mined_block.clone()); - bd - }; - - if let Err(e) = relayer.advertize_blocks(blocks_available, block_data) { - warn!("Failed to advertise new block: {}", e); - } - - let snapshot = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &consensus_hash, - ) - .expect("Failed to obtain snapshot for block") - .expect("Failed to obtain snapshot for block"); - if !snapshot.pox_valid { - warn!( - "Snapshot for {} is no longer valid; discarding {}...", - &consensus_hash, - 
&mined_block.block_hash() - ); - miner_tip = None; - - } else { - let ch = snapshot.consensus_hash.clone(); - let bh = mined_block.block_hash(); - - if let Err(e) = relayer - .broadcast_block(snapshot.consensus_hash, mined_block) - { - warn!("Failed to push new block: {}", e); - } - - // proceed to mine microblocks - debug!( - "Microblock miner tip is now {}/{} ({})", - &consensus_hash, &block_header_hash, StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_header_hash) - ); - miner_tip = Some((ch, bh, microblock_privkey)); - - Relayer::refresh_unconfirmed(&mut chainstate, &mut sortdb); - send_unconfirmed_txs(&chainstate, unconfirmed_txs.clone()); - } - } else { - debug!("Did not win sortition, my blocks [burn_hash= {}, block_hash= {}], their blocks [parent_consenus_hash= {}, burn_hash= {}, block_hash ={}]", - mined_burn_hash, mined_block.block_hash(), parent_consensus_hash, burn_hash, block_header_hash); - - miner_tip = None; - } - } - } + /// Load up the parent block info for mining. + /// If there's no parent because this is the first block, then return the genesis block's info. + /// If we can't find the parent in the DB but we expect one, return None. 
+ fn load_block_parent_info( + &self, + burn_db: &mut SortitionDB, + chain_state: &mut StacksChainState, + ) -> Option { + if let Some(stacks_tip) = chain_state + .get_stacks_chain_tip(burn_db) + .expect("FATAL: could not query chain tip") + { + let miner_address = self + .keychain + .origin_address(self.config.is_mainnet()) + .unwrap(); + match ParentStacksBlockInfo::lookup( + chain_state, + burn_db, + &self.burn_block, + miner_address, + &stacks_tip.consensus_hash, + &stacks_tip.anchored_block_hash, + ) { + Ok(parent_info) => Some(parent_info), + Err(Error::BurnchainTipChanged) => { + self.globals.counters.bump_missed_tenures(); + None } - RelayerDirective::RunTenure(registered_key, last_burn_block, issue_timestamp_ms) => { - if let Some(cur_sortition) = get_last_sortition(&last_sortition) { - if last_burn_block.sortition_id != cur_sortition.sortition_id { - debug!("Drop stale RunTenure for {}: current sortition is for {}", &last_burn_block.burn_header_hash, &cur_sortition.burn_header_hash); - counters.bump_missed_tenures(); - continue; - } - } + Err(..) 
=> None, + } + } else { + debug!("No Stacks chain tip known, will return a genesis block"); + let (network, _) = self.config.burnchain.get_bitcoin_network(); + let burnchain_params = + BurnchainParameters::from_params(&self.config.burnchain.chain, &network) + .expect("Bitcoin network unsupported"); - let burn_header_hash = last_burn_block.burn_header_hash.clone(); - let burn_chain_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) - .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - - let burn_chain_tip = burn_chain_sn - .burn_header_hash - .clone(); - - let mut burn_tenure_snapshot = last_burn_block.clone(); - if burn_chain_tip == burn_header_hash { - // no burnchain change, so only re-run block tenure every so often in order - // to give microblocks a chance to collect - if issue_timestamp_ms < last_tenure_issue_time + (config.node.wait_time_for_microblocks as u128) { - debug!("Relayer: will NOT run tenure since issuance at {} is too fresh (wait until {} + {} = {})", - issue_timestamp_ms / 1000, last_tenure_issue_time / 1000, config.node.wait_time_for_microblocks / 1000, (last_tenure_issue_time + (config.node.wait_time_for_microblocks as u128)) / 1000); - continue; - } - } - else { - // burnchain has changed since this directive was sent, so mine immediately - burn_tenure_snapshot = burn_chain_sn; - if issue_timestamp_ms + (config.node.wait_time_for_microblocks as u128) < get_epoch_time_ms() { - // still waiting for microblocks to arrive - debug!("Relayer: will NOT run tenure since still waiting for microblocks to arrive ({} <= {})", (issue_timestamp_ms + (config.node.wait_time_for_microblocks as u128)) / 1000, get_epoch_time_secs()); - continue; - } - debug!("Relayer: burnchain has advanced from {} to {}", &burn_header_hash, &burn_chain_tip); - } + let chain_tip = ChainTip::genesis( + &burnchain_params.first_block_hash, + burnchain_params.first_block_height.into(), + burnchain_params.first_block_timestamp.into(), + ); - 
debug!( - "Relayer: Run tenure"; - "height" => last_burn_block.block_height, - "burn_header_hash" => %burn_chain_tip, - "last_burn_header_hash" => %burn_header_hash - ); + Some(ParentStacksBlockInfo { + stacks_parent_header: chain_tip.metadata, + parent_consensus_hash: FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), + parent_block_burn_height: 0, + parent_block_total_burn: 0, + parent_winning_vtxindex: 0, + coinbase_nonce: 0, + }) + } + } - let tenure_begin = get_epoch_time_ms(); - fault_injection_long_tenure(); - - let mut last_mined_blocks_vec = last_mined_blocks - .remove(&burn_header_hash) - .unwrap_or_default(); - - let last_mined_block_opt = StacksNode::relayer_run_tenure( - &config, - registered_key, - &mut chainstate, - &mut sortdb, - &burnchain, - burn_tenure_snapshot, - &mut keychain, - &mut mem_pool, - burn_fee_cap, - &mut bitcoin_controller, - &last_mined_blocks_vec.iter().map(|(blk, _)| blk).collect(), - &event_dispatcher, - ); - if let Some((last_mined_block, microblock_privkey)) = last_mined_block_opt { - if last_mined_blocks_vec.len() == 0 { - counters.bump_blocks_processed(); - } - last_mined_blocks_vec.push((last_mined_block, microblock_privkey)); - } - last_mined_blocks.insert(burn_header_hash, last_mined_blocks_vec); + /// Determine which attempt this will be when mining a block, and whether or not an attempt + /// should even be made. + /// Returns Some(attempt) if we should attempt to mine (and what attempt it will be) + /// Returns None if we should not mine. 
+ fn get_mine_attempt( + &self, + chain_state: &StacksChainState, + parent_block_info: &ParentStacksBlockInfo, + ) -> Option { + let parent_consensus_hash = &parent_block_info.parent_consensus_hash; + let stacks_parent_header = &parent_block_info.stacks_parent_header; + let parent_block_burn_height = parent_block_info.parent_block_burn_height; + + let last_mined_blocks = + Self::find_inflight_mined_blocks(self.burn_block.block_height, &self.last_mined_blocks); - last_tenure_issue_time = get_epoch_time_ms(); - debug!("Relayer: RunTenure finished at {} (in {}ms)", last_tenure_issue_time, last_tenure_issue_time.saturating_sub(tenure_begin)); - } - RelayerDirective::RegisterKey(ref last_burn_block) => { - rotate_vrf_and_register( - is_mainnet, - &mut keychain, - last_burn_block, - &mut bitcoin_controller, - ); - counters.bump_blocks_processed(); + // has the tip changed from our previously-mined block for this epoch? + let attempt = if last_mined_blocks.len() <= 1 { + // always mine if we've not mined a block for this epoch yet, or + // if we've mined just one attempt, unconditionally try again (so we + // can use `subsequent_miner_time_ms` in this attempt) + if last_mined_blocks.len() == 1 { + debug!("Have only attempted one block; unconditionally trying again"); + } + last_mined_blocks.len() as u64 + 1 + } else { + let mut best_attempt = 0; + debug!( + "Consider {} in-flight Stacks tip(s)", + &last_mined_blocks.len() + ); + for prev_block in last_mined_blocks.iter() { + debug!( + "Consider in-flight block {} on Stacks tip {}/{} in {} with {} txs", + &prev_block.anchored_block.block_hash(), + &prev_block.parent_consensus_hash, + &prev_block.anchored_block.header.parent_block, + &prev_block.my_burn_hash, + &prev_block.anchored_block.txs.len() + ); + + if prev_block.anchored_block.txs.len() == 1 && prev_block.attempt == 1 { + // Don't let the fact that we've built an empty block during this sortition + // prevent us from trying again. 
+ best_attempt = 1; + continue; } - RelayerDirective::RunMicroblockTenure(burnchain_tip, tenure_issue_ms) => { - if last_microblock_tenure_time > tenure_issue_ms { - // stale request - continue; - } - if let Some(cur_sortition) = get_last_sortition(&last_sortition) { - if burnchain_tip.sortition_id != cur_sortition.sortition_id { - debug!("Drop stale RunMicroblockTenure for {}/{}: current sortition is for {} ({})", &burnchain_tip.consensus_hash, &burnchain_tip.winning_stacks_block_hash, &cur_sortition.consensus_hash, &cur_sortition.burn_header_hash); - continue; - } - } + if prev_block.parent_consensus_hash == *parent_consensus_hash + && prev_block.my_burn_hash == self.burn_block.burn_header_hash + && prev_block.anchored_block.header.parent_block + == stacks_parent_header.anchored_header.block_hash() + { + // the anchored chain tip hasn't changed since we attempted to build a block. + // But, have discovered any new microblocks worthy of being mined? + if let Ok(Some(stream)) = + StacksChainState::load_descendant_staging_microblock_stream( + chain_state.db(), + &StacksBlockHeader::make_index_block_hash( + &prev_block.parent_consensus_hash, + &stacks_parent_header.anchored_header.block_hash(), + ), + 0, + u16::MAX, + ) + { + if (prev_block.anchored_block.header.parent_microblock + == BlockHeaderHash([0u8; 32]) + && stream.len() == 0) + || (prev_block.anchored_block.header.parent_microblock + != BlockHeaderHash([0u8; 32]) + && stream.len() + <= (prev_block.anchored_block.header.parent_microblock_sequence + as usize) + + 1) + { + // the chain tip hasn't changed since we attempted to build a block. Use what we + // already have. 
+ debug!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, and no new microblocks ({} <= {} + 1)", + &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, + prev_block.anchored_block.txs.len(), prev_block.my_burn_hash, parent_block_burn_height, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); - debug!("Relayer: Run microblock tenure"); + return None; + } else { + // there are new microblocks! + // TODO: only consider rebuilding our anchored block if we (a) have + // time, and (b) the new microblocks are worth more than the new BTC + // fee minus the old BTC fee + debug!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, but there are new microblocks ({} > {} + 1)", + &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, + prev_block.anchored_block.txs.len(), prev_block.my_burn_hash, parent_block_burn_height, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); - // unconfirmed state must be consistent with the chain tip, as must the - // microblock mining state. 
- if let Some((ch, bh, mblock_pkey)) = miner_tip.clone() { - if let Some(miner_state) = microblock_miner_state.take() { - if miner_state.parent_consensus_hash == ch || miner_state.parent_block_hash == bh { - // preserve -- chaintip is unchanged - microblock_miner_state = Some(miner_state); - } - else { - debug!("Relayer: reset microblock miner state"); - microblock_miner_state = None; - counters.set_microblocks_processed(0); - } + best_attempt = cmp::max(best_attempt, prev_block.attempt); } + } else { + // no microblock stream to confirm, and the stacks tip hasn't changed + debug!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, and no microblocks present", + &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, + prev_block.anchored_block.txs.len(), prev_block.my_burn_hash, parent_block_burn_height); - run_microblock_tenure( - &config, - &mut microblock_miner_state, - &mut chainstate, - &mut sortdb, - &mut mem_pool, - &mut relayer, - (ch, bh, mblock_pkey), - &counters, - &event_dispatcher, - ); - - // synchronize unconfirmed tx index to p2p thread - send_unconfirmed_txs(&chainstate, unconfirmed_txs.clone()); - last_microblock_tenure_time = get_epoch_time_ms(); + return None; } - else { - debug!("Relayer: reset unconfirmed state to 0 microblocks"); - counters.set_microblocks_processed(0); - microblock_miner_state = None; + } else { + if self.burn_block.burn_header_hash == prev_block.my_burn_hash { + // only try and re-mine if there was no sortition since the last chain tip + debug!("Relayer: Stacks tip has changed to {}/{} since we last tried to mine a block in {} at burn height {}; attempt was {} (for Stacks tip {}/{})", + parent_consensus_hash, stacks_parent_header.anchored_header.block_hash(), prev_block.my_burn_hash, parent_block_burn_height, prev_block.attempt, &prev_block.parent_consensus_hash, 
&prev_block.anchored_block.header.parent_block); + best_attempt = cmp::max(best_attempt, prev_block.attempt); + } else { + debug!("Relayer: Burn tip has changed to {} ({}) since we last tried to mine a block in {}", + &self.burn_block.burn_header_hash, self.burn_block.block_height, &prev_block.my_burn_hash); } } - RelayerDirective::Exit => break } - } - debug!("Relayer exit!"); - }).unwrap(); - - Ok(relayer_handle) -} + best_attempt + 1 + }; + Some(attempt) + } -enum LeaderKeyRegistrationState { - Inactive, - Pending, - Active(RegisteredKey), -} + /// Generate the VRF proof for the block we're going to build. + /// Returns Some(proof) if we could make the proof + /// Return None if we could not make the proof + fn make_vrf_proof(&mut self) -> Option { + // if we're a mock miner, then make sure that the keychain has a keypair for the mocked VRF + // key + if self.config.node.mock_mining { + self.keychain.rotate_vrf_keypair(VRF_MOCK_MINER_KEY); + } -impl StacksNode { - pub fn spawn( - runloop: &RunLoop, - last_burn_block: Option, - coord_comms: CoordinatorChannels, - attachments_rx: Receiver>, - ) -> StacksNode { - let config = runloop.config().clone(); - let miner = runloop.is_miner(); - let burnchain = runloop.get_burnchain(); - let atlas_config = AtlasConfig::default(config.is_mainnet()); - let mut keychain = Keychain::default(config.node.seed.clone()); - - // we can call _open_ here rather than _connect_, since connect is first called in - // make_genesis_block - let mut sortdb = SortitionDB::open(&config.get_burn_db_file_path(), true) - .expect("Error while instantiating sortition db"); + // Generates a proof out of the sortition hash provided in the params. + let vrf_proof = match self.keychain.generate_proof( + &self.registered_key.vrf_public_key, + self.burn_block.sortition_hash.as_bytes(), + ) { + Some(vrfp) => vrfp, + None => { + // Try to recover a key registered in a former session. 
+ // registered_key.block_height gives us a pointer to the height of the block + // holding the key register op, but the VRF was derived using the height of one + // of the parent blocks. + let _ = self + .keychain + .rotate_vrf_keypair(self.registered_key.block_height - 1); + match self.keychain.generate_proof( + &self.registered_key.vrf_public_key, + self.burn_block.sortition_hash.as_bytes(), + ) { + Some(vrfp) => vrfp, + None => { + error!( + "Relayer: Failed to generate proof with {:?}", + &self.registered_key.vrf_public_key + ); + return None; + } + } + } + }; - let epochs = SortitionDB::get_stacks_epochs(sortdb.conn()) - .expect("Error while loading stacks epochs"); + debug!( + "Generated VRF Proof: {} over {} with key {}", + vrf_proof.to_hex(), + &self.burn_block.sortition_hash, + &self.registered_key.vrf_public_key.to_hex() + ); + Some(vrf_proof) + } - let view = { - let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()) - .expect("Failed to get sortition tip"); - SortitionDB::get_burnchain_view(&sortdb.conn(), &burnchain, &sortition_tip).unwrap() + /// Get the microblock private key we'll be using for this tenure, should we win. + /// Return the private key on success + /// return None if we were unable to generate the key. + fn make_microblock_private_key(&mut self, attempt: u64) -> Option { + // Generates a new secret key for signing the trail of microblocks + // of the upcoming tenure. 
+ let microblock_secret_key = if attempt > 1 { + match self.keychain.get_microblock_key() { + Some(k) => k, + None => { + error!( + "Relayer: Failed to obtain microblock key for mining attempt"; + "attempt" => %attempt + ); + return None; + } + } + } else { + // NOTE: this is a no-op if run in a separate thread with a moved copy of the keychain + self.keychain + .rotate_microblock_keypair(self.burn_block.block_height) }; - if let Some(ast_precheck_size_height) = config.burnchain.ast_precheck_size_height { - info!( - "Override burnchain height of {:?} to {}", - ASTRules::PrecheckSize, - ast_precheck_size_height - ); - let mut tx = sortdb - .tx_begin() - .expect("FATAL: failed to begin tx on sortition DB"); - SortitionDB::override_ast_rule_height( - &mut tx, - ASTRules::PrecheckSize, - ast_precheck_size_height, - ) - .expect("FATAL: failed to override AST PrecheckSize rule height"); - tx.commit() - .expect("FATAL: failed to commit sortition DB transaction"); - } + Some(microblock_secret_key) + } - // create a new peerdb - let data_url = UrlString::try_from(format!("{}", &config.node.data_url)).unwrap(); - let initial_neighbors = config.node.bootstrap_node.clone(); - if initial_neighbors.len() > 0 { - info!( - "Will bootstrap from peers {}", - VecDisplay(&initial_neighbors) - ); - } else { - warn!("Without a peer to bootstrap from, the node will start mining a new chain"); - } + /// Load the parent microblock stream and vet it for the absence of forks. + /// If there is a fork, then mine and relay a poison microblock transaction. + /// Update stacks_parent_header's microblock tail to point to the end of the stream we load. + /// Return the microblocks we'll confirm, if there are any. 
+ fn load_and_vet_parent_microblocks( + &mut self, + chain_state: &mut StacksChainState, + mem_pool: &mut MemPoolDB, + parent_block_info: &mut ParentStacksBlockInfo, + ) -> Option> { + let parent_consensus_hash = &parent_block_info.parent_consensus_hash; + let stacks_parent_header = &mut parent_block_info.stacks_parent_header; - let p2p_sock: SocketAddr = config.node.p2p_bind.parse().expect(&format!( - "Failed to parse socket: {}", - &config.node.p2p_bind - )); - let rpc_sock = config.node.rpc_bind.parse().expect(&format!( - "Failed to parse socket: {}", - &config.node.rpc_bind - )); - let p2p_addr: SocketAddr = config.node.p2p_address.parse().expect(&format!( - "Failed to parse socket: {}", - &config.node.p2p_address - )); - let node_privkey = { - let mut re_hashed_seed = config.node.local_peer_seed.clone(); - let my_private_key = loop { - match Secp256k1PrivateKey::from_slice(&re_hashed_seed[..]) { - Ok(sk) => break sk, - Err(_) => { - re_hashed_seed = Sha256Sum::from_data(&re_hashed_seed[..]) - .as_bytes() - .to_vec() - } + let microblock_info_opt = + match StacksChainState::load_descendant_staging_microblock_stream_with_poison( + chain_state.db(), + &StacksBlockHeader::make_index_block_hash( + parent_consensus_hash, + &stacks_parent_header.anchored_header.block_hash(), + ), + 0, + u16::MAX, + ) { + Ok(x) => { + let num_mblocks = x.as_ref().map(|(mblocks, ..)| mblocks.len()).unwrap_or(0); + debug!( + "Loaded {} microblocks descending from {}/{} (data: {})", + num_mblocks, + parent_consensus_hash, + &stacks_parent_header.anchored_header.block_hash(), + x.is_some() + ); + x + } + Err(e) => { + warn!( + "Failed to load descendant microblock stream from {}/{}: {:?}", + parent_consensus_hash, + &stacks_parent_header.anchored_header.block_hash(), + &e + ); + None } }; - my_private_key - }; - let mut peerdb = PeerDB::connect( - &config.get_peer_db_file_path(), - true, - config.burnchain.chain_id, - burnchain.network_id, - Some(node_privkey), - 
config.connection_options.private_key_lifetime.clone(), - PeerAddress::from_socketaddr(&p2p_addr), - p2p_sock.port(), - data_url, - &vec![], - Some(&initial_neighbors), - ) - .map_err(|e| { - eprintln!( - "Failed to open {}: {:?}", - &config.get_peer_db_file_path(), - &e - ); - panic!(); - }) - .unwrap(); + if let Some((ref microblocks, ref poison_opt)) = µblock_info_opt { + if let Some(ref tail) = microblocks.last() { + debug!( + "Confirm microblock stream tailed at {} (seq {})", + &tail.block_hash(), + tail.header.sequence + ); + } - { - // bootstrap nodes *always* allowed - let mut tx = peerdb.tx_begin().unwrap(); - for initial_neighbor in initial_neighbors.iter() { - // update peer in case public key changed - PeerDB::update_peer(&mut tx, &initial_neighbor).unwrap(); - PeerDB::set_allow_peer( - &mut tx, - initial_neighbor.addr.network_id, - &initial_neighbor.addr.addrbytes, - initial_neighbor.addr.port, - -1, - ) - .unwrap(); + // try and confirm as many microblocks as we can (but note that the stream itself may + // be too long; we'll try again if that happens). + stacks_parent_header.microblock_tail = + microblocks.last().clone().map(|blk| blk.header.clone()); + + if let Some(poison_payload) = poison_opt { + debug!("Detected poisoned microblock fork: {:?}", &poison_payload); + + // submit it multiple times with different nonces, so it'll have a good chance of + // eventually getting picked up (even if the miner sends other transactions from + // the same address) + for i in 0..10 { + let poison_microblock_tx = self.inner_generate_poison_microblock_tx( + parent_block_info.coinbase_nonce + 1 + i, + poison_payload.clone(), + ); + + // submit the poison payload, privately, so we'll mine it when building the + // anchored block. 
+ if let Err(e) = mem_pool.miner_submit( + chain_state, + &parent_consensus_hash, + &stacks_parent_header.anchored_header.block_hash(), + &poison_microblock_tx, + Some(&self.event_dispatcher), + 1_000_000_000.0, // prioritize this for inclusion + ) { + warn!( + "Detected but failed to mine poison-microblock transaction: {:?}", + &e + ); + } else { + debug!( + "Submit poison-microblock transaction {:?}", + &poison_microblock_tx + ); + } + } } - tx.commit().unwrap(); } - if !config.node.deny_nodes.is_empty() { - warn!("Will ignore nodes {:?}", &config.node.deny_nodes); - } + microblock_info_opt.map(|(stream, _)| stream) + } - { - let mut tx = peerdb.tx_begin().unwrap(); - for denied in config.node.deny_nodes.iter() { - PeerDB::set_deny_peer( - &mut tx, - denied.addr.network_id, - &denied.addr.addrbytes, - denied.addr.port, - get_epoch_time_secs() + 24 * 365 * 3600, - ) - .unwrap(); + /// Produce the block-commit for this anchored block, if we can. + /// Returns the op on success + /// Returns None if we fail somehow. + pub fn make_block_commit( + &self, + burn_db: &mut SortitionDB, + chain_state: &mut StacksChainState, + block_hash: BlockHeaderHash, + parent_block_burn_height: u64, + parent_winning_vtxindex: u16, + vrf_proof: &VRFProof, + ) -> Option { + // let's figure out the recipient set! 
+ let recipients = match get_next_recipients( + &self.burn_block, + chain_state, + burn_db, + &self.burnchain, + &OnChainRewardSetProvider(), + ) { + Ok(x) => x, + Err(e) => { + error!("Relayer: Failure fetching recipient set: {:?}", e); + return None; } - tx.commit().unwrap(); - } + }; - // update services to indicate we can support mempool sync + let burn_fee_cap = self.config.burnchain.burn_fee_cap; + let sunset_burn = self + .burnchain + .expected_sunset_burn(self.burn_block.block_height + 1, burn_fee_cap); + let rest_commit = burn_fee_cap - sunset_burn; + + let commit_outs = if self.burn_block.block_height + 1 + < self.burnchain.pox_constants.sunset_end + && !self + .burnchain + .is_in_prepare_phase(self.burn_block.block_height + 1) { - let mut tx = peerdb.tx_begin().unwrap(); - PeerDB::set_local_services( - &mut tx, - (ServiceFlags::RPC as u16) | (ServiceFlags::RELAY as u16), - ) - .unwrap(); - tx.commit().unwrap(); - } + RewardSetInfo::into_commit_outs(recipients, self.config.is_mainnet()) + } else { + vec![StacksAddress::burn_address(self.config.is_mainnet())] + }; - let atlasdb = - AtlasDB::connect(atlas_config.clone(), &config.get_atlas_db_file_path(), true).unwrap(); + // let's commit, but target the current burnchain tip with our modulus + let op = self.inner_generate_block_commit_op( + block_hash, + rest_commit, + &self.registered_key, + parent_block_burn_height + .try_into() + .expect("Could not convert parent block height into u32"), + parent_winning_vtxindex, + VRFSeed::from_proof(vrf_proof), + commit_outs, + sunset_burn, + self.burn_block.block_height, + ); + Some(op) + } - let local_peer = match PeerDB::get_local_peer(peerdb.conn()) { - Ok(local_peer) => local_peer, - _ => panic!("Unable to retrieve local peer"), - }; + /// Try to mine a Stacks block by assembling one from mempool transactions and sending a + /// burnchain block-commit transaction. 
If we succeed, then return the assembled block data as + /// well as the microblock private key to use to produce microblocks. + /// Return None if we couldn't build a block for whatever reason. + pub fn run_tenure(&mut self) -> Option { + fault_injection_long_tenure(); - // force early mempool instantiation - let cost_estimator = config + let burn_db_path = self.config.get_burn_db_file_path(); + let stacks_chainstate_path = self.config.get_chainstate_path_str(); + + let cost_estimator = self + .config .make_cost_estimator() .unwrap_or_else(|| Box::new(UnitEstimator)); - let metric = config + let metric = self + .config .make_cost_metric() .unwrap_or_else(|| Box::new(UnitMetric)); - let _ = MemPoolDB::open( - config.is_mainnet(), - config.burnchain.chain_id, - &config.get_chainstate_path_str(), - cost_estimator, + let mut bitcoin_controller = BitcoinRegtestController::new_ongoing_dummy( + self.config.clone(), + self.ongoing_commit.clone(), + ); + + // NOTE: read-write access is needed in order to be able to query the recipient set. 
+ // This is an artifact of the way the MARF is built (see #1449) + let mut burn_db = + SortitionDB::open(&burn_db_path, true).expect("FATAL: could not open sortition DB"); + + let (mut chain_state, _) = StacksChainState::open( + self.config.is_mainnet(), + self.config.burnchain.chain_id, + &stacks_chainstate_path, + Some(self.config.node.get_marf_opts()), + ) + .expect("FATAL: could not open chainstate DB"); + + let mut mem_pool = MemPoolDB::open( + self.config.is_mainnet(), + self.config.burnchain.chain_id, + &stacks_chainstate_path, + cost_estimator, metric, ) - .expect("BUG: failed to instantiate mempool"); + .expect("Database failure opening mempool"); - // now we're ready to instantiate a p2p network object, the relayer, and the event dispatcher - let mut p2p_net = PeerNetwork::new( - peerdb, - atlasdb, - local_peer.clone(), - config.burnchain.peer_version, - burnchain.clone(), - view, - config.connection_options.clone(), - epochs, + let tenure_begin = get_epoch_time_ms(); + + let mut parent_block_info = self.load_block_parent_info(&mut burn_db, &mut chain_state)?; + let attempt = self.get_mine_attempt(&chain_state, &parent_block_info)?; + let vrf_proof = self.make_vrf_proof()?; + + // Generates a new secret key for signing the trail of microblocks + // of the upcoming tenure. + let microblock_private_key = self.make_microblock_private_key(attempt)?; + let mblock_pubkey_hash = + Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_private_key)); + + // create our coinbase + let coinbase_tx = self.inner_generate_coinbase_tx(parent_block_info.coinbase_nonce); + + // find the longest microblock tail we can build off of. 
+ // target it to the microblock tail in parent_block_info + let microblocks_opt = self.load_and_vet_parent_microblocks( + &mut chain_state, + &mut mem_pool, + &mut parent_block_info, ); - // setup the relayer channel - let (relay_send, relay_recv) = sync_channel(RELAYER_MAX_BUFFER); + // build the block itself + let (anchored_block, _, _) = match StacksBlockBuilder::build_anchored_block( + &chain_state, + &burn_db.index_conn(), + &mut mem_pool, + &parent_block_info.stacks_parent_header, + parent_block_info.parent_block_total_burn, + vrf_proof.clone(), + mblock_pubkey_hash, + &coinbase_tx, + self.config.make_block_builder_settings( + attempt, + false, + self.globals.get_miner_status(), + ), + Some(&self.event_dispatcher), + ) { + Ok(block) => block, + Err(ChainstateError::InvalidStacksMicroblock(msg, mblock_header_hash)) => { + // part of the parent microblock stream is invalid, so try again + info!("Parent microblock stream is invalid; trying again without the offender {} (msg: {})", &mblock_header_hash, &msg); - let last_sortition = Arc::new(Mutex::new(last_burn_block)); + // truncate the stream + parent_block_info.stacks_parent_header.microblock_tail = match microblocks_opt { + Some(microblocks) => { + let mut tail = None; + for mblock in microblocks.into_iter() { + if mblock.block_hash() == mblock_header_hash { + break; + } + tail = Some(mblock); + } + if let Some(ref t) = &tail { + debug!( + "New parent microblock stream tail is {} (seq {})", + t.block_hash(), + t.header.sequence + ); + } + tail.map(|t| t.header) + } + None => None, + }; - let burnchain_signer = keychain.get_burnchain_signer(); - match monitoring::set_burnchain_signer(burnchain_signer.clone()) { + // try again + match StacksBlockBuilder::build_anchored_block( + &chain_state, + &burn_db.index_conn(), + &mut mem_pool, + &parent_block_info.stacks_parent_header, + parent_block_info.parent_block_total_burn, + vrf_proof.clone(), + mblock_pubkey_hash, + &coinbase_tx, + 
self.config.make_block_builder_settings( + attempt, + false, + self.globals.get_miner_status(), + ), + Some(&self.event_dispatcher), + ) { + Ok(block) => block, + Err(e) => { + error!("Relayer: Failure mining anchor block even after removing offending microblock {}: {}", &mblock_header_hash, &e); + return None; + } + } + } Err(e) => { - warn!("Failed to set global burnchain signer: {:?}", &e); + error!("Relayer: Failure mining anchored block: {}", e); + return None; + } + }; + + info!( + "Relayer: Succeeded assembling {} block #{}: {}, with {} txs, attempt {}", + if parent_block_info.parent_block_total_burn == 0 { + "Genesis" + } else { + "Stacks" + }, + anchored_block.header.total_work.work, + anchored_block.block_hash(), + anchored_block.txs.len(), + attempt + ); + + // let's commit + let op = self.make_block_commit( + &mut burn_db, + &mut chain_state, + anchored_block.block_hash(), + parent_block_info.parent_block_burn_height, + parent_block_info.parent_winning_vtxindex, + &vrf_proof, + )?; + + // last chance -- confirm that the stacks tip is unchanged (since it could have taken long + // enough to build this block that another block could have arrived), and confirm that all + // Stacks blocks with heights higher than the canonical tip are processed. 
+ let cur_burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) + .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); + + if let Some(stacks_tip) = chain_state + .get_stacks_chain_tip(&burn_db) + .expect("FATAL: could not query chain tip") + { + let is_miner_blocked = self + .globals + .get_miner_status() + .lock() + .expect("FATAL: mutex poisoned") + .is_blocked(); + let has_unprocessed = StacksChainState::has_higher_unprocessed_blocks( + chain_state.db(), + stacks_tip.height, + ) + .expect("FATAL: failed to query staging blocks"); + if stacks_tip.anchored_block_hash != anchored_block.header.parent_block + || parent_block_info.parent_consensus_hash != stacks_tip.consensus_hash + || cur_burn_chain_tip.burn_header_hash != self.burn_block.burn_header_hash + || is_miner_blocked + || has_unprocessed + { + debug!( + "Relayer: Cancel block-commit; chain tip(s) have changed or cancelled"; + "block_hash" => %anchored_block.block_hash(), + "tx_count" => anchored_block.txs.len(), + "target_height" => %anchored_block.header.total_work.work, + "parent_consensus_hash" => %parent_block_info.parent_consensus_hash, + "parent_block_hash" => %anchored_block.header.parent_block, + "parent_microblock_hash" => %anchored_block.header.parent_microblock, + "parent_microblock_seq" => anchored_block.header.parent_microblock_sequence, + "old_tip_burn_block_hash" => %self.burn_block.burn_header_hash, + "old_tip_burn_block_height" => self.burn_block.block_height, + "old_tip_burn_block_sortition_id" => %self.burn_block.sortition_id, + "attempt" => attempt, + "new_stacks_tip_block_hash" => %stacks_tip.anchored_block_hash, + "new_stacks_tip_consensus_hash" => %stacks_tip.consensus_hash, + "new_tip_burn_block_height" => cur_burn_chain_tip.block_height, + "new_tip_burn_block_sortition_id" => %cur_burn_chain_tip.sortition_id, + "new_burn_block_sortition_id" => %cur_burn_chain_tip.sortition_id, + "miner_blocked" => %is_miner_blocked, + "has_unprocessed" 
=> %has_unprocessed + ); + self.globals.counters.bump_missed_tenures(); + return None; } - _ => {} } - let relayer = Relayer::from_p2p(&mut p2p_net); - let shared_unconfirmed_txs = Arc::new(Mutex::new(UnconfirmedTxMap::new())); + let mut op_signer = self.keychain.generate_op_signer(); + debug!( + "Relayer: Submit block-commit"; + "block_hash" => %anchored_block.block_hash(), + "tx_count" => anchored_block.txs.len(), + "target_height" => anchored_block.header.total_work.work, + "parent_consensus_hash" => %parent_block_info.parent_consensus_hash, + "parent_block_hash" => %anchored_block.header.parent_block, + "parent_microblock_hash" => %anchored_block.header.parent_microblock, + "parent_microblock_seq" => anchored_block.header.parent_microblock_sequence, + "tip_burn_block_hash" => %self.burn_block.burn_header_hash, + "tip_burn_block_height" => self.burn_block.block_height, + "tip_burn_block_sortition_id" => %self.burn_block.sortition_id, + "cur_burn_block_hash" => %cur_burn_chain_tip.burn_header_hash, + "cur_burn_block_height" => %cur_burn_chain_tip.block_height, + "cur_burn_block_sortition_id" => %cur_burn_chain_tip.sortition_id, + "attempt" => attempt + ); - let leader_key_registration_state = if config.node.mock_mining { - // mock mining, pretend to have a registered key - let vrf_public_key = keychain.rotate_vrf_keypair(1); - LeaderKeyRegistrationState::Active(RegisteredKey { - block_height: 1, - op_vtxindex: 1, - vrf_public_key, - }) - } else { - LeaderKeyRegistrationState::Inactive - }; + let res = bitcoin_controller.submit_operation(op, &mut op_signer, attempt); + if !res { + if !self.config.node.mock_mining { + warn!("Relayer: Failed to submit Bitcoin transaction"); + return None; + } else { + debug!("Relayer: Mock-mining enabled; not sending Bitcoin transaction"); + } + } - let relayer_thread_handle = spawn_miner_relayer( - runloop, - relayer, - local_peer, - keychain, - relay_recv, - last_sortition.clone(), - coord_comms, - shared_unconfirmed_txs.clone(), 
+ Some(MinerThreadResult::Block( + AssembledAnchorBlock { + parent_consensus_hash: parent_block_info.parent_consensus_hash, + my_burn_hash: cur_burn_chain_tip.burn_header_hash, + my_block_height: cur_burn_chain_tip.block_height, + orig_burn_hash: self.burn_block.burn_header_hash, + anchored_block, + attempt, + tenure_begin, + }, + self.keychain.clone(), + microblock_private_key, + bitcoin_controller.get_ongoing_commit(), + )) + } +} + +impl RelayerThread { + /// Instantiate off of a StacksNode, a runloop, and a relayer. + pub fn new(runloop: &RunLoop, local_peer: LocalPeer, relayer: Relayer) -> RelayerThread { + let config = runloop.config().clone(); + let globals = runloop.get_globals(); + let burn_db_path = config.get_burn_db_file_path(); + let stacks_chainstate_path = config.get_chainstate_path_str(); + let is_mainnet = config.is_mainnet(); + let chain_id = config.burnchain.chain_id; + + let sortdb = + SortitionDB::open(&burn_db_path, true).expect("FATAL: failed to open burnchain DB"); + + let (chainstate, _) = StacksChainState::open( + is_mainnet, + chain_id, + &stacks_chainstate_path, + Some(config.node.get_marf_opts()), ) - .expect("Failed to initialize mine/relay thread"); - - let p2p_thread_handle = spawn_peer( - runloop, - p2p_net, - &p2p_sock, - &rpc_sock, - 5000, - relay_send.clone(), - attachments_rx, - shared_unconfirmed_txs, + .expect("FATAL: failed to open chainstate DB"); + + let cost_estimator = config + .make_cost_estimator() + .unwrap_or_else(|| Box::new(UnitEstimator)); + let metric = config + .make_cost_metric() + .unwrap_or_else(|| Box::new(UnitMetric)); + + let mempool = MemPoolDB::open( + is_mainnet, + chain_id, + &stacks_chainstate_path, + cost_estimator, + metric, ) - .expect("Failed to initialize p2p thread"); + .expect("Database failure opening mempool"); - info!("Start HTTP server on: {}", &config.node.rpc_bind); - info!("Start P2P server on: {}", &config.node.p2p_bind); + let keychain = Keychain::default(config.node.seed.clone()); + 
let bitcoin_controller = BitcoinRegtestController::new_dummy(config.clone()); - let is_miner = miner; + RelayerThread { + config: config.clone(), + sortdb: Some(sortdb), + chainstate: Some(chainstate), + mempool: Some(mempool), + globals, + keychain, + burnchain: runloop.get_burnchain(), + last_mined_blocks: MinedBlocks::new(), + bitcoin_controller, + event_dispatcher: runloop.get_event_dispatcher(), + local_peer, - StacksNode { - config, - relay_channel: relay_send, - last_sortition, - burnchain_signer, - is_miner, - atlas_config, - leader_key_registration_state, - p2p_thread_handle, - relayer_thread_handle, + last_tenure_issue_time: 0, + last_network_block_height: 0, + last_network_block_height_ts: 0, + last_network_download_passes: 0, + min_network_download_passes: 0, + last_network_inv_passes: 0, + min_network_inv_passes: 0, + + last_tenure_consensus_hash: None, + miner_tip: None, + last_microblock_tenure_time: 0, + microblock_deadline: 0, + microblock_stream_cost: ExecutionCost::zero(), + + relayer, + + miner_thread: None, + mined_stacks_block: false, } } - /// Tell the relayer to fire off a tenure and a block commit op, - /// if it is time to do so. 
- pub fn relayer_issue_tenure(&mut self) -> bool { - if !self.is_miner { - // node is a follower, don't try to issue a tenure - return true; - } + /// Get an immutible ref to the sortdb + pub fn sortdb_ref(&self) -> &SortitionDB { + self.sortdb + .as_ref() + .expect("FATAL: tried to access sortdb while taken") + } - if let Some(burnchain_tip) = get_last_sortition(&self.last_sortition) { - match self.leader_key_registration_state { - LeaderKeyRegistrationState::Active(ref key) => { - debug!( - "Tenure: Using key {:?} off of {}", - &key.vrf_public_key, &burnchain_tip.burn_header_hash - ); + /// Get an immutible ref to the chainstate + pub fn chainstate_ref(&self) -> &StacksChainState { + self.chainstate + .as_ref() + .expect("FATAL: tried to access chainstate while it was taken") + } - self.relay_channel - .send(RelayerDirective::RunTenure( - key.clone(), - burnchain_tip, - get_epoch_time_ms(), - )) - .is_ok() - } - LeaderKeyRegistrationState::Inactive => { - warn!( - "Tenure: skipped tenure because no active VRF key. Trying to register one." - ); - self.leader_key_registration_state = LeaderKeyRegistrationState::Pending; - self.relay_channel - .send(RelayerDirective::RegisterKey(burnchain_tip)) - .is_ok() - } - LeaderKeyRegistrationState::Pending => true, - } - } else { - warn!("Tenure: Do not know the last burn block. As a miner, this is bad."); - true - } + /// Fool the borrow checker into letting us do something with the chainstate databases. + /// DOES NOT COMPOSE -- do NOT call this, or self.sortdb_ref(), or self.chainstate_ref(), within + /// `func`. You will get a runtime panic. 
+ pub fn with_chainstate(&mut self, func: F) -> R + where + F: FnOnce(&mut RelayerThread, &mut SortitionDB, &mut StacksChainState, &mut MemPoolDB) -> R, + { + let mut sortdb = self + .sortdb + .take() + .expect("FATAL: tried to take sortdb while taken"); + let mut chainstate = self + .chainstate + .take() + .expect("FATAL: tried to take chainstate while taken"); + let mut mempool = self + .mempool + .take() + .expect("FATAL: tried to take mempool while taken"); + let res = func(self, &mut sortdb, &mut chainstate, &mut mempool); + self.sortdb = Some(sortdb); + self.chainstate = Some(chainstate); + self.mempool = Some(mempool); + res } - /// Notify the relayer of a sortition, telling it to process the block - /// and advertize it if it was mined by the node. - /// returns _false_ if the relayer hung up the channel. - pub fn relayer_sortition_notify(&self) -> bool { - if !self.is_miner { - // node is a follower, don't try to process my own tenure. - return true; - } + /// have we waited for the right conditions under which to start mining a block off of our + /// chain tip? 
+ pub fn has_waited_for_latest_blocks(&self) -> bool { + // a network download pass took place + (self.min_network_download_passes <= self.last_network_download_passes + // a network inv pass took place + && self.min_network_download_passes <= self.last_network_download_passes) + // we waited long enough for a download pass, but timed out waiting + || self.last_network_block_height_ts + (self.config.node.wait_time_for_blocks as u128) < get_epoch_time_ms() + // we're not supposed to wait at all + || !self.config.miner.wait_for_block_download + } - if let Some(snapshot) = get_last_sortition(&self.last_sortition) { + /// Return debug string for waiting for latest blocks + pub fn debug_waited_for_latest_blocks(&self) -> String { + format!( + "({} <= {} && {} <= {}) || {} + {} < {} || {}", + self.min_network_download_passes, + self.last_network_download_passes, + self.min_network_inv_passes, + self.last_network_inv_passes, + self.last_network_block_height_ts, + self.config.node.wait_time_for_blocks, + get_epoch_time_ms(), + self.config.miner.wait_for_block_download + ) + } + + /// Handle a NetworkResult from the p2p/http state machine. Usually this is the act of + /// * preprocessing and storing new blocks and microblocks + /// * relaying blocks, microblocks, and transacctions + /// * updating unconfirmed state views + pub fn process_network_result(&mut self, mut net_result: NetworkResult) { + debug!( + "Relayer: Handle network result (from {})", + net_result.burn_height + ); + + if self.last_network_block_height != net_result.burn_height { + // burnchain advanced; disable mining until we also do a download pass. 
+ self.last_network_block_height = net_result.burn_height; + self.min_network_download_passes = net_result.num_download_passes + 1; + self.min_network_inv_passes = net_result.num_inv_sync_passes + 1; + self.last_network_block_height_ts = get_epoch_time_ms(); debug!( - "Tenure: Notify sortition!"; - "consensus_hash" => %snapshot.consensus_hash, - "burn_block_hash" => %snapshot.burn_header_hash, - "winning_stacks_block_hash" => %snapshot.winning_stacks_block_hash, - "burn_block_height" => &snapshot.block_height, - "sortition_id" => %snapshot.sortition_id + "Relayer: block mining until the next download pass {}", + self.min_network_download_passes ); - if snapshot.sortition { - return self - .relay_channel - .send(RelayerDirective::ProcessTenure( - snapshot.consensus_hash.clone(), - snapshot.parent_burn_header_hash.clone(), - snapshot.winning_stacks_block_hash.clone(), - )) - .is_ok(); - } - } else { - debug!("Tenure: Notify sortition! No last burn block"); + signal_mining_blocked(self.globals.get_miner_status()); } - true - } - /// Determine where in the set of forks to attempt to mine the next anchored block. - /// `mine_tip_ch` and `mine_tip_bhh` identify the parent block on top of which to mine. - /// `check_burn_block` identifies what we believe to be the burn chain's sortition history tip. - /// This is used to mitigate (but not eliminate) a TOCTTOU issue with mining: the caller's - /// conception of the sortition history tip may have become stale by the time they call this - /// method, in which case, mining should *not* happen (since the block will be invalid). 
- fn get_mining_tenure_information( - chain_state: &mut StacksChainState, - burn_db: &mut SortitionDB, - check_burn_block: &BlockSnapshot, - miner_address: StacksAddress, - mine_tip_ch: &ConsensusHash, - mine_tip_bh: &BlockHeaderHash, - ) -> Result { - let stacks_tip_header = StacksChainState::get_anchored_block_header_info( - chain_state.db(), - &mine_tip_ch, - &mine_tip_bh, - ) - .unwrap() - .ok_or_else(|| { - error!( - "Could not mine new tenure, since could not find header for known chain tip."; - "tip_consensus_hash" => %mine_tip_ch, - "tip_stacks_block_hash" => %mine_tip_bh + let net_receipts = self.with_chainstate(|relayer_thread, sortdb, chainstate, mempool| { + relayer_thread + .relayer + .process_network_result( + &relayer_thread.local_peer, + &mut net_result, + sortdb, + chainstate, + mempool, + relayer_thread.globals.sync_comms.get_ibd(), + Some(&relayer_thread.globals.coord_comms), + Some(&relayer_thread.event_dispatcher), + ) + .expect("BUG: failure processing network results") + }); + + if net_receipts.num_new_blocks > 0 || net_receipts.num_new_confirmed_microblocks > 0 { + // if we received any new block data that could invalidate our view of the chain tip, + // then stop mining until we process it + debug!("Relayer: block mining to process newly-arrived blocks or microblocks"); + signal_mining_blocked(self.globals.get_miner_status()); + } + + let mempool_txs_added = net_receipts.mempool_txs_added.len(); + if mempool_txs_added > 0 { + self.event_dispatcher + .process_new_mempool_txs(net_receipts.mempool_txs_added); + } + + let num_unconfirmed_microblock_tx_receipts = + net_receipts.processed_unconfirmed_state.receipts.len(); + if num_unconfirmed_microblock_tx_receipts > 0 { + if let Some(unconfirmed_state) = self.chainstate_ref().unconfirmed_state.as_ref() { + let canonical_tip = unconfirmed_state.confirmed_chain_tip.clone(); + self.event_dispatcher.process_new_microblocks( + canonical_tip, + net_receipts.processed_unconfirmed_state, + ); + } else { 
+ warn!("Relayer: oops, unconfirmed state is uninitialized but there are microblock events"); + } + } + + // Dispatch retrieved attachments, if any. + if net_result.has_attachments() { + self.event_dispatcher + .process_new_attachments(&net_result.attachments); + } + + // synchronize unconfirmed tx index to p2p thread + self.with_chainstate(|relayer_thread, _sortdb, chainstate, _mempool| { + relayer_thread.globals.send_unconfirmed_txs(chainstate); + }); + + // resume mining if we blocked it, and if we've done the requisite download + // passes + self.last_network_download_passes = net_result.num_download_passes; + self.last_network_inv_passes = net_result.num_inv_sync_passes; + if self.has_waited_for_latest_blocks() { + debug!("Relayer: did a download pass, so unblocking mining"); + signal_mining_ready(self.globals.get_miner_status()); + } + } + + /// Process the block and microblocks from a sortition that we won. + /// At this point, we're modifying the chainstate, and merging the artifacts from the previous tenure. + /// Blocks until the given stacks block is processed. + /// Returns true if we accepted this block as new. + /// Returns false if we already processed this block. + fn accept_winning_tenure( + &mut self, + anchored_block: &StacksBlock, + consensus_hash: &ConsensusHash, + parent_consensus_hash: &ConsensusHash, + ) -> Result { + if StacksChainState::has_stored_block( + self.chainstate_ref().db(), + &self.chainstate_ref().blocks_path, + consensus_hash, + &anchored_block.block_hash(), + )? { + // already processed my tenure + return Ok(false); + } + let burn_height = + SortitionDB::get_block_snapshot_consensus(self.sortdb_ref().conn(), consensus_hash) + .map_err(|e| { + error!("Failed to find block snapshot for mined block: {}", e); + e + })? + .ok_or_else(|| { + error!("Failed to find block snapshot for mined block"); + ChainstateError::NoSuchBlockError + })? 
+ .block_height; + + let ast_rules = SortitionDB::get_ast_rules(self.sortdb_ref().conn(), burn_height)?; + + // failsafe + if !Relayer::static_check_problematic_relayed_block( + self.chainstate_ref().mainnet, + &anchored_block, + ASTRules::PrecheckSize, + ) { + // nope! + warn!( + "Our mined block {} was problematic", + &anchored_block.block_hash() ); - Error::HeaderNotFoundForChainTip + #[cfg(any(test, feature = "testing"))] + { + use std::fs; + use std::io::Write; + use std::path::Path; + if let Ok(path) = std::env::var("STACKS_BAD_BLOCKS_DIR") { + // record this block somewhere + if !fs::metadata(&path).is_ok() { + fs::create_dir_all(&path) + .expect(&format!("FATAL: could not create '{}'", &path)); + } + + let path = Path::new(&path); + let path = path.join(Path::new(&format!("{}", &anchored_block.block_hash()))); + let mut file = fs::File::create(&path) + .expect(&format!("FATAL: could not create '{:?}'", &path)); + + let block_bits = anchored_block.serialize_to_vec(); + let block_bits_hex = to_hex(&block_bits); + let block_json = format!( + r#"{{"block":"{}","consensus":"{}"}}"#, + &block_bits_hex, &consensus_hash + ); + file.write_all(&block_json.as_bytes()).expect(&format!( + "FATAL: failed to write block bits to '{:?}'", + &path + )); + info!( + "Fault injection: bad block {} saved to {}", + &anchored_block.block_hash(), + &path.to_str().unwrap() + ); + } + } + if !Relayer::process_mined_problematic_blocks(ast_rules, ASTRules::PrecheckSize) { + // don't process it + warn!( + "Will NOT process our problematic mined block {}", + &anchored_block.block_hash() + ); + return Err(ChainstateError::NoTransactionsToMine); + } else { + warn!( + "Will process our problematic mined block {}", + &anchored_block.block_hash() + ) + } + } + + // Preprocess the anchored block + self.with_chainstate(|_relayer_thread, sort_db, chainstate, _mempool| { + let ic = sort_db.index_conn(); + chainstate.preprocess_anchored_block( + &ic, + consensus_hash, + &anchored_block, + 
&parent_consensus_hash, + 0, + ) })?; - // the stacks block I'm mining off of's burn header hash and vtxindex: - let parent_snapshot = - SortitionDB::get_block_snapshot_consensus(burn_db.conn(), mine_tip_ch) - .expect("Failed to look up block's parent snapshot") - .expect("Failed to look up block's parent snapshot"); + Ok(true) + } - let parent_sortition_id = &parent_snapshot.sortition_id; - let parent_winning_vtxindex = - SortitionDB::get_block_winning_vtxindex(burn_db.conn(), parent_sortition_id) - .expect("SortitionDB failure.") - .ok_or_else(|| { - error!( - "Failed to find winning vtx index for the parent sortition"; - "parent_sortition_id" => %parent_sortition_id + /// Process a new block we mined + /// Return true if we processed it + /// Return false if we timed out waiting for it + /// Return Err(..) if we couldn't reach the chains coordiantor thread + fn process_new_block(&self) -> Result { + // process the block + if !self.globals.coord_comms.announce_new_stacks_block() { + return Err(Error::CoordinatorClosed); + } + let stacks_blocks_processed = self.globals.coord_comms.get_stacks_blocks_processed(); + if !self + .globals + .coord_comms + .wait_for_stacks_blocks_processed(stacks_blocks_processed, u64::MAX) + { + // basically unreachable + warn!("ChainsCoordinator timed out while waiting for new stacks block to be processed"); + return Ok(false); + } + Ok(true) + } + + /// Given the two miner tips, return the newer tip. 
+ fn pick_higher_tip(cur: Option, new: Option) -> Option { + match (cur, new) { + (Some(cur), None) => Some(cur), + (None, Some(new)) => Some(new), + (None, None) => None, + (Some(cur), Some(new)) => { + if cur.stacks_height < new.stacks_height { + Some(new) + } else if cur.stacks_height > new.stacks_height { + Some(cur) + } else if cur.burn_height < new.burn_height { + Some(new) + } else if cur.burn_height > new.burn_height { + Some(cur) + } else { + assert_eq!(cur, new); + Some(cur) + } + } + } + } + + /// Given the pointer to a recently-discovered tenure, see if we won the sortition and if so, + /// store it, preprocess it, and forward it to our neighbors. All the while, keep track of the + /// latest Stacks mining tip we have produced so far. + /// + /// Returns (true, Some(tip)) if the coordinator is still running and we have a miner tip to + /// build on (i.e. we won this last sortition). + /// + /// Returns (true, None) if the coordinator is still running, and we do NOT have a miner tip to + /// build on (i.e. we did not win this last sortition) + /// + /// Returns (false, _) if the coordinator could not be reached, meaning this thread should die. + pub fn process_one_tenure( + &mut self, + consensus_hash: ConsensusHash, + block_header_hash: BlockHeaderHash, + burn_hash: BurnchainHeaderHash, + ) -> (bool, Option) { + let mut miner_tip = None; + let sn = + SortitionDB::get_block_snapshot_consensus(self.sortdb_ref().conn(), &consensus_hash) + .expect("FATAL: failed to query sortition DB") + .expect("FATAL: unknown consensus hash"); + + debug!( + "Relayer: Process tenure {}/{} in {} burn height {}", + &consensus_hash, &block_header_hash, &burn_hash, sn.block_height + ); + + if let Some((last_mined_block_data, microblock_privkey)) = + self.last_mined_blocks.remove(&block_header_hash) + { + // we won! + let AssembledAnchorBlock { + parent_consensus_hash, + anchored_block: mined_block, + my_burn_hash: mined_burn_hash, + attempt: _, + .. 
+ } = last_mined_block_data; + + let reward_block_height = mined_block.header.total_work.work + MINER_REWARD_MATURITY; + info!( + "Relayer: Won sortition! Mining reward will be received in {} blocks (block #{})", + MINER_REWARD_MATURITY, reward_block_height + ); + debug!("Relayer: Won sortition!"; + "stacks_header" => %block_header_hash, + "burn_hash" => %mined_burn_hash, + ); + + increment_stx_blocks_mined_counter(); + let has_new_data = match self.accept_winning_tenure( + &mined_block, + &consensus_hash, + &parent_consensus_hash, + ) { + Ok(accepted) => accepted, + Err(ChainstateError::ChannelClosed(_)) => { + warn!("Coordinator stopped, stopping relayer thread..."); + return (false, None); + } + Err(e) => { + warn!("Error processing my tenure, bad block produced: {}", e); + warn!( + "Bad block"; + "stacks_header" => %block_header_hash, + "data" => %to_hex(&mined_block.serialize_to_vec()), ); - Error::WinningVtxNotFoundForChainTip - })?; + return (true, None); + } + }; - let parent_block = SortitionDB::get_block_snapshot(burn_db.conn(), parent_sortition_id) - .expect("SortitionDB failure.") - .ok_or_else(|| { - error!( - "Failed to find block snapshot for the parent sortition"; - "parent_sortition_id" => %parent_sortition_id + // advertize _and_ push blocks for now + let blocks_available = Relayer::load_blocks_available_data( + self.sortdb_ref(), + vec![consensus_hash.clone()], + ) + .expect("Failed to obtain block information for a block we mined."); + + let block_data = { + let mut bd = HashMap::new(); + bd.insert(consensus_hash.clone(), mined_block.clone()); + bd + }; + + if let Err(e) = self.relayer.advertize_blocks(blocks_available, block_data) { + warn!("Failed to advertise new block: {}", e); + } + + let snapshot = SortitionDB::get_block_snapshot_consensus( + self.sortdb_ref().conn(), + &consensus_hash, + ) + .expect("Failed to obtain snapshot for block") + .expect("Failed to obtain snapshot for block"); + + if !snapshot.pox_valid { + warn!( + "Snapshot 
for {} is no longer valid; discarding {}...", + &consensus_hash, + &mined_block.block_hash() ); - Error::SnapshotNotFoundForChainTip - })?; + miner_tip = Self::pick_higher_tip(miner_tip, None); + } else { + let ch = snapshot.consensus_hash.clone(); + let bh = mined_block.block_hash(); + let height = mined_block.header.total_work.work; - // don't mine off of an old burnchain block - let burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) - .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); + if let Err(e) = self + .relayer + .broadcast_block(snapshot.consensus_hash, mined_block) + { + warn!("Failed to push new block: {}", e); + } - if burn_chain_tip.consensus_hash != check_burn_block.consensus_hash { - info!( - "New canonical burn chain tip detected. Will not try to mine."; - "new_consensus_hash" => %burn_chain_tip.consensus_hash, - "old_consensus_hash" => %check_burn_block.consensus_hash, - "new_burn_height" => burn_chain_tip.block_height, - "old_burn_height" => check_burn_block.block_height + // proceed to mine microblocks + miner_tip = Some(MinerTip::new( + ch, + bh, + microblock_privkey, + height, + snapshot.block_height, + )); + } + + if has_new_data { + // process the block, now that we've advertized it + if let Err(Error::CoordinatorClosed) = self.process_new_block() { + // coordiantor stopped + return (false, None); + } + } + } else { + debug!( + "Relayer: Did not win sortition in {}, winning block was {}/{}", + &burn_hash, &consensus_hash, &block_header_hash ); - return Err(Error::BurnchainTipChanged); + miner_tip = None; } - debug!("Mining tenure's last consensus hash: {} (height {} hash {}), stacks tip consensus hash: {} (height {} hash {})", - &check_burn_block.consensus_hash, check_burn_block.block_height, &check_burn_block.burn_header_hash, - mine_tip_ch, parent_snapshot.block_height, &parent_snapshot.burn_header_hash); + (true, miner_tip) + } - let coinbase_nonce = { - let principal = 
miner_address.into(); - let account = chain_state - .with_read_only_clarity_tx( - &burn_db.index_conn(), - &StacksBlockHeader::make_index_block_hash(mine_tip_ch, mine_tip_bh), - |conn| StacksChainState::get_account(conn, &principal), - ) - .expect(&format!( - "BUG: stacks tip block {}/{} no longer exists after we queried it", - mine_tip_ch, mine_tip_bh + /// Process all new tenures that we're aware of. + /// Clear out stale tenure artifacts as well. + /// Update the miner tip if we won the highest tenure (or clear it if we didn't). + /// If we won any sortitions, send the block and microblock data to the p2p thread. + /// Return true if we can still continue to run; false if not. + pub fn process_new_tenures( + &mut self, + consensus_hash: ConsensusHash, + burn_hash: BurnchainHeaderHash, + block_header_hash: BlockHeaderHash, + ) -> bool { + let mut miner_tip = None; + + // process all sortitions between the last-processed consensus hash and this + // one. ProcessTenure(..) messages can get lost. 
+ let burn_tip = SortitionDB::get_canonical_burn_chain_tip(self.sortdb_ref().conn()) + .expect("FATAL: failed to read current burnchain tip"); + + let tenures = if let Some(last_ch) = self.last_tenure_consensus_hash.as_ref() { + let mut tenures = vec![]; + let last_sn = + SortitionDB::get_block_snapshot_consensus(self.sortdb_ref().conn(), &last_ch) + .expect("FATAL: failed to query sortition DB") + .expect("FATAL: unknown prior consensus hash"); + + debug!( + "Relayer: query tenures between burn block heights {} and {}", + last_sn.block_height + 1, + burn_tip.block_height + 1 + ); + for block_to_process in (last_sn.block_height + 1)..(burn_tip.block_height + 1) { + let sn = { + let ic = self.sortdb_ref().index_conn(); + SortitionDB::get_ancestor_snapshot( + &ic, + block_to_process, + &burn_tip.sortition_id, + ) + .expect("FATAL: failed to read ancestor snapshot from sortition DB") + .expect("Failed to find block in fork processed by burnchain indexer") + }; + if !sn.sortition { + debug!( + "Relayer: Skipping tenure {}/{} at burn hash/height {},{} -- no sortition", + &sn.consensus_hash, + &sn.winning_stacks_block_hash, + &sn.burn_header_hash, + sn.block_height + ); + continue; + } + debug!( + "Relayer: Will process tenure {}/{} at burn hash/height {},{}", + &sn.consensus_hash, + &sn.winning_stacks_block_hash, + &sn.burn_header_hash, + sn.block_height + ); + tenures.push(( + sn.consensus_hash, + sn.burn_header_hash, + sn.winning_stacks_block_hash, )); - account.nonce + } + tenures + } else { + // first-ever tenure processed + vec![(consensus_hash, burn_hash, block_header_hash)] }; - Ok(MiningTenureInformation { - stacks_parent_header: stacks_tip_header, - parent_consensus_hash: mine_tip_ch.clone(), - parent_block_burn_height: parent_block.block_height, - parent_block_total_burn: parent_block.total_burn, - parent_winning_vtxindex, - coinbase_nonce, - }) - } + debug!("Relayer: will process {} tenures", &tenures.len()); + let num_tenures = tenures.len(); + if 
num_tenures > 0 { + // temporarily halt mining + signal_mining_blocked(self.globals.get_miner_status()); + } - /// Return the assembled anchor block info and microblock private key on success. - /// Return None if we couldn't build a block for whatever reason - fn relayer_run_tenure( - config: &Config, - registered_key: RegisteredKey, - chain_state: &mut StacksChainState, - burn_db: &mut SortitionDB, - burnchain: &Burnchain, - burn_block: BlockSnapshot, - keychain: &mut Keychain, - mem_pool: &mut MemPoolDB, - burn_fee_cap: u64, - bitcoin_controller: &mut BitcoinRegtestController, - last_mined_blocks: &Vec<&AssembledAnchorBlock>, - event_dispatcher: &EventDispatcher, - ) -> Option<(AssembledAnchorBlock, Secp256k1PrivateKey)> { - let MiningTenureInformation { - mut stacks_parent_header, - parent_consensus_hash, - parent_block_burn_height, - parent_block_total_burn, - parent_winning_vtxindex, - coinbase_nonce, - } = if let Some(stacks_tip) = chain_state - .get_stacks_chain_tip(burn_db) - .expect("FATAL: could not query chain tip") - { - let miner_address = keychain.origin_address(config.is_mainnet()).unwrap(); - Self::get_mining_tenure_information( - chain_state, - burn_db, - &burn_block, - miner_address, - &stacks_tip.consensus_hash, - &stacks_tip.anchored_block_hash, + for (consensus_hash, burn_hash, block_header_hash) in tenures.into_iter() { + self.miner_thread_try_join(); + let (continue_thread, new_miner_tip) = + self.process_one_tenure(consensus_hash, block_header_hash, burn_hash); + if !continue_thread { + // coordinator thread hang-up + return false; + } + miner_tip = Self::pick_higher_tip(miner_tip, new_miner_tip); + + // clear all blocks up to this consensus hash + let this_burn_tip = SortitionDB::get_block_snapshot_consensus( + self.sortdb_ref().conn(), + &consensus_hash, ) - .ok()? 
- } else { - debug!("No Stacks chain tip known, will return a genesis block"); - let (network, _) = config.burnchain.get_bitcoin_network(); - let burnchain_params = - BurnchainParameters::from_params(&config.burnchain.chain, &network) - .expect("Bitcoin network unsupported"); + .expect("FATAL: failed to query sortition DB") + .expect("FATAL: no snapshot for consensus hash"); - let chain_tip = ChainTip::genesis( - &burnchain_params.first_block_hash, - burnchain_params.first_block_height.into(), - burnchain_params.first_block_timestamp.into(), + let old_last_mined_blocks = + mem::replace(&mut self.last_mined_blocks, MinedBlocks::new()); + self.last_mined_blocks = + Self::clear_stale_mined_blocks(this_burn_tip.block_height, old_last_mined_blocks); + + // update last-tenure pointer + self.last_tenure_consensus_hash = Some(consensus_hash); + } + + if let Some(miner_tip) = miner_tip.as_ref() { + debug!( + "Relayer: Microblock miner tip is now {}/{} ({})", + miner_tip.consensus_hash, + miner_tip.block_hash, + StacksBlockHeader::make_index_block_hash( + &miner_tip.consensus_hash, + &miner_tip.block_hash + ) ); - MiningTenureInformation { - stacks_parent_header: chain_tip.metadata, - parent_consensus_hash: FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), - parent_block_burn_height: 0, - parent_block_total_burn: 0, - parent_winning_vtxindex: 0, - coinbase_nonce: 0, + self.with_chainstate(|relayer_thread, sortdb, chainstate, _mempool| { + Relayer::refresh_unconfirmed(chainstate, sortdb); + relayer_thread.globals.send_unconfirmed_txs(chainstate); + }); + } + + // update state for microblock mining + self.setup_microblock_mining_state(miner_tip); + + // resume mining if we blocked it + if num_tenures > 0 { + if self.miner_tip.is_some() { + // we won the highest tenure + if self.config.node.mine_microblocks { + // mine a microblock first + self.mined_stacks_block = true; + } else { + // mine a Stacks block first -- we won't build microblocks + self.mined_stacks_block = false; + } + } 
else { + // mine a Stacks block first -- we didn't win + self.mined_stacks_block = false; } - }; + signal_mining_ready(self.globals.get_miner_status()); + } + true + } - // has the tip changed from our previously-mined block for this epoch? - let attempt = if last_mined_blocks.len() <= 1 { - // always mine if we've not mined a block for this epoch yet, or - // if we've mined just one attempt, unconditionally try again (so we - // can use `subsequent_miner_time_ms` in this attempt) - if last_mined_blocks.len() == 1 { - debug!("Have only attempted one block; unconditionally trying again"); + /// Update the miner tip with a new tip. If it's changed, then clear out the microblock stream + /// cost since we won't be mining it anymore. + fn setup_microblock_mining_state(&mut self, new_miner_tip: Option) { + // update state + let my_miner_tip = std::mem::replace(&mut self.miner_tip, None); + let best_tip = Self::pick_higher_tip(my_miner_tip.clone(), new_miner_tip.clone()); + if best_tip == new_miner_tip && best_tip != my_miner_tip { + // tip has changed + debug!( + "Relayer: Best miner tip went from {:?} to {:?}", + &my_miner_tip, &new_miner_tip + ); + self.microblock_stream_cost = ExecutionCost::zero(); + } + self.miner_tip = best_tip; + } + + /// Try to resume microblock mining if we don't need to build an anchored block + fn try_resume_microblock_mining(&mut self) { + if self.miner_tip.is_some() { + // we won the highest tenure + if self.config.node.mine_microblocks { + // mine a microblock first + self.mined_stacks_block = true; + } else { + // mine a Stacks block first -- we won't build microblocks + self.mined_stacks_block = false; } - last_mined_blocks.len() as u64 + 1 } else { - let mut best_attempt = 0; + // mine a Stacks block first -- we didn't win + self.mined_stacks_block = false; + } + } + + /// Constructs and returns a LeaderKeyRegisterOp out of the provided params + fn inner_generate_leader_key_register_op( + address: StacksAddress, + vrf_public_key: 
VRFPublicKey, + consensus_hash: &ConsensusHash, + ) -> BlockstackOperationType { + BlockstackOperationType::LeaderKeyRegister(LeaderKeyRegisterOp { + public_key: vrf_public_key, + memo: vec![], + address, + consensus_hash: consensus_hash.clone(), + vtxindex: 0, + txid: Txid([0u8; 32]), + block_height: 0, + burn_header_hash: BurnchainHeaderHash::zero(), + }) + } + + /// Create and broadcast a VRF public key registration transaction. + /// Returns true if we succeed in doing so; false if not. + pub fn rotate_vrf_and_register(&mut self, burn_block: &BlockSnapshot) -> bool { + let is_mainnet = self.config.is_mainnet(); + let vrf_pk = self.keychain.rotate_vrf_keypair(burn_block.block_height); + let burnchain_tip_consensus_hash = &burn_block.consensus_hash; + let op = Self::inner_generate_leader_key_register_op( + self.keychain.get_address(is_mainnet), + vrf_pk, + burnchain_tip_consensus_hash, + ); + + let mut one_off_signer = self.keychain.generate_op_signer(); + self.bitcoin_controller + .submit_operation(op, &mut one_off_signer, 1) + } + + /// Remove any block state we've mined for the given burnchain height. 
+ /// Return the filtered `last_mined_blocks` + fn clear_stale_mined_blocks(burn_height: u64, last_mined_blocks: MinedBlocks) -> MinedBlocks { + let mut ret = HashMap::new(); + for (stacks_bhh, (assembled_block, microblock_privkey)) in last_mined_blocks.into_iter() { + if assembled_block.my_block_height < burn_height { + debug!( + "Stale mined block: {} (as of {},{})", + &stacks_bhh, &assembled_block.my_burn_hash, assembled_block.my_block_height + ); + continue; + } debug!( - "Consider {} in-flight Stacks tip(s)", - &last_mined_blocks.len() + "Mined block in-flight: {} (as of {},{})", + &stacks_bhh, &assembled_block.my_burn_hash, assembled_block.my_block_height ); - for prev_block in last_mined_blocks.iter() { - debug!( - "Consider in-flight block {} on Stacks tip {}/{} in {} with {} txs", - &prev_block.anchored_block.block_hash(), - &prev_block.parent_consensus_hash, - &prev_block.anchored_block.header.parent_block, - &prev_block.my_burn_hash, - &prev_block.anchored_block.txs.len() + ret.insert(stacks_bhh, (assembled_block, microblock_privkey)); + } + ret + } + + /// Create the block miner thread state. 
+ /// Only proceeds if all of the following are true: + /// * the miner is not blocked + /// * last_burn_block corresponds to the canonical sortition DB's chain tip + /// * the time of issuance is sufficiently recent + /// * there are no unprocessed stacks blocks in the staging DB + /// * the relayer has already tried a download scan that included this sortition (which, if a + /// block was found, would have placed it into the staging DB and marked it as + /// unprocessed) + /// * a miner thread is not running already + fn create_block_miner( + &mut self, + registered_key: RegisteredKey, + last_burn_block: BlockSnapshot, + issue_timestamp_ms: u128, + ) -> Option { + if self + .globals + .get_miner_status() + .lock() + .expect("FATAL: mutex poisoned") + .is_blocked() + { + debug!( + "Relayer: miner is blocked as of {}; cannot mine Stacks block at this time", + &last_burn_block.burn_header_hash + ); + return None; + } + + // start a new tenure + if let Some(cur_sortition) = self.globals.get_last_sortition() { + if last_burn_block.sortition_id != cur_sortition.sortition_id { + debug!( + "Relayer: Drop stale RunTenure for {}: current sortition is for {}", + &last_burn_block.burn_header_hash, &cur_sortition.burn_header_hash ); + self.globals.counters.bump_missed_tenures(); + return None; + } + } - if prev_block.anchored_block.txs.len() == 1 && prev_block.attempt == 1 { - // Don't let the fact that we've built an empty block during this sortition - // prevent us from trying again. - best_attempt = 1; - continue; - } - if prev_block.parent_consensus_hash == parent_consensus_hash - && prev_block.my_burn_hash == burn_block.burn_header_hash - && prev_block.anchored_block.header.parent_block - == stacks_parent_header.anchored_header.block_hash() - { - // the anchored chain tip hasn't changed since we attempted to build a block. - // But, have discovered any new microblocks worthy of being mined? 
- if let Ok(Some(stream)) = - StacksChainState::load_descendant_staging_microblock_stream( - chain_state.db(), - &StacksBlockHeader::make_index_block_hash( - &prev_block.parent_consensus_hash, - &stacks_parent_header.anchored_header.block_hash(), - ), - 0, - u16::MAX, - ) - { - if (prev_block.anchored_block.header.parent_microblock - == BlockHeaderHash([0u8; 32]) - && stream.len() == 0) - || (prev_block.anchored_block.header.parent_microblock - != BlockHeaderHash([0u8; 32]) - && stream.len() - <= (prev_block.anchored_block.header.parent_microblock_sequence - as usize) - + 1) - { - // the chain tip hasn't changed since we attempted to build a block. Use what we - // already have. - debug!("Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, and no new microblocks ({} <= {})", - &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, - prev_block.anchored_block.txs.len(), prev_block.my_burn_hash, parent_block_burn_height, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); + let burn_header_hash = last_burn_block.burn_header_hash.clone(); + let burn_chain_sn = SortitionDB::get_canonical_burn_chain_tip(self.sortdb_ref().conn()) + .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - return None; - } else { - // there are new microblocks! 
- // TODO: only consider rebuilding our anchored block if we (a) have - // time, and (b) the new microblocks are worth more than the new BTC - // fee minus the old BTC fee - debug!("Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, but there are new microblocks ({} > {})", - &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, - prev_block.anchored_block.txs.len(), prev_block.my_burn_hash, parent_block_burn_height, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); + let burn_chain_tip = burn_chain_sn.burn_header_hash.clone(); - best_attempt = cmp::max(best_attempt, prev_block.attempt); - } - } else { - // no microblock stream to confirm, and the stacks tip hasn't changed - debug!("Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, and no microblocks present", - &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, - prev_block.anchored_block.txs.len(), prev_block.my_burn_hash, parent_block_burn_height); + if burn_chain_tip != burn_header_hash { + debug!( + "Relayer: Drop stale RunTenure for {}: current sortition is for {}", + &burn_header_hash, &burn_chain_tip + ); + self.globals.counters.bump_missed_tenures(); + return None; + } - return None; - } - } else { - if burn_block.burn_header_hash == prev_block.my_burn_hash { - // only try and re-mine if there was no sortition since the last chain tip - debug!("Stacks tip has changed to {}/{} since we last tried to mine a block in {} at burn height {}; attempt was {} (for Stacks tip {}/{})", - parent_consensus_hash, stacks_parent_header.anchored_header.block_hash(), prev_block.my_burn_hash, parent_block_burn_height, prev_block.attempt, &prev_block.parent_consensus_hash, 
&prev_block.anchored_block.header.parent_block); - best_attempt = cmp::max(best_attempt, prev_block.attempt); - } else { - debug!("Burn tip has changed to {} ({}) since we last tried to mine a block in {}", - &burn_block.burn_header_hash, burn_block.block_height, &prev_block.my_burn_hash); - } + if let Some(stacks_tip) = self + .chainstate_ref() + .get_stacks_chain_tip(self.sortdb_ref()) + .expect("FATAL: could not query chain tip") + { + let has_unprocessed = StacksChainState::has_higher_unprocessed_blocks( + self.chainstate_ref().db(), + stacks_tip.height, + ) + .expect("FATAL: failed to query staging blocks"); + if has_unprocessed { + debug!( + "Relayer: Drop RunTenure for {} because there are pending blocks", + &burn_header_hash + ); + return None; + } + } + + if burn_chain_sn.block_height != self.last_network_block_height + || !self.has_waited_for_latest_blocks() + { + debug!("Relayer: network has not had a chance to process in-flight blocks ({} != {} || !({}))", + burn_chain_sn.block_height, self.last_network_block_height, self.debug_waited_for_latest_blocks()); + return None; + } + + let tenure_cooldown = if self.config.node.mine_microblocks { + self.config.node.wait_time_for_microblocks as u128 + } else { + 0 + }; + + // no burnchain change, so only re-run block tenure every so often in order + // to give microblocks a chance to collect + if issue_timestamp_ms < self.last_tenure_issue_time + tenure_cooldown { + debug!("Relayer: will NOT run tenure since issuance at {} is too fresh (wait until {} + {} = {})", + issue_timestamp_ms / 1000, self.last_tenure_issue_time / 1000, tenure_cooldown / 1000, (self.last_tenure_issue_time + tenure_cooldown) / 1000); + return None; + } + + // if we're still mining on this burn block, then do nothing + if self.miner_thread.is_some() { + debug!("Relayer: will NOT run tenure since miner thread is already running for burn tip {}", &burn_chain_tip); + return None; + } + + debug!( + "Relayer: Spawn tenure thread"; + "height" => 
last_burn_block.block_height, + "burn_header_hash" => %burn_header_hash, + ); + + let miner_thread_state = + BlockMinerThread::from_relayer_thread(self, registered_key, last_burn_block); + Some(miner_thread_state) + } + + /// Try to start up a block miner thread with this given VRF key and current burnchain tip. + /// Returns true if the thread was started; false if it was not (for any reason) + pub fn block_miner_thread_try_start( + &mut self, + registered_key: RegisteredKey, + last_burn_block: BlockSnapshot, + issue_timestamp_ms: u128, + ) -> bool { + if !self.miner_thread_try_join() { + return false; + } + + if !self.config.node.mock_mining { + // mock miner can't mine microblocks yet, so don't stop it from trying multiple + // anchored blocks + if self.mined_stacks_block && self.config.node.mine_microblocks { + debug!("Relayer: mined a Stacks block already; waiting for microblock miner"); + return false; + } + } + + let mut miner_thread_state = + match self.create_block_miner(registered_key, last_burn_block, issue_timestamp_ms) { + Some(state) => state, + None => { + return false; } + }; + + if let Ok(miner_handle) = thread::Builder::new() + .name(format!("miner-block-{}", self.local_peer.data_url)) + .spawn(move || miner_thread_state.run_tenure()) + .map_err(|e| { + error!("Relayer: Failed to start tenure thread: {:?}", &e); + e + }) + { + self.miner_thread = Some(miner_handle); + } + + true + } + + /// See if we should run a microblock tenure now. 
+ /// Return true if so; false if not + fn can_run_microblock_tenure(&mut self) -> bool { + if !self.config.node.mine_microblocks { + // not enabled + test_debug!("Relayer: not configured to mine microblocks"); + return false; + } + if !self.miner_thread_try_join() { + // already running (for an anchored block or microblock) + test_debug!("Relayer: miner thread already running so cannot mine microblock"); + return false; + } + if self.microblock_deadline > get_epoch_time_ms() { + debug!( + "Relayer: Too soon to start a microblock tenure ({} > {})", + self.microblock_deadline, + get_epoch_time_ms() + ); + return false; + } + if self.miner_tip.is_none() { + debug!("Relayer: did not win last block, so cannot mine microblocks"); + return false; + } + if !self.mined_stacks_block { + // have not tried to mine a stacks block yet that confirms previously-mined unconfirmed + // state (or have not tried to mine a new Stacks block yet for this active tenure); + debug!("Relayer: Did not mine a block yet, so will not mine a microblock"); + return false; + } + if self.globals.get_last_sortition().is_none() { + debug!("Relayer: no first sortition yet"); + return false; + } + + // go ahead + true + } + + /// Start up a microblock miner thread if we can: + /// * no miner thread must be running already + /// * the miner must not be blocked + /// * we must have won the sortition on the stacks chain tip + /// Returns true if the thread was started; false if not. + pub fn microblock_miner_thread_try_start(&mut self) -> bool { + let miner_tip = match self.miner_tip.as_ref() { + Some(tip) => tip.clone(), + None => { + debug!("Relayer: did not win last block, so cannot mine microblocks"); + return false; } - best_attempt + 1 }; - // Generates a proof out of the sortition hash provided in the params. 
- let vrf_proof = match keychain.generate_proof( - ®istered_key.vrf_public_key, - burn_block.sortition_hash.as_bytes(), - ) { - Some(vrfp) => vrfp, + let burnchain_tip = match self.globals.get_last_sortition() { + Some(sn) => sn, None => { - // Try to recover a key registered in a former session. - // registered_key.block_height gives us a pointer to the height of the block - // holding the key register op, but the VRF was derived using the height of one - // of the parents blocks. - let _ = keychain.rotate_vrf_keypair(registered_key.block_height - 1); - match keychain.generate_proof( - ®istered_key.vrf_public_key, - burn_block.sortition_hash.as_bytes(), - ) { - Some(vrfp) => vrfp, - None => { - error!( - "Failed to generate proof with {:?}", - ®istered_key.vrf_public_key - ); - return None; - } - } + debug!("Relayer: no first sortition yet"); + return false; } }; debug!( - "Generated VRF Proof: {} over {} with key {}", - vrf_proof.to_hex(), - &burn_block.sortition_hash, - ®istered_key.vrf_public_key.to_hex() + "Relayer: mined Stacks block {}/{} so can mine microblocks", + &miner_tip.consensus_hash, &miner_tip.block_hash ); - // Generates a new secret key for signing the trail of microblocks - // of the upcoming tenure. 
- let microblock_secret_key = if attempt > 1 { - match keychain.get_microblock_key() { - Some(k) => k, - None => { - error!( - "Failed to obtain microblock key for mining attempt"; - "attempt" => %attempt - ); - return None; - } + if !self.miner_thread_try_join() { + // already running (for an anchored block or microblock) + debug!("Relayer: miner thread already running so cannot mine microblock"); + return false; + } + if self + .globals + .get_miner_status() + .lock() + .expect("FATAL: mutex poisoned") + .is_blocked() + { + debug!( + "Relayer: miner is blocked as of {}; cannot mine microblock at this time", + &burnchain_tip.burn_header_hash + ); + self.globals.counters.set_microblocks_processed(0); + return false; + } + + let parent_consensus_hash = &miner_tip.consensus_hash; + let parent_block_hash = &miner_tip.block_hash; + + debug!( + "Relayer: Run microblock tenure for {}/{}", + parent_consensus_hash, parent_block_hash + ); + + let mut microblock_thread_state = match MicroblockMinerThread::from_relayer_thread(self) { + Some(ts) => ts, + None => { + return false; } - } else { - keychain.rotate_microblock_keypair(burn_block.block_height) }; - let mblock_pubkey_hash = - Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_secret_key)); - let coinbase_tx = inner_generate_coinbase_tx( - keychain, - coinbase_nonce, - config.is_mainnet(), - config.burnchain.chain_id, - ); + if let Ok(miner_handle) = thread::Builder::new() + .name(format!("miner-microblock-{}", self.local_peer.data_url)) + .spawn(move || { + Some(MinerThreadResult::Microblock( + microblock_thread_state.try_mine_microblock(miner_tip.clone()), + miner_tip, + )) + }) + .map_err(|e| { + error!("Relayer: Failed to start tenure thread: {:?}", &e); + e + }) + { + // thread started! 
+ self.miner_thread = Some(miner_handle); + self.microblock_deadline = + get_epoch_time_ms() + (self.config.node.microblock_frequency as u128); + } + + true + } + + /// Inner body of Self::miner_thread_try_join + fn inner_miner_thread_try_join( + &mut self, + thread_handle: JoinHandle>, + ) -> Option>> { + // tenure run already in progress; try and join + if !thread_handle.is_finished() { + debug!("Relayer: RunTenure thread not finished / is in-progress"); + return Some(thread_handle); + } + let last_mined_block_opt = thread_handle + .join() + .expect("FATAL: failed to join miner thread"); + if let Some(miner_result) = last_mined_block_opt { + match miner_result { + MinerThreadResult::Block( + last_mined_block, + modified_keychain, + microblock_privkey, + ongoing_commit_opt, + ) => { + // finished mining a block + if BlockMinerThread::find_inflight_mined_blocks( + last_mined_block.my_block_height, + &self.last_mined_blocks, + ) + .len() + == 0 + { + // first time we've mined a block in this burnchain block + self.globals.counters.bump_blocks_processed(); + } - // find the longest microblock tail we can build off of - let microblock_info_opt = - match StacksChainState::load_descendant_staging_microblock_stream_with_poison( - chain_state.db(), - &StacksBlockHeader::make_index_block_hash( - &parent_consensus_hash, - &stacks_parent_header.anchored_header.block_hash(), - ), - 0, - u16::MAX, - ) { - Ok(x) => { - let num_mblocks = x.as_ref().map(|(mblocks, ..)| mblocks.len()).unwrap_or(0); debug!( - "Loaded {} microblocks descending from {}/{}", - num_mblocks, - &parent_consensus_hash, - &stacks_parent_header.anchored_header.block_hash() + "Relayer: RunTenure thread joined; got Stacks block {}", + &last_mined_block.anchored_block.block_hash() ); - x - } - Err(e) => { - warn!( - "Failed to load descendant microblock stream from {}/{}: {:?}", - &parent_consensus_hash, - &stacks_parent_header.anchored_header.block_hash(), - &e + + let bhh = 
last_mined_block.my_burn_hash.clone(); + let orig_bhh = last_mined_block.orig_burn_hash.clone(); + let tenure_begin = last_mined_block.tenure_begin; + + // keep our keychain up-to-date with the miner's progress + self.keychain = modified_keychain; + + self.last_mined_blocks.insert( + last_mined_block.anchored_block.block_hash(), + (last_mined_block, microblock_privkey), ); - None + + self.last_tenure_issue_time = get_epoch_time_ms(); + self.bitcoin_controller + .set_ongoing_commit(ongoing_commit_opt); + + debug!( + "Relayer: RunTenure finished at {} (in {}ms) targeting {} (originally {})", + self.last_tenure_issue_time, + self.last_tenure_issue_time.saturating_sub(tenure_begin), + &bhh, + &orig_bhh + ); + + // this stacks block confirms all in-flight microblocks we know about, + // including the ones we produced. + self.mined_stacks_block = true; } - }; + MinerThreadResult::Microblock(microblock_result, miner_tip) => { + // finished mining a microblock + match microblock_result { + Ok(Some((next_microblock, new_cost))) => { + // apply it + let microblock_hash = next_microblock.block_hash(); + + let (processed_unconfirmed_state, num_mblocks) = self.with_chainstate( + |_relayer_thread, sortdb, chainstate, _mempool| { + let processed_unconfirmed_state = + Relayer::refresh_unconfirmed(chainstate, sortdb); + let num_mblocks = chainstate + .unconfirmed_state + .as_ref() + .map(|ref unconfirmed| unconfirmed.num_microblocks()) + .unwrap_or(0); + + (processed_unconfirmed_state, num_mblocks) + }, + ); - if let Some((ref microblocks, ref poison_opt)) = µblock_info_opt { - if let Some(ref tail) = microblocks.last() { - debug!( - "Confirm microblock stream tailed at {} (seq {})", - &tail.block_hash(), - tail.header.sequence - ); + info!( + "Mined one microblock: {} seq {} txs {} (total processed: {})", + µblock_hash, + next_microblock.header.sequence, + next_microblock.txs.len(), + num_mblocks + ); + self.globals.counters.set_microblocks_processed(num_mblocks); + + let 
parent_index_block_hash = StacksBlockHeader::make_index_block_hash( + &miner_tip.consensus_hash, + &miner_tip.block_hash, + ); + self.event_dispatcher.process_new_microblocks( + parent_index_block_hash, + processed_unconfirmed_state, + ); + + // send it off + if let Err(e) = self.relayer.broadcast_microblock( + &miner_tip.consensus_hash, + &miner_tip.block_hash, + next_microblock, + ) { + error!( + "Failure trying to broadcast microblock {}: {}", + microblock_hash, e + ); + } + + self.last_microblock_tenure_time = get_epoch_time_ms(); + self.microblock_stream_cost = new_cost; + + // synchronise state + self.with_chainstate( + |relayer_thread, _sortdb, chainstate, _mempool| { + relayer_thread.globals.send_unconfirmed_txs(chainstate); + }, + ); + + // have not yet mined a stacks block that confirms this microblock, so + // do that on the next run + self.mined_stacks_block = false; + } + Ok(None) => { + debug!("Relayer: did not mine microblock in this tenure"); + + // switch back to block mining + self.mined_stacks_block = false; + } + Err(e) => { + warn!("Relayer: Failed to mine next microblock: {:?}", &e); + + // switch back to block mining + self.mined_stacks_block = false; + } + } + } } + } else { + // if we tried and failed to make an anchored block (e.g. because there's nothing to + // do), then resume microblock mining + if !self.mined_stacks_block { + self.try_resume_microblock_mining(); + } + } + None + } - // try and confirm as many microblocks as we can (but note that the stream itself may - // be too long; we'll try again if that happens). - stacks_parent_header.microblock_tail = - microblocks.last().clone().map(|blk| blk.header.clone()); + /// Try to join with the miner thread. If we succeed, join the thread and return true. 
+ /// Otherwise, if the thread is still running, return false; + /// Updates internal state gleaned from the miner, such as: + /// * new stacks block data + /// * new keychain state + /// * new metrics + /// * new unconfirmed state + /// Returns true if joined; false if not. + pub fn miner_thread_try_join(&mut self) -> bool { + if let Some(thread_handle) = self.miner_thread.take() { + let new_thread_handle = self.inner_miner_thread_try_join(thread_handle); + self.miner_thread = new_thread_handle; + } + self.miner_thread.is_none() + } + + /// Top-level dispatcher + pub fn handle_directive(&mut self, directive: RelayerDirective) -> bool { + debug!("Relayer: received next directive"); + let continue_running = match directive { + RelayerDirective::HandleNetResult(net_result) => { + self.process_network_result(net_result); + true + } + RelayerDirective::RegisterKey(last_burn_block) => { + self.rotate_vrf_and_register(&last_burn_block); + self.globals.counters.bump_blocks_processed(); + true + } + RelayerDirective::ProcessTenure(consensus_hash, burn_hash, block_header_hash) => { + self.process_new_tenures(consensus_hash, burn_hash, block_header_hash) + } + RelayerDirective::RunTenure(registered_key, last_burn_block, issue_timestamp_ms) => { + self.block_miner_thread_try_start( + registered_key, + last_burn_block, + issue_timestamp_ms, + ); + true + } + RelayerDirective::Exit => false, + }; + if !continue_running { + return false; + } + + // see if we need to run a microblock tenure + if self.can_run_microblock_tenure() { + self.microblock_miner_thread_try_start(); + } + continue_running + } +} + +impl ParentStacksBlockInfo { + /// Determine where in the set of forks to attempt to mine the next anchored block. + /// `mine_tip_ch` and `mine_tip_bhh` identify the parent block on top of which to mine. + /// `check_burn_block` identifies what we believe to be the burn chain's sortition history tip. 
+ /// This is used to mitigate (but not eliminate) a TOCTTOU issue with mining: the caller's + /// conception of the sortition history tip may have become stale by the time they call this + /// method, in which case, mining should *not* happen (since the block will be invalid). + pub fn lookup( + chain_state: &mut StacksChainState, + burn_db: &mut SortitionDB, + check_burn_block: &BlockSnapshot, + miner_address: StacksAddress, + mine_tip_ch: &ConsensusHash, + mine_tip_bh: &BlockHeaderHash, + ) -> Result { + let stacks_tip_header = StacksChainState::get_anchored_block_header_info( + chain_state.db(), + &mine_tip_ch, + &mine_tip_bh, + ) + .unwrap() + .ok_or_else(|| { + error!( + "Could not mine new tenure, since could not find header for known chain tip."; + "tip_consensus_hash" => %mine_tip_ch, + "tip_stacks_block_hash" => %mine_tip_bh + ); + Error::HeaderNotFoundForChainTip + })?; + + // the stacks block I'm mining off of's burn header hash and vtxindex: + let parent_snapshot = + SortitionDB::get_block_snapshot_consensus(burn_db.conn(), mine_tip_ch) + .expect("Failed to look up block's parent snapshot") + .expect("Failed to look up block's parent snapshot"); + + let parent_sortition_id = &parent_snapshot.sortition_id; + let parent_winning_vtxindex = + SortitionDB::get_block_winning_vtxindex(burn_db.conn(), parent_sortition_id) + .expect("SortitionDB failure.") + .ok_or_else(|| { + error!( + "Failed to find winning vtx index for the parent sortition"; + "parent_sortition_id" => %parent_sortition_id + ); + Error::WinningVtxNotFoundForChainTip + })?; + + let parent_block = SortitionDB::get_block_snapshot(burn_db.conn(), parent_sortition_id) + .expect("SortitionDB failure.") + .ok_or_else(|| { + error!( + "Failed to find block snapshot for the parent sortition"; + "parent_sortition_id" => %parent_sortition_id + ); + Error::SnapshotNotFoundForChainTip + })?; + + // don't mine off of an old burnchain block + let burn_chain_tip = 
SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) + .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); + + if burn_chain_tip.consensus_hash != check_burn_block.consensus_hash { + info!( + "New canonical burn chain tip detected. Will not try to mine."; + "new_consensus_hash" => %burn_chain_tip.consensus_hash, + "old_consensus_hash" => %check_burn_block.consensus_hash, + "new_burn_height" => burn_chain_tip.block_height, + "old_burn_height" => check_burn_block.block_height + ); + return Err(Error::BurnchainTipChanged); + } + + debug!("Mining tenure's last consensus hash: {} (height {} hash {}), stacks tip consensus hash: {} (height {} hash {})", + &check_burn_block.consensus_hash, check_burn_block.block_height, &check_burn_block.burn_header_hash, + mine_tip_ch, parent_snapshot.block_height, &parent_snapshot.burn_header_hash); + + let coinbase_nonce = { + let principal = miner_address.into(); + let account = chain_state + .with_read_only_clarity_tx( + &burn_db.index_conn(), + &StacksBlockHeader::make_index_block_hash(mine_tip_ch, mine_tip_bh), + |conn| StacksChainState::get_account(conn, &principal), + ) + .expect(&format!( + "BUG: stacks tip block {}/{} no longer exists after we queried it", + mine_tip_ch, mine_tip_bh + )); + account.nonce + }; + + Ok(ParentStacksBlockInfo { + stacks_parent_header: stacks_tip_header, + parent_consensus_hash: mine_tip_ch.clone(), + parent_block_burn_height: parent_block.block_height, + parent_block_total_burn: parent_block.total_burn, + parent_winning_vtxindex, + coinbase_nonce, + }) + } +} + +/// Thread that runs the network state machine, handling both p2p and http requests. +pub struct PeerThread { + /// Node config + config: Config, + /// instance of the peer network. Made optional in order to trick the borrow checker. 
+ net: Option, + /// handle to global inter-thread comms + globals: Globals, + /// how long to wait for network messages on each poll, in millis + poll_timeout: u64, + /// receiver for attachments discovered by the chains coordinator thread + attachments_rx: Receiver>, + /// handle to the sortition DB (optional so we can take/replace it) + sortdb: Option, + /// handle to the chainstate DB (optional so we can take/replace it) + chainstate: Option, + /// handle to the mempool DB (optional so we can take/replace it) + mempool: Option, + /// buffer of relayer commands with block data that couldn't be sent to the relayer just yet + /// (i.e. due to backpressure). We track this separately, instead of just using a bigger + /// channel, because we need to know when backpressure occurs in order to throttle the p2p + /// thread's downloader. + results_with_data: VecDeque, + /// total number of p2p state-machine passes so far. Used to signal when to download the next + /// reward cycle of blocks + num_p2p_state_machine_passes: u64, + /// total number of inventory state-machine passes so far. Used to signal when to download the + /// next reward cycle of blocks. + num_inv_sync_passes: u64, + /// total number of download state-machine passes so far. Used to signal when to download the + /// next reward cycle of blocks. 
+ num_download_passes: u64, + /// last burnchain block seen in the PeerNetwork's chain view since the last run + last_burn_block_height: u64, +} + +impl PeerThread { + /// set up the mempool DB connection + fn connect_mempool_db(config: &Config) -> MemPoolDB { + // create estimators, metric instances for RPC handler + let cost_estimator = config + .make_cost_estimator() + .unwrap_or_else(|| Box::new(UnitEstimator)); + let metric = config + .make_cost_metric() + .unwrap_or_else(|| Box::new(UnitMetric)); + + let mempool = MemPoolDB::open( + config.is_mainnet(), + config.burnchain.chain_id, + &config.get_chainstate_path_str(), + cost_estimator, + metric, + ) + .expect("Database failure opening mempool"); + + mempool + } + + /// Instantiate the p2p thread. + /// Binds the addresses in the config (which may panic if the port is blocked). + /// This is so the node will crash "early" before any new threads start if there's going to be + /// a bind error anyway. + pub fn new( + runloop: &RunLoop, + mut net: PeerNetwork, + attachments_rx: Receiver>, + ) -> PeerThread { + let config = runloop.config().clone(); + let mempool = Self::connect_mempool_db(&config); + let burn_db_path = config.get_burn_db_file_path(); + let stacks_chainstate_path = config.get_chainstate_path_str(); + + let sortdb = + SortitionDB::open(&burn_db_path, false).expect("FATAL: could not open sortition DB"); + let (chainstate, _) = StacksChainState::open( + config.is_mainnet(), + config.burnchain.chain_id, + &stacks_chainstate_path, + Some(config.node.get_marf_opts()), + ) + .expect("FATAL: could not open chainstate DB"); + + let p2p_sock: SocketAddr = config.node.p2p_bind.parse().expect(&format!( + "Failed to parse socket: {}", + &config.node.p2p_bind + )); + let rpc_sock = config.node.rpc_bind.parse().expect(&format!( + "Failed to parse socket: {}", + &config.node.rpc_bind + )); + + net.bind(&p2p_sock, &rpc_sock) + .expect("BUG: PeerNetwork could not bind or is already bound"); + + let poll_timeout = 
cmp::min(5000, config.miner.first_attempt_time_ms / 2); + + PeerThread { + config, + net: Some(net), + globals: runloop.get_globals(), + poll_timeout, + attachments_rx, + sortdb: Some(sortdb), + chainstate: Some(chainstate), + mempool: Some(mempool), + results_with_data: VecDeque::new(), + num_p2p_state_machine_passes: 0, + num_inv_sync_passes: 0, + num_download_passes: 0, + last_burn_block_height: 0, + } + } + + /// Do something with mutable references to the mempool, sortdb, and chainstate + /// Fools the borrow checker. + /// NOT COMPOSIBLE + fn with_chainstate(&mut self, func: F) -> R + where + F: FnOnce(&mut PeerThread, &mut SortitionDB, &mut StacksChainState, &mut MemPoolDB) -> R, + { + let mut sortdb = self.sortdb.take().expect("BUG: sortdb already taken"); + let mut chainstate = self + .chainstate + .take() + .expect("BUG: chainstate already taken"); + let mut mempool = self.mempool.take().expect("BUG: mempool already taken"); + + let res = func(self, &mut sortdb, &mut chainstate, &mut mempool); + + self.sortdb = Some(sortdb); + self.chainstate = Some(chainstate); + self.mempool = Some(mempool); + + res + } + + /// Get an immutable ref to the inner network. + /// DO NOT USE WITHIN with_network() + fn get_network(&self) -> &PeerNetwork { + self.net.as_ref().expect("BUG: did not replace net") + } + + /// Do something with mutable references to the network. + /// Fools the borrow checker. + /// NOT COMPOSIBLE. 
DO NOT CALL THIS OR get_network() IN func + fn with_network(&mut self, func: F) -> R + where + F: FnOnce(&mut PeerThread, &mut PeerNetwork) -> R, + { + let mut net = self.net.take().expect("BUG: net already taken"); + + let res = func(self, &mut net); + + self.net = Some(net); + res + } + + /// Run one pass of the p2p/http state machine + /// Return true if we should continue running passes; false if not + pub fn run_one_pass( + &mut self, + dns_client_opt: Option<&mut DNSClient>, + event_dispatcher: &EventDispatcher, + cost_estimator: &Box, + cost_metric: &Box, + fee_estimator: Option<&Box>, + ) -> bool { + // initial block download? + let ibd = self.globals.sync_comms.get_ibd(); + let download_backpressure = self.results_with_data.len() > 0; + let poll_ms = if !download_backpressure && self.get_network().has_more_downloads() { + // keep getting those blocks -- drive the downloader state-machine + debug!( + "P2P: backpressure: {}, more downloads: {}", + download_backpressure, + self.get_network().has_more_downloads() + ); + 1 + } else { + self.poll_timeout + }; + + let mut expected_attachments = match self.attachments_rx.try_recv() { + Ok(expected_attachments) => { + debug!("Atlas: received attachments: {:?}", &expected_attachments); + expected_attachments + } + _ => { + debug!("Atlas: attachment channel is empty"); + HashSet::new() + } + }; + + // move over unconfirmed state obtained from the relayer + self.with_chainstate(|p2p_thread, sortdb, chainstate, _mempool| { + let _ = Relayer::setup_unconfirmed_state_readonly(chainstate, sortdb); + p2p_thread.globals.recv_unconfirmed_txs(chainstate); + }); + + // do one pass + let p2p_res = self.with_chainstate(|p2p_thread, sortdb, chainstate, mempool| { + // NOTE: handler_args must be created such that it outlives the inner net.run() call and + // doesn't ref anything within p2p_thread. 
+ let handler_args = RPCHandlerArgs { + exit_at_block_height: p2p_thread + .config + .burnchain + .process_exit_at_block_height + .clone(), + genesis_chainstate_hash: Sha256Sum::from_hex(stx_genesis::GENESIS_CHAINSTATE_HASH) + .unwrap(), + event_observer: Some(event_dispatcher), + cost_estimator: Some(cost_estimator.as_ref()), + cost_metric: Some(cost_metric.as_ref()), + fee_estimator: fee_estimator.map(|boxed_estimator| boxed_estimator.as_ref()), + ..RPCHandlerArgs::default() + }; + p2p_thread.with_network(|_, net| { + net.run( + sortdb, + chainstate, + mempool, + dns_client_opt, + download_backpressure, + ibd, + poll_ms, + &handler_args, + &mut expected_attachments, + ) + }) + }); + + match p2p_res { + Ok(network_result) => { + let mut have_update = false; + if self.num_p2p_state_machine_passes < network_result.num_state_machine_passes { + // p2p state-machine did a full pass. Notify anyone listening. + self.globals.sync_comms.notify_p2p_state_pass(); + self.num_p2p_state_machine_passes = network_result.num_state_machine_passes; + } + + if self.num_inv_sync_passes < network_result.num_inv_sync_passes { + // inv-sync state-machine did a full pass. Notify anyone listening. + self.globals.sync_comms.notify_inv_sync_pass(); + self.num_inv_sync_passes = network_result.num_inv_sync_passes; + + // the relayer cares about the number of inventory passes, so pass this along + have_update = true; + } + + if self.num_download_passes < network_result.num_download_passes { + // download state-machine did a full pass. Notify anyone listening. 
+ self.globals.sync_comms.notify_download_pass(); + self.num_download_passes = network_result.num_download_passes; + + // the relayer cares about the number of download passes, so pass this along + have_update = true; + } + + if network_result.has_data_to_store() + || self.last_burn_block_height != network_result.burn_height + || have_update + { + // pass along if we have blocks, microblocks, or transactions, or a status + // update on the network's view of the burnchain + self.last_burn_block_height = network_result.burn_height; + self.results_with_data + .push_back(RelayerDirective::HandleNetResult(network_result)); + } + } + Err(e) => { + // this is only reachable if the network is not instantiated correctly -- + // i.e. you didn't connect it + panic!("P2P: Failed to process network dispatch: {:?}", &e); + } + }; + + while let Some(next_result) = self.results_with_data.pop_front() { + // have blocks, microblocks, and/or transactions (don't care about anything else), + // or a directive to mine microblocks + if let Err(e) = self.globals.relay_send.try_send(next_result) { + debug!( + "P2P: {:?}: download backpressure detected (bufferred {})", + &self.get_network().local_peer, + self.results_with_data.len() + ); + match e { + TrySendError::Full(directive) => { + if let RelayerDirective::RunTenure(..) 
= directive { + // can drop this + } else { + // don't lose this data -- just try it again + self.results_with_data.push_front(directive); + } + break; + } + TrySendError::Disconnected(_) => { + info!("P2P: Relayer hang up with p2p channel"); + self.globals.signal_stop(); + return false; + } + } + } else { + debug!("P2P: Dispatched result to Relayer!"); + } + } + + true + } +} + +impl StacksNode { + /// Create a StacksPrivateKey from a given seed buffer + pub fn make_node_private_key_from_seed(seed: &[u8]) -> StacksPrivateKey { + let node_privkey = { + let mut re_hashed_seed = seed.to_vec(); + let my_private_key = loop { + match Secp256k1PrivateKey::from_slice(&re_hashed_seed[..]) { + Ok(sk) => break sk, + Err(_) => { + re_hashed_seed = Sha256Sum::from_data(&re_hashed_seed[..]) + .as_bytes() + .to_vec() + } + } + }; + my_private_key + }; + node_privkey + } + + /// Set up the AST size-precheck height, if configured + fn setup_ast_size_precheck(config: &Config, sortdb: &mut SortitionDB) { + if let Some(ast_precheck_size_height) = config.burnchain.ast_precheck_size_height { + info!( + "Override burnchain height of {:?} to {}", + ASTRules::PrecheckSize, + ast_precheck_size_height + ); + let mut tx = sortdb + .tx_begin() + .expect("FATAL: failed to begin tx on sortition DB"); + SortitionDB::override_ast_rule_height( + &mut tx, + ASTRules::PrecheckSize, + ast_precheck_size_height, + ) + .expect("FATAL: failed to override AST PrecheckSize rule height"); + tx.commit() + .expect("FATAL: failed to commit sortition DB transaction"); + } + } + + /// Set up the mempool DB by making sure it exists. + /// Panics on failure. 
+ fn setup_mempool_db(config: &Config) -> MemPoolDB { + // force early mempool instantiation + let cost_estimator = config + .make_cost_estimator() + .unwrap_or_else(|| Box::new(UnitEstimator)); + let metric = config + .make_cost_metric() + .unwrap_or_else(|| Box::new(UnitMetric)); + + let mempool = MemPoolDB::open( + config.is_mainnet(), + config.burnchain.chain_id, + &config.get_chainstate_path_str(), + cost_estimator, + metric, + ) + .expect("BUG: failed to instantiate mempool"); + + mempool + } + + /// Set up the Peer DB and update any soft state from the config file. This includes: + /// * blacklisted/whitelisted nodes + /// * node keys + /// * bootstrap nodes + /// Returns the instantiated PeerDB + /// Panics on failure. + fn setup_peer_db(config: &Config, burnchain: &Burnchain) -> PeerDB { + let data_url = UrlString::try_from(format!("{}", &config.node.data_url)).unwrap(); + let initial_neighbors = config.node.bootstrap_node.clone(); + if initial_neighbors.len() > 0 { + info!( + "Will bootstrap from peers {}", + VecDisplay(&initial_neighbors) + ); + } else { + warn!("Without a peer to bootstrap from, the node will start mining a new chain"); + } + + let p2p_sock: SocketAddr = config.node.p2p_bind.parse().expect(&format!( + "Failed to parse socket: {}", + &config.node.p2p_bind + )); + let p2p_addr: SocketAddr = config.node.p2p_address.parse().expect(&format!( + "Failed to parse socket: {}", + &config.node.p2p_address + )); + let node_privkey = + StacksNode::make_node_private_key_from_seed(&config.node.local_peer_seed); + + let mut peerdb = PeerDB::connect( + &config.get_peer_db_file_path(), + true, + config.burnchain.chain_id, + burnchain.network_id, + Some(node_privkey), + config.connection_options.private_key_lifetime.clone(), + PeerAddress::from_socketaddr(&p2p_addr), + p2p_sock.port(), + data_url, + &vec![], + Some(&initial_neighbors), + ) + .map_err(|e| { + eprintln!( + "Failed to open {}: {:?}", + &config.get_peer_db_file_path(), + &e + ); + panic!(); + 
}) + .unwrap(); + + // allow all bootstrap nodes + { + let mut tx = peerdb.tx_begin().unwrap(); + for initial_neighbor in initial_neighbors.iter() { + // update peer in case public key changed + PeerDB::update_peer(&mut tx, &initial_neighbor).unwrap(); + PeerDB::set_allow_peer( + &mut tx, + initial_neighbor.addr.network_id, + &initial_neighbor.addr.addrbytes, + initial_neighbor.addr.port, + -1, + ) + .unwrap(); + } + tx.commit().unwrap(); + } + + if !config.node.deny_nodes.is_empty() { + warn!("Will ignore nodes {:?}", &config.node.deny_nodes); + } + + // deny all config-denied peers + { + let mut tx = peerdb.tx_begin().unwrap(); + for denied in config.node.deny_nodes.iter() { + PeerDB::set_deny_peer( + &mut tx, + denied.addr.network_id, + &denied.addr.addrbytes, + denied.addr.port, + get_epoch_time_secs() + 24 * 365 * 3600, + ) + .unwrap(); + } + tx.commit().unwrap(); + } + + // update services to indicate we can support mempool sync + { + let mut tx = peerdb.tx_begin().unwrap(); + PeerDB::set_local_services( + &mut tx, + (ServiceFlags::RPC as u16) | (ServiceFlags::RELAY as u16), + ) + .unwrap(); + tx.commit().unwrap(); + } + + peerdb + } + + /// Set up the PeerNetwork, but do not bind it. 
+ pub fn setup_peer_network( + config: &Config, + atlas_config: &AtlasConfig, + burnchain: Burnchain, + ) -> PeerNetwork { + let sortdb = SortitionDB::open(&config.get_burn_db_file_path(), true) + .expect("Error while instantiating sor/tition db"); + + let epochs = SortitionDB::get_stacks_epochs(sortdb.conn()) + .expect("Error while loading stacks epochs"); + + let view = { + let sortition_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()) + .expect("Failed to get sortition tip"); + SortitionDB::get_burnchain_view(&sortdb.conn(), &burnchain, &sortition_tip).unwrap() + }; + + let peerdb = Self::setup_peer_db(config, &burnchain); + + let atlasdb = + AtlasDB::connect(atlas_config.clone(), &config.get_atlas_db_file_path(), true).unwrap(); + + let local_peer = match PeerDB::get_local_peer(peerdb.conn()) { + Ok(local_peer) => local_peer, + _ => panic!("Unable to retrieve local peer"), + }; + + let p2p_net = PeerNetwork::new( + peerdb, + atlasdb, + local_peer, + config.burnchain.peer_version, + burnchain, + view, + config.connection_options.clone(), + epochs, + ); - if let Some(poison_payload) = poison_opt { - let poison_microblock_tx = inner_generate_poison_microblock_tx( - keychain, - coinbase_nonce + 1, - poison_payload.clone(), - config.is_mainnet(), - config.burnchain.chain_id, - ); + p2p_net + } - let stacks_epoch = burn_db - .index_conn() - .get_stacks_epoch(burn_block.block_height as u32) - .expect("Could not find a stacks epoch."); + /// Main loop of the relayer. + /// Runs in a separate thread. + /// Continuously receives + pub fn relayer_main(mut relayer_thread: RelayerThread, relay_recv: Receiver<RelayerDirective>) { + while let Ok(directive) = relay_recv.recv() { + if !relayer_thread.globals.keep_running() { + break; + } - // submit the poison payload, privately, so we'll mine it when building the
- if let Err(e) = mem_pool.submit( - chain_state, - &parent_consensus_hash, - &stacks_parent_header.anchored_header.block_hash(), - &poison_microblock_tx, - Some(event_dispatcher), - &stacks_epoch.block_limit, - &stacks_epoch.epoch_id, - ) { - warn!( - "Detected but failed to mine poison-microblock transaction: {:?}", - &e - ); - } + if !relayer_thread.handle_directive(directive) { + break; } } - let (anchored_block, _, _) = match StacksBlockBuilder::build_anchored_block( - chain_state, - &burn_db.index_conn(), - mem_pool, - &stacks_parent_header, - parent_block_total_burn, - vrf_proof.clone(), - mblock_pubkey_hash, - &coinbase_tx, - config.make_block_builder_settings((last_mined_blocks.len() + 1) as u64, false), - Some(event_dispatcher), - ) { - Ok(block) => block, - Err(ChainstateError::InvalidStacksMicroblock(msg, mblock_header_hash)) => { - // part of the parent microblock stream is invalid, so try again - info!("Parent microblock stream is invalid; trying again without the offender {} (msg: {})", &mblock_header_hash, &msg); + // kill miner if it's running + signal_mining_blocked(relayer_thread.globals.get_miner_status()); - // truncate the stream - stacks_parent_header.microblock_tail = match microblock_info_opt { - Some((microblocks, _)) => { - let mut tail = None; - for mblock in microblocks.into_iter() { - if mblock.block_hash() == mblock_header_hash { - break; - } - tail = Some(mblock); - } - if let Some(ref t) = &tail { - debug!( - "New parent microblock stream tail is {} (seq {})", - t.block_hash(), - t.header.sequence - ); - } - tail.map(|t| t.header) - } - None => None, - }; + // set termination flag so other threads die + relayer_thread.globals.signal_stop(); - // try again - match StacksBlockBuilder::build_anchored_block( - chain_state, - &burn_db.index_conn(), - mem_pool, - &stacks_parent_header, - parent_block_total_burn, - vrf_proof.clone(), - mblock_pubkey_hash, - &coinbase_tx, - config.make_block_builder_settings((last_mined_blocks.len() + 1) as 
u64, false), - Some(event_dispatcher), - ) { - Ok(block) => block, - Err(e) => { - error!("Failure mining anchor block even after removing offending microblock {}: {}", &mblock_header_hash, &e); - return None; - } - } + debug!("Relayer exit!"); + } + + /// Main loop of the p2p thread. + /// Runs in a separate thread. + /// Continuously receives, until told otherwise. + pub fn p2p_main(mut p2p_thread: PeerThread, event_dispatcher: EventDispatcher) { + let (mut dns_resolver, mut dns_client) = DNSResolver::new(10); + // spawn a daemon thread that runs the DNS resolver. + // It will die when the rest of the system dies. + { + let _jh = thread::Builder::new() + .name("dns-resolver".to_string()) + .spawn(move || { + dns_resolver.thread_main(); + }) + .unwrap(); + } + + // NOTE: these must be instantiated in the thread context, since it can't be safely sent + // between threads + let fee_estimator_opt = p2p_thread.config.make_fee_estimator(); + let cost_estimator = p2p_thread + .config + .make_cost_estimator() + .unwrap_or_else(|| Box::new(UnitEstimator)); + let cost_metric = p2p_thread + .config + .make_cost_metric() + .unwrap_or_else(|| Box::new(UnitMetric)); + + // receive until we can't reach the receiver thread + loop { + if !p2p_thread.globals.keep_running() { + break; } - Err(e) => { - error!("Failure mining anchored block: {}", e); - return None; + if !p2p_thread.run_one_pass( + Some(&mut dns_client), + &event_dispatcher, + &cost_estimator, + &cost_metric, + fee_estimator_opt.as_ref(), + ) { + break; } - }; - let block_height = anchored_block.header.total_work.work; - info!( - "Succeeded assembling {} block #{}: {}, with {} txs, attempt {}", - if parent_block_total_burn == 0 { - "Genesis" - } else { - "Stacks" - }, - block_height, - anchored_block.block_hash(), - anchored_block.txs.len(), - attempt - ); + } - // let's figure out the recipient set! 
- let recipients = match get_next_recipients( - &burn_block, - chain_state, - burn_db, - burnchain, - &OnChainRewardSetProvider(), - ) { - Ok(x) => x, - Err(e) => { - error!("Failure fetching recipient set: {:?}", e); - return None; - } - }; + // kill miner + signal_mining_blocked(p2p_thread.globals.get_miner_status()); - let sunset_burn = burnchain.expected_sunset_burn(burn_block.block_height + 1, burn_fee_cap); - let rest_commit = burn_fee_cap - sunset_burn; + // set termination flag so other threads die + p2p_thread.globals.signal_stop(); - let commit_outs = if burn_block.block_height + 1 < burnchain.pox_constants.sunset_end - && !burnchain.is_in_prepare_phase(burn_block.block_height + 1) + // thread exited, so signal to the relayer thread to die. + while let Err(TrySendError::Full(_)) = p2p_thread + .globals + .relay_send + .try_send(RelayerDirective::Exit) { - RewardSetInfo::into_commit_outs(recipients, config.is_mainnet()) + warn!("Failed to direct relayer thread to exit, sleeping and trying again"); + thread::sleep(Duration::from_secs(5)); + } + info!("P2P thread exit!"); + } + + pub fn spawn( + runloop: &RunLoop, + globals: Globals, + // relay receiver endpoint for the p2p thread, so the relayer can feed it data to push + relay_recv: Receiver<RelayerDirective>, + // attachments receiver endpoint for the p2p thread, so the chains coordinator can feed it + // attachments it discovers + attachments_receiver: Receiver<HashSet<AttachmentInstance>>, + ) -> StacksNode { + let config = runloop.config().clone(); + let is_miner = runloop.is_miner(); + let burnchain = runloop.get_burnchain(); + let atlas_config = AtlasConfig::default(config.is_mainnet()); + let mut keychain = Keychain::default(config.node.seed.clone()); + + // we can call _open_ here rather than _connect_, since connect is first called in + // make_genesis_block + let mut sortdb = SortitionDB::open(&config.get_burn_db_file_path(), true) + .expect("Error while instantiating sor/tition db"); + + Self::setup_ast_size_precheck(&config, &mut sortdb);
+ + let _ = Self::setup_mempool_db(&config); + + let mut p2p_net = Self::setup_peer_network(&config, &atlas_config, burnchain.clone()); + let relayer = Relayer::from_p2p(&mut p2p_net); + + let local_peer = p2p_net.local_peer.clone(); + + let burnchain_signer = keychain.get_burnchain_signer(); + match monitoring::set_burnchain_signer(burnchain_signer.clone()) { + Err(e) => { + warn!("Failed to set global burnchain signer: {:?}", &e); + } + _ => {} + } + + let leader_key_registration_state = if config.node.mock_mining { + // mock mining, pretend to have a registered key + let vrf_public_key = keychain.rotate_vrf_keypair(VRF_MOCK_MINER_KEY); + LeaderKeyRegistrationState::Active(RegisteredKey { + block_height: 1, + op_vtxindex: 1, + vrf_public_key, + }) } else { - vec![StacksAddress::burn_address(config.is_mainnet())] + LeaderKeyRegistrationState::Inactive }; - // let's commit - let op = inner_generate_block_commit_op( - keychain.get_burnchain_signer(), - anchored_block.block_hash(), - rest_commit, - ®istered_key, - parent_block_burn_height - .try_into() - .expect("Could not convert parent block height into u32"), - parent_winning_vtxindex, - VRFSeed::from_proof(&vrf_proof), - commit_outs, - sunset_burn, - burn_block.block_height, - ); + let relayer_thread = RelayerThread::new(runloop, local_peer.clone(), relayer); + let relayer_thread_handle = thread::Builder::new() + .name(format!("relayer-{}", &local_peer.data_url)) + .spawn(move || { + Self::relayer_main(relayer_thread, relay_recv); + }) + .expect("FATAL: failed to start relayer thread"); + + let p2p_event_dispatcher = runloop.get_event_dispatcher(); + let p2p_thread = PeerThread::new(runloop, p2p_net, attachments_receiver); + let p2p_thread_handle = thread::Builder::new() + .name(format!( + "p2p-({},{})", + &config.node.p2p_bind, &config.node.rpc_bind + )) + .spawn(move || { + Self::p2p_main(p2p_thread, p2p_event_dispatcher); + }) + .expect("FATAL: failed to start p2p thread"); - let cur_burn_chain_tip = 
SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) - .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); + info!("Start HTTP server on: {}", &config.node.rpc_bind); + info!("Start P2P server on: {}", &config.node.p2p_bind); - // last chance -- confirm that the stacks tip and burnchain tip are unchanged (since it could have taken long - // enough to build this block that another block could have arrived). - if let Some(stacks_tip) = chain_state - .get_stacks_chain_tip(burn_db) - .expect("FATAL: could not query chain tip") - { - if stacks_tip.anchored_block_hash != anchored_block.header.parent_block - || parent_consensus_hash != stacks_tip.consensus_hash - || cur_burn_chain_tip.sortition_id != burn_block.sortition_id - { - debug!( - "Cancel block-commit; chain tip(s) have changed"; - "block_hash" => %anchored_block.block_hash(), - "tx_count" => anchored_block.txs.len(), - "target_height" => %anchored_block.header.total_work.work, - "parent_consensus_hash" => %parent_consensus_hash, - "parent_block_hash" => %anchored_block.header.parent_block, - "parent_microblock_hash" => %anchored_block.header.parent_microblock, - "parent_microblock_seq" => anchored_block.header.parent_microblock_sequence, - "old_tip_burn_block_hash" => %burn_block.burn_header_hash, - "old_tip_burn_block_height" => burn_block.block_height, - "old_tip_burn_block_sortition_id" => %burn_block.sortition_id, - "attempt" => attempt, - "new_stacks_tip_block_hash" => %stacks_tip.anchored_block_hash, - "new_stacks_tip_consensus_hash" => %stacks_tip.consensus_hash, - "new_tip_burn_block_height" => cur_burn_chain_tip.block_height, - "new_tip_burn_block_sortition_id" => %cur_burn_chain_tip.sortition_id, - "new_burn_block_sortition_id" => %cur_burn_chain_tip.sortition_id - ); - return None; - } + StacksNode { + config, + atlas_config, + globals, + burnchain_signer, + is_miner, + leader_key_registration_state, + p2p_thread_handle, + relayer_thread_handle, } + } - let mut op_signer 
= keychain.generate_op_signer(); - debug!( - "Submit block-commit"; - "block_hash" => %anchored_block.block_hash(), - "tx_count" => anchored_block.txs.len(), - "target_height" => anchored_block.header.total_work.work, - "parent_consensus_hash" => %parent_consensus_hash, - "parent_block_hash" => %anchored_block.header.parent_block, - "parent_microblock_hash" => %anchored_block.header.parent_microblock, - "parent_microblock_seq" => anchored_block.header.parent_microblock_sequence, - "tip_burn_block_hash" => %burn_block.burn_header_hash, - "tip_burn_block_height" => burn_block.block_height, - "tip_burn_block_sortition_id" => %burn_block.sortition_id, - "attempt" => attempt - ); + /// Manage the VRF public key registration state machine. + /// Tell the relayer thread to fire off a tenure and a block commit op, + /// if it is time to do so. + /// Called from the main thread. + /// Return true if we succeeded in carrying out the next task of the operation. + pub fn relayer_issue_tenure(&mut self) -> bool { + if !self.is_miner { + // node is a follower, don't try to issue a tenure + return true; + } - let res = bitcoin_controller.submit_operation(op, &mut op_signer, attempt); - if !res { - if !config.node.mock_mining { - warn!("Failed to submit Bitcoin transaction"); - return None; - } else { - debug!("Mock-mining enabled; not sending Bitcoin transaction"); + if let Some(burnchain_tip) = self.globals.get_last_sortition() { + match self.leader_key_registration_state { + LeaderKeyRegistrationState::Active(ref key) => { + debug!( + "Tenure: Using key {:?} off of {}", + &key.vrf_public_key, &burnchain_tip.burn_header_hash + ); + + self.globals + .relay_send + .send(RelayerDirective::RunTenure( + key.clone(), + burnchain_tip, + get_epoch_time_ms(), + )) + .is_ok() + } + LeaderKeyRegistrationState::Inactive => { + warn!( + "Tenure: skipped tenure because no active VRF key. Trying to register one." 
+ ); + self.leader_key_registration_state = LeaderKeyRegistrationState::Pending; + self.globals + .relay_send + .send(RelayerDirective::RegisterKey(burnchain_tip)) + .is_ok() + } + LeaderKeyRegistrationState::Pending => true, } + } else { + warn!("Tenure: Do not know the last burn block. As a miner, this is bad."); + true } + } - Some(( - AssembledAnchorBlock { - parent_consensus_hash: parent_consensus_hash, - my_burn_hash: burn_block.burn_header_hash, - anchored_block, - attempt, - }, - microblock_secret_key, - )) + /// Notify the relayer of a sortition, telling it to process the block + /// and advertize it if it was mined by the node. + /// returns _false_ if the relayer hung up the channel. + /// Called from the main thread. + pub fn relayer_sortition_notify(&self) -> bool { + if !self.is_miner { + // node is a follower, don't try to process my own tenure. + return true; + } + + if let Some(snapshot) = self.globals.get_last_sortition() { + debug!( + "Tenure: Notify sortition!"; + "consensus_hash" => %snapshot.consensus_hash, + "burn_block_hash" => %snapshot.burn_header_hash, + "winning_stacks_block_hash" => %snapshot.winning_stacks_block_hash, + "burn_block_height" => &snapshot.block_height, + "sortition_id" => %snapshot.sortition_id + ); + if snapshot.sortition { + return self + .globals + .relay_send + .send(RelayerDirective::ProcessTenure( + snapshot.consensus_hash.clone(), + snapshot.parent_burn_header_hash.clone(), + snapshot.winning_stacks_block_hash.clone(), + )) + .is_ok(); + } + } else { + debug!("Tenure: Notify sortition! No last burn block"); + } + true } /// Process a state coming from the burnchain, by extracting the validated KeyRegisterOp /// and inspecting if a sortition was won. /// `ibd`: boolean indicating whether or not we are in the initial block download + /// Called from the main thread. 
pub fn process_burnchain_state( &mut self, sortdb: &SortitionDB, @@ -2389,12 +4063,11 @@ impl StacksNode { } } - // no-op on UserBurnSupport ops are not supported / produced at this point. - - set_last_sortition(&mut self.last_sortition, block_snapshot); + self.globals.set_last_sortition(block_snapshot); last_sortitioned_block.map(|x| x.0) } + /// Join all inner threads pub fn join(self) { self.relayer_thread_handle.join().unwrap(); self.p2p_thread_handle.join().unwrap(); diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index 0af4e5c6e2..9bca951e47 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -180,7 +180,7 @@ fn spawn_peer( let fee_estimator = config.make_fee_estimator(); let handler_args = RPCHandlerArgs { - exit_at_block_height: exit_at_block_height.as_ref(), + exit_at_block_height: exit_at_block_height.clone(), cost_estimator: Some(cost_estimator.as_ref()), cost_metric: Some(metric.as_ref()), fee_estimator: fee_estimator.as_ref().map(|x| x.as_ref()), diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 68246da2da..1557a31ba5 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -7,6 +7,7 @@ use std::sync::atomic::AtomicU64; use std::sync::mpsc::sync_channel; use std::sync::mpsc::Receiver; use std::sync::Arc; +use std::sync::Mutex; use std::thread; use std::thread::JoinHandle; @@ -31,13 +32,16 @@ use stacks::util_lib::db::Error as db_error; use stx_genesis::GenesisData; use crate::monitoring::start_serving_monitoring_metrics; +use crate::neon_node::Globals; use crate::neon_node::StacksNode; +use crate::neon_node::RELAYER_MAX_BUFFER; use crate::node::use_test_genesis_chainstate; use crate::syncctl::{PoxSyncWatchdog, PoxSyncWatchdogComms}; use crate::{ node::{get_account_balances, get_account_lockups, get_names, get_namespaces}, BitcoinRegtestController, BurnchainController, Config, 
EventDispatcher, Keychain, }; +use stacks::chainstate::stacks::miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}; use super::RunLoopCallbacks; use libc; @@ -126,6 +130,7 @@ impl Counters { pub struct RunLoop { config: Config, pub callbacks: RunLoopCallbacks, + globals: Option<Globals>, counters: Counters, coordinator_channels: Option<(CoordinatorReceivers, CoordinatorChannels)>, should_keep_running: Arc<AtomicBool>, @@ -134,6 +139,9 @@ pub struct RunLoop { is_miner: Option<bool>, // not known until .start() is called burnchain: Option<Burnchain>, // not known until .start() is called pox_watchdog_comms: PoxSyncWatchdogComms, + /// NOTE: this is duplicated in self.globals, but it needs to be accessible before globals is + /// instantiated (namely, so the test framework can access it). + miner_status: Arc<Mutex<MinerStatus>>, } /// Write to stderr in an async-safe manner. @@ -160,6 +168,7 @@ impl RunLoop { let channels = CoordinatorCommunication::instantiate(); let should_keep_running = Arc::new(AtomicBool::new(true)); let pox_watchdog_comms = PoxSyncWatchdogComms::new(should_keep_running.clone()); + let miner_status = Arc::new(Mutex::new(MinerStatus::make_ready())); let mut event_dispatcher = EventDispatcher::new(); for observer in config.events_observers.iter() { @@ -168,6 +177,7 @@ impl RunLoop { Self { config, + globals: None, coordinator_channels: Some(channels), callbacks: RunLoopCallbacks::new(), counters: Counters::new(), @@ -177,9 +187,20 @@ impl RunLoop { is_miner: None, burnchain: None, pox_watchdog_comms, + miner_status, } } + pub fn get_globals(&self) -> Globals { + self.globals + .clone() + .expect("FATAL: globals not instantiated") + } + + fn set_globals(&mut self, globals: Globals) { + self.globals = Some(globals); + } + pub fn get_coordinator_channel(&self) -> Option<CoordinatorChannels> { self.coordinator_channels.as_ref().map(|x| x.1.clone()) } @@ -225,7 +246,7 @@ impl RunLoop { } pub fn get_termination_switch(&self) -> Arc<AtomicBool> { - self.should_keep_running.clone() + self.get_globals().should_keep_running.clone() }
pub fn get_burnchain(&self) -> Burnchain { @@ -240,6 +261,10 @@ .expect("FATAL: tried to get PoX watchdog before calling .start()") } + pub fn get_miner_status(&self) -> Arc<Mutex<MinerStatus>> { + self.miner_status.clone() + } + /// Set up termination handler. Have a signal set the `should_keep_running` atomic bool to /// false. Panics of called more than once. fn setup_termination_handler(&self) { @@ -393,6 +418,7 @@ &mut self, burnchain_config: &Burnchain, coordinator_receivers: CoordinatorReceivers, + miner_status: Arc<Mutex<MinerStatus>>, ) -> (JoinHandle<()>, Receiver<HashSet<AttachmentInstance>>) { let use_test_genesis_data = use_test_genesis_chainstate(&self.config); @@ -451,7 +477,10 @@ let (attachments_tx, attachments_rx) = sync_channel(ATTACHMENTS_CHANNEL_SIZE); let coordinator_thread_handle = thread::Builder::new() - .name("chains-coordinator".to_string()) + .name(format!( + "chains-coordinator-{}", + &moved_config.node.rpc_bind + )) .spawn(move || { let mut cost_estimator = moved_config.make_cost_estimator(); let mut fee_estimator = moved_config.make_fee_estimator(); @@ -465,6 +494,7 @@ moved_atlas_config, cost_estimator.as_deref_mut(), fee_estimator.as_deref_mut(), + miner_status, ); }) .expect("FATAL: failed to start chains coordinator thread"); @@ -543,21 +573,39 @@ let burnchain_config = burnchain.get_burnchain(); self.burnchain = Some(burnchain_config.clone()); + // can we mine? let is_miner = self.check_is_miner(&mut burnchain); self.is_miner = Some(is_miner); + // relayer linkup + let (relay_send, relay_recv) = sync_channel(RELAYER_MAX_BUFFER); + + // set up globals so other subsystems can instantiate off of the runloop state.
+ let globals = Globals::new( + coordinator_senders, + self.get_miner_status(), + relay_send, + self.counters.clone(), + self.pox_watchdog_comms.clone(), + self.should_keep_running.clone(), + ); + self.set_globals(globals.clone()); + // have headers; boot up the chains coordinator and instantiate the chain state - let (coordinator_thread_handle, attachments_rx) = - self.spawn_chains_coordinator(&burnchain_config, coordinator_receivers); + let (coordinator_thread_handle, attachments_rx) = self.spawn_chains_coordinator( + &burnchain_config, + coordinator_receivers, + globals.get_miner_status(), + ); self.instantiate_pox_watchdog(); self.start_prometheus(); // We announce a new burn block so that the chains coordinator // can resume prior work and handle eventual unprocessed sortitions // stored during a previous session. - coordinator_senders.announce_new_burn_block(); + globals.coord().announce_new_burn_block(); - // Make sure at least one sortition has happened + // Make sure at least one sortition has happened, and make sure it's globally available let sortdb = burnchain.sortdb_mut(); let (rc_aligned_height, sn) = RunLoop::get_reward_cycle_sortition_db_height(&sortdb, &burnchain_config); @@ -572,14 +620,11 @@ impl RunLoop { sn }; + globals.set_last_sortition(burnchain_tip_snapshot); + // Boot up the p2p network and relayer, and figure out how many sortitions we have so far // (it could be non-zero if the node is resuming from chainstate) - let mut node = StacksNode::spawn( - self, - Some(burnchain_tip_snapshot), - coordinator_senders.clone(), - attachments_rx, - ); + let mut node = StacksNode::spawn(self, globals.clone(), relay_recv, attachments_rx); // Wait for all pending sortitions to process let mut burnchain_tip = burnchain @@ -609,14 +654,14 @@ impl RunLoop { let mut last_tenure_sortition_height = 0; loop { - if !self.should_keep_running.load(Ordering::SeqCst) { + if !globals.keep_running() { // The p2p thread relies on the same atomic_bool, it will // 
discontinue its execution after completing its ongoing runloop epoch. info!("Terminating p2p process"); info!("Terminating relayer"); info!("Terminating chains-coordinator"); - coordinator_senders.stop_chains_coordinator(); + globals.coord().stop_chains_coordinator(); coordinator_thread_handle.join().unwrap(); node.join(); @@ -652,7 +697,7 @@ impl RunLoop { // runloop will cause the PoX sync watchdog to wait until it believes that the node has // obtained all the Stacks blocks it can. while burnchain_height <= target_burnchain_block_height { - if !self.should_keep_running.load(Ordering::SeqCst) { + if !globals.keep_running() { break; } @@ -686,9 +731,15 @@ impl RunLoop { ); let mut sort_count = 0; + signal_mining_blocked(globals.get_miner_status()); // first, let's process all blocks in (sortition_db_height, next_sortition_height] for block_to_process in (sortition_db_height + 1)..(next_sortition_height + 1) { + // stop mining so we can advance the sortition DB and so our + // ProcessTenure() directive (sent by relayer_sortition_notify() below) + // will be unblocked. + debug!("Runloop: disable miner to process sortitions"); + let block = { let ic = burnchain.sortdb_ref().index_conn(); SortitionDB::get_ancestor_snapshot(&ic, block_to_process, sortition_tip) @@ -718,6 +769,8 @@ impl RunLoop { } } + signal_mining_ready(globals.get_miner_status()); + num_sortitions_in_last_cycle = sort_count; debug!( "Synchronized burnchain up to block height {} from {} (chain tip height is {}); {} sortitions", @@ -730,7 +783,7 @@ impl RunLoop { // we may have downloaded all the blocks already, // so we can't rely on the relayer alone to // drive it. - coordinator_senders.announce_new_stacks_block(); + globals.coord().announce_new_stacks_block(); } if burnchain_height == target_burnchain_block_height @@ -771,10 +824,11 @@ impl RunLoop { ); last_tenure_sortition_height = sortition_db_height; } + if !node.relayer_issue_tenure() { // relayer hung up, exit. 
error!("Block relayer and miner hung up, exiting."); - continue; + break; } } } diff --git a/testnet/stacks-node/src/syncctl.rs b/testnet/stacks-node/src/syncctl.rs index 5d1c511eac..799f6bedf0 100644 --- a/testnet/stacks-node/src/syncctl.rs +++ b/testnet/stacks-node/src/syncctl.rs @@ -27,8 +27,6 @@ pub struct PoxSyncWatchdogComms { inv_sync_passes: Arc, /// how many times have we done a download pass? download_passes: Arc, - /// What's the burnchain tip we last saw? - burnchain_tip_height: Arc, /// What's our last IBD status? last_ibd: Arc, /// Should keep running? @@ -41,7 +39,6 @@ impl PoxSyncWatchdogComms { p2p_state_passes: Arc::new(AtomicU64::new(0)), inv_sync_passes: Arc::new(AtomicU64::new(0)), download_passes: Arc::new(AtomicU64::new(0)), - burnchain_tip_height: Arc::new(AtomicU64::new(0)), last_ibd: Arc::new(AtomicBool::new(true)), should_keep_running, } diff --git a/testnet/stacks-node/src/tenure.rs b/testnet/stacks-node/src/tenure.rs index 4aaa70ae2e..a7a5c2cba2 100644 --- a/testnet/stacks-node/src/tenure.rs +++ b/testnet/stacks-node/src/tenure.rs @@ -1,3 +1,4 @@ +/// Only used by the Helium (Mocknet) node use super::node::ChainTip; use super::{BurnchainTip, Config}; diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs index dd1bd10c02..90cbbcb64f 100644 --- a/testnet/stacks-node/src/tests/integrations.rs +++ b/testnet/stacks-node/src/tests/integrations.rs @@ -853,7 +853,9 @@ fn integration_test_get_info() { // test query parameters for v2/trait endpoint // evaluate check for explicit compliance against the chain tip of the first block (contract DNE at that block) - let path = format!("{}/v2/traits/{}/{}/{}/{}/{}?tip=753d84de5c475a85abd0eeb3ac87da03ff0f794507b60a3f66356425bc1dedaf", &http_origin, &contract_addr, "impl-trait-contract", &contract_addr, "get-info", "trait-1"); + // N.B. if the block version changes (e.g. 
due to a new release), this tip value + // will also change + let path = format!("{}/v2/traits/{}/{}/{}/{}/{}?tip=7d0edc26639d8da442da75999909f4fb0247f66d4d87f72e7ea63e5d9f7fabd0", &http_origin, &contract_addr, "impl-trait-contract", &contract_addr, "get-info", "trait-1"); let res = client.get(&path).send().unwrap(); eprintln!("Test: GET {}", path); assert_eq!(res.text().unwrap(), "No contract analysis found or trait definition not found"); diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 0106695d11..b7ac8a96c6 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -72,6 +72,8 @@ use crate::{ use crate::util::hash::{MerkleTree, Sha512Trunc256Sum}; use crate::util::secp256k1::MessageSignature; +use crate::neon_node::StacksNode; + use rand::Rng; use super::bitcoin_regtest::BitcoinCoreController; @@ -87,15 +89,20 @@ use clarity::vm::ast::ASTRules; use clarity::vm::MAX_CALL_STACK_DEPTH; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::stacks::miner::{ - TransactionErrorEvent, TransactionEvent, TransactionSkippedEvent, TransactionSuccessEvent, + signal_mining_blocked, signal_mining_ready, TransactionErrorEvent, TransactionEvent, + TransactionSuccessEvent, }; use stacks::net::RPCFeeEstimateResponse; use stacks::vm::ClarityName; use stacks::vm::ContractName; use std::convert::TryFrom; -pub fn neon_integration_test_conf() -> (Config, StacksAddress) { +use crate::stacks_common::types::PrivateKey; + +fn inner_neon_integration_test_conf(seed: Option<Vec<u8>>) -> (Config, StacksAddress) { let mut conf = super::new_test_conf(); + let seed = seed.unwrap_or(conf.node.seed.clone()); + conf.node.seed = seed; let keychain = Keychain::default(conf.node.seed.clone()); @@ -125,11 +132,22 @@ pub fn neon_integration_test_conf() -> (Config, StacksAddress) { conf.miner.first_attempt_time_ms = i64::max_value() as u64;
conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; + // if there's just one node, then this must be true for tests to pass + conf.miner.wait_for_block_download = false; + let miner_account = keychain.origin_address(conf.is_mainnet()).unwrap(); (conf, miner_account) } +pub fn neon_integration_test_conf() -> (Config, StacksAddress) { + inner_neon_integration_test_conf(None) +} + +pub fn neon_integration_test_conf_with_seed(seed: Vec) -> (Config, StacksAddress) { + inner_neon_integration_test_conf(Some(seed)) +} + pub mod test_observer { use std::convert::Infallible; use std::sync::Mutex; @@ -365,10 +383,20 @@ pub mod test_observer { } const PANIC_TIMEOUT_SECS: u64 = 600; + /// Returns `false` on a timeout, true otherwise. pub fn next_block_and_wait( btc_controller: &mut BitcoinRegtestController, blocks_processed: &Arc, +) -> bool { + next_block_and_wait_with_timeout(btc_controller, blocks_processed, PANIC_TIMEOUT_SECS) +} + +/// Returns `false` on a timeout, true otherwise. +pub fn next_block_and_wait_with_timeout( + btc_controller: &mut BitcoinRegtestController, + blocks_processed: &Arc, + timeout: u64, ) -> bool { let current = blocks_processed.load(Ordering::SeqCst); eprintln!( @@ -379,7 +407,7 @@ pub fn next_block_and_wait( btc_controller.build_next_block(1); let start = Instant::now(); while blocks_processed.load(Ordering::SeqCst) <= current { - if start.elapsed() > Duration::from_secs(PANIC_TIMEOUT_SECS) { + if start.elapsed() > Duration::from_secs(timeout) { error!("Timed out waiting for block to process, trying to continue test"); return false; } @@ -393,6 +421,36 @@ pub fn next_block_and_wait( true } +/// Returns `false` on a timeout, true otherwise. 
+pub fn next_block_and_iterate( + btc_controller: &mut BitcoinRegtestController, + blocks_processed: &Arc, + iteration_delay_ms: u64, +) -> bool { + let current = blocks_processed.load(Ordering::SeqCst); + eprintln!( + "Issuing block at {}, waiting for bump ({})", + get_epoch_time_secs(), + current + ); + btc_controller.build_next_block(1); + let start = Instant::now(); + while blocks_processed.load(Ordering::SeqCst) <= current { + if start.elapsed() > Duration::from_secs(PANIC_TIMEOUT_SECS) { + error!("Timed out waiting for block to process, trying to continue test"); + return false; + } + thread::sleep(Duration::from_millis(iteration_delay_ms)); + btc_controller.build_next_block(1); + } + eprintln!( + "Block bumped at {} ({})", + get_epoch_time_secs(), + blocks_processed.load(Ordering::SeqCst) + ); + true +} + /// This function will call `next_block_and_wait` until the burnchain height underlying `BitcoinRegtestController` /// reaches *exactly* `target_height`. /// @@ -593,6 +651,22 @@ pub fn get_chain_info(conf: &Config) -> RPCPeerInfoData { tip_info } +pub fn get_chain_info_opt(conf: &Config) -> Option { + let http_origin = format!("http://{}", &conf.node.rpc_bind); + let client = reqwest::blocking::Client::new(); + + // get the canonical chain tip + let path = format!("{}/v2/info", &http_origin); + let tip_info_opt = client + .get(&path) + .send() + .unwrap() + .json::() + .ok(); + + tip_info_opt +} + fn get_tip_anchored_block(conf: &Config) -> (ConsensusHash, StacksBlock) { let tip_info = get_chain_info(conf); @@ -1013,7 +1087,7 @@ fn deep_contract() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let mut blocks = test_observer::get_blocks(); + let blocks = test_observer::get_blocks(); let mut included_smart_contract = false; for block in blocks { let transactions = block.get("transactions").unwrap().as_array().unwrap(); @@ -1024,7 +1098,7 @@ fn deep_contract() { } 
let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::SmartContract(contract_call) = parsed.payload { + if let TransactionPayload::SmartContract(..) = parsed.payload { included_smart_contract = true; } } @@ -1804,6 +1878,244 @@ fn make_signed_microblock( mblock } +#[test] +#[ignore] +fn microblock_fork_poison_integration_test() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); + let spender_addr: PrincipalData = to_addr(&spender_sk).into(); + let second_spender_sk = StacksPrivateKey::from_hex(SK_2).unwrap(); + let second_spender_addr: PrincipalData = to_addr(&second_spender_sk).into(); + + let (mut conf, _) = neon_integration_test_conf(); + + conf.initial_balances.push(InitialBalance { + address: spender_addr.clone(), + amount: 100300, + }); + conf.initial_balances.push(InitialBalance { + address: second_spender_addr.clone(), + amount: 10000, + }); + + // we'll manually post a forked stream to the node + conf.node.mine_microblocks = false; + conf.burnchain.max_rbf = 1000000; + conf.node.wait_time_for_microblocks = 0; + conf.node.microblock_frequency = 1_000; + conf.miner.first_attempt_time_ms = 2_000; + conf.miner.subsequent_attempt_time_ms = 5_000; + conf.node.wait_time_for_blocks = 1_000; + + conf.miner.min_tx_fee = 1; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; + + test_observer::spawn(); + + conf.events_observers.push(EventObserverConfig { + endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = 
BitcoinRegtestController::new(conf.clone(), None); + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + let mut run_loop = neon::RunLoop::new(conf.clone()); + let blocks_processed = run_loop.get_blocks_processed_arc(); + let client = reqwest::blocking::Client::new(); + let miner_status = run_loop.get_miner_status(); + + let channel = run_loop.get_coordinator_channel().unwrap(); + + thread::spawn(move || run_loop.start(None, 0)); + + // give the run loop some time to start up! + wait_for_runloop(&blocks_processed); + + // first block wakes up the run loop + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // first block will hold our VRF registration + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // second block will be the first mined Stacks block + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + sleep_ms(10_000); + + // turn off the miner for now, so we can ensure both of these get accepted and preprocessed + // before we try and mine an anchor block that confirms them + eprintln!("Disable miner"); + signal_mining_blocked(miner_status.clone()); + sleep_ms(10_000); + + // our first spender + let account = get_account(&http_origin, &spender_addr); + assert_eq!(account.balance, 100300); + assert_eq!(account.nonce, 0); + + // our second spender + let account = get_account(&http_origin, &second_spender_addr); + assert_eq!(account.balance, 10000); + assert_eq!(account.nonce, 0); + + info!("Test microblock"); + + let recipient = StacksAddress::from_string(ADDR_4).unwrap(); + let unconfirmed_tx_bytes = + make_stacks_transfer_mblock_only(&spender_sk, 0, 1000, &recipient.into(), 1000); + let unconfirmed_tx = + StacksTransaction::consensus_deserialize(&mut &unconfirmed_tx_bytes[..]).unwrap(); + let second_unconfirmed_tx_bytes = + make_stacks_transfer_mblock_only(&second_spender_sk, 0, 1000, 
&recipient.into(), 1500); + let second_unconfirmed_tx = + StacksTransaction::consensus_deserialize(&mut &second_unconfirmed_tx_bytes[..]).unwrap(); + + // TODO (hack) instantiate the sortdb in the burnchain + let _ = btc_regtest_controller.sortdb_mut(); + + // put each into a microblock + let (first_microblock, second_microblock) = { + let tip_info = get_chain_info(&conf); + let stacks_tip = tip_info.stacks_tip; + + let (consensus_hash, stacks_block) = get_tip_anchored_block(&conf); + let tip_hash = + StacksBlockHeader::make_index_block_hash(&consensus_hash, &stacks_block.block_hash()); + let privk = + find_microblock_privkey(&conf, &stacks_block.header.microblock_pubkey_hash, 1024) + .unwrap(); + let (mut chainstate, _) = StacksChainState::open( + false, + CHAIN_ID_TESTNET, + &conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + chainstate + .reload_unconfirmed_state(&btc_regtest_controller.sortdb_ref().index_conn(), tip_hash) + .unwrap(); + let first_microblock = make_microblock( + &privk, + &mut chainstate, + &btc_regtest_controller.sortdb_ref().index_conn(), + consensus_hash, + stacks_block.clone(), + vec![unconfirmed_tx], + ); + + eprintln!( + "Created first microblock: {}: {:?}", + &first_microblock.block_hash(), + &first_microblock + ); + + // NOTE: this microblock conflicts because it has the same parent as the first microblock, + // even though it's seq is different. 
+ let second_microblock = + make_signed_microblock(&privk, vec![second_unconfirmed_tx], stacks_tip, 1); + + eprintln!( + "Created second conflicting microblock: {}: {:?}", + &second_microblock.block_hash(), + &second_microblock + ); + (first_microblock, second_microblock) + }; + + let mut microblock_bytes = vec![]; + first_microblock + .consensus_serialize(&mut microblock_bytes) + .unwrap(); + + // post the first microblock + let path = format!("{}/v2/microblocks", &http_origin); + let res: String = client + .post(&path) + .header("Content-Type", "application/octet-stream") + .body(microblock_bytes.clone()) + .send() + .unwrap() + .json() + .unwrap(); + + assert_eq!(res, format!("{}", &first_microblock.block_hash())); + + let mut second_microblock_bytes = vec![]; + second_microblock + .consensus_serialize(&mut second_microblock_bytes) + .unwrap(); + + // post the second microblock + let path = format!("{}/v2/microblocks", &http_origin); + let res: String = client + .post(&path) + .header("Content-Type", "application/octet-stream") + .body(second_microblock_bytes.clone()) + .send() + .unwrap() + .json() + .unwrap(); + + assert_eq!(res, format!("{}", &second_microblock.block_hash())); + + eprintln!("Wait 10s and re-enable miner"); + sleep_ms(10_000); + + // resume mining + eprintln!("Enable miner"); + signal_mining_ready(miner_status.clone()); + sleep_ms(10_000); + + eprintln!("Attempt to mine poison-microblock"); + let mut found = false; + for _i in 0..10 { + if found { + break; + } + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + let blocks = test_observer::get_blocks(); + for block in blocks.iter() { + let transactions = block.get("transactions").unwrap().as_array().unwrap(); + for tx in transactions.iter() { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut 
&tx_bytes[..]).unwrap(); + + if let TransactionPayload::PoisonMicroblock(..) = &parsed.payload { + found = true; + break; + } + } + } + } + + assert!( + found, + "Did not find poison microblock tx in any mined block" + ); + + test_observer::clear(); + channel.stop_chains_coordinator(); +} + #[test] #[ignore] fn microblock_integration_test() { @@ -1818,6 +2130,7 @@ fn microblock_integration_test() { let (mut conf, miner_account) = neon_integration_test_conf(); + conf.miner.wait_for_block_download = false; conf.initial_balances.push(InitialBalance { address: spender_addr.clone(), amount: 100300, @@ -1934,9 +2247,6 @@ fn microblock_integration_test() { // put each into a microblock let (first_microblock, second_microblock) = { - let tip_info = get_chain_info(&conf); - let stacks_tip = tip_info.stacks_tip; - let (consensus_hash, stacks_block) = get_tip_anchored_block(&conf); let tip_hash = StacksBlockHeader::make_index_block_hash(&consensus_hash, &stacks_block.block_hash()); @@ -1963,9 +2273,26 @@ fn microblock_integration_test() { vec![unconfirmed_tx], ); + eprintln!( + "Created first microblock: {}: {:?}", + &first_microblock.block_hash(), + &first_microblock + ); + /* let second_microblock = make_signed_microblock(&privk, vec![second_unconfirmed_tx], stacks_tip, 1); - + */ + let second_microblock = make_signed_microblock( + &privk, + vec![second_unconfirmed_tx], + first_microblock.block_hash(), + 1, + ); + eprintln!( + "Created second microblock: {}: {:?}", + &second_microblock.block_hash(), + &second_microblock + ); (first_microblock, second_microblock) }; @@ -2217,8 +2544,8 @@ fn microblock_integration_test() { for next_nonce in 2..5 { // verify that the microblock miner can automatically pick up transactions debug!( - "Try to send unconfirmed tx from {} to {}", - &spender_addr, &recipient + "Try to send unconfirmed tx from {} to {} nonce {}", + &spender_addr, &recipient, next_nonce ); let unconfirmed_tx_bytes = make_stacks_transfer_mblock_only( &spender_sk, @@ 
-2245,6 +2572,7 @@ fn microblock_integration_test() { .txid() .to_string() ); + eprintln!("Sent {}", &res); } else { eprintln!("{}", res.text().unwrap()); panic!(""); @@ -2508,113 +2836,17 @@ fn miner_submit_twice() { conf.miner.first_attempt_time_ms = 20; conf.miner.subsequent_attempt_time_ms = 30_000; - // note: this test depends on timing of how long it takes to assemble a block, - // but it won't flake if the miner behaves correctly: a correct miner should - // always be able to mine both transactions by the end of this test. an incorrect - // miner may sometimes pass this test though, if they can successfully mine a - // 2-transaction block in 20 ms *OR* if they are slow enough that they mine a - // 0-transaction block in that time (because this would trigger a re-attempt, which - // is exactly what this test is measuring). - // - // The "fixed" behavior is the corner case where a miner did a "first attempt", which - // included 1 or more transaction, but they could have made a second attempt with - // more transactions. - - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .map_err(|_e| ()) - .expect("Failed starting bitcoind"); - - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - let http_origin = format!("http://{}", &conf.node.rpc_bind); - - btc_regtest_controller.bootstrap_chain(201); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf); - let blocks_processed = run_loop.get_blocks_processed_arc(); - - let channel = run_loop.get_coordinator_channel().unwrap(); - - thread::spawn(move || run_loop.start(None, 0)); - - // give the run loop some time to start up! 
- wait_for_runloop(&blocks_processed); - - // first block wakes up the run loop - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // first block will hold our VRF registration - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // second block will be the first mined Stacks block - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - submit_tx(&http_origin, &tx_1); - submit_tx(&http_origin, &tx_2); - - // mine a couple more blocks - // waiting enough time between them that a second attempt could be made. - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - thread::sleep(Duration::from_secs(15)); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // 1 transaction mined - let account = get_account(&http_origin, &spender_addr); - assert_eq!(account.nonce, 2); - - channel.stop_chains_coordinator(); -} - -#[test] -#[ignore] -fn mining_transactions_is_fair() { - // test that origin addresses with higher-than-min-fee transactions pending will get considered - // in a round-robin fashion, even if one origin has waaaaaay more outstanding transactions than - // the other (and with higher fees). 
- if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - let spender_sks: Vec<_> = (0..2) - .into_iter() - .map(|_| StacksPrivateKey::new()) - .collect(); - let spender_addrs: Vec = spender_sks.iter().map(|x| to_addr(x).into()).collect(); - - let mut txs = vec![]; - let recipient = StacksAddress::from_string(ADDR_4).unwrap(); - - // spender 0 sends 20 txs, at over 2000 uSTX tx fee - for i in 0..20 { - let tx = make_stacks_transfer(&spender_sks[0], i, 2000 * (21 - i), &recipient.into(), 1000); - txs.push(tx); - } - - // spender 1 sends 1 tx, that is roughly the middle rate among the spender[0] transactions - let tx = make_stacks_transfer(&spender_sks[1], 0, 20_000, &recipient.into(), 1000); - txs.push(tx); - - let (mut conf, _) = neon_integration_test_conf(); - for spender_addr in spender_addrs.iter() { - conf.initial_balances.push(InitialBalance { - address: spender_addr.clone(), - amount: 1049230, - }); - } - - // all transactions have high-enough fees... - conf.miner.min_tx_fee = 1; - conf.miner.first_attempt_time_ms = u64::max_value(); - conf.miner.subsequent_attempt_time_ms = u64::max_value(); - - test_observer::spawn(); - conf.events_observers.push(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - }); + // note: this test depends on timing of how long it takes to assemble a block, + // but it won't flake if the miner behaves correctly: a correct miner should + // always be able to mine both transactions by the end of this test. an incorrect + // miner may sometimes pass this test though, if they can successfully mine a + // 2-transaction block in 20 ms *OR* if they are slow enough that they mine a + // 0-transaction block in that time (because this would trigger a re-attempt, which + // is exactly what this test is measuring). 
+ // + // The "fixed" behavior is the corner case where a miner did a "first attempt", which + // included 1 or more transaction, but they could have made a second attempt with + // more transactions. let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -2648,51 +2880,18 @@ fn mining_transactions_is_fair() { // second block will be the first mined Stacks block next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - for tx in txs.iter() { - submit_tx(&http_origin, tx); - } + submit_tx(&http_origin, &tx_1); + submit_tx(&http_origin, &tx_2); - // mine a couple more blocks -- all 21 transactions should get mined; the same origin should be - // considered more than once per block, but all origins should be considered - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // mine a couple more blocks + // waiting enough time between them that a second attempt could be made. next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + thread::sleep(Duration::from_secs(15)); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let blocks = test_observer::get_blocks(); - - let mut found_sender_1 = false; - let mut sender_1_is_last = true; - - for block in blocks.iter() { - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::TokenTransfer(..) 
= parsed.payload { - if parsed.auth().origin().address_testnet() == to_addr(&spender_sks[1]) { - found_sender_1 = true; - } else if found_sender_1 { - // some tx from sender 0 got mined after the one from sender 1, which is what - // we want -- sender 1 shouldn't monopolize mempool consideration - sender_1_is_last = false; - } - } - } - } - - assert!(found_sender_1); - assert!(!sender_1_is_last); - - // all transactions mined - let account_0 = get_account(&http_origin, &spender_addrs[0]); - assert_eq!(account_0.nonce, 20); - - let account_1 = get_account(&http_origin, &spender_addrs[1]); - assert_eq!(account_1.nonce, 1); + // 1 transaction mined + let account = get_account(&http_origin, &spender_addr); + assert_eq!(account.nonce, 2); channel.stop_chains_coordinator(); } @@ -3887,6 +4086,14 @@ fn cost_voting_integration() { let (mut conf, miner_account) = neon_integration_test_conf(); + conf.miner.microblock_attempt_time_ms = 1_000; + conf.node.wait_time_for_microblocks = 0; + conf.node.microblock_frequency = 1_000; + conf.miner.first_attempt_time_ms = 2_000; + conf.miner.subsequent_attempt_time_ms = 5_000; + conf.burnchain.max_rbf = 10_000_000; + conf.node.wait_time_for_blocks = 1_000; + test_observer::spawn(); conf.events_observers.push(EventObserverConfig { @@ -4199,7 +4406,7 @@ fn mining_events_integration_test() { }); conf.node.mine_microblocks = true; - conf.node.wait_time_for_microblocks = 30000; + conf.node.wait_time_for_microblocks = 1000; conf.node.microblock_frequency = 1000; conf.miner.min_tx_fee = 1; @@ -4644,7 +4851,7 @@ fn microblock_limit_hit_integration_test() { 100, ); - let (mut conf, miner_account) = neon_integration_test_conf(); + let (mut conf, _) = neon_integration_test_conf(); conf.initial_balances.push(InitialBalance { address: addr.clone().into(), @@ -4660,9 +4867,14 @@ fn microblock_limit_hit_integration_test() { }); conf.node.mine_microblocks = true; - conf.node.wait_time_for_microblocks = 30000; + // conf.node.wait_time_for_microblocks = 
30000; + conf.node.wait_time_for_microblocks = 1000; conf.node.microblock_frequency = 1000; + conf.miner.microblock_attempt_time_ms = i64::max_value() as u64; + conf.burnchain.max_rbf = 10_000_000; + conf.node.wait_time_for_blocks = 1_000; + conf.miner.min_tx_fee = 1; conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; @@ -4811,7 +5023,8 @@ fn block_large_tx_integration_test() { let spender_sk = StacksPrivateKey::new(); let spender_addr = to_addr(&spender_sk); - let tx = make_contract_publish(&spender_sk, 0, 150_000, "small", &small_contract_src); + // higher fee for tx means it will get mined first + let tx = make_contract_publish(&spender_sk, 0, 671_000, "small", &small_contract_src); let tx_2 = make_contract_publish(&spender_sk, 1, 670_000, "over", &oversize_contract_src); let (mut conf, miner_account) = neon_integration_test_conf(); @@ -4831,6 +5044,10 @@ fn block_large_tx_integration_test() { conf.node.wait_time_for_microblocks = 30000; conf.node.microblock_frequency = 1000; + conf.miner.microblock_attempt_time_ms = i64::max_value() as u64; + conf.burnchain.max_rbf = 10_000_000; + conf.node.wait_time_for_blocks = 1_000; + conf.miner.min_tx_fee = 1; conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; @@ -4875,16 +5092,16 @@ fn block_large_tx_integration_test() { assert_eq!(account.nonce, 0); assert_eq!(account.balance, 10000000); - submit_tx(&http_origin, &tx); + let normal_txid = submit_tx(&http_origin, &tx); let huge_txid = submit_tx(&http_origin, &tx_2); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sleep_ms(20_000); - - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + eprintln!( + "Try to mine a too-big tx. 
Normal = {}, TooBig = {}", + &normal_txid, &huge_txid + ); + next_block_and_wait_with_timeout(&mut btc_regtest_controller, &blocks_processed, 1200); - let res = get_account(&http_origin, &spender_addr); - assert_eq!(res.nonce, 1); + eprintln!("Finished trying to mine a too-big tx"); let dropped_txs = test_observer::get_memtx_drops(); assert_eq!(dropped_txs.len(), 1); @@ -4969,6 +5186,11 @@ fn microblock_large_tx_integration_test_FLAKY() { conf.miner.first_attempt_time_ms = i64::max_value() as u64; conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; + conf.miner.microblock_attempt_time_ms = 1_000; + conf.node.wait_time_for_microblocks = 0; + conf.burnchain.max_rbf = 10_000_000; + conf.node.wait_time_for_blocks = 1_000; + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller .start_bitcoind() @@ -5107,6 +5329,12 @@ fn pox_integration_test() { amount: third_bal, }); + conf.miner.microblock_attempt_time_ms = 1_000; + conf.node.wait_time_for_microblocks = 0; + conf.node.microblock_frequency = 1_000; + conf.burnchain.max_rbf = 10_000_000; + conf.node.wait_time_for_blocks = 1_000; + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller .start_bitcoind() @@ -7540,7 +7768,7 @@ fn test_flash_block_skip_tenure() { // second block will be the first mined Stacks block next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - // fault injection: force tenures to take 11 seconds + // fault injection: force tenures to take too long std::env::set_var("STX_TEST_SLOW_TENURE".to_string(), "11000".to_string()); for i in 0..10 { @@ -7560,6 +7788,7 @@ fn test_flash_block_skip_tenure() { eprintln!("Miner account: {}", miner_account); let account = get_account(&http_origin, &miner_account); + eprintln!("account = {:?}", &account); assert_eq!(account.balance, 0); assert_eq!(account.nonce, 2); @@ -7627,6 +7856,7 @@ fn test_problematic_txs_are_not_stored() { }); let mut btcd_controller = 
BitcoinCoreController::new(conf.clone()); + btcd_controller .start_bitcoind() .map_err(|_e| ()) @@ -7716,6 +7946,8 @@ fn test_problematic_txs_are_not_stored() { assert!(get_unconfirmed_tx(&http_origin, &tx_edge_txid).is_some()); assert!(get_unconfirmed_tx(&http_origin, &tx_exceeds_txid).is_none()); assert!(get_unconfirmed_tx(&http_origin, &tx_high_txid).is_none()); + + channel.stop_chains_coordinator(); } fn find_new_files(dirp: &str, prev_files: &HashSet) -> (Vec, HashSet) { @@ -7724,7 +7956,7 @@ fn find_new_files(dirp: &str, prev_files: &HashSet) -> (Vec, Has let cur_files = fs::read_dir(dirp).unwrap(); let mut new_files = vec![]; let mut cur_files_set = HashSet::new(); - for mut cur_file in cur_files.into_iter() { + for cur_file in cur_files.into_iter() { let cur_file = cur_file.unwrap(); let cur_file_fullpath = dirpp.join(cur_file.path()).to_str().unwrap().to_string(); test_debug!("file in {}: {}", dirp, &cur_file_fullpath); @@ -7945,7 +8177,7 @@ fn test_problematic_blocks_are_not_mined() { let old_tip_info = get_chain_info(&conf); let mut all_new_files = vec![]; - for i in 0..5 { + for _i in 0..5 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let cur_files_old = cur_files.clone(); let (mut new_files, cur_files_new) = find_new_files(bad_blocks_dir, &cur_files_old); @@ -7964,7 +8196,7 @@ fn test_problematic_blocks_are_not_mined() { assert_eq!(all_new_files.len(), 0); // one block contained tx_exceeds - let mut blocks = test_observer::get_blocks(); + let blocks = test_observer::get_blocks(); let mut found = false; for block in blocks { let transactions = block.get("transactions").unwrap().as_array().unwrap(); @@ -7975,7 +8207,7 @@ fn test_problematic_blocks_are_not_mined() { } let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::SmartContract(ref contract_call) = &parsed.payload { + if let TransactionPayload::SmartContract(..) 
= &parsed.payload { if parsed.txid() == tx_exceeds_txid { found = true; break; @@ -7987,7 +8219,7 @@ fn test_problematic_blocks_are_not_mined() { assert!(found); let (tip, cur_ast_rules) = { - let mut sortdb = btc_regtest_controller.sortdb_mut(); + let sortdb = btc_regtest_controller.sortdb_mut(); let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); @@ -8010,21 +8242,23 @@ fn test_problematic_blocks_are_not_mined() { ); btc_regtest_controller.build_next_block(1); + // wait for runloop to advance loop { sleep_ms(1_000); - let mut sortdb = btc_regtest_controller.sortdb_mut(); + let sortdb = btc_regtest_controller.sortdb_mut(); let new_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); if new_tip.block_height > tip.block_height { break; } } - let (tip, cur_ast_rules) = { - let mut sortdb = btc_regtest_controller.sortdb_mut(); + + let cur_ast_rules = { + let sortdb = btc_regtest_controller.sortdb_mut(); let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); - (tip, cur_ast_rules) + cur_ast_rules }; // new rules took effect @@ -8037,7 +8271,7 @@ fn test_problematic_blocks_are_not_mined() { eprintln!("old_tip_info = {:?}", &old_tip_info); // mine some blocks, and log problematic blocks - for i in 0..5 { + for _i in 0..6 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let cur_files_old = cur_files.clone(); let (mut new_files, cur_files_new) = find_new_files(bad_blocks_dir, &cur_files_old); @@ -8063,8 +8297,7 @@ fn test_problematic_blocks_are_not_mined() { } // no block contained the tx_high bad transaction, ever - let mut blocks = test_observer::get_blocks(); - let mut found = false; + let blocks = 
test_observer::get_blocks(); for block in blocks { let transactions = block.get("transactions").unwrap().as_array().unwrap(); for tx in transactions.iter() { @@ -8074,7 +8307,7 @@ fn test_problematic_blocks_are_not_mined() { } let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::SmartContract(ref contract_call) = &parsed.payload { + if let TransactionPayload::SmartContract(..) = &parsed.payload { assert!(parsed.txid() != tx_high_txid); } } @@ -8086,10 +8319,7 @@ fn test_problematic_blocks_are_not_mined() { // verify that a follower node that boots up with this node as a bootstrap peer will process // all of the blocks available, even if they are problematic, with the checks on. - let (follower_conf, follower_blocks_processed, pox_sync_comms, follower_channel) = - spawn_follower_node(&conf); - let follower_http_origin = format!("http://{}", &follower_conf.node.rpc_bind); - let follower_tip_info = get_chain_info(&follower_conf); + let (follower_conf, _, pox_sync_comms, follower_channel) = spawn_follower_node(&conf); eprintln!( "\nFollower booted on port {},{}\n", @@ -8298,7 +8528,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { let old_tip_info = get_chain_info(&conf); let mut all_new_files = vec![]; - for i in 0..5 { + for _i in 0..5 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let cur_files_old = cur_files.clone(); let (mut new_files, cur_files_new) = find_new_files(bad_blocks_dir, &cur_files_old); @@ -8317,7 +8547,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { assert_eq!(all_new_files.len(), 0); // one block contained tx_exceeds - let mut blocks = test_observer::get_blocks(); + let blocks = test_observer::get_blocks(); let mut found = false; for block in blocks { let transactions = block.get("transactions").unwrap().as_array().unwrap(); @@ -8328,7 +8558,7 @@ fn 
test_problematic_blocks_are_not_relayed_or_stored() { } let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::SmartContract(ref contract_call) = &parsed.payload { + if let TransactionPayload::SmartContract(..) = &parsed.payload { if parsed.txid() == tx_exceeds_txid { found = true; break; @@ -8340,7 +8570,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { assert!(found); let (tip, cur_ast_rules) = { - let mut sortdb = btc_regtest_controller.sortdb_mut(); + let sortdb = btc_regtest_controller.sortdb_mut(); let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); @@ -8354,18 +8584,18 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { // wait for runloop to advance loop { sleep_ms(1_000); - let mut sortdb = btc_regtest_controller.sortdb_mut(); + let sortdb = btc_regtest_controller.sortdb_mut(); let new_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); if new_tip.block_height > tip.block_height { break; } } - let (tip, cur_ast_rules) = { - let mut sortdb = btc_regtest_controller.sortdb_mut(); + let cur_ast_rules = { + let sortdb = btc_regtest_controller.sortdb_mut(); let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); - (tip, cur_ast_rules) + cur_ast_rules }; // new rules took effect @@ -8374,13 +8604,13 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { // the follower we will soon boot up will start applying the new AST rules at this height. 
// Make it so the miner does *not* follow the rules { - let mut sortdb = btc_regtest_controller.sortdb_mut(); + let sortdb = btc_regtest_controller.sortdb_mut(); let mut tx = sortdb.tx_begin().unwrap(); SortitionDB::override_ast_rule_height(&mut tx, ASTRules::PrecheckSize, 10_000).unwrap(); tx.commit().unwrap(); } let cur_ast_rules = { - let mut sortdb = btc_regtest_controller.sortdb_mut(); + let sortdb = btc_regtest_controller.sortdb_mut(); let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); @@ -8411,7 +8641,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { eprintln!("old_tip_info = {:?}", &old_tip_info); // mine some blocks, and log problematic blocks - for i in 0..5 { + for _i in 0..6 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let cur_files_old = cur_files.clone(); let (mut new_files, cur_files_new) = find_new_files(bad_blocks_dir, &cur_files_old); @@ -8419,7 +8649,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { cur_files = cur_files_new; let cur_ast_rules = { - let mut sortdb = btc_regtest_controller.sortdb_mut(); + let sortdb = btc_regtest_controller.sortdb_mut(); let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); @@ -8441,7 +8671,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { assert_eq!(all_new_files.len(), 1); // tx_high got mined by the miner - let mut blocks = test_observer::get_blocks(); + let blocks = test_observer::get_blocks(); let mut bad_block_height = None; for block in blocks { let transactions = block.get("transactions").unwrap().as_array().unwrap(); @@ -8452,7 +8682,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { } let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = 
StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::SmartContract(ref contract_call) = &parsed.payload { + if let TransactionPayload::SmartContract(..) = &parsed.payload { if parsed.txid() == tx_high_txid { bad_block_height = Some(block.get("block_height").unwrap().as_u64().unwrap()); } @@ -8469,10 +8699,7 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { // verify that a follower node that boots up with this node as a bootstrap peer will process // all of the blocks available, even if they are problematic, with the checks on. - let (follower_conf, follower_blocks_processed, pox_sync_comms, follower_channel) = - spawn_follower_node(&conf); - let follower_http_origin = format!("http://{}", &follower_conf.node.rpc_bind); - let follower_tip_info = get_chain_info(&follower_conf); + let (follower_conf, _, pox_sync_comms, follower_channel) = spawn_follower_node(&conf); eprintln!( "\nFollower booted on port {},{}\n", @@ -8691,7 +8918,7 @@ fn test_problematic_microblocks_are_not_mined() { let old_tip_info = get_chain_info(&conf); let mut all_new_files = vec![]; - for i in 0..5 { + for _i in 0..5 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let cur_files_old = cur_files.clone(); let (mut new_files, cur_files_new) = find_new_files(bad_blocks_dir, &cur_files_old); @@ -8713,7 +8940,7 @@ fn test_problematic_microblocks_are_not_mined() { assert_eq!(all_new_files.len(), 0); // one microblock contained tx_exceeds - let mut microblocks = test_observer::get_microblocks(); + let microblocks = test_observer::get_microblocks(); let mut found = false; for microblock in microblocks { let transactions = microblock.get("transactions").unwrap().as_array().unwrap(); @@ -8724,7 +8951,7 @@ fn test_problematic_microblocks_are_not_mined() { } let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let 
TransactionPayload::SmartContract(ref contract_call) = &parsed.payload { + if let TransactionPayload::SmartContract(..) = &parsed.payload { if parsed.txid() == tx_exceeds_txid { found = true; break; @@ -8736,7 +8963,7 @@ fn test_problematic_microblocks_are_not_mined() { assert!(found); let (tip, cur_ast_rules) = { - let mut sortdb = btc_regtest_controller.sortdb_mut(); + let sortdb = btc_regtest_controller.sortdb_mut(); let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); @@ -8763,18 +8990,18 @@ fn test_problematic_microblocks_are_not_mined() { // wait for runloop to advance loop { sleep_ms(1_000); - let mut sortdb = btc_regtest_controller.sortdb_mut(); + let sortdb = btc_regtest_controller.sortdb_mut(); let new_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); if new_tip.block_height > tip.block_height { break; } } - let (tip, cur_ast_rules) = { - let mut sortdb = btc_regtest_controller.sortdb_mut(); + let cur_ast_rules = { + let sortdb = btc_regtest_controller.sortdb_mut(); let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); - (tip, cur_ast_rules) + cur_ast_rules }; // new rules took effect @@ -8787,7 +9014,7 @@ fn test_problematic_microblocks_are_not_mined() { eprintln!("old_tip_info = {:?}", &old_tip_info); // mine some microblocks, and log problematic microblocks - for i in 0..5 { + for _i in 0..6 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let cur_files_old = cur_files.clone(); let (mut new_files, cur_files_new) = find_new_files(bad_blocks_dir, &cur_files_old); @@ -8813,8 +9040,7 @@ fn test_problematic_microblocks_are_not_mined() { } // no microblock contained the tx_high bad transaction, 
ever - let mut microblocks = test_observer::get_microblocks(); - let mut found = false; + let microblocks = test_observer::get_microblocks(); for microblock in microblocks { let transactions = microblock.get("transactions").unwrap().as_array().unwrap(); for tx in transactions.iter() { @@ -8824,7 +9050,7 @@ fn test_problematic_microblocks_are_not_mined() { } let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::SmartContract(ref contract_call) = &parsed.payload { + if let TransactionPayload::SmartContract(..) = &parsed.payload { assert!(parsed.txid() != tx_high_txid); } } @@ -8836,10 +9062,7 @@ fn test_problematic_microblocks_are_not_mined() { // verify that a follower node that boots up with this node as a bootstrap peer will process // all of the blocks available, even if they are problematic, with the checks on. - let (follower_conf, follower_blocks_processed, pox_sync_comms, follower_channel) = - spawn_follower_node(&conf); - let follower_http_origin = format!("http://{}", &follower_conf.node.rpc_bind); - let follower_tip_info = get_chain_info(&follower_conf); + let (follower_conf, _, pox_sync_comms, follower_channel) = spawn_follower_node(&conf); eprintln!( "\nFollower booted on port {},{}\n", @@ -9056,7 +9279,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { let old_tip_info = get_chain_info(&conf); let mut all_new_files = vec![]; - for i in 0..5 { + for _i in 0..5 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let cur_files_old = cur_files.clone(); let (mut new_files, cur_files_new) = find_new_files(bad_blocks_dir, &cur_files_old); @@ -9078,7 +9301,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { assert_eq!(all_new_files.len(), 0); // one microblock contained tx_exceeds - let mut microblocks = test_observer::get_microblocks(); + let microblocks = test_observer::get_microblocks(); let mut found = 
false; for microblock in microblocks { let transactions = microblock.get("transactions").unwrap().as_array().unwrap(); @@ -9089,7 +9312,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { } let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::SmartContract(ref contract_call) = &parsed.payload { + if let TransactionPayload::SmartContract(..) = &parsed.payload { if parsed.txid() == tx_exceeds_txid { found = true; break; @@ -9101,7 +9324,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { assert!(found); let (tip, cur_ast_rules) = { - let mut sortdb = btc_regtest_controller.sortdb_mut(); + let sortdb = btc_regtest_controller.sortdb_mut(); let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); @@ -9115,18 +9338,18 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // wait for runloop to advance loop { sleep_ms(1_000); - let mut sortdb = btc_regtest_controller.sortdb_mut(); + let sortdb = btc_regtest_controller.sortdb_mut(); let new_tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); if new_tip.block_height > tip.block_height { break; } } - let (tip, cur_ast_rules) = { - let mut sortdb = btc_regtest_controller.sortdb_mut(); + let cur_ast_rules = { + let sortdb = btc_regtest_controller.sortdb_mut(); let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); - (tip, cur_ast_rules) + cur_ast_rules }; // new rules took effect @@ -9135,13 +9358,13 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // the follower we will soon boot up will start applying the new AST rules at 
this height. // Make it so the miner does *not* follow the rules { - let mut sortdb = btc_regtest_controller.sortdb_mut(); + let sortdb = btc_regtest_controller.sortdb_mut(); let mut tx = sortdb.tx_begin().unwrap(); SortitionDB::override_ast_rule_height(&mut tx, ASTRules::PrecheckSize, 10_000).unwrap(); tx.commit().unwrap(); } let cur_ast_rules = { - let mut sortdb = btc_regtest_controller.sortdb_mut(); + let sortdb = btc_regtest_controller.sortdb_mut(); let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); eprintln!("Sort db tip: {}", tip.block_height); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); @@ -9173,7 +9396,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { eprintln!("old_tip_info = {:?}", &old_tip_info); // mine some blocks, and log problematic microblocks - for i in 0..5 { + for _i in 0..6 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); let cur_files_old = cur_files.clone(); let (mut new_files, cur_files_new) = find_new_files(bad_blocks_dir, &cur_files_old); @@ -9181,7 +9404,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { cur_files = cur_files_new; let cur_ast_rules = { - let mut sortdb = btc_regtest_controller.sortdb_mut(); + let sortdb = btc_regtest_controller.sortdb_mut(); let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); let cur_ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), tip.block_height).unwrap(); @@ -9208,7 +9431,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { assert!(all_new_files.len() >= 1); // tx_high got mined by the miner - let mut microblocks = test_observer::get_microblocks(); + let microblocks = test_observer::get_microblocks(); let mut bad_block_id = None; for microblock in microblocks { let transactions = microblock.get("transactions").unwrap().as_array().unwrap(); @@ -9219,7 +9442,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { } let 
tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::SmartContract(ref contract_call) = &parsed.payload { + if let TransactionPayload::SmartContract(..) = &parsed.payload { if parsed.txid() == tx_high_txid { bad_block_id = { let parts: Vec<_> = microblock @@ -9249,10 +9472,7 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { // verify that a follower node that boots up with this node as a bootstrap peer will process // all of the blocks available, even if they are problematic, with the checks on. - let (follower_conf, follower_blocks_processed, pox_sync_comms, follower_channel) = - spawn_follower_node(&conf); - let follower_http_origin = format!("http://{}", &follower_conf.node.rpc_bind); - let follower_tip_info = get_chain_info(&follower_conf); + let (follower_conf, _, pox_sync_comms, follower_channel) = spawn_follower_node(&conf); eprintln!( "\nFollower booted on port {},{}\n", @@ -9311,3 +9531,404 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { channel.stop_chains_coordinator(); follower_channel.stop_chains_coordinator(); } + +/// Make a contract that takes a parameterized amount of runtime +/// `num_index_of` is the number of times to call `index-of` +fn make_runtime_sized_contract(num_index_of: usize, nonce: u64, addr_prefix: &str) -> String { + let iters_256 = num_index_of / 256; + let iters_mod = num_index_of % 256; + let full_iters_code_parts: Vec = (0..iters_256) + .map(|_cnt| "(unwrap-panic (index-of BUFF_TO_BYTE input))".to_string()) + .collect(); + + let full_iters_code = full_iters_code_parts.join("\n "); + + let iters_mod_code_parts: Vec = + (0..iters_mod).map(|cnt| format!("0x{}", cnt)).collect(); + + let iters_mod_code = format!("(list {})", iters_mod_code_parts.join(" ")); + + let code = format!( + " + (define-constant BUFF_TO_BYTE (list + 0x00 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08 0x09 0x0a 0x0b 0x0c 0x0d 0x0e 
0x0f + 0x10 0x11 0x12 0x13 0x14 0x15 0x16 0x17 0x18 0x19 0x1a 0x1b 0x1c 0x1d 0x1e 0x1f + 0x20 0x21 0x22 0x23 0x24 0x25 0x26 0x27 0x28 0x29 0x2a 0x2b 0x2c 0x2d 0x2e 0x2f + 0x30 0x31 0x32 0x33 0x34 0x35 0x36 0x37 0x38 0x39 0x3a 0x3b 0x3c 0x3d 0x3e 0x3f + 0x40 0x41 0x42 0x43 0x44 0x45 0x46 0x47 0x48 0x49 0x4a 0x4b 0x4c 0x4d 0x4e 0x4f + 0x50 0x51 0x52 0x53 0x54 0x55 0x56 0x57 0x58 0x59 0x5a 0x5b 0x5c 0x5d 0x5e 0x5f + 0x60 0x61 0x62 0x63 0x64 0x65 0x66 0x67 0x68 0x69 0x6a 0x6b 0x6c 0x6d 0x6e 0x6f + 0x70 0x71 0x72 0x73 0x74 0x75 0x76 0x77 0x78 0x79 0x7a 0x7b 0x7c 0x7d 0x7e 0x7f + 0x80 0x81 0x82 0x83 0x84 0x85 0x86 0x87 0x88 0x89 0x8a 0x8b 0x8c 0x8d 0x8e 0x8f + 0x90 0x91 0x92 0x93 0x94 0x95 0x96 0x97 0x98 0x99 0x9a 0x9b 0x9c 0x9d 0x9e 0x9f + 0xa0 0xa1 0xa2 0xa3 0xa4 0xa5 0xa6 0xa7 0xa8 0xa9 0xaa 0xab 0xac 0xad 0xae 0xaf + 0xb0 0xb1 0xb2 0xb3 0xb4 0xb5 0xb6 0xb7 0xb8 0xb9 0xba 0xbb 0xbc 0xbd 0xbe 0xbf + 0xc0 0xc1 0xc2 0xc3 0xc4 0xc5 0xc6 0xc7 0xc8 0xc9 0xca 0xcb 0xcc 0xcd 0xce 0xcf + 0xd0 0xd1 0xd2 0xd3 0xd4 0xd5 0xd6 0xd7 0xd8 0xd9 0xda 0xdb 0xdc 0xdd 0xde 0xdf + 0xe0 0xe1 0xe2 0xe3 0xe4 0xe5 0xe6 0xe7 0xe8 0xe9 0xea 0xeb 0xec 0xed 0xee 0xef + 0xf0 0xf1 0xf2 0xf3 0xf4 0xf5 0xf6 0xf7 0xf8 0xf9 0xfa 0xfb 0xfc 0xfd 0xfe 0xff + )) + (define-private (crash-me-folder (input (buff 1)) (ctr uint)) + (begin + ;; full_iters_code + {} + (+ u1 ctr) + ) + ) + (define-public (crash-me (name (string-ascii 128))) + (begin + ;; call index-of (iters_256 * 256) times + (fold crash-me-folder BUFF_TO_BYTE u0) + ;; call index-of iters_mod times + (fold crash-me-folder {} u0) + (print name) + (ok u0) + ) + ) + (begin + (crash-me \"{}\")) + ", + full_iters_code, + iters_mod_code, + &format!("large-{}-{}-{}", nonce, &addr_prefix, num_index_of) + ); + + eprintln!("{}", &code); + code +} + +enum TxChainStrategy { + Expensive, + Random, +} + +fn make_expensive_tx_chain( + privk: &StacksPrivateKey, + fee_plus: u64, + mblock_only: bool, +) -> Vec> { + let addr = to_addr(&privk); + let mut chain = 
vec![]; + for nonce in 0..25 { + let mut addr_prefix = addr.to_string(); + let _ = addr_prefix.split_off(12); + let contract_name = format!("large-{}-{}-{}", nonce, &addr_prefix, 256); + eprintln!("Make tx {}", &contract_name); + let tx = if mblock_only { + make_contract_publish_microblock_only( + privk, + nonce, + 1049230 + nonce + fee_plus, + &contract_name, + &make_runtime_sized_contract(256, nonce, &addr_prefix), + ) + } else { + make_contract_publish( + privk, + nonce, + 1049230 + nonce + fee_plus, + &contract_name, + &make_runtime_sized_contract(256, nonce, &addr_prefix), + ) + }; + chain.push(tx); + } + chain +} + +fn make_random_tx_chain( + privk: &StacksPrivateKey, + fee_plus: u64, + mblock_only: bool, +) -> Vec> { + let addr = to_addr(&privk); + let mut chain = vec![]; + + for nonce in 0..25 { + // N.B. private keys are 32-33 bytes, so this is always safe + let random_iters = privk.to_bytes()[nonce as usize] as usize; + + let be_bytes = [ + privk.to_bytes()[nonce as usize], + privk.to_bytes()[(nonce + 1) as usize], + ]; + + let random_extra_fee = u16::from_be_bytes(be_bytes) as u64; + + let mut addr_prefix = addr.to_string(); + let _ = addr_prefix.split_off(12); + let contract_name = format!("large-{}-{}-{}", nonce, &addr_prefix, random_iters); + eprintln!("Make tx {}", &contract_name); + let tx = if mblock_only { + make_contract_publish_microblock_only( + privk, + nonce, + 1049230 + nonce + fee_plus + random_extra_fee, + &contract_name, + &make_runtime_sized_contract(random_iters, nonce, &addr_prefix), + ) + } else { + make_contract_publish( + privk, + nonce, + 1049230 + nonce + fee_plus + random_extra_fee, + &contract_name, + &make_runtime_sized_contract(random_iters, nonce, &addr_prefix), + ) + }; + chain.push(tx); + } + chain +} + +fn test_competing_miners_build_on_same_chain( + num_miners: usize, + conf_template: Config, + mblock_only: bool, + block_time_ms: u64, + chain_strategy: TxChainStrategy, +) { + if env::var("BITCOIND_TEST") != Ok("1".into()) 
{ + return; + } + + let privks: Vec<_> = (0..100) + .into_iter() + .map(|_| StacksPrivateKey::new()) + .collect(); + let balances: Vec<_> = privks + .iter() + .map(|privk| { + let addr = to_addr(privk); + InitialBalance { + address: addr.into(), + amount: 1_000_000_000, + } + }) + .collect(); + + let mut confs = vec![]; + let mut burnchain_configs = vec![]; + let mut blocks_processed = vec![]; + + for _i in 0..num_miners { + let seed = StacksPrivateKey::new().to_bytes(); + let (mut conf, _) = neon_integration_test_conf_with_seed(seed); + + conf.initial_balances.append(&mut balances.clone()); + + conf.node.mine_microblocks = conf_template.node.mine_microblocks; + conf.miner.microblock_attempt_time_ms = conf_template.miner.microblock_attempt_time_ms; + conf.node.wait_time_for_microblocks = conf_template.node.wait_time_for_microblocks; + conf.node.microblock_frequency = conf_template.node.microblock_frequency; + conf.miner.first_attempt_time_ms = conf_template.miner.first_attempt_time_ms; + conf.miner.subsequent_attempt_time_ms = conf_template.miner.subsequent_attempt_time_ms; + conf.node.wait_time_for_blocks = conf_template.node.wait_time_for_blocks; + conf.burnchain.max_rbf = conf_template.burnchain.max_rbf; + + // multiple nodes so they must download from each other + conf.miner.wait_for_block_download = true; + + confs.push(conf); + } + + let node_privkey_1 = + StacksNode::make_node_private_key_from_seed(&confs[0].node.local_peer_seed); + for i in 1..num_miners { + let chain_id = confs[0].burnchain.chain_id; + let peer_version = confs[0].burnchain.peer_version; + let p2p_bind = confs[0].node.p2p_bind.clone(); + + confs[i].node.set_bootstrap_nodes( + format!( + "{}@{}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex(), + p2p_bind + ), + chain_id, + peer_version, + ); + } + + // use long reward cycles + for i in 0..num_miners { + let mut burnchain_config = Burnchain::regtest(&confs[i].get_burn_db_path()); + let reward_cycle_len = 100; + let 
prepare_phase_len = 20; + let pox_constants = PoxConstants::new( + reward_cycle_len, + prepare_phase_len, + 4 * prepare_phase_len / 5, + 5, + 15, + (16 * reward_cycle_len - 1).into(), + (17 * reward_cycle_len).into(), + ); + burnchain_config.pox_constants = pox_constants.clone(); + + burnchain_configs.push(burnchain_config); + } + + let mut btcd_controller = BitcoinCoreController::new(confs[0].clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( + confs[0].clone(), + None, + Some(burnchain_configs[0].clone()), + None, + ); + + btc_regtest_controller.bootstrap_chain(1); + + // make sure all miners have BTC + for i in 1..num_miners { + let old_mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); + btc_regtest_controller + .set_mining_pubkey(confs[i].burnchain.local_mining_public_key.clone().unwrap()); + btc_regtest_controller.bootstrap_chain(1); + btc_regtest_controller.set_mining_pubkey(old_mining_pubkey); + } + + btc_regtest_controller.bootstrap_chain((199 - num_miners) as u64); + + eprintln!("Chain bootstrapped..."); + + for (i, burnchain_config) in burnchain_configs.into_iter().enumerate() { + let mut run_loop = neon::RunLoop::new(confs[i].clone()); + let blocks_processed_arc = run_loop.get_blocks_processed_arc(); + + blocks_processed.push(blocks_processed_arc); + thread::spawn(move || run_loop.start(Some(burnchain_config), 0)); + } + + let http_origin = format!("http://{}", &confs[0].node.rpc_bind); + + // give the run loops some time to start up! 
+ for i in 0..num_miners { + wait_for_runloop(&blocks_processed[i as usize]); + } + + // activate miners + eprintln!("\n\nBoot miner 0\n\n"); + loop { + let tip_info_opt = get_chain_info_opt(&confs[0]); + if let Some(tip_info) = tip_info_opt { + eprintln!("\n\nMiner 1: {:?}\n\n", &tip_info); + if tip_info.stacks_tip_height > 0 { + break; + } + } else { + eprintln!("\n\nWaiting for miner 0...\n\n"); + } + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed[0]); + } + + for i in 1..num_miners { + eprintln!("\n\nBoot miner {}\n\n", i); + loop { + let tip_info_opt = get_chain_info_opt(&confs[i]); + if let Some(tip_info) = tip_info_opt { + eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); + if tip_info.stacks_tip_height > 0 { + break; + } + } else { + eprintln!("\n\nWaiting for miner {}...\n\n", i); + } + next_block_and_iterate( + &mut btc_regtest_controller, + &blocks_processed[i as usize], + 5_000, + ); + } + } + + eprintln!("\n\nBegin transactions\n\n"); + + // blast out lots of expensive transactions. 
+ // keeps the mempool full, and makes it so miners will spend a nontrivial amount of time + // building blocks + let all_txs: Vec<_> = privks + .iter() + .enumerate() + .map(|(i, pk)| match chain_strategy { + TxChainStrategy::Expensive => make_expensive_tx_chain(pk, (25 * i) as u64, mblock_only), + TxChainStrategy::Random => make_random_tx_chain(pk, (25 * i) as u64, mblock_only), + }) + .collect(); + let mut cnt = 0; + for tx_chain in all_txs { + for tx in tx_chain { + eprintln!("\n\nSubmit tx {}\n\n", &cnt); + submit_tx(&http_origin, &tx); + cnt += 1; + } + } + + eprintln!("\n\nBegin mining\n\n"); + + // mine quickly -- see if we can induce flash blocks + for i in 0..1000 { + eprintln!("\n\nBuild block {}\n\n", i); + btc_regtest_controller.build_next_block(1); + sleep_ms(block_time_ms); + } +} + +// TODO: this needs to run as a smoke test, since they take too long to run in CI +#[test] +#[ignore] +fn test_one_miner_build_anchor_blocks_on_same_chain_without_rbf() { + let (mut conf, _) = neon_integration_test_conf(); + + conf.node.mine_microblocks = false; + conf.miner.microblock_attempt_time_ms = 5_000; + conf.node.wait_time_for_microblocks = 0; + conf.node.microblock_frequency = 10_000; + conf.miner.first_attempt_time_ms = 2_000; + conf.miner.subsequent_attempt_time_ms = 5_000; + conf.burnchain.max_rbf = 0; + conf.node.wait_time_for_blocks = 1_000; + + test_competing_miners_build_on_same_chain(1, conf, false, 10_000, TxChainStrategy::Random) +} + +// TODO: this needs to run as a smoke test, since they take too long to run in CI +#[test] +#[ignore] +fn test_competing_miners_build_anchor_blocks_on_same_chain_without_rbf() { + let (mut conf, _) = neon_integration_test_conf(); + + conf.node.mine_microblocks = false; + conf.miner.microblock_attempt_time_ms = 5_000; + conf.node.wait_time_for_microblocks = 0; + conf.node.microblock_frequency = 10_000; + conf.miner.first_attempt_time_ms = 2_000; + conf.miner.subsequent_attempt_time_ms = 5_000; + conf.burnchain.max_rbf = 
0; + conf.node.wait_time_for_blocks = 1_000; + + test_competing_miners_build_on_same_chain(5, conf, false, 10_000, TxChainStrategy::Expensive) +} + +// TODO: this needs to run as a smoke test, since they take too long to run in CI +#[test] +#[ignore] +fn test_competing_miners_build_anchor_blocks_and_microblocks_on_same_chain() { + let (mut conf, _) = neon_integration_test_conf(); + + conf.node.mine_microblocks = true; + conf.miner.microblock_attempt_time_ms = 2_000; + conf.node.wait_time_for_microblocks = 0; + conf.node.microblock_frequency = 0; + conf.miner.first_attempt_time_ms = 1; + conf.miner.subsequent_attempt_time_ms = 1; + conf.burnchain.max_rbf = 1000000; + conf.node.wait_time_for_blocks = 1_000; + + test_competing_miners_build_on_same_chain(5, conf, true, 15_000, TxChainStrategy::Random) +}