diff --git a/Cargo.lock b/Cargo.lock
index 791738766b..5f0fd52728 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6621,7 +6621,6 @@ dependencies = [
  "tokio 1.14.0",
  "tracing",
  "tracing-attributes",
- "ttl_cache",
  "uint",
 ]

@@ -7903,15 +7902,6 @@ dependencies = [
  "cfg-if 0.1.10",
 ]

-[[package]]
-name = "ttl_cache"
-version = "0.5.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4189890526f0168710b6ee65ceaedf1460c48a14318ceec933cb26baa492096a"
-dependencies = [
- "linked-hash-map 0.5.4",
-]
-
 [[package]]
 name = "tui"
 version = "0.16.0"
diff --git a/applications/tari_base_node/src/bootstrap.rs b/applications/tari_base_node/src/bootstrap.rs
index 1c16b11ca0..73d1bb8ebc 100644
--- a/applications/tari_base_node/src/bootstrap.rs
+++ b/applications/tari_base_node/src/bootstrap.rs
@@ -140,7 +140,6 @@ where B: BlockchainBackend + 'static
                 node_config,
             ))
             .add_initializer(MempoolServiceInitializer::new(
-                mempool_config,
                 self.mempool.clone(),
                 peer_message_subscriptions.clone(),
             ))
diff --git a/applications/tari_base_node/src/command_handler.rs b/applications/tari_base_node/src/command_handler.rs
index d69be9a45b..005dd1e8f7 100644
--- a/applications/tari_base_node/src/command_handler.rs
+++ b/applications/tari_base_node/src/command_handler.rs
@@ -158,7 +158,7 @@ impl CommandHandler {
                 "Mempool",
                 format!(
                     "{}tx ({}g, +/- {}blks)",
-                    mempool_stats.total_txs,
+                    mempool_stats.unconfirmed_txs,
                     mempool_stats.total_weight,
                     if mempool_stats.total_weight == 0 {
                         0
@@ -1118,7 +1118,9 @@ impl CommandHandler {
         let local_node_comms_interface = self.node_service.clone();
         self.executor.spawn(async move {
             let blocks = try_or_print!(db.rewind_to_height(new_height).await);
-            local_node_comms_interface.publish_block_event(BlockEvent::BlockSyncRewind(blocks));
+            if !blocks.is_empty() {
+                local_node_comms_interface.publish_block_event(BlockEvent::BlockSyncRewind(blocks));
+            }
         });
     }
diff --git a/base_layer/core/Cargo.toml b/base_layer/core/Cargo.toml
index 954d12a307..c268aba391 100644
--- a/base_layer/core/Cargo.toml
+++ b/base_layer/core/Cargo.toml
@@ -65,7 +65,6 @@ thiserror = "1.0.26"
 tokio = { version = "1.11", features = ["time", "sync", "macros"] }
 tracing = "0.1.26"
 tracing-attributes = "*"
-ttl_cache = "0.5.1"
 uint = { version = "0.9", default-features = false }

 [dev-dependencies]
diff --git a/base_layer/core/src/base_node/comms_interface/comms_request.rs b/base_layer/core/src/base_node/comms_interface/comms_request.rs
index 9721521d17..c233bc3473 100644
--- a/base_layer/core/src/base_node/comms_interface/comms_request.rs
+++ b/base_layer/core/src/base_node/comms_interface/comms_request.rs
@@ -26,7 +26,7 @@ use std::{
 };

 use serde::{Deserialize, Serialize};
-use tari_common_types::types::{Commitment, HashOutput, PublicKey, Signature};
+use tari_common_types::types::{Commitment, HashOutput, PrivateKey, PublicKey, Signature};
 use tari_crypto::tari_utilities::hex::Hex;

 use crate::{blocks::NewBlockTemplate, chain_storage::MmrTree, proof_of_work::PowAlgorithm};
@@ -44,14 +44,14 @@ pub struct MmrStateRequest {
 pub enum NodeCommsRequest {
     GetChainMetadata,
     FetchHeaders(RangeInclusive<u64>),
-    FetchHeadersWithHashes(Vec<HashOutput>),
+    FetchHeadersByHashes(Vec<HashOutput>),
     FetchHeadersAfter(Vec<HashOutput>, HashOutput),
     FetchMatchingUtxos(Vec<HashOutput>),
     FetchMatchingTxos(Vec<HashOutput>),
     FetchMatchingBlocks(RangeInclusive<u64>),
     FetchBlocksByHash(Vec<HashOutput>),
-    FetchBlocksWithKernels(Vec<Signature>),
-    FetchBlocksWithUtxos(Vec<Commitment>),
+    FetchBlocksByKernelExcessSigs(Vec<Signature>),
+    FetchBlocksByUtxos(Vec<Commitment>),
     GetHeaderByHash(HashOutput),
     GetBlockByHash(HashOutput),
     GetNewBlockTemplate(GetNewBlockTemplateRequest),
@@ -67,6 +67,9 @@ pub enum NodeCommsRequest {
     FetchAssetMetadata {
         asset_public_key: PublicKey,
     },
+    FetchMempoolTransactionsByExcessSigs {
+        excess_sigs: Vec<PrivateKey>,
+    },
 }

 #[derive(Debug, Serialize, Deserialize)]
@@ -83,7 +86,7 @@ impl Display for NodeCommsRequest {
             FetchHeaders(range) => {
                 write!(f, "FetchHeaders ({:?})", range)
             },
-            FetchHeadersWithHashes(v) => write!(f, "FetchHeadersWithHashes (n={})", v.len()),
+            FetchHeadersByHashes(v) => write!(f, "FetchHeadersByHashes (n={})", v.len()),
             FetchHeadersAfter(v, _hash) => write!(f, "FetchHeadersAfter (n={})", v.len()),
             FetchMatchingUtxos(v) => write!(f, "FetchMatchingUtxos (n={})", v.len()),
             FetchMatchingTxos(v) => write!(f, "FetchMatchingTxos (n={})", v.len()),
@@ -91,8 +94,8 @@ impl Display for NodeCommsRequest {
                 write!(f, "FetchMatchingBlocks ({:?})", range)
             },
             FetchBlocksByHash(v) => write!(f, "FetchBlocksByHash (n={})", v.len()),
-            FetchBlocksWithKernels(v) => write!(f, "FetchBlocksWithKernels (n={})", v.len()),
-            FetchBlocksWithUtxos(v) => write!(f, "FetchBlocksWithUtxos (n={})", v.len()),
+            FetchBlocksByKernelExcessSigs(v) => write!(f, "FetchBlocksByKernelExcessSigs (n={})", v.len()),
+            FetchBlocksByUtxos(v) => write!(f, "FetchBlocksByUtxos (n={})", v.len()),
             GetHeaderByHash(v) => write!(f, "GetHeaderByHash({})", v.to_hex()),
             GetBlockByHash(v) => write!(f, "GetBlockByHash({})", v.to_hex()),
             GetNewBlockTemplate(v) => write!(f, "GetNewBlockTemplate ({}) with weight {}", v.algo, v.max_weight),
@@ -112,6 +115,9 @@ impl Display for NodeCommsRequest {
             FetchAssetMetadata { .. } => {
                 write!(f, "FetchAssetMetadata")
             },
+            FetchMempoolTransactionsByExcessSigs { .. } => {
+                write!(f, "FetchMempoolTransactionsByExcessSigs")
+            },
         }
     }
 }
diff --git a/base_layer/core/src/base_node/comms_interface/comms_response.rs b/base_layer/core/src/base_node/comms_interface/comms_response.rs
index 6c20351f8d..a5a0e3c90f 100644
--- a/base_layer/core/src/base_node/comms_interface/comms_response.rs
+++ b/base_layer/core/src/base_node/comms_interface/comms_response.rs
@@ -20,20 +20,25 @@
 // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

-use std::fmt::{self, Display, Formatter};
+use std::{
+    fmt::{self, Display, Formatter},
+    sync::Arc,
+};

-use serde::{Deserialize, Serialize};
-use tari_common_types::{chain_metadata::ChainMetadata, types::HashOutput};
+use tari_common_types::{
+    chain_metadata::ChainMetadata,
+    types::{HashOutput, PrivateKey},
+};

 use crate::{
     blocks::{Block, BlockHeader, ChainHeader, HistoricalBlock, NewBlockTemplate},
     chain_storage::UtxoMinedInfo,
     proof_of_work::Difficulty,
-    transactions::transaction::{TransactionKernel, TransactionOutput},
+    transactions::transaction::{Transaction, TransactionKernel, TransactionOutput},
 };

 /// API Response enum
-#[derive(Debug, Serialize, Deserialize, Clone)]
+#[derive(Debug, Clone)]
 pub enum NodeCommsResponse {
     ChainMetadata(ChainMetadata),
     TransactionKernels(Vec<TransactionKernel>),
@@ -60,6 +65,7 @@ pub enum NodeCommsResponse {
     FetchAssetMetadataResponse {
         output: Box<Option<UtxoMinedInfo>>,
     },
+    FetchMempoolTransactionsByExcessSigsResponse(FetchMempoolTransactionsResponse),
 }

 impl Display for NodeCommsResponse {
@@ -90,6 +96,19 @@ impl Display for NodeCommsResponse {
             FetchTokensResponse { .. } => write!(f, "FetchTokensResponse"),
             FetchAssetRegistrationsResponse { .. } => write!(f, "FetchAssetRegistrationsResponse"),
             FetchAssetMetadataResponse { .. } => write!(f, "FetchAssetMetadataResponse"),
+            FetchMempoolTransactionsByExcessSigsResponse(resp) => write!(
+                f,
+                "FetchMempoolTransactionsByExcessSigsResponse({} transaction(s), {} not found)",
+                resp.transactions.len(),
+                resp.not_found.len()
+            ),
         }
     }
 }
+
+/// Container struct for mempool transaction responses
+#[derive(Debug, Clone)]
+pub struct FetchMempoolTransactionsResponse {
+    pub transactions: Vec<Arc<Transaction>>,
+    pub not_found: Vec<PrivateKey>,
+}
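Review note (not part of the patch): a minimal sketch of how a responder can assemble the new `FetchMempoolTransactionsResponse` by partitioning the requested excess-sig scalars into found/not-found. The slice-based `index` is a hypothetical stand-in for whatever lookup the mempool actually uses.

```rust
use std::sync::Arc;

use tari_common_types::types::PrivateKey;
use tari_core::{
    base_node::comms_interface::FetchMempoolTransactionsResponse,
    transactions::transaction::Transaction,
};

/// Split the requested excess-sig scalars into transactions we hold and
/// scalars we cannot resolve, matching the (transactions, not_found) contract.
fn build_response(
    requested: Vec<PrivateKey>,
    index: &[(PrivateKey, Arc<Transaction>)], // hypothetical lookup table
) -> FetchMempoolTransactionsResponse {
    let mut transactions = Vec::new();
    let mut not_found = Vec::new();
    for sig in requested {
        match index.iter().find(|(k, _)| *k == sig) {
            Some((_, tx)) => transactions.push(tx.clone()),
            None => not_found.push(sig),
        }
    }
    FetchMempoolTransactionsResponse { transactions, not_found }
}
```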
diff --git a/base_layer/core/src/base_node/comms_interface/error.rs b/base_layer/core/src/base_node/comms_interface/error.rs
index 4a3bb1aa68..5ff845d60f 100644
--- a/base_layer/core/src/base_node/comms_interface/error.rs
+++ b/base_layer/core/src/base_node/comms_interface/error.rs
@@ -65,4 +65,6 @@ pub enum CommsInterfaceError {
     BlockHeaderNotFound(u64),
     #[error("Block error: {0}")]
     BlockError(#[from] BlockError),
+    #[error("Invalid request for {request}: {details}")]
+    InvalidRequest { request: &'static str, details: String },
 }
diff --git a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs
index a5a8115c7d..500e9c9bfb 100644
--- a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs
+++ b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs
@@ -20,12 +20,15 @@
 // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

-use std::sync::Arc;
+use std::{
+    sync::Arc,
+    time::{Duration, Instant},
+};

 use log::*;
 use strum_macros::Display;
 use tari_common_types::types::{BlockHash, HashOutput, PublicKey};
-use tari_comms::peer_manager::NodeId;
+use tari_comms::{connectivity::ConnectivityRequester, peer_manager::NodeId};
 use tari_crypto::tari_utilities::{hash::Hashable, hex::Hex};
 use tari_utilities::ByteArray;
 use tokio::sync::Semaphore;
@@ -34,20 +37,23 @@ use crate::{
     base_node::comms_interface::{
         error::CommsInterfaceError,
         local_interface::BlockEventSender,
+        FetchMempoolTransactionsResponse,
         NodeCommsRequest,
         NodeCommsResponse,
         OutboundNodeCommsInterface,
     },
-    blocks::{Block, BlockHeader, ChainBlock, NewBlock, NewBlockTemplate},
-    chain_storage::{async_db::AsyncBlockchainDb, BlockAddResult, BlockchainBackend, PrunedOutput},
+    blocks::{Block, BlockBuilder, BlockHeader, ChainBlock, NewBlock, NewBlockTemplate},
+    chain_storage::{async_db::AsyncBlockchainDb, BlockAddResult, BlockchainBackend, ChainStorageError, PrunedOutput},
     consensus::{ConsensusConstants, ConsensusManager},
     mempool::Mempool,
     proof_of_work::{Difficulty, PowAlgorithm},
-    transactions::transaction::TransactionKernel,
 };

 const LOG_TARGET: &str = "c::bn::comms_interface::inbound_handler";
 const MAX_HEADERS_PER_RESPONSE: u32 = 100;
+const MAX_REQUEST_BY_BLOCK_HASHES: usize = 100;
+const MAX_REQUEST_BY_KERNEL_EXCESS_SIGS: usize = 100;
+const MAX_REQUEST_BY_UTXO_HASHES: usize = 100;

 /// Events that can be published on the Validated Block Event Stream
 /// Broadcast is to notify subscribers if this is a valid propagated block event
@@ -60,25 +66,27 @@ pub enum BlockEvent {
 }

 /// The InboundNodeCommsInterface is used to handle all received inbound requests from remote nodes.
-pub struct InboundNodeCommsHandlers<T> {
+pub struct InboundNodeCommsHandlers<B> {
     block_event_sender: BlockEventSender,
-    blockchain_db: AsyncBlockchainDb<T>,
+    blockchain_db: AsyncBlockchainDb<B>,
     mempool: Mempool,
     consensus_manager: ConsensusManager,
     new_block_request_semaphore: Arc<Semaphore>,
     outbound_nci: OutboundNodeCommsInterface,
+    connectivity: ConnectivityRequester,
 }

-impl<T> InboundNodeCommsHandlers<T>
-where T: BlockchainBackend + 'static
+impl<B> InboundNodeCommsHandlers<B>
+where B: BlockchainBackend + 'static
 {
     /// Construct a new InboundNodeCommsInterface.
     pub fn new(
         block_event_sender: BlockEventSender,
-        blockchain_db: AsyncBlockchainDb<T>,
+        blockchain_db: AsyncBlockchainDb<B>,
         mempool: Mempool,
         consensus_manager: ConsensusManager,
         outbound_nci: OutboundNodeCommsInterface,
+        connectivity: ConnectivityRequester,
     ) -> Self {
         Self {
             block_event_sender,
@@ -87,6 +95,7 @@ where T: BlockchainBackend + 'static
             consensus_manager,
             new_block_request_semaphore: Arc::new(Semaphore::new(1)),
             outbound_nci,
+            connectivity,
         }
     }

@@ -101,7 +110,17 @@ where T: BlockchainBackend + 'static
                 let headers = self.blockchain_db.fetch_chain_headers(range).await?;
                 Ok(NodeCommsResponse::BlockHeaders(headers))
             },
-            NodeCommsRequest::FetchHeadersWithHashes(block_hashes) => {
+            NodeCommsRequest::FetchHeadersByHashes(block_hashes) => {
+                if block_hashes.len() > MAX_REQUEST_BY_BLOCK_HASHES {
+                    return Err(CommsInterfaceError::InvalidRequest {
+                        request: "FetchHeadersByHashes",
+                        details: format!(
+                            "Exceeded maximum block hashes request (max: {}, got: {})",
+                            MAX_REQUEST_BY_BLOCK_HASHES,
+                            block_hashes.len()
+                        ),
+                    });
+                }
                 let mut block_headers = Vec::with_capacity(block_hashes.len());
                 for block_hash in block_hashes {
                     let block_hex = block_hash.to_hex();
@@ -234,7 +253,17 @@ where T: BlockchainBackend + 'static
                 }
                 Ok(NodeCommsResponse::HistoricalBlocks(blocks))
             },
-            NodeCommsRequest::FetchBlocksWithKernels(excess_sigs) => {
+            NodeCommsRequest::FetchBlocksByKernelExcessSigs(excess_sigs) => {
+                if excess_sigs.len() > MAX_REQUEST_BY_KERNEL_EXCESS_SIGS {
+                    return Err(CommsInterfaceError::InvalidRequest {
+                        request: "FetchBlocksByKernelExcessSigs",
+                        details: format!(
+                            "Exceeded maximum number of kernel excess sigs in request (max: {}, got: {})",
+                            MAX_REQUEST_BY_KERNEL_EXCESS_SIGS,
+                            excess_sigs.len()
+                        ),
+                    });
+                }
                 let mut blocks = Vec::with_capacity(excess_sigs.len());
                 for sig in excess_sigs {
                     let sig_hex = sig.get_signature().to_hex();
@@ -260,7 +289,17 @@ where T: BlockchainBackend + 'static
                 }
                 Ok(NodeCommsResponse::HistoricalBlocks(blocks))
             },
-            NodeCommsRequest::FetchBlocksWithUtxos(commitments) => {
+            NodeCommsRequest::FetchBlocksByUtxos(commitments) => {
+                if commitments.len() > MAX_REQUEST_BY_UTXO_HASHES {
+                    return Err(CommsInterfaceError::InvalidRequest {
+                        request: "FetchBlocksByUtxos",
+                        details: format!(
+                            "Exceeded maximum number of utxo hashes in request (max: {}, got: {})",
+                            MAX_REQUEST_BY_UTXO_HASHES,
+                            commitments.len()
+                        ),
+                    });
+                }
                 let mut blocks = Vec::with_capacity(commitments.len());
                 for commitment in commitments {
                     let commitment_hex = commitment.to_hex();
@@ -361,20 +400,14 @@ where T: BlockchainBackend + 'static
                 })
             },
             NodeCommsRequest::FetchKernelByExcessSig(signature) => {
-                let mut kernels = Vec::<TransactionKernel>::new();
-
-                match self.blockchain_db.fetch_kernel_by_excess_sig(signature).await {
-                    Ok(kernel) => match kernel {
-                        None => (),
-                        Some((kernel, _kernel_hash)) => {
-                            kernels.push(kernel);
-                        },
-                    },
+                let kernels = match self.blockchain_db.fetch_kernel_by_excess_sig(signature).await {
+                    Ok(Some((kernel, _))) => vec![kernel],
+                    Ok(None) => vec![],
                     Err(err) => {
                         error!(target: LOG_TARGET, "Could not fetch kernel {}", err.to_string());
                         return Err(err.into());
                     },
-                }
+                };

                 Ok(NodeCommsResponse::TransactionKernels(kernels))
             },
@@ -438,6 +471,15 @@ where T: BlockchainBackend + 'static
                     output: Box::new(output),
                 })
             },
+            NodeCommsRequest::FetchMempoolTransactionsByExcessSigs { excess_sigs } => {
+                let (transactions, not_found) = self.mempool.retrieve_by_excess_sigs(&excess_sigs).await;
+                Ok(NodeCommsResponse::FetchMempoolTransactionsByExcessSigsResponse(
+                    FetchMempoolTransactionsResponse {
+                        transactions,
+                        not_found,
+                    },
+                ))
+            },
         }
     }

@@ -450,7 +492,7 @@ where T: BlockchainBackend + 'static
         new_block: NewBlock,
         source_peer: NodeId,
     ) -> Result<(), CommsInterfaceError> {
-        let NewBlock { block_hash } = new_block;
+        let block_hash = new_block.header.hash();

         if self.blockchain_db.inner().is_add_block_disabled() {
             info!(
@@ -465,7 +507,9 @@ where T: BlockchainBackend + 'static
         // As multiple NewBlock requests arrive from propagation, this semaphore prevents multiple requests to nodes for
         // the same full block. The first request that succeeds will stop the node from requesting the block from any
         // other node (block_exists is true).
-        let _permit = self.new_block_request_semaphore.acquire().await;
+        // Arc clone to satisfy the borrow checker
+        let semaphore = self.new_block_request_semaphore.clone();
+        let _permit = semaphore.acquire().await.unwrap();

         if self.blockchain_db.block_exists(block_hash.clone()).await? {
             debug!(
@@ -478,39 +522,144 @@ where T: BlockchainBackend + 'static

         debug!(
             target: LOG_TARGET,
-            "Block with hash `{}` is unknown. Requesting it from peer `{}`.",
+            "Block with hash `{}` is unknown. Constructing block from known mempool transactions / requesting missing \
+             transactions from peer '{}'.",
             block_hash.to_hex(),
-            source_peer.short_str()
+            source_peer
         );
-        let mut block = self
+
+        let block = self.reconcile_block(source_peer.clone(), new_block).await?;
+        self.handle_block(block, Some(source_peer)).await?;
+
+        Ok(())
+    }
+
+    async fn reconcile_block(
+        &mut self,
+        source_peer: NodeId,
+        new_block: NewBlock,
+    ) -> Result<Arc<Block>, CommsInterfaceError> {
+        let NewBlock {
+            header,
+            coinbase_kernel,
+            coinbase_output,
+            kernel_excess_sigs: excess_sigs,
+        } = new_block;
+
+        let (known_transactions, missing_excess_sigs) = self.mempool.retrieve_by_excess_sigs(&excess_sigs).await;
+        let known_transactions = known_transactions.into_iter().map(|tx| (*tx).clone()).collect();
+
+        let mut builder = BlockBuilder::new(header.version)
+            .with_coinbase_utxo(coinbase_output, coinbase_kernel)
+            .with_transactions(known_transactions);
+
+        if missing_excess_sigs.is_empty() {
+            debug!(
+                target: LOG_TARGET,
+                "All transactions for block #{} ({}) found in mempool",
+                header.height,
+                header.hash().to_hex()
+            );
+        } else {
+            debug!(
+                target: LOG_TARGET,
+                "Requesting {} unknown transaction(s) from peer '{}'.",
+                missing_excess_sigs.len(),
+                source_peer
+            );
+
+            let FetchMempoolTransactionsResponse {
+                transactions,
+                not_found,
+            } = self
+                .outbound_nci
+                .request_transactions_by_excess_sig(source_peer.clone(), missing_excess_sigs)
+                .await?;
+
+            // Add returned transactions to unconfirmed pool
+            if !transactions.is_empty() {
+                self.mempool.insert_all(&transactions).await?;
+            }
+
+            if !not_found.is_empty() {
+                let block_hash = header.hash();
+                warn!(
+                    target: LOG_TARGET,
+                    "Peer {} was not able to return all transactions for block #{} ({}). {} transaction(s) not found. \
+                     Requesting full block.",
+                    source_peer,
+                    header.height,
+                    block_hash.to_hex(),
+                    not_found.len()
+                );
+
+                let block = self.request_full_block_from_peer(source_peer, block_hash).await?;
+                return Ok(block);
+            }
+
+            builder = builder.with_transactions(
+                transactions
+                    .into_iter()
+                    .map(|tx| Arc::try_unwrap(tx).unwrap_or_else(|tx| (*tx).clone()))
+                    .collect(),
+            );
+        }
+
+        // NB: Add the header last because `with_transactions` etc. update the current header, but we have the final
+        // one already
+        builder = builder.with_header(header);
+
+        Ok(Arc::new(builder.build()))
+    }
+
+    async fn request_full_block_from_peer(
+        &mut self,
+        source_peer: NodeId,
+        block_hash: BlockHash,
+    ) -> Result<Arc<Block>, CommsInterfaceError> {
+        let mut historical_block = self
             .outbound_nci
-            .request_blocks_with_hashes_from_peer(vec![block_hash], Some(source_peer.clone()))
+            .request_blocks_by_hashes_from_peer(vec![block_hash], Some(source_peer.clone()))
             .await?;

-        match block.pop() {
+        return match historical_block.pop() {
             Some(block) => {
-                self.handle_block(Arc::new(block.try_into_block()?), Some(source_peer))
-                    .await?;
-                Ok(())
+                let block = Arc::new(block.try_into_block()?);
+                Ok(block)
             },
             None => {
-                // TODO: #banheuristic - peer propagated block hash for which it could not return the full block
+                if let Err(e) = self
+                    .connectivity
+                    .ban_peer_until(
+                        source_peer.clone(),
+                        Duration::from_secs(100),
+                        format!("Peer {} failed to return the block that was requested.", source_peer),
+                    )
+                    .await
+                {
+                    error!(target: LOG_TARGET, "Failed to ban peer: {}", e);
+                }
+
                 debug!(
                     target: LOG_TARGET,
-                    "Peer `{}` failed to return the block that was requested.",
-                    source_peer.short_str()
+                    "Peer `{}` failed to return the block that was requested.", source_peer
                 );
                 Err(CommsInterfaceError::InvalidPeerResponse(format!(
                     "Invalid response from peer `{}`: Peer failed to provide the block that was propagated",
-                    source_peer.short_str()
+                    source_peer
                 )))
             },
-        }
+        };
     }

     /// Handle inbound blocks from remote nodes and local services.
+    ///
+    /// ## Arguments
+    /// block - the block to store
+    /// source_peer - the peer that sent this block via a new block message, or None if the block was generated by a
+    /// local miner
     pub async fn handle_block(
-        &self,
+        &mut self,
         block: Arc<Block>,
         source_peer: Option<NodeId>,
     ) -> Result<BlockHash, CommsInterfaceError> {
@@ -526,12 +675,19 @@ where T: BlockchainBackend + 'static
                 .map(|p| format!("remote peer: {}", p))
                 .unwrap_or_else(|| "local services".to_string())
         );
-        trace!(target: LOG_TARGET, "Block: {}", block);
+        let timer = Instant::now();
         let add_block_result = self.blockchain_db.add_block(block.clone()).await;
         // Create block event on block event stream
         match add_block_result {
             Ok(block_add_result) => {
-                trace!(target: LOG_TARGET, "Block event created: {}", block_add_result);
+                debug!(
+                    target: LOG_TARGET,
+                    "Block #{} ({}) added ({}) to blockchain in {:.2?}",
+                    block_height,
+                    block_hash.to_hex(),
+                    block_add_result,
+                    timer.elapsed()
+                );

                 let should_propagate = match &block_add_result {
                     BlockAddResult::Ok(_) => true,
@@ -542,7 +698,7 @@ where T: BlockchainBackend + 'static

                 self.blockchain_db.cleanup_orphans().await?;

-                self.publish_block_event(BlockEvent::ValidBlockAdded(block, block_add_result));
+                self.publish_block_event(BlockEvent::ValidBlockAdded(block.clone(), block_add_result));

                 if should_propagate {
                     debug!(
@@ -551,21 +707,38 @@ where T: BlockchainBackend + 'static
                         block_hash.to_hex()
                     );
                     let exclude_peers = source_peer.into_iter().collect();
-                    let new_block = NewBlock::new(block_hash.clone());
-                    self.outbound_nci.propagate_block(new_block, exclude_peers).await?;
+                    let new_block_msg = NewBlock::from(&*block);
+                    self.outbound_nci.propagate_block(new_block_msg, exclude_peers).await?;
                 }
                 Ok(block_hash)
             },
-            Err(e) => {
+
+            Err(e @ ChainStorageError::ValidationError { .. }) => {
                 warn!(
                     target: LOG_TARGET,
-                    "Block #{} ({}) validation failed: {:?}",
-                    block_height,
-                    block_hash.to_hex(),
+                    "Peer {} sent an invalid block: {}",
+                    source_peer
+                        .as_ref()
+                        .map(ToString::to_string)
+                        .unwrap_or_else(|| "".to_string()),
                     e
                 );
+                if let Some(source_peer) = source_peer {
+                    if let Err(e) = self
+                        .connectivity
+                        .ban_peer(source_peer, format!("Peer propagated invalid block: {}", e))
+                        .await
+                    {
+                        error!(target: LOG_TARGET, "Failed to ban peer: {}", e);
+                    }
+                }
+                self.publish_block_event(BlockEvent::AddBlockFailed(block));
+                Err(e.into())
+            },
+
+            Err(e) => {
                 self.publish_block_event(BlockEvent::AddBlockFailed(block));
-                Err(CommsInterfaceError::ChainStorageError(e))
+                Err(e.into())
             },
         }
     }
@@ -596,7 +769,7 @@ where T: BlockchainBackend + 'static
     }
 }

-impl<T> Clone for InboundNodeCommsHandlers<T> {
+impl<B> Clone for InboundNodeCommsHandlers<B> {
     fn clone(&self) -> Self {
         Self {
             block_event_sender: self.block_event_sender.clone(),
@@ -605,6 +778,7 @@ impl<T> Clone for InboundNodeCommsHandlers<T> {
             consensus_manager: self.consensus_manager.clone(),
             new_block_request_semaphore: self.new_block_request_semaphore.clone(),
             outbound_nci: self.outbound_nci.clone(),
+            connectivity: self.connectivity.clone(),
         }
     }
 }
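Review note (not part of the patch): the reconcile path above in condensed form — a hedged sketch of rebuilding a full block from a compact NewBlock once all non-coinbase transactions have been recovered, using only the BlockBuilder calls this patch introduces.

```rust
use std::sync::Arc;

use tari_core::{
    blocks::{Block, BlockBuilder, NewBlock},
    transactions::transaction::Transaction,
};

/// Rebuild the announced block from its compact form plus the recovered
/// transactions (from the local mempool and/or the announcing peer).
fn rebuild_block(new_block: NewBlock, transactions: Vec<Transaction>) -> Arc<Block> {
    let NewBlock {
        header,
        coinbase_kernel,
        coinbase_output,
        ..
    } = new_block;

    let block = BlockBuilder::new(header.version)
        .with_coinbase_utxo(coinbase_output, coinbase_kernel)
        // updates the working header's MMR sizes and offsets as it goes
        .with_transactions(transactions)
        // overwrite with the announced header, which already holds the final values
        .with_header(header)
        .build();
    Arc::new(block)
}
```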
-use tari_common_types::types::BlockHash; +use tari_common_types::types::{BlockHash, PrivateKey}; use tari_comms::peer_manager::NodeId; use tari_service_framework::{reply_channel::SenderService, Service}; use tokio::sync::mpsc::UnboundedSender; use crate::{ - base_node::comms_interface::{error::CommsInterfaceError, NodeCommsRequest, NodeCommsResponse}, + base_node::comms_interface::{ + error::CommsInterfaceError, + FetchMempoolTransactionsResponse, + NodeCommsRequest, + NodeCommsResponse, + }, blocks::{HistoricalBlock, NewBlock}, }; @@ -54,7 +59,7 @@ impl OutboundNodeCommsInterface { /// Fetch the Blocks corresponding to the provided block hashes from a specific base node. The requested blocks /// could be chain blocks or orphan blocks. - pub async fn request_blocks_with_hashes_from_peer( + pub async fn request_blocks_by_hashes_from_peer( &mut self, block_hashes: Vec, node_id: Option, @@ -70,6 +75,26 @@ impl OutboundNodeCommsInterface { } } + /// Fetch the transactions corresponding to the provided excess_sigs from the given peer `NodeId`. + pub async fn request_transactions_by_excess_sig( + &mut self, + node_id: NodeId, + excess_sigs: Vec, + ) -> Result { + if let NodeCommsResponse::FetchMempoolTransactionsByExcessSigsResponse(resp) = self + .request_sender + .call(( + NodeCommsRequest::FetchMempoolTransactionsByExcessSigs { excess_sigs }, + Some(node_id), + )) + .await?? + { + Ok(resp) + } else { + Err(CommsInterfaceError::UnexpectedApiResponse) + } + } + /// Transmit a block to remote base nodes, excluding the provided peers. pub async fn propagate_block( &self, diff --git a/base_layer/core/src/base_node/proto/request.proto b/base_layer/core/src/base_node/proto/request.proto index 3f7893abe4..d1e4d1d1bb 100644 --- a/base_layer/core/src/base_node/proto/request.proto +++ b/base_layer/core/src/base_node/proto/request.proto @@ -12,9 +12,15 @@ message BaseNodeServiceRequest { oneof request { // Indicates a FetchBlocksByHash request. HashOutputs fetch_blocks_by_hash = 8; + ExcessSigs fetch_mempool_transactions_by_excess_sigs = 9; } } +// Excess signature container message. `repeated` label is not permitted in oneof. 
diff --git a/base_layer/core/src/base_node/proto/request.proto b/base_layer/core/src/base_node/proto/request.proto
index 3f7893abe4..d1e4d1d1bb 100644
--- a/base_layer/core/src/base_node/proto/request.proto
+++ b/base_layer/core/src/base_node/proto/request.proto
@@ -12,9 +12,15 @@ message BaseNodeServiceRequest {
     oneof request {
         // Indicates a FetchBlocksByHash request.
         HashOutputs fetch_blocks_by_hash = 8;
+        ExcessSigs fetch_mempool_transactions_by_excess_sigs = 9;
     }
 }

+// Excess signature container message. The `repeated` label is not permitted in a oneof.
+message ExcessSigs {
+    repeated bytes excess_sigs = 1;
+}
+
 message BlockHeights {
     repeated uint64 heights = 1;
 }
diff --git a/base_layer/core/src/base_node/proto/request.rs b/base_layer/core/src/base_node/proto/request.rs
index e631f88d31..96cf2256bc 100644
--- a/base_layer/core/src/base_node/proto/request.rs
+++ b/base_layer/core/src/base_node/proto/request.rs
@@ -22,33 +22,48 @@

 use std::convert::{From, TryFrom, TryInto};

-use tari_common_types::types::HashOutput;
+use tari_common_types::types::{HashOutput, PrivateKey};
+use tari_utilities::ByteArray;

 use crate::{
-    base_node::comms_interface as ci,
-    proto::base_node::{base_node_service_request::Request as ProtoNodeCommsRequest, BlockHeights, HashOutputs},
+    base_node::comms_interface::NodeCommsRequest,
+    proto::{base_node as proto, base_node::base_node_service_request::Request as ProtoNodeCommsRequest},
 };

 //---------------------------------- BaseNodeRequest --------------------------------------------//

-impl TryInto<ci::NodeCommsRequest> for ProtoNodeCommsRequest {
+impl TryInto<NodeCommsRequest> for ProtoNodeCommsRequest {
     type Error = String;

-    fn try_into(self) -> Result<ci::NodeCommsRequest, Self::Error> {
+    fn try_into(self) -> Result<NodeCommsRequest, Self::Error> {
         use ProtoNodeCommsRequest::*;
         let request = match self {
-            FetchBlocksByHash(block_hashes) => ci::NodeCommsRequest::FetchBlocksByHash(block_hashes.outputs),
+            FetchBlocksByHash(block_hashes) => NodeCommsRequest::FetchBlocksByHash(block_hashes.outputs),
+            FetchMempoolTransactionsByExcessSigs(excess_sigs) => {
+                let excess_sigs = excess_sigs
+                    .excess_sigs
+                    .into_iter()
+                    .map(|bytes| PrivateKey::from_bytes(&bytes).map_err(|_| "Malformed excess sig".to_string()))
+                    .collect::<Result<_, _>>()?;
+
+                NodeCommsRequest::FetchMempoolTransactionsByExcessSigs { excess_sigs }
+            },
         };
         Ok(request)
     }
 }

-impl TryFrom<ci::NodeCommsRequest> for ProtoNodeCommsRequest {
+impl TryFrom<NodeCommsRequest> for ProtoNodeCommsRequest {
     type Error = String;

-    fn try_from(request: ci::NodeCommsRequest) -> Result<Self, Self::Error> {
-        use ci::NodeCommsRequest::*;
+    fn try_from(request: NodeCommsRequest) -> Result<Self, Self::Error> {
+        use NodeCommsRequest::*;
         match request {
             FetchBlocksByHash(block_hashes) => Ok(ProtoNodeCommsRequest::FetchBlocksByHash(block_hashes.into())),
+            FetchMempoolTransactionsByExcessSigs { excess_sigs } => Ok(
+                ProtoNodeCommsRequest::FetchMempoolTransactionsByExcessSigs(proto::ExcessSigs {
+                    excess_sigs: excess_sigs.into_iter().map(|sig| sig.to_vec()).collect(),
+                }),
+            ),
             e => Err(format!("{} request is not supported", e)),
         }
     }
@@ -56,13 +71,13 @@ impl TryFrom<NodeCommsRequest> for ProtoNodeCommsRequest {

 //---------------------------------- Wrappers --------------------------------------------//

-impl From<Vec<HashOutput>> for HashOutputs {
+impl From<Vec<HashOutput>> for proto::HashOutputs {
     fn from(outputs: Vec<HashOutput>) -> Self {
         Self { outputs }
     }
 }

-impl From<Vec<u64>> for BlockHeights {
+impl From<Vec<u64>> for proto::BlockHeights {
     fn from(heights: Vec<u64>) -> Self {
         Self { heights }
     }
diff --git a/base_layer/core/src/base_node/proto/response.proto b/base_layer/core/src/base_node/proto/response.proto
index 985225f58a..a7f79f7a21 100644
--- a/base_layer/core/src/base_node/proto/response.proto
+++ b/base_layer/core/src/base_node/proto/response.proto
@@ -12,6 +12,7 @@ message BaseNodeServiceResponse {
     oneof response {
         // Indicates a HistoricalBlocks response.
         HistoricalBlocks historical_blocks = 6;
+        FetchMempoolTransactionsResponse fetch_mempool_transactions_by_excess_sigs_response = 7;
     }
     bool is_synced = 13;
 }
@@ -51,3 +52,8 @@ message NewBlockResponse {
     bytes deleted = 2;
 }

+message FetchMempoolTransactionsResponse {
+    repeated tari.types.Transaction transactions = 1;
+    repeated bytes not_found = 2;
+}
+
diff --git a/base_layer/core/src/base_node/proto/response.rs b/base_layer/core/src/base_node/proto/response.rs
index 23f700d9c1..e215db819a 100644
--- a/base_layer/core/src/base_node/proto/response.rs
+++ b/base_layer/core/src/base_node/proto/response.rs
@@ -23,36 +23,46 @@
 use std::{
     convert::{TryFrom, TryInto},
     iter::{FromIterator, Iterator},
+    sync::Arc,
 };

-use tari_utilities::convert::try_convert_all;
+use tari_common_types::types::PrivateKey;
+use tari_utilities::{convert::try_convert_all, ByteArray};

 pub use crate::proto::base_node::base_node_service_response::Response as ProtoNodeCommsResponse;
 use crate::{
-    base_node::comms_interface as ci,
+    base_node::comms_interface::{FetchMempoolTransactionsResponse, NodeCommsResponse},
     blocks::{BlockHeader, HistoricalBlock},
     proto,
-    proto::{
-        base_node as base_node_proto,
-        base_node::{
-            BlockHeaders as ProtoBlockHeaders,
-            HistoricalBlocks as ProtoHistoricalBlocks,
-            TransactionKernels as ProtoTransactionKernels,
-            TransactionOutputs as ProtoTransactionOutputs,
-        },
-        core as core_proto_types,
-    },
 };

-impl TryInto<ci::NodeCommsResponse> for ProtoNodeCommsResponse {
+impl TryInto<NodeCommsResponse> for ProtoNodeCommsResponse {
     type Error = String;

-    fn try_into(self) -> Result<ci::NodeCommsResponse, Self::Error> {
+    fn try_into(self) -> Result<NodeCommsResponse, Self::Error> {
         use ProtoNodeCommsResponse::*;
         let response = match self {
             HistoricalBlocks(blocks) => {
                 let blocks = try_convert_all(blocks.blocks)?;
-                ci::NodeCommsResponse::HistoricalBlocks(blocks)
+                NodeCommsResponse::HistoricalBlocks(blocks)
+            },
+            FetchMempoolTransactionsByExcessSigsResponse(response) => {
+                let transactions = response
+                    .transactions
+                    .into_iter()
+                    .map(|tx| tx.try_into().map(Arc::new))
+                    .collect::<Result<_, _>>()?;
+                let not_found = response
+                    .not_found
+                    .into_iter()
+                    .map(|bytes| PrivateKey::from_bytes(&bytes).map_err(|_| "Malformed excess signature".to_string()))
+                    .collect::<Result<_, _>>()?;
+                NodeCommsResponse::FetchMempoolTransactionsByExcessSigsResponse(
+                    self::FetchMempoolTransactionsResponse {
+                        transactions,
+                        not_found,
+                    },
+                )
             },
         };

@@ -60,29 +70,42 @@ impl TryInto<ci::NodeCommsResponse> for ProtoNodeCommsResponse {
     }
 }

-impl TryFrom<ci::NodeCommsResponse> for ProtoNodeCommsResponse {
+impl TryFrom<NodeCommsResponse> for ProtoNodeCommsResponse {
     type Error = String;

-    fn try_from(response: ci::NodeCommsResponse) -> Result<Self, Self::Error> {
-        use ci::NodeCommsResponse::*;
+    fn try_from(response: NodeCommsResponse) -> Result<Self, Self::Error> {
+        use NodeCommsResponse::*;
         match response {
             HistoricalBlocks(historical_blocks) => {
                 let historical_blocks = historical_blocks
                     .into_iter()
                     .map(TryInto::try_into)
-                    .collect::<Result<Vec<core_proto_types::HistoricalBlock>, _>>()?
+                    .collect::<Result<Vec<proto::core::HistoricalBlock>, _>>()?
                     .into_iter()
                     .map(Into::into)
                     .collect();
                 Ok(ProtoNodeCommsResponse::HistoricalBlocks(historical_blocks))
             },
+            FetchMempoolTransactionsByExcessSigsResponse(resp) => {
+                let transactions = resp
+                    .transactions
+                    .into_iter()
+                    .map(|tx| tx.try_into())
+                    .collect::<Result<_, _>>()?;
+                Ok(ProtoNodeCommsResponse::FetchMempoolTransactionsByExcessSigsResponse(
+                    proto::base_node::FetchMempoolTransactionsResponse {
+                        transactions,
+                        not_found: resp.not_found.into_iter().map(|s| s.to_vec()).collect(),
+                    },
+                ))
+            },
             // This would only occur if a programming error sent out the unsupported response
             resp => Err(format!("Response not supported {:?}", resp)),
         }
     }
 }

-impl From<Option<BlockHeader>> for base_node_proto::BlockHeaderResponse {
+impl From<Option<BlockHeader>> for proto::base_node::BlockHeaderResponse {
     fn from(v: Option<BlockHeader>) -> Self {
         Self {
             header: v.map(Into::into),
@@ -90,7 +113,7 @@ impl From<Option<BlockHeader>> for proto::base_node::BlockHeaderResponse {
     }
 }

-impl TryInto<Option<BlockHeader>> for base_node_proto::BlockHeaderResponse {
+impl TryInto<Option<BlockHeader>> for proto::base_node::BlockHeaderResponse {
     type Error = String;

     fn try_into(self) -> Result<Option<BlockHeader>, Self::Error> {
@@ -104,7 +127,7 @@ impl TryInto<Option<BlockHeader>> for proto::base_node::BlockHeaderResponse {
     }
 }

-impl TryFrom<Option<HistoricalBlock>> for base_node_proto::HistoricalBlockResponse {
+impl TryFrom<Option<HistoricalBlock>> for proto::base_node::HistoricalBlockResponse {
     type Error = String;

     fn try_from(v: Option<HistoricalBlock>) -> Result<Self, Self::Error> {
@@ -114,7 +137,7 @@ impl TryFrom<Option<HistoricalBlock>> for proto::base_node::HistoricalBlockResponse {
     }
 }

-impl TryInto<Option<HistoricalBlock>> for base_node_proto::HistoricalBlockResponse {
+impl TryInto<Option<HistoricalBlock>> for proto::base_node::HistoricalBlockResponse {
     type Error = String;

     fn try_into(self) -> Result<Option<HistoricalBlock>, Self::Error> {
@@ -132,7 +155,7 @@ impl TryInto<Option<HistoricalBlock>> for proto::base_node::HistoricalBlockResponse {

 // The following allow `Iterator::collect` to collect into these repeated types

-impl FromIterator<proto::types::TransactionKernel> for ProtoTransactionKernels {
+impl FromIterator<proto::types::TransactionKernel> for proto::base_node::TransactionKernels {
     fn from_iter<T: IntoIterator<Item = proto::types::TransactionKernel>>(iter: T) -> Self {
         Self {
             kernels: iter.into_iter().collect(),
@@ -140,15 +163,15 @@ impl FromIterator<proto::types::TransactionKernel> for ProtoTransactionKernels {
     }
 }

-impl FromIterator<core_proto_types::BlockHeader> for ProtoBlockHeaders {
-    fn from_iter<T: IntoIterator<Item = core_proto_types::BlockHeader>>(iter: T) -> Self {
+impl FromIterator<proto::core::BlockHeader> for proto::base_node::BlockHeaders {
+    fn from_iter<T: IntoIterator<Item = proto::core::BlockHeader>>(iter: T) -> Self {
         Self {
             headers: iter.into_iter().collect(),
         }
     }
 }

-impl FromIterator<proto::types::TransactionOutput> for ProtoTransactionOutputs {
+impl FromIterator<proto::types::TransactionOutput> for proto::base_node::TransactionOutputs {
     fn from_iter<T: IntoIterator<Item = proto::types::TransactionOutput>>(iter: T) -> Self {
         Self {
             outputs: iter.into_iter().collect(),
@@ -156,8 +179,8 @@ impl FromIterator<proto::types::TransactionOutput> for proto::base_node::TransactionOutputs {
     }
 }

-impl FromIterator<core_proto_types::HistoricalBlock> for ProtoHistoricalBlocks {
-    fn from_iter<T: IntoIterator<Item = core_proto_types::HistoricalBlock>>(iter: T) -> Self {
+impl FromIterator<proto::core::HistoricalBlock> for proto::base_node::HistoricalBlocks {
+    fn from_iter<T: IntoIterator<Item = proto::core::HistoricalBlock>>(iter: T) -> Self {
         Self {
             blocks: iter.into_iter().collect(),
         }
diff --git a/base_layer/core/src/base_node/service/initializer.rs b/base_layer/core/src/base_node/service/initializer.rs
index 4e8bf61f07..8554e34b22 100644
--- a/base_layer/core/src/base_node/service/initializer.rs
+++ b/base_layer/core/src/base_node/service/initializer.rs
@@ -24,6 +24,7 @@ use std::{convert::TryFrom, sync::Arc};

 use futures::{future, Stream, StreamExt};
 use log::*;
+use tari_comms::connectivity::ConnectivityRequester;
 use tari_comms_dht::Dht;
 use tari_p2p::{
     comms_connector::{PeerMessage, SubscriptionFactory},
@@ -164,25 +165,32 @@ where T: BlockchainBackend + 'static
             local_block_sender_service,
             block_event_sender.clone(),
         );
-        let inbound_nch = InboundNodeCommsHandlers::new(
-            block_event_sender,
-            self.blockchain_db.clone(),
-            self.mempool.clone(),
-            self.consensus_manager.clone(),
-            outbound_nci.clone(),
-        );
-        let config = self.config;

         // Register handle to OutboundNodeCommsInterface before waiting for handles to be ready
-        context.register_handle(outbound_nci);
+        context.register_handle(outbound_nci.clone());
         context.register_handle(local_nci);

+        let config = self.config;
+        let blockchain_db = self.blockchain_db.clone();
+        let mempool = self.mempool.clone();
+        let consensus_manager = self.consensus_manager.clone();
+
         context.spawn_when_ready(move |handles| async move {
             let dht = handles.expect_handle::<Dht>();
+            let connectivity = handles.expect_handle::<ConnectivityRequester>();
             let outbound_message_service = dht.outbound_requester();
             let state_machine = handles.expect_handle::<StateMachineHandle>();

+            let inbound_nch = InboundNodeCommsHandlers::new(
+                block_event_sender,
+                blockchain_db,
+                mempool,
+                consensus_manager,
+                outbound_nci.clone(),
+                connectivity,
+            );
+
             let streams = BaseNodeStreams {
                 outbound_request_stream,
                 outbound_block_stream,
diff --git a/base_layer/core/src/base_node/service/service.rs b/base_layer/core/src/base_node/service/service.rs
index 84e9282b40..da264c7a44 100644
--- a/base_layer/core/src/base_node/service/service.rs
+++ b/base_layer/core/src/base_node/service/service.rs
@@ -35,9 +35,9 @@ use tari_comms_dht::{
     envelope::NodeDestination,
     outbound::{DhtOutboundError, OutboundEncryption, OutboundMessageRequester, SendMessageParams},
 };
-use tari_crypto::tari_utilities::hex::Hex;
 use tari_p2p::{domain_message::DomainMessage, tari_message::TariMessageType};
 use tari_service_framework::reply_channel::RequestContext;
+use tari_utilities::{hex::Hex, Hashable};
 use tokio::{
     sync::{
         mpsc,
@@ -312,7 +312,7 @@ where B: BlockchainBackend + 'static
                 debug!(
                     target: LOG_TARGET,
                     "Propagated block `{}` from peer `{}` not processed while busy with initial sync.",
-                    new_block.inner.block_hash.to_hex(),
+                    new_block.inner().header.hash().to_hex(),
                     new_block.source_peer.node_id.short_str(),
                 );
                 return;
@@ -352,7 +352,7 @@ where B: BlockchainBackend + 'static
     }

     fn spawn_handle_local_block(&self, block_context: RequestContext<Block, Result<BlockHash, CommsInterfaceError>>) {
-        let inbound_nch = self.inbound_nch.clone();
+        let mut inbound_nch = self.inbound_nch.clone();
         task::spawn(async move {
             let (block, reply_tx) = block_context.split();
             let result = reply_tx.send(inbound_nch.handle_block(Arc::new(block), None).await);
@@ -637,7 +637,7 @@ async fn handle_incoming_block(
     debug!(
         target: LOG_TARGET,
         "New candidate block with hash `{}` received from `{}`.",
-        new_block.block_hash.to_hex(),
+        new_block.header.hash().to_hex(),
         source_peer.node_id.short_str()
     );
diff --git a/base_layer/core/src/base_node/service/service_response.rs b/base_layer/core/src/base_node/service/service_response.rs
index a8f200f14c..dc1f74f86e 100644
--- a/base_layer/core/src/base_node/service/service_response.rs
+++ b/base_layer/core/src/base_node/service/service_response.rs
@@ -20,13 +20,12 @@
 // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

-use serde::{Deserialize, Serialize};
 use tari_common_types::waiting_requests::RequestKey;

 use crate::base_node::comms_interface::NodeCommsResponse;

 /// Response type for a received BaseNodeService requests
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug)]
 pub struct BaseNodeServiceResponse {
     pub request_key: RequestKey,
     pub response: NodeCommsResponse,
diff --git a/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs b/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs
index 15fbac986b..08add1032e 100644
--- a/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs
+++ b/base_layer/core/src/base_node/sync/header_sync/synchronizer.rs
@@ -693,9 +693,9 @@ impl<'a, B: BlockchainBackend + 'static> HeaderSynchronizer<'a, B> {
                 split_info.chain_split_hash.to_hex()
             );
             let blocks = self.rewind_blockchain(split_info.chain_split_hash.clone()).await?;
-            // NOTE: `blocks` only contains full blocks that were reorged out, and not the headers.
-            // This may be unexpected for implementers of the rewind hook.
-            self.hooks.call_on_rewind_hooks(blocks);
+            if !blocks.is_empty() {
+                self.hooks.call_on_rewind_hooks(blocks);
+            }
         }

         // Commit the forked chain. At this point
diff --git a/base_layer/core/src/blocks/block.rs b/base_layer/core/src/blocks/block.rs
index ee2731cec0..2b6b511f15 100644
--- a/base_layer/core/src/blocks/block.rs
+++ b/base_layer/core/src/blocks/block.rs
@@ -30,7 +30,7 @@ use std::{

 use log::*;
 use serde::{Deserialize, Serialize};
-use tari_common_types::types::BlockHash;
+use tari_common_types::types::PrivateKey;
 use tari_crypto::tari_utilities::Hashable;
 use tari_utilities::hex::Hex;
 use thiserror::Error;
@@ -42,7 +42,15 @@ use crate::{
     transactions::{
         aggregated_body::AggregateBody,
         tari_amount::MicroTari,
-        transaction::{Transaction, TransactionError, TransactionInput, TransactionKernel, TransactionOutput},
+        transaction::{
+            KernelFeatures,
+            OutputFlags,
+            Transaction,
+            TransactionError,
+            TransactionInput,
+            TransactionKernel,
+            TransactionOutput,
+        },
         CryptoFactories,
     },
 };
@@ -187,13 +195,13 @@ impl BlockBuilder {
         self
     }

-    /// This function adds the provided transaction outputs to the block
+    /// This function adds the provided transaction outputs to the block WITHOUT updating output_mmr_size in the header
    pub fn add_outputs(mut self, mut outputs: Vec<TransactionOutput>) -> Self {
         self.outputs.append(&mut outputs);
         self
     }

-    /// This function adds the provided transaction kernels to the block
+    /// This function adds the provided transaction kernels to the block WITHOUT updating kernel_mmr_size in the header
    pub fn add_kernels(mut self, mut kernels: Vec<TransactionKernel>) -> Self {
         for kernel in &kernels {
             self.total_fee += kernel.fee;
@@ -202,23 +210,15 @@ impl BlockBuilder {
         self
     }

-    /// This functions add the provided transactions to the block
+    /// This function adds the provided transactions to the block, modifying the header MMR counts and offsets
     pub fn with_transactions(mut self, txs: Vec<Transaction>) -> Self {
-        let iter = txs.into_iter();
-        for tx in iter {
-            let (inputs, outputs, kernels) = tx.body.dissolve();
-            self = self.add_inputs(inputs);
-            self.header.output_mmr_size += outputs.len() as u64;
-            self = self.add_outputs(outputs);
-            self.header.kernel_mmr_size += kernels.len() as u64;
-            self = self.add_kernels(kernels);
-            self.header.total_kernel_offset = self.header.total_kernel_offset + tx.offset;
-            self.header.total_script_offset = self.header.total_script_offset + tx.script_offset;
+        for tx in txs {
+            self = self.add_transaction(tx)
         }
         self
     }

-    /// This functions add the provided transactions to the block
+    /// This function adds the provided transaction to the block, modifying the header MMR counts and offsets
     pub fn add_transaction(mut self, tx: Transaction) -> Self {
         let (inputs, outputs, kernels) = tx.body.dissolve();
         self = self.add_inputs(inputs);
@@ -226,7 +226,8 @@ impl BlockBuilder {
         self = self.add_outputs(outputs);
         self.header.kernel_mmr_size += kernels.len() as u64;
         self = self.add_kernels(kernels);
-        self.header.total_kernel_offset = &self.header.total_kernel_offset + &tx.offset;
+        self.header.total_kernel_offset = self.header.total_kernel_offset + tx.offset;
+        self.header.total_script_offset = self.header.total_script_offset + tx.script_offset;
         self
     }
@@ -264,21 +265,43 @@ impl Hashable for Block {

 //---------------------------------- NewBlock --------------------------------------------//

 pub struct NewBlock {
-    pub block_hash: BlockHash,
-}
-
-impl NewBlock {
-    pub fn new(block_hash: BlockHash) -> Self {
-        Self { block_hash }
-    }
+    /// The block header.
+    pub header: BlockHeader,
+    /// Coinbase kernel of the block
+    pub coinbase_kernel: TransactionKernel,
+    pub coinbase_output: TransactionOutput,
+    /// The scalar `s` component of the kernel excess signatures of the transactions contained in the block.
+    pub kernel_excess_sigs: Vec<PrivateKey>,
 }

 impl From<&Block> for NewBlock {
     fn from(block: &Block) -> Self {
+        let coinbase_kernel = block
+            .body
+            .kernels()
+            .iter()
+            .find(|k| k.features.contains(KernelFeatures::COINBASE_KERNEL))
+            .cloned()
+            .expect("Invalid block given to NewBlock::from, no coinbase kernel");
+        let coinbase_output = block
+            .body
+            .outputs()
+            .iter()
+            .find(|o| o.features.flags.contains(OutputFlags::COINBASE_OUTPUT))
+            .cloned()
+            .expect("Invalid block given to NewBlock::from, no coinbase output");
+
         Self {
-            block_hash: block.hash(),
+            header: block.header.clone(),
+            coinbase_kernel,
+            coinbase_output,
+            kernel_excess_sigs: block
+                .body
+                .kernels()
+                .iter()
+                .filter(|k| !k.features.contains(KernelFeatures::COINBASE_KERNEL))
+                .map(|kernel| kernel.excess_sig.get_signature().clone())
+                .collect(),
         }
     }
 }
-
-//---------------------------------------- Tests ----------------------------------------------------//
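Review note (not part of the patch): what the widened NewBlock buys on the wire — the coinbase kernel and output travel with the header because no mempool ever holds them, while every other kernel is represented only by its excess-sig scalar. A small sketch of that invariant, using only what the diff above defines:

```rust
use tari_core::blocks::{Block, NewBlock};

/// Build the compact announcement for a freshly mined block.
fn compact_announcement(block: &Block) -> NewBlock {
    let compact = NewBlock::from(block);
    // One scalar per non-coinbase kernel; the coinbase kernel is carried whole.
    debug_assert_eq!(
        compact.kernel_excess_sigs.len(),
        block.body.kernels().len() - 1,
    );
    compact
}
```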
diff --git a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs
index 708902cb0b..77b703b2f8 100644
--- a/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs
+++ b/base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs
@@ -1242,12 +1242,12 @@ impl LMDBDatabase {
                     index
                 )));
             }
-            debug!(target: LOG_TARGET, "Inserting input `{}`", input.commitment()?.to_hex());
+            trace!(target: LOG_TARGET, "Inserting input `{}`", input.commitment()?.to_hex());
             self.insert_input(txn, current_header_at_height.height, &block_hash, input, index)?;
         }

         for (output, mmr_count) in outputs {
-            debug!(target: LOG_TARGET, "Inserting output `{}`", output.commitment.to_hex());
+            trace!(target: LOG_TARGET, "Inserting output `{}`", output.commitment.to_hex());
             self.insert_output(txn, &block_hash, header.height, &output, mmr_count as u32 - 1)?;
         }
diff --git a/base_layer/core/src/mempool/config.rs b/base_layer/core/src/mempool/config.rs
index 6963d6b8eb..339689a2dc 100644
--- a/base_layer/core/src/mempool/config.rs
+++ b/base_layer/core/src/mempool/config.rs
@@ -20,12 +20,10 @@
 // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

-use std::time::Duration;
-
 use serde::{Deserialize, Serialize};
-use tari_common::{configuration::seconds, NetworkConfigPath};
+use tari_common::NetworkConfigPath;

-use crate::mempool::{consts, reorg_pool::ReorgPoolConfig, unconfirmed_pool::UnconfirmedPoolConfig};
+use crate::mempool::{reorg_pool::ReorgPoolConfig, unconfirmed_pool::UnconfirmedPoolConfig};

 /// Configuration for the Mempool.
 #[derive(Clone, Copy, Deserialize, Serialize, Default)]
@@ -43,9 +41,6 @@ impl NetworkConfigPath for MempoolConfig {
 /// Configuration for the MempoolService.
 #[derive(Clone, Copy, Deserialize, Serialize)]
 pub struct MempoolServiceConfig {
-    /// The allocated waiting time for a request waiting for service responses from the Mempools of remote Base nodes.
-    #[serde(with = "seconds")]
-    pub request_timeout: Duration,
     /// Number of peers from which to initiate a sync. Once this many peers have successfully synced, this node will
     /// not initiate any more mempool syncs. Default: 2
     pub initial_sync_num_peers: usize,
@@ -56,7 +51,6 @@ pub struct MempoolServiceConfig {
 impl Default for MempoolServiceConfig {
     fn default() -> Self {
         Self {
-            request_timeout: consts::MEMPOOL_SERVICE_REQUEST_TIMEOUT,
             initial_sync_num_peers: 2,
             initial_sync_max_transactions: 10_000,
         }
@@ -74,10 +68,8 @@ mod test {
     use config::Config;
     use tari_common::DefaultConfigLoader;

-    use super::{
-        consts::{MEMPOOL_REORG_POOL_CACHE_TTL, MEMPOOL_REORG_POOL_STORAGE_CAPACITY},
-        MempoolConfig,
-    };
+    use super::MempoolConfig;
+    use crate::mempool::reorg_pool::ReorgPoolConfig;

     #[test]
     pub fn test_mempool() {
@@ -90,12 +82,11 @@ mod test {
         // [ ] mempool.mainnet, [X] mempool = 3, [X] Default
         assert_eq!(my_config.unconfirmed_pool.storage_capacity, 3);
         // [ ] mempool.mainnet, [ ] mempool, [X] Default = 512
+        // [ ] mempool.mainnet, [ ] mempool, [X] Default = 10s
         assert_eq!(
-            my_config.reorg_pool.storage_capacity,
-            MEMPOOL_REORG_POOL_STORAGE_CAPACITY
+            my_config.reorg_pool.expiry_height,
+            ReorgPoolConfig::default().expiry_height
         );
-        // [ ] mempool.mainnet, [ ] mempool, [X] Default = 10s
-        assert_eq!(my_config.reorg_pool.tx_ttl, MEMPOOL_REORG_POOL_CACHE_TTL);

         config
             .set("mempool.mainnet.unconfirmed_pool.storage_capacity", 20)
@@ -108,13 +99,11 @@ mod test {
         let my_config = MempoolConfig::load_from(&config).expect("Could not load configuration");
         // [ ] mempool.mainnet, [X] mempool = 3, [X] Default
         assert_eq!(my_config.unconfirmed_pool.storage_capacity, 20);
-        // [ ] mempool.mainnet, [ ] mempool, [X] Default = 512
+        // [ ] mempool.mainnet, [ ] mempool, [X] Default = 10s
         assert_eq!(
-            my_config.reorg_pool.storage_capacity,
-            MEMPOOL_REORG_POOL_STORAGE_CAPACITY
+            my_config.reorg_pool.expiry_height,
+            ReorgPoolConfig::default().expiry_height
         );
-        // [ ] mempool.mainnet, [ ] mempool, [X] Default = 10s
-        assert_eq!(my_config.reorg_pool.tx_ttl, MEMPOOL_REORG_POOL_CACHE_TTL);

         config
             .set("mempool.network", "wrong_network")
diff --git a/base_layer/core/src/mempool/consts.rs b/base_layer/core/src/mempool/consts.rs
index 3f8a4208d6..89d136c36a 100644
--- a/base_layer/core/src/mempool/consts.rs
+++ b/base_layer/core/src/mempool/consts.rs
@@ -20,18 +20,9 @@
 // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

-use std::time::Duration;
-
 /// The maximum number of transactions that can be stored in the Unconfirmed Transaction pool
 pub const MEMPOOL_UNCONFIRMED_POOL_STORAGE_CAPACITY: usize = 40_000;
+
 /// The maximum number of transactions that can be skipped when compiling a set of highest priority transactions,
 /// skipping over large transactions are performed in an attempt to fit more transactions into the remaining space.
 pub const MEMPOOL_UNCONFIRMED_POOL_WEIGHT_TRANSACTION_SKIP_COUNT: usize = 20;
-
-/// The maximum number of transactions that can be stored in the Reorg pool
-pub const MEMPOOL_REORG_POOL_STORAGE_CAPACITY: usize = 5_000;
-/// The time-to-live duration used for transactions stored in the ReorgPool
-pub const MEMPOOL_REORG_POOL_CACHE_TTL: Duration = Duration::from_secs(300);
-
-/// The allocated waiting time for a request waiting for service responses from the mempools of remote base nodes.
-pub const MEMPOOL_SERVICE_REQUEST_TIMEOUT: Duration = Duration::from_secs(60);
diff --git a/base_layer/core/src/mempool/error.rs b/base_layer/core/src/mempool/error.rs
index 9d40fcfe5b..953b20b8af 100644
--- a/base_layer/core/src/mempool/error.rs
+++ b/base_layer/core/src/mempool/error.rs
@@ -23,28 +23,14 @@
 use tari_service_framework::reply_channel::TransportChannelError;
 use thiserror::Error;

-use crate::{
-    chain_storage::ChainStorageError,
-    mempool::{reorg_pool::ReorgPoolError, unconfirmed_pool::UnconfirmedPoolError},
-    transactions::transaction::TransactionError,
-};
+use crate::{mempool::unconfirmed_pool::UnconfirmedPoolError, transactions::transaction::TransactionError};

 #[derive(Debug, Error)]
 pub enum MempoolError {
     #[error("Unconfirmed pool error: `{0}`")]
     UnconfirmedPoolError(#[from] UnconfirmedPoolError),
-    #[error("Reorg pool error: `{0}`")]
-    ReorgPoolError(#[from] ReorgPoolError),
     #[error("Transaction error: `{0}`")]
     TransactionError(#[from] TransactionError),
-    #[error("Chain storage error: `{0}`")]
-    ChainStorageError(#[from] ChainStorageError),
-    #[error("The Blockchain height is undefined")]
-    ChainHeightUndefined,
-    #[error("Blocking task spawn error: `{0}`")]
-    BlockingTaskSpawnError(String),
-    #[error("A problem has been encountered with the storage backend: `{0}`")]
-    BackendError(String),
     #[error("Internal reply channel error: `{0}`")]
     TransportChannelError(#[from] TransportChannelError),
     #[error("The transaction did not contain any kernels")]
diff --git a/base_layer/core/src/mempool/mempool.rs b/base_layer/core/src/mempool/mempool.rs
index 1dbc9aee2a..25d597789e 100644
--- a/base_layer/core/src/mempool/mempool.rs
+++ b/base_layer/core/src/mempool/mempool.rs
@@ -22,7 +22,7 @@

 use std::sync::Arc;

-use tari_common_types::types::Signature;
+use tari_common_types::types::{PrivateKey, Signature};
 use tokio::sync::RwLock;

 use crate::{
@@ -60,12 +60,21 @@ impl Mempool {
         }
     }

-    /// Insert an unconfirmed transaction into the Mempool. The transaction *MUST* have passed through the validation
-    /// pipeline already and will thus always be internally consistent by this stage
+    /// Insert an unconfirmed transaction into the Mempool.
     pub async fn insert(&self, tx: Arc<Transaction>) -> Result<TxStorageResponse, MempoolError> {
         self.pool_storage.write().await.insert(tx)
     }

+    /// Inserts all transactions into the mempool.
+    pub async fn insert_all(&self, transactions: &[Arc<Transaction>]) -> Result<(), MempoolError> {
+        let mut mempool = self.pool_storage.write().await;
+        for tx in transactions {
+            mempool.insert(tx.clone())?;
+        }
+
+        Ok(())
+    }
+
     /// Update the Mempool based on the received published block.
     pub async fn process_published_block(&self, published_block: &Block) -> Result<(), MempoolError> {
         self.pool_storage.write().await.process_published_block(published_block)
@@ -86,7 +95,7 @@ impl Mempool {

     /// Returns all unconfirmed transaction stored in the Mempool, except the transactions stored in the ReOrgPool.
     // TODO: Investigate returning an iterator rather than a large vector of transactions
-    pub async fn snapshot(&self) -> Result<Vec<Arc<Transaction>>, MempoolError> {
+    pub async fn snapshot(&self) -> Vec<Arc<Transaction>> {
         self.pool_storage.read().await.snapshot()
     }

@@ -96,9 +105,16 @@ impl Mempool {
         self.pool_storage.write().await.retrieve(total_weight)
     }

+    pub async fn retrieve_by_excess_sigs(
+        &self,
+        excess_sigs: &[PrivateKey],
+    ) -> (Vec<Arc<Transaction>>, Vec<PrivateKey>) {
+        self.pool_storage.read().await.retrieve_by_excess_sigs(excess_sigs)
+    }
+
     /// Check if the specified excess signature is found in the Mempool.
-    pub async fn has_tx_with_excess_sig(&self, excess_sig: &Signature) -> Result<TxStorageResponse, MempoolError> {
-        Ok(self.pool_storage.read().await.has_tx_with_excess_sig(excess_sig))
+    pub async fn has_tx_with_excess_sig(&self, excess_sig: &Signature) -> TxStorageResponse {
+        self.pool_storage.read().await.has_tx_with_excess_sig(excess_sig)
     }

     /// Check if the specified transaction is stored in the Mempool.
@@ -107,12 +123,12 @@ impl Mempool {
     }

     /// Gathers and returns the stats of the Mempool.
-    pub async fn stats(&self) -> Result<StatsResponse, MempoolError> {
-        self.pool_storage.write().await.stats()
+    pub async fn stats(&self) -> StatsResponse {
+        self.pool_storage.read().await.stats()
     }

     /// Gathers and returns a breakdown of all the transaction in the Mempool.
-    pub async fn state(&self) -> Result<StateResponse, MempoolError> {
-        self.pool_storage.write().await.state()
+    pub async fn state(&self) -> StateResponse {
+        self.pool_storage.read().await.state()
     }
 }
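Review note (not part of the patch): how the two new Mempool entry points compose on the receive path — a hedged sketch with assumed surrounding plumbing (the caller obtains `fetched_from_peer` from the outbound request shown earlier).

```rust
use std::sync::Arc;

use tari_common_types::types::PrivateKey;
use tari_core::{
    mempool::{Mempool, MempoolError},
    transactions::transaction::Transaction,
};

/// Resolve announced excess sigs locally, then feed peer-supplied
/// transactions back into the pool so later announcements hit the cache.
async fn resolve_announced(
    mempool: &Mempool,
    announced: &[PrivateKey],
    fetched_from_peer: Vec<Arc<Transaction>>,
) -> Result<(), MempoolError> {
    let (known, missing) = mempool.retrieve_by_excess_sigs(announced).await;
    println!("{} known locally, {} still missing", known.len(), missing.len());
    mempool.insert_all(&fetched_from_peer).await?;
    Ok(())
}
```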
// TODO: Investigate returning an iterator rather than a large vector of transactions - pub fn snapshot(&self) -> Result>, MempoolError> { - let txs = self.unconfirmed_pool.snapshot(); - Ok(txs) + pub fn snapshot(&self) -> Vec> { + self.unconfirmed_pool.snapshot() } /// Returns a list of transaction ranked by transaction priority up to a given weight. @@ -205,6 +208,16 @@ impl MempoolStorage { Ok(results.retrieved_transactions) } + pub fn retrieve_by_excess_sigs(&self, excess_sigs: &[PrivateKey]) -> (Vec>, Vec) { + let (found_txns, remaining) = self.unconfirmed_pool.retrieve_by_excess_sigs(excess_sigs); + let (found_published_transactions, remaining) = self.reorg_pool.retrieve_by_excess_sigs(&remaining); + + ( + found_txns.into_iter().chain(found_published_transactions).collect(), + remaining, + ) + } + /// Check if the specified excess signature is found in the Mempool. pub fn has_tx_with_excess_sig(&self, excess_sig: &Signature) -> TxStorageResponse { if self.unconfirmed_pool.has_tx_with_excess_sig(excess_sig) { @@ -249,38 +262,33 @@ impl MempoolStorage { } // Returns the total number of transactions in the Mempool. - fn len(&self) -> Result { - Ok(self.unconfirmed_pool.len()) + fn len(&self) -> usize { + self.unconfirmed_pool.len() + self.reorg_pool.len() } /// Gathers and returns the stats of the Mempool. - pub fn stats(&mut self) -> Result { + pub fn stats(&self) -> StatsResponse { let weight = self.get_transaction_weight(0); - Ok(StatsResponse { - total_txs: self.len()?, + StatsResponse { + total_txs: self.len(), unconfirmed_txs: self.unconfirmed_pool.len(), - reorg_txs: self.reorg_pool.len()?, + reorg_txs: self.reorg_pool.len(), total_weight: self.unconfirmed_pool.calculate_weight(&weight), - }) + } } /// Gathers and returns a breakdown of all the transaction in the Mempool. - pub fn state(&mut self) -> Result { - let unconfirmed_pool = self - .unconfirmed_pool - .snapshot() - .iter() - .map(|tx| tx.as_ref().clone()) - .collect::>(); + pub fn state(&self) -> StateResponse { + let unconfirmed_pool = self.unconfirmed_pool.snapshot(); let reorg_pool = self .reorg_pool - .snapshot()? 
+ .snapshot() .iter() - .map(|tx| tx.body.kernels()[0].excess_sig.clone()) + .map(|tx| tx.first_kernel_excess_sig().cloned().unwrap_or_default()) .collect::<Vec<_>>(); - Ok(StateResponse { + StateResponse { unconfirmed_pool, reorg_pool, - }) + } } } diff --git a/base_layer/core/src/mempool/mod.rs b/base_layer/core/src/mempool/mod.rs index 4f18b8e5d2..e5946c401b 100644 --- a/base_layer/core/src/mempool/mod.rs +++ b/base_layer/core/src/mempool/mod.rs @@ -67,6 +67,7 @@ pub use service::{MempoolServiceError, MempoolServiceInitializer, OutboundMempoo #[cfg(feature = "base_node")] mod sync_protocol; use core::fmt::{Display, Error, Formatter}; +use std::sync::Arc; use serde::{Deserialize, Serialize}; #[cfg(feature = "base_node")] @@ -88,15 +89,15 @@ impl Display for StatsResponse { fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), Error> { write!( fmt, - "Mempool stats: Total transactions: {}, Unconfirmed: {}, Published: {}, Total Weight: {}", + "Mempool stats: Total transactions: {}, Unconfirmed: {}, Published: {}, Total Weight: {}g", self.total_txs, self.unconfirmed_txs, self.reorg_txs, self.total_weight ) } } -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, PartialEq)] pub struct StateResponse { - pub unconfirmed_pool: Vec<Transaction>, + pub unconfirmed_pool: Vec<Arc<Transaction>>, pub reorg_pool: Vec<Signature>, } diff --git a/base_layer/core/src/mempool/proto/mempool_request.rs b/base_layer/core/src/mempool/proto/mempool_request.rs deleted file mode 100644 index 6509fc7bbc..0000000000 --- a/base_layer/core/src/mempool/proto/mempool_request.rs +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2019, The Tari Project -// -// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the -// following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following -// disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the -// following disclaimer in the documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote -// products derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- -use std::convert::{TryFrom, TryInto}; - -use tari_crypto::tari_utilities::ByteArrayError; - -use super::mempool::{ - mempool_service_request::Request as ProtoMempoolRequest, - MempoolServiceRequest as ProtoMempoolServiceRequest, -}; -use crate::mempool::service::{MempoolRequest, MempoolServiceRequest}; - -impl TryInto for ProtoMempoolRequest { - type Error = String; - - fn try_into(self) -> Result { - use ProtoMempoolRequest::*; - let request = match self { - // Field was not specified - GetStats(_) => MempoolRequest::GetStats, - GetState(_) => MempoolRequest::GetState, - GetTxStateByExcessSig(excess_sig) => MempoolRequest::GetTxStateByExcessSig( - excess_sig.try_into().map_err(|err: ByteArrayError| err.to_string())?, - ), - SubmitTransaction(tx) => MempoolRequest::SubmitTransaction(tx.try_into()?), - }; - Ok(request) - } -} - -impl TryFrom for ProtoMempoolRequest { - type Error = String; - - fn try_from(request: MempoolRequest) -> Result { - use MempoolRequest::*; - Ok(match request { - GetStats => ProtoMempoolRequest::GetStats(true), - GetState => ProtoMempoolRequest::GetState(true), - GetTxStateByExcessSig(excess_sig) => ProtoMempoolRequest::GetTxStateByExcessSig(excess_sig.into()), - SubmitTransaction(tx) => ProtoMempoolRequest::SubmitTransaction(tx.try_into()?), - }) - } -} - -impl TryFrom for MempoolServiceRequest { - type Error = String; - - fn try_from(request: ProtoMempoolServiceRequest) -> Result { - Ok(Self { - request_key: request.request_key, - request: request - .request - .ok_or_else(|| "Response field not present".to_string())? - .try_into()?, - }) - } -} diff --git a/base_layer/core/src/mempool/proto/mempool_response.rs b/base_layer/core/src/mempool/proto/mempool_response.rs deleted file mode 100644 index 900115c4ec..0000000000 --- a/base_layer/core/src/mempool/proto/mempool_response.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2019, The Tari Project -// -// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the -// following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following -// disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the -// following disclaimer in the documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote -// products derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -use std::convert::{TryFrom, TryInto}; - -use super::mempool::mempool_service_response::Response as ProtoMempoolResponse; -use crate::mempool::{ - proto::mempool::{ - MempoolServiceResponse as ProtoMempoolServiceResponse, - TxStorageResponse as ProtoTxStorageResponse, - }, - service::{MempoolResponse, MempoolServiceResponse}, -}; - -impl TryInto for ProtoMempoolResponse { - type Error = String; - - fn try_into(self) -> Result { - use ProtoMempoolResponse::*; - let response = match self { - Stats(stats_response) => MempoolResponse::Stats(stats_response.try_into()?), - State(state_response) => MempoolResponse::State(state_response.try_into()?), - TxStorage(tx_storage_response) => { - let tx_storage_response = ProtoTxStorageResponse::from_i32(tx_storage_response) - .ok_or_else(|| "Invalid or unrecognised `TxStorageResponse` enum".to_string())?; - MempoolResponse::TxStorage(tx_storage_response.try_into()?) - }, - }; - Ok(response) - } -} - -impl TryFrom for MempoolServiceResponse { - type Error = String; - - fn try_from(response: ProtoMempoolServiceResponse) -> Result { - Ok(Self { - request_key: response.request_key, - response: response - .response - .ok_or_else(|| "Response field not present to convert".to_string())? - .try_into()?, - }) - } -} - -impl TryFrom for ProtoMempoolResponse { - type Error = String; - - fn try_from(response: MempoolResponse) -> Result { - use MempoolResponse::*; - Ok(match response { - Stats(stats_response) => ProtoMempoolResponse::Stats(stats_response.into()), - State(state_response) => ProtoMempoolResponse::State(state_response.try_into()?), - TxStorage(tx_storage_response) => { - let tx_storage_response: ProtoTxStorageResponse = tx_storage_response.into(); - ProtoMempoolResponse::TxStorage(tx_storage_response.into()) - }, - }) - } -} diff --git a/base_layer/core/src/mempool/proto/mod.rs b/base_layer/core/src/mempool/proto/mod.rs index 9f85d0e5c8..de26eed3e3 100644 --- a/base_layer/core/src/mempool/proto/mod.rs +++ b/base_layer/core/src/mempool/proto/mod.rs @@ -20,22 +20,11 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -pub use mempool::{ - mempool_service_request, - mempool_service_response, - InventoryIndexes, - MempoolServiceRequest, - MempoolServiceResponse, - TransactionInventory, - TransactionItem, -}; +pub use mempool::{InventoryIndexes, TransactionInventory, TransactionItem}; use crate::proto::mempool; +mod state_response; +mod stats_response; mod sync_protocol; -// TODO: Clean up -pub mod mempool_request; -pub mod mempool_response; -pub mod state_response; -pub mod stats_response; -pub mod tx_storage_response; +mod tx_storage_response; diff --git a/base_layer/core/src/mempool/proto/service_request.proto b/base_layer/core/src/mempool/proto/service_request.proto deleted file mode 100644 index 3a83d54df6..0000000000 --- a/base_layer/core/src/mempool/proto/service_request.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -import "types.proto"; -import "transaction.proto"; - -package tari.mempool; - -// Request type for a received MempoolService request. -message MempoolServiceRequest { - uint64 request_key = 1; - oneof request { - // Indicates a GetStats request. The value of the bool should be ignored. - bool get_stats = 2; - // Indicates a GetState request. The value of the bool should be ignored. - bool get_state = 3; - // Indicates a GetTxStateByExcessSig request. 
- tari.types.Signature get_tx_state_by_excess_sig = 4; - // Indicates a SubmitTransaction request. - tari.types.Transaction submit_transaction = 5; - } -} diff --git a/base_layer/core/src/mempool/proto/service_response.proto b/base_layer/core/src/mempool/proto/service_response.proto deleted file mode 100644 index bee472bfaa..0000000000 --- a/base_layer/core/src/mempool/proto/service_response.proto +++ /dev/null @@ -1,18 +0,0 @@ -syntax = "proto3"; - -import "stats_response.proto"; -import "state_response.proto"; -import "tx_storage_response.proto"; - -package tari.mempool; - -// Response type for a received MempoolService requests -message MempoolServiceResponse { - uint64 request_key = 1; - oneof response { - StatsResponse stats = 2; - StateResponse state = 3; - TxStorageResponse tx_storage = 4; - } -} - diff --git a/base_layer/core/src/mempool/proto/state_response.proto b/base_layer/core/src/mempool/proto/state_response.proto index eb2e2787b5..ee2695619d 100644 --- a/base_layer/core/src/mempool/proto/state_response.proto +++ b/base_layer/core/src/mempool/proto/state_response.proto @@ -1,20 +1,13 @@ syntax = "proto3"; import "transaction.proto"; +import "types.proto"; package tari.mempool; -// TODO: Remove duplicate Signature, transaction also has a Signature. -// Define the explicit Signature implementation for the Tari base layer. A different signature scheme can be -// employed by redefining this type. -message Signature { - bytes public_nonce = 1; - bytes signature = 2; -} - -message StateResponse { +message StateResponse{ // List of transactions in unconfirmed pool. repeated tari.types.Transaction unconfirmed_pool = 1; // List of transactions in reorg pool. - repeated Signature reorg_pool = 4; + repeated tari.types.Signature reorg_pool = 4; } diff --git a/base_layer/core/src/mempool/proto/state_response.rs b/base_layer/core/src/mempool/proto/state_response.rs index 1767adf747..54d5c66db7 100644 --- a/base_layer/core/src/mempool/proto/state_response.rs +++ b/base_layer/core/src/mempool/proto/state_response.rs @@ -20,40 +20,12 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::convert::{TryFrom, TryInto}; - -use tari_common_types::types::{PrivateKey, PublicKey, Signature}; -use tari_crypto::tari_utilities::{ByteArray, ByteArrayError}; - -use crate::{ - mempool::{ - proto::mempool::{Signature as ProtoSignature, StateResponse as ProtoStateResponse}, - StateResponse, - }, - proto::{mempool::Signature as SignatureProto, types::Transaction}, +use std::{ + convert::{TryFrom, TryInto}, + sync::Arc, }; -//---------------------------------- Signature --------------------------------------------// -// TODO: Remove duplicate Signature, transaction also has a Signature. 
-impl TryFrom<ProtoSignature> for Signature { - type Error = ByteArrayError; - - fn try_from(sig: ProtoSignature) -> Result<Self, Self::Error> { - let public_nonce = PublicKey::from_bytes(&sig.public_nonce)?; - let signature = PrivateKey::from_bytes(&sig.signature)?; - - Ok(Self::new(public_nonce, signature)) - } -} - -impl From<Signature> for ProtoSignature { - fn from(sig: Signature) -> Self { - Self { - public_nonce: sig.get_public_nonce().to_vec(), - signature: sig.get_signature().to_vec(), - } - } -} +use crate::mempool::{proto::mempool::StateResponse as ProtoStateResponse, StateResponse}; //--------------------------------- StateResponse -------------------------------------------// @@ -65,14 +37,14 @@ impl TryFrom<ProtoStateResponse> for StateResponse { unconfirmed_pool: state .unconfirmed_pool .into_iter() - .map(TryInto::try_into) + .map(|tx| tx.try_into().map(Arc::new)) .collect::<Result<Vec<_>, _>>()?, reorg_pool: state .reorg_pool .into_iter() .map(TryInto::try_into) .collect::<Result<Vec<_>, _>>() - .map_err(|err: ByteArrayError| err.to_string())?, + .map_err(|_| "Malformed excess sig")?, }) } } @@ -86,12 +58,8 @@ impl TryFrom<StateResponse> for ProtoStateResponse { .unconfirmed_pool .into_iter() .map(TryInto::try_into) - .collect::<Result<Vec<_>, _>>()?, - reorg_pool: state - .reorg_pool - .into_iter() - .map(Into::into) - .collect::<Vec<_>>(), + .collect::<Result<Vec<_>, _>>()?, + reorg_pool: state.reorg_pool.into_iter().map(Into::into).collect::<Vec<_>>(), }) } } diff --git a/base_layer/core/src/mempool/reorg_pool/error.rs b/base_layer/core/src/mempool/reorg_pool/error.rs deleted file mode 100644 index 7fee0de031..0000000000 --- a/base_layer/core/src/mempool/reorg_pool/error.rs +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2019 The Tari Project -// -// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the -// following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following -// disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the -// following disclaimer in the documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote -// products derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
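The conversion above leans on the collect-into-Result idiom: map a fallible conversion over every element, wrap each success in an Arc, and fail the whole batch on the first error. A self-contained sketch of the same idiom with toy types (parse errors standing in for malformed protobuf):

    use std::sync::Arc;

    // Fallible per-element conversion; collect() stops at the first Err.
    fn parse_all(items: &[&str]) -> Result<Vec<Arc<u32>>, std::num::ParseIntError> {
        items.iter().map(|s| s.parse::<u32>().map(Arc::new)).collect()
    }

    fn main() {
        assert_eq!(parse_all(&["1", "2"]).unwrap().len(), 2);
        assert!(parse_all(&["1", "x"]).is_err());
    }
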
- -use thiserror::Error; -#[derive(Debug, Error)] -pub enum ReorgPoolError { - #[error("A problem has been encountered with the storage backend: `{0}`")] - BackendError(String), -} diff --git a/base_layer/core/src/mempool/reorg_pool/mod.rs b/base_layer/core/src/mempool/reorg_pool/mod.rs index 31d15c8d68..43e3ffd1fd 100644 --- a/base_layer/core/src/mempool/reorg_pool/mod.rs +++ b/base_layer/core/src/mempool/reorg_pool/mod.rs @@ -20,12 +20,6 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -mod error; #[allow(clippy::module_inception)] mod reorg_pool; -mod reorg_pool_storage; - -// Public re-exports -pub use error::ReorgPoolError; pub use reorg_pool::{ReorgPool, ReorgPoolConfig}; -pub use reorg_pool_storage::ReorgPoolStorage; diff --git a/base_layer/core/src/mempool/reorg_pool/reorg_pool.rs b/base_layer/core/src/mempool/reorg_pool/reorg_pool.rs index 48538c4028..bcd68e4b69 100644 --- a/base_layer/core/src/mempool/reorg_pool/reorg_pool.rs +++ b/base_layer/core/src/mempool/reorg_pool/reorg_pool.rs @@ -20,102 +20,324 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{sync::Arc, time::Duration}; +use std::{ + collections::{HashMap, HashSet}, + hash::Hash, + sync::Arc, +}; +use log::*; use serde::{Deserialize, Serialize}; -use tari_common::configuration::seconds; -use tari_common_types::types::Signature; - -use crate::{ - blocks::Block, - mempool::{ - consts::{MEMPOOL_REORG_POOL_CACHE_TTL, MEMPOOL_REORG_POOL_STORAGE_CAPACITY}, - reorg_pool::{ReorgPoolError, ReorgPoolStorage}, - }, - transactions::transaction::Transaction, -}; +use tari_common_types::types::{PrivateKey, Signature}; +use tari_utilities::{hex::Hex, Hashable}; + +use crate::{blocks::Block, transactions::transaction::Transaction}; + +pub const LOG_TARGET: &str = "c::mp::reorg_pool::reorg_pool_storage"; /// Configuration for the ReorgPool -#[derive(Clone, Copy, Deserialize, Serialize)] +#[derive(Clone, Copy, Serialize, Deserialize)] pub struct ReorgPoolConfig { - /// The maximum number of transactions that can be stored in the ReorgPool - pub storage_capacity: usize, - /// The Time-to-live for each stored transaction - #[serde(with = "seconds")] - pub tx_ttl: Duration, + /// The height horizon to clear transactions from the reorg pool. + pub expiry_height: u64, } impl Default for ReorgPoolConfig { fn default() -> Self { - Self { - storage_capacity: MEMPOOL_REORG_POOL_STORAGE_CAPACITY, - tx_ttl: MEMPOOL_REORG_POOL_CACHE_TTL, - } + Self { expiry_height: 5 } } } +type TransactionId = usize; + /// The ReorgPool consists of all transactions that have recently been added to blocks. /// When a potential blockchain reorganization occurs the transactions can be recovered from the ReorgPool and can be -/// added back into the UnconfirmedPool. Transactions in the ReOrg pool have a limited Time-to-live and will be removed -/// from the pool when the Time-to-live thresholds is reached. Also, when the capacity of the pool has been reached, the -/// oldest transactions will be removed to make space for incoming transactions. +/// added back into the UnconfirmedPool. Transactions in the ReOrg pool expire as block height moves on. 
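Under the new config a transaction stored at height h is dropped once the chain reaches h + expiry_height, replacing the old wall-clock TTL and capacity pair. A small worked example of the height-horizon arithmetic the pool's cleanup relies on (only checked_sub is assumed; the function name is illustrative):

    // Height at or below which entries become stale when a block at
    // `current_height` is processed; None while the chain is shorter than
    // the horizon.
    fn expired_height(current_height: u64, expiry_height: u64) -> Option<u64> {
        current_height.checked_sub(expiry_height)
    }

    fn main() {
        // With the default expiry_height of 5, processing block 105 clears
        // transactions that were published at height 100 (or earlier).
        assert_eq!(expired_height(105, 5), Some(100));
        // checked_sub avoids underflow for the first few blocks.
        assert_eq!(expired_height(3, 5), None);
    }
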
pub struct ReorgPool { - pool_storage: ReorgPoolStorage, + config: ReorgPoolConfig, + key_counter: usize, + tx_by_key: HashMap<TransactionId, Arc<Transaction>>, + txs_by_signature: HashMap<PrivateKey, Vec<TransactionId>>, + txs_by_height: HashMap<u64, Vec<TransactionId>>, } impl ReorgPool { /// Create a new ReorgPool with the specified configuration pub fn new(config: ReorgPoolConfig) -> Self { Self { - pool_storage: ReorgPoolStorage::new(config), + config, + key_counter: 0, + tx_by_key: HashMap::new(), + txs_by_signature: HashMap::new(), + txs_by_height: HashMap::new(), } } - /// Insert a set of new transactions into the ReorgPool. Published transactions will have a limited Time-to-live in - /// the ReorgPool and will be discarded once the Time-to-live threshold has been reached. - pub fn insert_txs(&mut self, transactions: Vec<Arc<Transaction>>) -> Result<(), ReorgPoolError> { - self.pool_storage.insert_txs(transactions); - Ok(()) + /// Insert a new transaction into the ReorgPool. Published transactions will be discarded once they are + /// `config.expiry_height` blocks old. + fn insert(&mut self, height: u64, tx: Arc<Transaction>) { + let excess_hex = tx + .first_kernel_excess_sig() + .map(|s| s.get_signature().to_hex()) + .unwrap_or_else(|| "no kernel!".to_string()); + if tx + .body + .kernels() + .iter() + .all(|k| self.txs_by_signature.contains_key(k.excess_sig.get_signature())) + { + debug!( + target: LOG_TARGET, + "Transaction {} already found in reorg pool", excess_hex + ); + self.cleanup_expired(height); + return; + } + + let new_key = self.get_next_key(); + for kernel in tx.body.kernels() { + let sig = kernel.excess_sig.get_signature(); + self.txs_by_signature.entry(sig.clone()).or_default().push(new_key); + } + + trace!( + target: LOG_TARGET, + "Inserted transaction {} into reorg pool at height {}", + new_key, + height + ); + self.tx_by_key.insert(new_key, tx); + self.txs_by_height.entry(height).or_default().push(new_key); + self.cleanup_expired(height); + } + + /// Insert a set of new transactions into the ReorgPool + pub fn insert_all(&mut self, height: u64, txs: Vec<Arc<Transaction>>) { + debug!( + target: LOG_TARGET, + "Inserting {} transaction(s) into reorg pool at height {}", + txs.len(), + height + ); + + // Even if we are not inserting any transactions, we still need to clear out the pool by height + if txs.is_empty() { + self.cleanup_expired(height); + } + for tx in txs.into_iter() { + self.insert(height, tx); + } } - /// Insert a new transaction into the ReorgPool. Published transactions will have a limited Time-to-live in - /// the ReorgPool and will be discarded once the Time-to-live threshold has been reached.
- pub fn _insert(&mut self, transaction: Arc<Transaction>) -> Result<(), ReorgPoolError> { - self.pool_storage.insert(transaction); - Ok(()) + pub fn retrieve_by_excess_sigs(&self, excess_sigs: &[PrivateKey]) -> (Vec<Arc<Transaction>>, Vec<PrivateKey>) { + // Hashset used to prevent duplicates + let mut found = HashSet::new(); + let mut remaining = Vec::new(); + + for sig in excess_sigs { + match self.txs_by_signature.get(sig) { + Some(ids) => found.extend(ids.iter()), + None => remaining.push(sig.clone()), + } + } + + let found = found + .into_iter() + .map(|id| { + self.tx_by_key + .get(id) + .expect("mempool indexes out of sync: transaction exists in txs_by_signature but not in tx_by_key") + }) + .cloned() + .collect(); + + (found, remaining) } /// Check if a transaction is stored in the ReorgPool pub fn has_tx_with_excess_sig(&self, excess_sig: &Signature) -> bool { - self.pool_storage.has_tx_with_excess_sig(excess_sig) + self.txs_by_signature.contains_key(excess_sig.get_signature()) } - /// Remove the transactions from the ReorgPool that were used in provided removed blocks. The transactions can be - /// resubmitted to the Unconfirmed Pool. + /// Remove the transactions from the ReorgPool that were used in provided removed blocks. The transactions + /// can be resubmitted to the Unconfirmed Pool. pub fn remove_reorged_txs_and_discard_double_spends( &mut self, removed_blocks: &[Arc<Block>], new_blocks: &[Arc<Block>], - ) -> Result<Vec<Arc<Transaction>>, ReorgPoolError> { - Ok(self - .pool_storage - .remove_reorged_txs_and_discard_double_spends(removed_blocks, new_blocks)) + ) -> Vec<Arc<Transaction>> { + for block in new_blocks { + debug!( + target: LOG_TARGET, + "Mempool processing reorg added new block {} ({})", + block.header.height, + block.header.hash().to_hex(), + ); + self.discard_double_spends(block); + } + + let mut removed_txs = Vec::new(); + for block in removed_blocks { + debug!( + target: LOG_TARGET, + "Mempool processing reorg removed block {} ({})", + block.header.height, + block.header.hash().to_hex(), + ); + for kernel in block.body.kernels() { + if let Some(removed_tx_ids) = self.txs_by_signature.remove(kernel.excess_sig.get_signature()) { + for tx_id in removed_tx_ids { + if let Some(tx) = self.tx_by_key.remove(&tx_id) { + self.remove_from_height_index(tx_id); + trace!(target: LOG_TARGET, "Removed tx from reorg pool: {:?}", tx_id); + removed_txs.push(tx); + } + } + } + } + } + + removed_txs + } + + fn remove_from_height_index(&mut self, tx_id: TransactionId) { + let mut heights_to_remove = Vec::new(); + for (height, ids) in self.txs_by_height.iter_mut() { + if let Some(pos) = ids.iter().position(|id| *id == tx_id) { + ids.remove(pos); + if ids.is_empty() { + heights_to_remove.push(*height); + } + } + } + + for h in heights_to_remove { + self.txs_by_height.remove(&h); + } + } + + /// Remove double-spends from the ReorgPool. These transactions were orphaned by the provided published + /// block. Check if any of the transactions in the ReorgPool have inputs that were spent by the provided + /// published block.
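retrieve_by_excess_sigs threads a "remaining" list through two lookups: the unconfirmed pool is probed first and whatever it misses is handed to the reorg pool, with anything still unknown reported back to the caller. A runnable toy version of that two-pass contract (plain HashMaps stand in for the signature indexes):

    use std::collections::HashMap;

    // First pass: unconfirmed map. Second pass: reorg map, fed only the
    // misses. Returns the hits plus the keys found in neither map.
    fn retrieve_two_pass(
        unconfirmed: &HashMap<u32, &'static str>,
        reorg: &HashMap<u32, &'static str>,
        keys: &[u32],
    ) -> (Vec<&'static str>, Vec<u32>) {
        let mut found = Vec::new();
        let mut remaining = Vec::new();
        for k in keys {
            match unconfirmed.get(k) {
                Some(tx) => found.push(*tx),
                None => remaining.push(*k),
            }
        }
        let mut missing = Vec::new();
        for k in remaining {
            match reorg.get(&k) {
                Some(tx) => found.push(*tx),
                None => missing.push(k),
            }
        }
        (found, missing)
    }

    fn main() {
        let unconfirmed = HashMap::from([(1, "tx1")]);
        let reorg = HashMap::from([(2, "tx2")]);
        let (found, missing) = retrieve_two_pass(&unconfirmed, &reorg, &[1, 2, 3]);
        assert_eq!(found, vec!["tx1", "tx2"]);
        assert_eq!(missing, vec![3]);
    }
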
+ fn discard_double_spends(&mut self, published_block: &Block) { + let mut to_remove = Vec::new(); + for (id, tx) in self.tx_by_key.iter() { + for input in tx.body.inputs() { + if published_block.body.inputs().contains(input) { + to_remove.push(*id); + } + } + } + + for id in to_remove { + self.remove(id); + trace!(target: LOG_TARGET, "Removed double spend tx {} from reorg pool", id); + } + } + + fn remove(&mut self, tx_id: TransactionId) -> Option<Arc<Transaction>> { + let tx = self.tx_by_key.remove(&tx_id)?; + + for kernel in tx.body.kernels() { + let sig = kernel.excess_sig.get_signature(); + let ids = self.txs_by_signature.get_mut(sig).expect("reorg pool out of sync"); + let pos = ids.iter().position(|k| *k == tx_id).expect("reorg mempool out of sync"); + ids.remove(pos); + if ids.is_empty() { + self.txs_by_signature.remove(sig); + } + } + + self.remove_from_height_index(tx_id); + + Some(tx) } /// Returns the total number of published transactions stored in the ReorgPool - pub fn len(&mut self) -> Result<usize, ReorgPoolError> { - Ok(self.pool_storage.len()) + pub fn len(&self) -> usize { + self.tx_by_key.len() } /// Returns all transactions stored in the ReorgPool. - pub fn snapshot(&mut self) -> Result<Vec<Arc<Transaction>>, ReorgPoolError> { - Ok(self.pool_storage.snapshot()) + pub fn snapshot(&self) -> Vec<Arc<Transaction>> { + self.tx_by_key.values().cloned().collect() + } + + fn get_next_key(&mut self) -> usize { + let key = self.key_counter; + self.key_counter = (self.key_counter + 1) % usize::MAX; + key + } + + fn cleanup_expired(&mut self, height: u64) { + let height = match height.checked_sub(self.config.expiry_height) { + Some(h) => h, + None => return, + }; + + let heights_to_remove = self + .txs_by_height + .keys() + .filter(|h| **h <= height) + .copied() + .collect::<Vec<_>>(); + for height in heights_to_remove { + if let Some(tx_ids) = self.txs_by_height.remove(&height) { + debug!( + target: LOG_TARGET, + "Clearing {} transactions from mempool for height {}", + tx_ids.len(), + height + ); + for tx_id in tx_ids { + let tx = self.tx_by_key.remove(&tx_id).expect("reorg mempool out of sync"); + + for kernel in tx.body.kernels() { + let sig = kernel.excess_sig.get_signature(); + if let Some(keys) = self.txs_by_signature.get_mut(sig) { + let pos = keys + .iter() + .position(|k| *k == tx_id) + .expect("reorg mempool out of sync"); + keys.remove(pos); + if keys.is_empty() { + self.txs_by_signature.remove(sig); + } + } + } + } + } + } + } + + pub fn compact(&mut self) { + fn shrink_hashmap<K: Eq + Hash, V>(map: &mut HashMap<K, V>) -> (usize, usize) { + let cap = map.capacity(); + let extra_cap = cap - map.len(); + if extra_cap > 100 { + map.shrink_to(map.len() + (extra_cap / 2)); + } + + (cap, map.capacity()) + } + + let (old, new) = shrink_hashmap(&mut self.tx_by_key); + shrink_hashmap(&mut self.txs_by_signature); + shrink_hashmap(&mut self.txs_by_height); + + if old - new > 0 { + debug!( + target: LOG_TARGET, + "Shrunk reorg mempool memory usage ({}/{}) ~{}%", + new, + old, + (((old - new) as f32 / old as f32) * 100.0).round() as usize + ); + } } } #[cfg(test)] mod test { - use std::{thread, time::Duration}; use tari_common::configuration::Network; @@ -128,7 +350,7 @@ mod test { }; #[test] - fn test_insert_rlu_and_ttl() { + fn test_insert_expire_by_height() { let tx1 = Arc::new(tx!(MicroTari(100_000), fee: MicroTari(100), lock: 4000, inputs: 2, outputs: 1).0); let tx2 = Arc::new(tx!(MicroTari(100_000), fee: MicroTari(60), lock: 3000, inputs: 2, outputs: 1).0); let tx3 = Arc::new(tx!(MicroTari(100_000), fee: MicroTari(20), lock: 2500, inputs: 2, outputs: 1).0); @@
-136,23 +358,24 @@ mod test { let tx5 = Arc::new(tx!(MicroTari(100_000), fee: MicroTari(100), lock: 2000, inputs: 2, outputs: 1).0); let tx6 = Arc::new(tx!(MicroTari(100_000), fee: MicroTari(120), lock: 5500, inputs: 2, outputs: 1).0); - let mut reorg_pool = ReorgPool::new(ReorgPoolConfig { - storage_capacity: 3, - tx_ttl: Duration::from_millis(50), - }); - reorg_pool - .insert_txs(vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()]) - .unwrap(); + let mut reorg_pool = ReorgPool::new(ReorgPoolConfig { expiry_height: 2 }); + reorg_pool.insert(1, tx1.clone()); + reorg_pool.insert(2, tx2.clone()); + + assert!(reorg_pool.has_tx_with_excess_sig(&tx1.body.kernels()[0].excess_sig)); + assert!(reorg_pool.has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig)); + + reorg_pool.insert(3, tx3.clone()); + reorg_pool.insert(4, tx4.clone()); // Check that tx1 and tx2 have expired: inserting at heights 3 and 4 moved the expiry horizon past them assert!(!reorg_pool.has_tx_with_excess_sig(&tx1.body.kernels()[0].excess_sig)); - assert!(reorg_pool.has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig)); + assert!(!reorg_pool.has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig)); assert!(reorg_pool.has_tx_with_excess_sig(&tx3.body.kernels()[0].excess_sig)); assert!(reorg_pool.has_tx_with_excess_sig(&tx4.body.kernels()[0].excess_sig)); - // Check that transactions that have been in the pool for longer than their Time-to-live have been removed - thread::sleep(Duration::from_millis(51)); - reorg_pool.insert_txs(vec![tx5.clone(), tx6.clone()]).unwrap(); - assert_eq!(reorg_pool.len().unwrap(), 2); + reorg_pool.insert(5, tx5.clone()); + reorg_pool.insert(6, tx6.clone()); + assert_eq!(reorg_pool.len(), 2); assert!(!reorg_pool.has_tx_with_excess_sig(&tx1.body.kernels()[0].excess_sig)); assert!(!reorg_pool.has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig)); assert!(!reorg_pool.has_tx_with_excess_sig(&tx3.body.kernels()[0].excess_sig)); @@ -172,23 +395,18 @@ mod test { let tx5 = Arc::new(tx!(MicroTari(10_000), fee: MicroTari(10), lock: 2000, inputs: 2, outputs: 1).0); let tx6 = Arc::new(tx!(MicroTari(10_000), fee: MicroTari(12), lock: 5500, inputs: 2, outputs: 1).0); - let mut reorg_pool = ReorgPool::new(ReorgPoolConfig { - storage_capacity: 5, - tx_ttl: Duration::from_millis(50), - }); - reorg_pool - .insert_txs(vec![ - tx1.clone(), - tx2.clone(), - tx3.clone(), - tx4.clone(), - tx5.clone(), - tx6.clone(), - ]) - .unwrap(); + let mut reorg_pool = ReorgPool::new(ReorgPoolConfig { expiry_height: 10 }); + reorg_pool.insert_all(1, vec![ + tx1.clone(), + tx2.clone(), + tx3.clone(), + tx4.clone(), + tx5.clone(), + tx6.clone(), + ]); // All six transactions are retained: the pool no longer has a capacity limit and nothing has expired yet - assert_eq!(reorg_pool.len().unwrap(), 5); - assert!(!reorg_pool.has_tx_with_excess_sig(&tx1.body.kernels()[0].excess_sig)); + assert_eq!(reorg_pool.len(), 6); + assert!(reorg_pool.has_tx_with_excess_sig(&tx1.body.kernels()[0].excess_sig)); assert!(reorg_pool.has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig)); assert!(reorg_pool.has_tx_with_excess_sig(&tx3.body.kernels()[0].excess_sig)); assert!(reorg_pool.has_tx_with_excess_sig(&tx4.body.kernels()[0].excess_sig)); @@ -200,15 +418,14 @@ create_orphan_block(4000, vec![(*tx1).clone(), (*tx2).clone()], &consensus).into(), ]; - let removed_txs = reorg_pool - .remove_reorged_txs_and_discard_double_spends(reorg_blocks, &[]) - .unwrap(); - assert_eq!(removed_txs.len(), 3); + let removed_txs =
reorg_pool.remove_reorged_txs_and_discard_double_spends(reorg_blocks, &[]); + assert_eq!(removed_txs.len(), 4); + assert!(removed_txs.contains(&tx1)); assert!(removed_txs.contains(&tx2)); assert!(removed_txs.contains(&tx3)); assert!(removed_txs.contains(&tx4)); - assert_eq!(reorg_pool.len().unwrap(), 2); + assert_eq!(reorg_pool.len(), 2); assert!(!reorg_pool.has_tx_with_excess_sig(&tx1.body.kernels()[0].excess_sig)); assert!(!reorg_pool.has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig)); assert!(!reorg_pool.has_tx_with_excess_sig(&tx3.body.kernels()[0].excess_sig)); diff --git a/base_layer/core/src/mempool/reorg_pool/reorg_pool_storage.rs b/base_layer/core/src/mempool/reorg_pool/reorg_pool_storage.rs deleted file mode 100644 index d6184ca0fc..0000000000 --- a/base_layer/core/src/mempool/reorg_pool/reorg_pool_storage.rs +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2019 The Tari Project -// -// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the -// following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following -// disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the -// following disclaimer in the documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote -// products derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -use std::sync::Arc; - -use log::*; -use tari_common_types::types::{PrivateKey, Signature}; -use tari_crypto::tari_utilities::hex::Hex; -use tari_utilities::Hashable; -use ttl_cache::TtlCache; - -use crate::{blocks::Block, mempool::reorg_pool::reorg_pool::ReorgPoolConfig, transactions::transaction::Transaction}; - -pub const LOG_TARGET: &str = "c::mp::reorg_pool::reorg_pool_storage"; - -/// Reorg makes use of ReorgPoolStorage to provide thread save access to its TtlCache. -/// The ReorgPoolStorage consists of all transactions that have recently been added to blocks. -/// When a potential blockchain reorganization occurs the transactions can be recovered from the ReorgPool and can be -/// added back into the UnconfirmedPool. Transactions in the ReOrg pool have a limited Time-to-live and will be removed -/// from the pool when the Time-to-live thresholds is reached. Also, when the capacity of the pool has been reached, the -/// oldest transactions will be removed to make space for incoming transactions. 
-pub struct ReorgPoolStorage { - config: ReorgPoolConfig, - txs_by_signature: TtlCache>, -} - -impl ReorgPoolStorage { - /// Create a new ReorgPoolStorage with the specified configuration - pub fn new(config: ReorgPoolConfig) -> Self { - Self { - config, - txs_by_signature: TtlCache::new(config.storage_capacity), - } - } - - /// Insert a new transaction into the ReorgPoolStorage. Published transactions will have a limited Time-to-live in - /// the ReorgPoolStorage and will be discarded once the Time-to-live threshold has been reached. - pub fn insert(&mut self, tx: Arc) { - match tx.first_kernel_excess_sig() { - Some(excess_sig) => { - let tx_key = excess_sig.get_signature(); - let tx_key_hex = tx_key.to_hex(); - trace!(target: LOG_TARGET, "{}", tx); - self.txs_by_signature.insert(tx_key.clone(), tx, self.config.tx_ttl); - debug!( - target: LOG_TARGET, - "Inserted transaction with signature {} into reorg pool:", tx_key_hex - ); - }, - None => { - warn!(target: LOG_TARGET, "Inserted transaction without kernels!"); - }, - } - } - - /// Insert a set of new transactions into the ReorgPoolStorage - pub fn insert_txs(&mut self, txs: Vec>) { - for tx in txs.into_iter() { - self.insert(tx); - } - } - - /// Check if a transaction is stored in the ReorgPoolStorage - pub fn has_tx_with_excess_sig(&self, excess_sig: &Signature) -> bool { - self.txs_by_signature.contains_key(excess_sig.get_signature()) - } - - /// Remove double-spends from the ReorgPool. These transactions were orphaned by the provided published - /// block. Check if any of the transactions in the ReorgPool has inputs that was spent by the provided - /// published block. - fn discard_double_spends(&mut self, published_block: &Block) { - let mut removed_tx_keys = Vec::new(); - for (tx_key, ptx) in self.txs_by_signature.iter() { - for input in ptx.body.inputs() { - if published_block.body.inputs().contains(input) { - removed_tx_keys.push(tx_key.clone()); - } - } - } - - for tx_key in &removed_tx_keys { - self.txs_by_signature.remove(tx_key); - trace!( - target: LOG_TARGET, - "Removed double spend tx from reorg pool: {}", - tx_key.to_hex() - ); - } - } - - /// Remove the transactions from the ReorgPoolStorage that were used in provided removed blocks. The transactions - /// can be resubmitted to the Unconfirmed Pool. - pub fn remove_reorged_txs_and_discard_double_spends( - &mut self, - removed_blocks: &[Arc], - new_blocks: &[Arc], - ) -> Vec> { - for block in new_blocks { - debug!( - target: LOG_TARGET, - "Mempool processing reorg added new block {} ({})", - block.header.height, - block.header.hash().to_hex(), - ); - self.discard_double_spends(block); - } - - let mut removed_txs = Vec::new(); - for block in removed_blocks { - debug!( - target: LOG_TARGET, - "Mempool processing reorg removed block {} ({})", - block.header.height, - block.header.hash().to_hex(), - ); - for kernel in block.body.kernels() { - if let Some(removed_tx) = self.txs_by_signature.remove(kernel.excess_sig.get_signature()) { - trace!(target: LOG_TARGET, "Removed tx from reorg pool: {:?}", removed_tx); - removed_txs.push(removed_tx); - } - } - } - removed_txs - } - - /// Returns the total number of published transactions stored in the ReorgPoolStorage - pub fn len(&mut self) -> usize { - self.txs_by_signature.iter().count() - } - - /// Returns all transaction stored in the ReorgPoolStorage. 
- pub fn snapshot(&mut self) -> Vec> { - self.txs_by_signature.iter().map(|(_, tx)| tx).cloned().collect() - } -} diff --git a/base_layer/core/src/mempool/service/inbound_handlers.rs b/base_layer/core/src/mempool/service/inbound_handlers.rs index 8bc69d454f..491686f949 100644 --- a/base_layer/core/src/mempool/service/inbound_handlers.rs +++ b/base_layer/core/src/mempool/service/inbound_handlers.rs @@ -58,10 +58,10 @@ impl MempoolInboundHandlers { debug!(target: LOG_TARGET, "Handling remote request: {}", request); use MempoolRequest::*; match request { - GetStats => Ok(MempoolResponse::Stats(self.mempool.stats().await?)), - GetState => Ok(MempoolResponse::State(self.mempool.state().await?)), + GetStats => Ok(MempoolResponse::Stats(self.mempool.stats().await)), + GetState => Ok(MempoolResponse::State(self.mempool.state().await)), GetTxStateByExcessSig(excess_sig) => Ok(MempoolResponse::TxStorage( - self.mempool.has_tx_with_excess_sig(&excess_sig).await?, + self.mempool.has_tx_with_excess_sig(&excess_sig).await, )), SubmitTransaction(tx) => { debug!( @@ -150,7 +150,8 @@ impl MempoolInboundHandlers { ) .await?; }, - BlockSyncRewind(removed_blocks) if !removed_blocks.is_empty() => { + ValidBlockAdded(_, _) => {}, + BlockSyncRewind(removed_blocks) => { self.mempool .process_reorg(removed_blocks.iter().map(|b| b.to_arc_block()).collect(), vec![]) .await?; @@ -158,7 +159,7 @@ impl MempoolInboundHandlers { BlockSyncComplete(tip_block) => { self.mempool.process_published_block(tip_block.block()).await?; }, - _ => {}, + AddBlockFailed(_) => {}, } Ok(()) diff --git a/base_layer/core/src/mempool/service/initializer.rs b/base_layer/core/src/mempool/service/initializer.rs index e416d2757b..5eaf65a149 100644 --- a/base_layer/core/src/mempool/service/initializer.rs +++ b/base_layer/core/src/mempool/service/initializer.rs @@ -28,7 +28,6 @@ use tari_comms_dht::Dht; use tari_p2p::{ comms_connector::{PeerMessage, SubscriptionFactory}, domain_message::DomainMessage, - services::utils::{map_decode, ok_or_skip_result}, tari_message::TariMessageType, }; use tari_service_framework::{ @@ -44,7 +43,6 @@ use crate::{ base_node::{comms_interface::LocalNodeCommsInterface, StateMachineHandle}, mempool::{ mempool::Mempool, - proto as mempool_proto, service::{ inbound_handlers::MempoolInboundHandlers, local_service::LocalMempoolService, @@ -52,7 +50,6 @@ use crate::{ service::{MempoolService, MempoolStreams}, MempoolHandle, }, - MempoolServiceConfig, }, proto, transactions::transaction::Transaction, @@ -63,41 +60,19 @@ const SUBSCRIPTION_LABEL: &str = "Mempool"; /// Initializer for the Mempool service and service future. pub struct MempoolServiceInitializer { - inbound_message_subscription_factory: Arc, mempool: Mempool, - config: MempoolServiceConfig, + inbound_message_subscription_factory: Arc, } impl MempoolServiceInitializer { /// Create a new MempoolServiceInitializer from the inbound message subscriber. 
- pub fn new( - config: MempoolServiceConfig, - mempool: Mempool, - inbound_message_subscription_factory: Arc<SubscriptionFactory>, - ) -> Self { + pub fn new(mempool: Mempool, inbound_message_subscription_factory: Arc<SubscriptionFactory>) -> Self { Self { - inbound_message_subscription_factory, mempool, - config, + inbound_message_subscription_factory, } } - /// Get a stream for inbound Mempool service request messages - fn inbound_request_stream(&self) -> impl Stream<Item = DomainMessage<mempool_proto::MempoolServiceRequest>> { - self.inbound_message_subscription_factory - .get_subscription(TariMessageType::MempoolRequest, SUBSCRIPTION_LABEL) - .map(map_decode::<mempool_proto::MempoolServiceRequest>) - .filter_map(ok_or_skip_result) - } - - /// Get a stream for inbound Mempool service response messages - fn inbound_response_stream(&self) -> impl Stream<Item = DomainMessage<mempool_proto::MempoolServiceResponse>> { - self.inbound_message_subscription_factory - .get_subscription(TariMessageType::MempoolResponse, SUBSCRIPTION_LABEL) - .map(map_decode::<mempool_proto::MempoolServiceResponse>) - .filter_map(ok_or_skip_result) - } - /// Create a stream of `New Transaction` messages fn inbound_transaction_stream(&self) -> impl Stream<Item = DomainMessage<Transaction>> { self.inbound_message_subscription_factory @@ -141,8 +116,6 @@ async fn extract_transaction(msg: Arc<PeerMessage>) -> Option<DomainMessage<Transaction>> Result<(), ServiceInitializationError> { // Create streams for receiving Mempool service requests and response messages from comms - let inbound_request_stream = self.inbound_request_stream(); - let inbound_response_stream = self.inbound_response_stream(); let inbound_transaction_stream = self.inbound_transaction_stream(); // Connect MempoolOutboundServiceHandle to MempoolService @@ -151,12 +124,9 @@ impl ServiceInitializer for MempoolServiceInitializer { context.register_handle(mempool_handle); let (outbound_tx_sender, outbound_tx_stream) = mpsc::unbounded_channel(); - let (outbound_request_sender_service, outbound_request_stream) = reply_channel::unbounded(); let (local_request_sender_service, local_request_stream) = reply_channel::unbounded(); - let outbound_mp_interface = - OutboundMempoolServiceInterface::new(outbound_request_sender_service, outbound_tx_sender); + let outbound_mp_interface = OutboundMempoolServiceInterface::new(outbound_tx_sender); let local_mp_interface = LocalMempoolService::new(local_request_sender_service); - let config = self.config; let inbound_handlers = MempoolInboundHandlers::new(self.mempool.clone(), outbound_mp_interface.clone()); // Register handle to OutboundMempoolServiceInterface before waiting for handles to be ready @@ -169,16 +139,14 @@ impl ServiceInitializer for MempoolServiceInitializer { let base_node = handles.expect_handle::<LocalNodeCommsInterface>(); let streams = MempoolStreams { - outbound_request_stream, outbound_tx_stream, - inbound_request_stream, - inbound_response_stream, inbound_transaction_stream, local_request_stream, block_event_stream: base_node.get_block_event_stream(), request_receiver, }; - MempoolService::new(outbound_message_service, inbound_handlers, config, state_machine).start(streams) + debug!(target: LOG_TARGET, "Mempool service started"); + MempoolService::new(outbound_message_service, inbound_handlers, state_machine).start(streams) }); Ok(()) diff --git a/base_layer/core/src/mempool/service/outbound_interface.rs b/base_layer/core/src/mempool/service/outbound_interface.rs index 315457eff8..7eb1a4bc2a 100644 --- a/base_layer/core/src/mempool/service/outbound_interface.rs +++ b/base_layer/core/src/mempool/service/outbound_interface.rs @@ -23,19 +23,10 @@ use std::sync::Arc; use log::*; -use tari_common_types::types::Signature; use tari_comms::peer_manager::NodeId; -use tari_service_framework::{reply_channel::SenderService, Service}; use
tokio::sync::mpsc::UnboundedSender; -use crate::{ - mempool::{ - service::{MempoolRequest, MempoolResponse, MempoolServiceError}, - StatsResponse, - TxStorageResponse, - }, - transactions::transaction::Transaction, -}; +use crate::{mempool::service::MempoolServiceError, transactions::transaction::Transaction}; pub const LOG_TARGET: &str = "c::mp::service::outbound_interface"; @@ -43,30 +34,13 @@ pub const LOG_TARGET: &str = "c::mp::service::outbound_interface"; /// nodes. #[derive(Clone)] pub struct OutboundMempoolServiceInterface { - request_sender: SenderService<MempoolRequest, Result<MempoolResponse, MempoolServiceError>>, tx_sender: UnboundedSender<(Arc<Transaction>, Vec<NodeId>)>, } impl OutboundMempoolServiceInterface { /// Construct a new OutboundMempoolServiceInterface with the specified SenderService. - pub fn new( - request_sender: SenderService<MempoolRequest, Result<MempoolResponse, MempoolServiceError>>, - tx_sender: UnboundedSender<(Arc<Transaction>, Vec<NodeId>)>, - ) -> Self { - Self { - request_sender, - tx_sender, - } - } - - /// Request the stats from the mempool of a remote base node. - pub async fn get_stats(&mut self) -> Result<StatsResponse, MempoolServiceError> { - if let MempoolResponse::Stats(stats) = self.request_sender.call(MempoolRequest::GetStats).await?? { - trace!(target: LOG_TARGET, "Mempool stats requested: {:?}", stats,); - Ok(stats) - } else { - Err(MempoolServiceError::UnexpectedApiResponse) - } + pub fn new(tx_sender: UnboundedSender<(Arc<Transaction>, Vec<NodeId>)>) -> Self { + Self { tx_sender } } /// Transmit a transaction to remote base nodes, excluding the provided peers. @@ -83,20 +57,4 @@ impl OutboundMempoolServiceInterface { .map_err(|_| MempoolServiceError::BroadcastFailed) }) } - - /// Check if the specified transaction is stored in the mempool of a remote base node. - pub async fn get_tx_state_by_excess_sig( - &mut self, - excess_sig: Signature, - ) -> Result<TxStorageResponse, MempoolServiceError> { - if let MempoolResponse::TxStorage(tx_storage_response) = self - .request_sender - .call(MempoolRequest::GetTxStateByExcessSig(excess_sig)) - .await?? - { - Ok(tx_storage_response) - } else { - Err(MempoolServiceError::UnexpectedApiResponse) - } - } } diff --git a/base_layer/core/src/mempool/service/response.rs b/base_layer/core/src/mempool/service/response.rs index 60949d173e..bd1680cd11 100644 --- a/base_layer/core/src/mempool/service/response.rs +++ b/base_layer/core/src/mempool/service/response.rs @@ -22,13 +22,12 @@ use std::{fmt, fmt::Formatter}; -use serde::{Deserialize, Serialize}; use tari_common_types::waiting_requests::RequestKey; use crate::mempool::{StateResponse, StatsResponse, TxStorageResponse}; /// API Response enum for Mempool responses. -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug)] pub enum MempoolResponse { Stats(StatsResponse), State(StateResponse), @@ -47,7 +46,7 @@ impl fmt::Display for MempoolResponse { } /// Response type for a received MempoolService request -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug)] pub struct MempoolServiceResponse { pub request_key: RequestKey, pub response: MempoolResponse, diff --git a/base_layer/core/src/mempool/service/service.rs b/base_layer/core/src/mempool/service/service.rs index 1eeb228e43..28168febf0 100644 --- a/base_layer/core/src/mempool/service/service.rs +++ b/base_layer/core/src/mempool/service/service.rs @@ -20,16 +20,10 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
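With the request/response RPC paths gone, OutboundMempoolServiceInterface reduces to a fire-and-forget broadcaster over an unbounded tokio channel. A minimal sketch of that wiring, with a String standing in for the (Arc<Transaction>, Vec<NodeId>) payload:

    use tokio::sync::mpsc;

    // UnboundedSender::send is synchronous and only errs once the receiving
    // service has shut down, which is what maps to BroadcastFailed above.
    #[tokio::main]
    async fn main() {
        let (tx_sender, mut tx_stream) = mpsc::unbounded_channel::<String>();

        tx_sender.send("tx-1".to_string()).expect("mempool service shut down");

        if let Some(tx) = tx_stream.recv().await {
            println!("propagating {} to peers", tx);
            // sketch only: a real service loops until the channel closes
        }
    }
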
-use std::{ - convert::{TryFrom, TryInto}, - sync::Arc, - time::Duration, -}; +use std::{convert::TryFrom, sync::Arc}; use futures::{pin_mut, stream::StreamExt, Stream}; use log::*; -use rand::rngs::OsRng; -use tari_common_types::waiting_requests::{generate_request_key, RequestKey, WaitingRequests}; use tari_comms::peer_manager::NodeId; use tari_comms_dht::{ domain_message::OutboundDomainMessage, @@ -39,25 +33,18 @@ use tari_comms_dht::{ use tari_crypto::tari_utilities::hex::Hex; use tari_p2p::{domain_message::DomainMessage, tari_message::TariMessageType}; use tari_service_framework::{reply_channel, reply_channel::RequestContext}; -use tokio::{ - sync::{mpsc, oneshot::Sender as OneshotSender}, - task, -}; +use tokio::{sync::mpsc, task}; use crate::{ base_node::{ comms_interface::{BlockEvent, BlockEventReceiver}, StateMachineHandle, }, - mempool::{ - proto as mempool_proto, - service::{ - error::MempoolServiceError, - inbound_handlers::MempoolInboundHandlers, - MempoolRequest, - MempoolResponse, - }, - MempoolServiceConfig, + mempool::service::{ + error::MempoolServiceError, + inbound_handlers::MempoolInboundHandlers, + MempoolRequest, + MempoolResponse, }, proto, transactions::transaction::Transaction, @@ -66,11 +53,8 @@ const LOG_TARGET: &str = "c::mempool::service::service"; /// A convenience struct to hold all the Mempool service streams -pub struct MempoolStreams<SOutReq, SInReq, SInRes, STxIn, SLocalReq> { - pub outbound_request_stream: SOutReq, +pub struct MempoolStreams<STxIn, SLocalReq> { pub outbound_tx_stream: mpsc::UnboundedReceiver<(Arc<Transaction>, Vec<NodeId>)>, - pub inbound_request_stream: SInReq, - pub inbound_response_stream: SInRes, pub inbound_transaction_stream: STxIn, pub local_request_stream: SLocalReq, pub block_event_stream: BlockEventReceiver, @@ -82,10 +66,6 @@ pub struct MempoolStreams<SOutReq, SInReq, SInRes, STxIn, SLocalReq> { pub struct MempoolService { outbound_message_service: OutboundMessageRequester, inbound_handlers: MempoolInboundHandlers, - waiting_requests: WaitingRequests<Result<MempoolResponse, MempoolServiceError>>, - timeout_sender: mpsc::Sender<RequestKey>, - timeout_receiver_stream: Option<mpsc::Receiver<RequestKey>>, - config: MempoolServiceConfig, state_machine: StateMachineHandle, } @@ -93,48 +73,29 @@ impl MempoolService { pub fn new( outbound_message_service: OutboundMessageRequester, inbound_handlers: MempoolInboundHandlers, - config: MempoolServiceConfig, state_machine: StateMachineHandle, ) -> Self { - let (timeout_sender, timeout_receiver) = mpsc::channel(100); Self { outbound_message_service, inbound_handlers, - waiting_requests: WaitingRequests::new(), - timeout_sender, - timeout_receiver_stream: Some(timeout_receiver), - config, state_machine, } } - pub async fn start<SOutReq, SInReq, SInRes, STxIn, SLocalReq>( + pub async fn start<STxIn, SLocalReq>( mut self, - streams: MempoolStreams<SOutReq, SInReq, SInRes, STxIn, SLocalReq>, + streams: MempoolStreams<STxIn, SLocalReq>, ) -> Result<(), MempoolServiceError> where - SOutReq: Stream<Item = RequestContext<MempoolRequest, Result<MempoolResponse, MempoolServiceError>>>, - SInReq: Stream<Item = DomainMessage<mempool_proto::MempoolServiceRequest>>, - SInRes: Stream<Item = DomainMessage<mempool_proto::MempoolServiceResponse>>, STxIn: Stream<Item = DomainMessage<Transaction>>, SLocalReq: Stream<Item = RequestContext<MempoolRequest, Result<MempoolResponse, MempoolServiceError>>>, { - let outbound_request_stream = streams.outbound_request_stream.fuse(); - pin_mut!(outbound_request_stream); let mut outbound_tx_stream = streams.outbound_tx_stream; - let inbound_request_stream = streams.inbound_request_stream.fuse(); - pin_mut!(inbound_request_stream); - let inbound_response_stream = streams.inbound_response_stream.fuse(); - pin_mut!(inbound_response_stream); let inbound_transaction_stream = streams.inbound_transaction_stream.fuse(); pin_mut!(inbound_transaction_stream); let local_request_stream = streams.local_request_stream.fuse(); pin_mut!(local_request_stream); let mut block_event_stream = streams.block_event_stream; - let mut timeout_receiver_stream = self - .timeout_receiver_stream - .take() - .expect("Mempool
Service initialized without timeout_receiver_stream"); let mut request_receiver = streams.request_receiver; loop { @@ -145,26 +106,11 @@ impl MempoolService { let _ = reply.send(self.handle_request(request).await); }, - // Outbound request messages from the OutboundMempoolServiceInterface - Some(outbound_request_context) = outbound_request_stream.next() => { - self.spawn_handle_outbound_request(outbound_request_context); - }, - // Outbound tx messages from the OutboundMempoolServiceInterface Some((txn, excluded_peers)) = outbound_tx_stream.recv() => { self.spawn_handle_outbound_tx(txn, excluded_peers); }, - // Incoming request messages from the Comms layer - Some(domain_msg) = inbound_request_stream.next() => { - self.spawn_handle_incoming_request(domain_msg); - }, - - // Incoming response messages from the Comms layer - Some(domain_msg) = inbound_response_stream.next() => { - self.spawn_handle_incoming_response(domain_msg); - }, - // Incoming transaction messages from the Comms layer Some(transaction_msg) = inbound_transaction_stream.next() => { self.spawn_handle_incoming_tx(transaction_msg); @@ -182,10 +128,6 @@ impl MempoolService { } }, - // Timeout events for waiting requests - Some(timeout_request_key) = timeout_receiver_stream.recv() => { - self.spawn_handle_request_timeout(timeout_request_key); - }, else => { info!(target: LOG_TARGET, "Mempool service shutting down"); @@ -202,32 +144,6 @@ impl MempoolService { self.inbound_handlers.handle_request(request).await } - fn spawn_handle_outbound_request( - &self, - request_context: RequestContext>, - ) { - let outbound_message_service = self.outbound_message_service.clone(); - let waiting_requests = self.waiting_requests.clone(); - let timeout_sender = self.timeout_sender.clone(); - let config = self.config; - task::spawn(async move { - let (request, reply_tx) = request_context.split(); - let result = handle_outbound_request( - outbound_message_service, - waiting_requests, - timeout_sender, - reply_tx, - request, - config, - ) - .await; - - if let Err(e) = result { - error!(target: LOG_TARGET, "Failed to handle outbound request message: {:?}", e); - } - }); - } - fn spawn_handle_outbound_tx(&self, tx: Arc, excluded_peers: Vec) { let outbound_message_service = self.outbound_message_service.clone(); task::spawn(async move { @@ -238,32 +154,6 @@ impl MempoolService { }); } - fn spawn_handle_incoming_request(&self, domain_msg: DomainMessage) { - let inbound_handlers = self.inbound_handlers.clone(); - let outbound_message_service = self.outbound_message_service.clone(); - task::spawn(async move { - let result = handle_incoming_request(inbound_handlers, outbound_message_service, domain_msg).await; - - if let Err(e) = result { - error!(target: LOG_TARGET, "Failed to handle incoming request message: {:?}", e); - } - }); - } - - fn spawn_handle_incoming_response(&self, domain_msg: DomainMessage) { - let waiting_requests = self.waiting_requests.clone(); - task::spawn(async move { - let result = handle_incoming_response(waiting_requests, domain_msg.into_inner()).await; - - if let Err(e) = result { - error!( - target: LOG_TARGET, - "Failed to handle incoming response message: {:?}", e - ); - } - }); - } - fn spawn_handle_incoming_tx(&self, tx_msg: DomainMessage) { // Determine if we are bootstrapped let status_watch = self.state_machine.get_status_info_watch(); @@ -316,126 +206,6 @@ impl MempoolService { } }); } - - fn spawn_handle_request_timeout(&self, timeout_request_key: u64) { - let waiting_requests = self.waiting_requests.clone(); - 
task::spawn(async move { - let result = handle_request_timeout(waiting_requests, timeout_request_key).await; - - if let Err(e) = result { - error!(target: LOG_TARGET, "Failed to handle request timeout event: {:?}", e); - } - }); - } -} - -async fn handle_incoming_request( - mut inbound_handlers: MempoolInboundHandlers, - mut outbound_message_service: OutboundMessageRequester, - domain_request_msg: DomainMessage, -) -> Result<(), MempoolServiceError> { - let (origin_public_key, inner_msg) = domain_request_msg.into_origin_and_inner(); - - // Convert mempool_proto::MempoolServiceRequest to a MempoolServiceRequest - let request = inner_msg - .request - .ok_or_else(|| MempoolServiceError::InvalidRequest("Received invalid mempool service request".to_string()))?; - - let response = inbound_handlers - .handle_request(request.try_into().map_err(MempoolServiceError::InvalidRequest)?) - .await?; - - let message = mempool_proto::MempoolServiceResponse { - request_key: inner_msg.request_key, - response: Some(response.try_into().map_err(MempoolServiceError::ConversionError)?), - }; - - outbound_message_service - .send_direct( - origin_public_key, - OutboundDomainMessage::new(TariMessageType::MempoolResponse, message), - ) - .await?; - - Ok(()) -} - -async fn handle_incoming_response( - waiting_requests: WaitingRequests>, - incoming_response: mempool_proto::MempoolServiceResponse, -) -> Result<(), MempoolServiceError> { - let mempool_proto::MempoolServiceResponse { request_key, response } = incoming_response; - let response: MempoolResponse = response - .and_then(|r| r.try_into().ok()) - .ok_or_else(|| MempoolServiceError::InvalidResponse("Received an invalid mempool response".to_string()))?; - - if let Some((reply_tx, started)) = waiting_requests.remove(request_key).await { - trace!( - target: LOG_TARGET, - "Response for {} (request key: {}) received after {}ms", - response, - &request_key, - started.elapsed().as_millis() - ); - let _ = reply_tx.send(Ok(response).map_err(|e| { - warn!( - target: LOG_TARGET, - "Failed to finalize request (request key:{}): {:?}", &request_key, e - ); - e - })); - } - - Ok(()) -} - -async fn handle_outbound_request( - mut outbound_message_service: OutboundMessageRequester, - waiting_requests: WaitingRequests>, - timeout_sender: mpsc::Sender, - reply_tx: OneshotSender>, - request: MempoolRequest, - config: MempoolServiceConfig, -) -> Result<(), MempoolServiceError> { - let request_key = generate_request_key(&mut OsRng); - let service_request = mempool_proto::MempoolServiceRequest { - request_key, - request: Some(request.try_into().map_err(MempoolServiceError::ConversionError)?), - }; - - let send_result = outbound_message_service - .send_random( - 1, - NodeDestination::Unknown, - OutboundEncryption::ClearText, - OutboundDomainMessage::new(TariMessageType::MempoolRequest, service_request), - ) - .await; - - match send_result { - Ok(_) => { - // Spawn timeout and wait for matching response to arrive - waiting_requests.insert(request_key, reply_tx).await; - // Spawn timeout for waiting_request - spawn_request_timeout(timeout_sender, request_key, config.request_timeout); - Ok(()) - }, - Err(DhtOutboundError::NoMessagesQueued) => { - let _ = reply_tx.send(Err(MempoolServiceError::NoBootstrapNodesConfigured).map_err(|e| { - error!( - target: LOG_TARGET, - "Failed to send outbound request as no bootstrap nodes were configured" - ); - e - })); - - Ok(()) - }, - Err(e) => { - error!(target: LOG_TARGET, "mempool outbound request failure. 
{:?}", e); - Err(MempoolServiceError::OutboundMessageService(e.to_string())) - }, - } } async fn handle_incoming_tx( @@ -446,7 +216,10 @@ async fn handle_incoming_tx( debug!( "New transaction received: {}, from: {}", - inner.body.kernels()[0].excess_sig.get_signature().to_hex(), + inner + .first_kernel_excess_sig() + .map(|s| s.get_signature().to_hex()) + .unwrap_or_else(|| "No kernels!".to_string()), source_peer.public_key, ); trace!( @@ -462,31 +235,6 @@ async fn handle_incoming_tx( Ok(()) } -async fn handle_request_timeout( - waiting_requests: WaitingRequests>, - request_key: RequestKey, -) -> Result<(), MempoolServiceError> { - if let Some((reply_tx, started)) = waiting_requests.remove(request_key).await { - warn!( - target: LOG_TARGET, - "Request (request key {}) timed out after {}ms", - &request_key, - started.elapsed().as_millis() - ); - let reply_msg = Err(MempoolServiceError::RequestTimedOut); - - let _ = reply_tx.send(reply_msg.map_err(|e| { - error!( - target: LOG_TARGET, - "Failed to process outbound request (request key: {}): {:?}", &request_key, e - ); - e - })); - } - - Ok(()) -} - async fn handle_outbound_tx( mut outbound_message_service: OutboundMessageRequester, tx: Arc, @@ -516,10 +264,3 @@ async fn handle_outbound_tx( Ok(()) } - -fn spawn_request_timeout(timeout_sender: mpsc::Sender, request_key: RequestKey, timeout: Duration) { - task::spawn(async move { - tokio::time::sleep(timeout).await; - let _ = timeout_sender.send(request_key).await; - }); -} diff --git a/base_layer/core/src/mempool/sync_protocol/mod.rs b/base_layer/core/src/mempool/sync_protocol/mod.rs index 136a64b19d..8cbbc17347 100644 --- a/base_layer/core/src/mempool/sync_protocol/mod.rs +++ b/base_layer/core/src/mempool/sync_protocol/mod.rs @@ -306,7 +306,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin self.peer_node_id.short_str() ); - let transactions = self.mempool.snapshot().await?; + let transactions = self.mempool.snapshot().await; let items = transactions .iter() .take(self.config.initial_sync_max_transactions) @@ -392,7 +392,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin inventory.items.len() ); - let transactions = self.mempool.snapshot().await?; + let transactions = self.mempool.snapshot().await; let mut duplicate_inventory_items = Vec::new(); let (transactions, _) = transactions.into_iter().partition::, _>(|transaction| { diff --git a/base_layer/core/src/mempool/sync_protocol/test.rs b/base_layer/core/src/mempool/sync_protocol/test.rs index 50eecb22a0..d2ba059e05 100644 --- a/base_layer/core/src/mempool/sync_protocol/test.rs +++ b/base_layer/core/src/mempool/sync_protocol/test.rs @@ -122,10 +122,10 @@ async fn empty_set() { .await .unwrap(); - let transactions = mempool2.snapshot().await.unwrap(); + let transactions = mempool2.snapshot().await; assert_eq!(transactions.len(), 0); - let transactions = mempool1.snapshot().await.unwrap(); + let transactions = mempool1.snapshot().await; assert_eq!(transactions.len(), 0); } @@ -319,14 +319,7 @@ async fn responder_messages() { } async fn get_snapshot(mempool: &Mempool) -> Vec { - mempool - .snapshot() - .await - .unwrap() - .iter() - .map(|t| &**t) - .cloned() - .collect() + mempool.snapshot().await.iter().map(|t| &**t).cloned().collect() } async fn read_message(reader: &mut S) -> T diff --git a/base_layer/core/src/mempool/unconfirmed_pool/unconfirmed_pool.rs b/base_layer/core/src/mempool/unconfirmed_pool/unconfirmed_pool.rs index de5e33369a..3648c9b7b1 100644 --- a/base_layer/core/src/mempool/unconfirmed_pool/unconfirmed_pool.rs +++ 
b/base_layer/core/src/mempool/unconfirmed_pool/unconfirmed_pool.rs @@ -21,7 +21,8 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. use std::{ - collections::{BTreeMap, HashMap}, + collections::{BTreeMap, HashMap, HashSet}, + hash::Hash, sync::Arc, }; @@ -229,6 +230,31 @@ impl UnconfirmedPool { Ok(results) } + pub fn retrieve_by_excess_sigs(&self, excess_sigs: &[PrivateKey]) -> (Vec<Arc<Transaction>>, Vec<PrivateKey>) { + // HashSet used to prevent duplicates + let mut found = HashSet::new(); + let mut remaining = Vec::new(); + + for sig in excess_sigs { + match self.txs_by_signature.get(sig).cloned() { + Some(ids) => found.extend(ids), + None => remaining.push(sig.clone()), + } + } + + let found = found + .into_iter() + .map(|id| { + self.tx_by_key + .get(&id) + .map(|tx| tx.transaction.clone()) + .expect("mempool indexes out of sync: transaction exists in txs_by_signature but not in tx_by_key") + }) + .collect(); + + (found, remaining) + } + fn get_all_dependent_transactions( &self, transaction: &PrioritizedTransaction, @@ -475,7 +501,7 @@ impl UnconfirmedPool { /// Returns false if there are any inconsistencies in the internal mempool state, otherwise true #[cfg(test)] - fn check_data_contistency(&self) -> bool { + fn check_data_consistency(&self) -> bool { self.tx_by_priority.len() == self.tx_by_key.len() && self.tx_by_priority .values() @@ -493,6 +519,32 @@ impl UnconfirmedPool { self.key_counter = (self.key_counter + 1) % usize::MAX; key } + + pub fn compact(&mut self) { + fn shrink_hashmap<K: Eq + Hash, V>(map: &mut HashMap<K, V>) -> (usize, usize) { + let cap = map.capacity(); + let extra_cap = cap - map.len(); + if extra_cap > 100 { + map.shrink_to(map.len() + (extra_cap / 2)); + } + + (cap, map.capacity()) + } + + let (old, new) = shrink_hashmap(&mut self.tx_by_key); + shrink_hashmap(&mut self.txs_by_signature); + shrink_hashmap(&mut self.txs_by_output); + + if old - new > 0 { + debug!( + target: LOG_TARGET, + "Shrunk unconfirmed pool memory usage ({}/{}) ~{}%", + new, + old, + (((old - new) as f32 / old as f32) * 100.0).round() as usize + ); + } + } } #[cfg(test)] @@ -573,7 +625,7 @@ mod test { // Note that transaction tx5 could not be included as its weight was too big to fit into the remaining allocated // space, the second best transaction was then included - assert!(unconfirmed_pool.check_data_contistency()); + assert!(unconfirmed_pool.check_data_consistency()); } #[test] @@ -690,7 +742,7 @@ mod test { assert!(!unconfirmed_pool.has_tx_with_excess_sig(&tx5.body.kernels()[0].excess_sig),); assert!(!unconfirmed_pool.has_tx_with_excess_sig(&tx6.body.kernels()[0].excess_sig),); - assert!(unconfirmed_pool.check_data_contistency()); + assert!(unconfirmed_pool.check_data_consistency()); } #[test] @@ -739,7 +791,7 @@ mod test { assert!(!unconfirmed_pool.has_tx_with_excess_sig(&tx5.body.kernels()[0].excess_sig)); assert!(!unconfirmed_pool.has_tx_with_excess_sig(&tx6.body.kernels()[0].excess_sig)); - assert!(unconfirmed_pool.check_data_contistency()); + assert!(unconfirmed_pool.check_data_consistency()); } #[test] diff --git a/base_layer/core/src/proto/block.proto b/base_layer/core/src/proto/block.proto index 8f5edd5b12..21efa21a36 100644 --- a/base_layer/core/src/proto/block.proto +++ b/base_layer/core/src/proto/block.proto @@ -59,7 +59,10 @@ message Block { // A new block message. This is the message that is propagated around the network. It contains the // minimal information required to identify and optionally request the full block.
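Aside on the compact() heuristic above: it only shrinks a map once more than 100 buckets sit unused, and it intentionally leaves half of the remaining slack as headroom so a pool that refills after a large block does not immediately reallocate. A minimal, self-contained sketch of the same idea (standard library only; the sizes are made up for illustration):

use std::{collections::HashMap, hash::Hash};

fn shrink_hashmap<K: Eq + Hash, V>(map: &mut HashMap<K, V>) -> (usize, usize) {
    let cap = map.capacity();
    let extra_cap = cap - map.len();
    // Only bother shrinking when there is real slack, and keep half of it
    // as headroom for the next burst of insertions.
    if extra_cap > 100 {
        map.shrink_to(map.len() + (extra_cap / 2));
    }
    (cap, map.capacity())
}

fn main() {
    let mut map: HashMap<u32, u32> = (0..10_000).map(|i| (i, i)).collect();
    // Simulate a large block draining almost the whole pool.
    map.retain(|k, _| *k < 10);
    let (old, new) = shrink_hashmap(&mut map);
    println!("capacity {} -> {} (len {})", old, new, map.len());
}

Running this shows capacity dropping to roughly half the old slack rather than all the way down to len(), which is the intended trade-off between memory usage and rehashing cost.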
message NewBlock { - bytes block_hash = 1; + BlockHeader header = 1; + tari.types.TransactionKernel coinbase_kernel = 2; + tari.types.TransactionOutput coinbase_output = 3; + repeated bytes kernel_excess_sigs = 4; } // The representation of a historical block in the blockchain. It is essentially identical to a protocol-defined diff --git a/base_layer/core/src/proto/block.rs b/base_layer/core/src/proto/block.rs index 60fd6af5d9..ab6787b8ae 100644 --- a/base_layer/core/src/proto/block.rs +++ b/base_layer/core/src/proto/block.rs @@ -22,7 +22,7 @@ use std::convert::{TryFrom, TryInto}; -use tari_common_types::types::{BlindingFactor, BLOCK_HASH_LENGTH}; +use tari_common_types::types::{BlindingFactor, PrivateKey}; use tari_crypto::tari_utilities::ByteArray; use super::core as proto; @@ -231,23 +231,33 @@ impl TryFrom<proto::NewBlock> for NewBlock { type Error = String; fn try_from(new_block: proto::NewBlock) -> Result<Self, Self::Error> { - let block_hash = new_block.block_hash; - if block_hash.len() != BLOCK_HASH_LENGTH { - return Err(format!( - "Block hash has an incorrect length. (len={}, expected={})", - block_hash.len(), - BLOCK_HASH_LENGTH - )); - } - - Ok(Self { block_hash }) + Ok(Self { + header: new_block.header.ok_or("No new block header provided")?.try_into()?, + coinbase_kernel: new_block + .coinbase_kernel + .ok_or("No coinbase kernel given")? + .try_into()?, + coinbase_output: new_block + .coinbase_output + .ok_or("No coinbase output given")? + .try_into()?, + kernel_excess_sigs: new_block + .kernel_excess_sigs + .iter() + .map(|bytes| PrivateKey::from_bytes(bytes)) + .collect::<Result<Vec<_>, _>>() + .map_err(|_| "Invalid excess signature scalar")?, + }) } } impl From<NewBlock> for proto::NewBlock { fn from(new_block: NewBlock) -> Self { Self { - block_hash: new_block.block_hash, + header: Some(new_block.header.into()), + coinbase_kernel: Some(new_block.coinbase_kernel.into()), + coinbase_output: Some(new_block.coinbase_output.into()), + kernel_excess_sigs: new_block.kernel_excess_sigs.into_iter().map(|s| s.to_vec()).collect(), } } } diff --git a/base_layer/core/src/transactions/transaction/transaction.rs b/base_layer/core/src/transactions/transaction/transaction.rs index b51cd0b69b..3f497a1e16 100644 --- a/base_layer/core/src/transactions/transaction/transaction.rs +++ b/base_layer/core/src/transactions/transaction/transaction.rs @@ -172,12 +172,12 @@ impl Add for Transaction { impl Display for Transaction { fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { - fmt.write_str("-------------- Transaction --------------\n")?; - fmt.write_str("--- Offset ---\n")?; - fmt.write_str(&format!("{}\n", self.offset.to_hex()))?; - fmt.write_str("--- Script Offset ---\n")?; - fmt.write_str(&format!("{}\n", self.script_offset.to_hex()))?; - fmt.write_str("--- Body ---\n")?; - fmt.write_str(&format!("{}\n", self.body)) + writeln!(fmt, "-------------- Transaction --------------")?; + writeln!(fmt, "--- Offset ---")?; + writeln!(fmt, "{}", self.offset.to_hex())?; + writeln!(fmt, "--- Script Offset ---")?; + writeln!(fmt, "{}", self.script_offset.to_hex())?; + writeln!(fmt, "--- Body ---")?; + writeln!(fmt, "{}", self.body) } } diff --git a/base_layer/core/src/transactions/transaction/transaction_output.rs b/base_layer/core/src/transactions/transaction/transaction_output.rs index 12884b107f..b5661e5b91 100644 --- a/base_layer/core/src/transactions/transaction/transaction_output.rs +++ b/base_layer/core/src/transactions/transaction/transaction_output.rs @@ -28,8 +28,7 @@ use std::{ fmt::{Display, Formatter}, }; -use blake2::Digest; -use
digest::FixedOutput; +use digest::{Digest, FixedOutput}; use rand::rngs::OsRng; use serde::{Deserialize, Serialize}; use tari_common_types::types::{ diff --git a/base_layer/core/src/transactions/transaction/unblinded_output.rs b/base_layer/core/src/transactions/transaction/unblinded_output.rs index 35fbd34ccd..8c2add33dc 100644 --- a/base_layer/core/src/transactions/transaction/unblinded_output.rs +++ b/base_layer/core/src/transactions/transaction/unblinded_output.rs @@ -219,8 +219,8 @@ impl UnblindedOutput { self.covenant.consensus_encode_exact_size() } - // Note: The Hashable trait is not used here due to the dependency on `CryptoFactories`, and `commitment` us not - // Note: added to the struct to ensure the atomic nature between `commitment`, `spending_key` and `value`. + // Note: The Hashable trait is not used here due to the dependency on `CryptoFactories`, and `commitment` is not + // Note: added to the struct to ensure consistency between `commitment`, `spending_key` and `value`. pub fn hash(&self, factories: &CryptoFactories) -> Vec { let commitment = factories.commitment.commit_value(&self.spending_key, self.value.into()); transaction::hash_output(&self.features, &commitment, &self.script, &self.covenant) diff --git a/base_layer/core/tests/base_node_rpc.rs b/base_layer/core/tests/base_node_rpc.rs index a5862156c8..7e175f4df9 100644 --- a/base_layer/core/tests/base_node_rpc.rs +++ b/base_layer/core/tests/base_node_rpc.rs @@ -390,9 +390,9 @@ async fn test_sync_utxos_by_block() { assert_eq!( vec![ (0, block0.header().hash(), 0), - (1, block1.header.hash(), 9), - (2, block2.header.hash(), 3), - (3, block3.header.hash(), 6) + (1, block1.header.hash(), 10), + (2, block2.header.hash(), 4), + (3, block3.header.hash(), 7) ], responses .iter() @@ -415,7 +415,7 @@ async fn test_sync_utxos_by_block() { let responses = convert_mpsc_to_stream(&mut streaming).collect::>().await; assert_eq!( - vec![(1, block1.header.hash(), 9), (2, block2.header.hash(), 5),], + vec![(1, block1.header.hash(), 10), (2, block2.header.hash(), 6),], responses .iter() .map(|r| { diff --git a/base_layer/core/tests/helpers/block_builders.rs b/base_layer/core/tests/helpers/block_builders.rs index 008dcf5140..2ecc785b3b 100644 --- a/base_layer/core/tests/helpers/block_builders.rs +++ b/base_layer/core/tests/helpers/block_builders.rs @@ -245,10 +245,20 @@ pub fn chain_block( let mut header = BlockHeader::from_previous(&prev_block.header); header.version = consensus.consensus_constants(header.height).blockchain_version(); let height = header.height; + let reward = consensus.get_block_reward_at(height); + let (coinbase_utxo, coinbase_kernel, _) = create_coinbase( + &Default::default(), + reward, + consensus.consensus_constants(height).coinbase_lock_height(), + ); NewBlockTemplate::from_block( - header.into_builder().with_transactions(transactions).build(), + header + .into_builder() + .with_coinbase_utxo(coinbase_utxo, coinbase_kernel) + .with_transactions(transactions) + .build(), 1.into(), - consensus.get_block_reward_at(height), + reward, ) } diff --git a/base_layer/core/tests/helpers/nodes.rs b/base_layer/core/tests/helpers/nodes.rs index e8c089eac0..bebd51aecc 100644 --- a/base_layer/core/tests/helpers/nodes.rs +++ b/base_layer/core/tests/helpers/nodes.rs @@ -209,7 +209,6 @@ impl BaseNodeBuilder { mempool, consensus_manager.clone(), self.base_node_service_config.unwrap_or_default(), - self.mempool_service_config.unwrap_or_default(), self.liveness_service_config.unwrap_or_default(), data_path, ) @@ -402,7 +401,6 @@ async 
fn setup_base_node_services( mempool: Mempool, consensus_manager: ConsensusManager, base_node_service_config: BaseNodeServiceConfig, - mempool_service_config: MempoolServiceConfig, liveness_service_config: LivenessConfig, data_path: &str, ) -> NodeInterfaces { @@ -427,11 +425,7 @@ async fn setup_base_node_services( consensus_manager, base_node_service_config, )) - .add_initializer(MempoolServiceInitializer::new( - mempool_service_config, - mempool.clone(), - subscription_factory, - )) + .add_initializer(MempoolServiceInitializer::new(mempool.clone(), subscription_factory)) .add_initializer(mock_state_machine.get_initializer()) .add_initializer(ChainMetadataServiceInitializer) .build() diff --git a/base_layer/core/tests/mempool.rs b/base_layer/core/tests/mempool.rs index fdf07d8253..fdf3de1496 100644 --- a/base_layer/core/tests/mempool.rs +++ b/base_layer/core/tests/mempool.rs @@ -44,7 +44,7 @@ use tari_core::{ state_machine_service::states::{ListeningInfo, StateInfo, StatusInfo}, }, consensus::{ConsensusConstantsBuilder, ConsensusManager, NetworkConsensus}, - mempool::{Mempool, MempoolConfig, MempoolServiceConfig, MempoolServiceError, TxStorageResponse}, + mempool::{Mempool, MempoolConfig, MempoolServiceConfig, TxStorageResponse}, proof_of_work::Difficulty, proto, transactions::{ @@ -120,45 +120,32 @@ async fn test_insert_and_process_published_block() { assert_eq!( mempool .has_tx_with_excess_sig(&orphan.body.kernels()[0].excess_sig) - .await - .unwrap(), + .await, TxStorageResponse::NotStored ); assert_eq!( - mempool - .has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig) - .await - .unwrap(), + mempool.has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig).await, TxStorageResponse::UnconfirmedPool ); assert_eq!( - mempool - .has_tx_with_excess_sig(&tx3.body.kernels()[0].excess_sig) - .await - .unwrap(), + mempool.has_tx_with_excess_sig(&tx3.body.kernels()[0].excess_sig).await, TxStorageResponse::NotStored ); assert_eq!( - mempool - .has_tx_with_excess_sig(&tx5.body.kernels()[0].excess_sig) - .await - .unwrap(), + mempool.has_tx_with_excess_sig(&tx5.body.kernels()[0].excess_sig).await, TxStorageResponse::NotStored ); assert_eq!( - mempool - .has_tx_with_excess_sig(&tx6.body.kernels()[0].excess_sig) - .await - .unwrap(), + mempool.has_tx_with_excess_sig(&tx6.body.kernels()[0].excess_sig).await, TxStorageResponse::NotStored ); - let snapshot_txs = mempool.snapshot().await.unwrap(); + let snapshot_txs = mempool.snapshot().await; assert_eq!(snapshot_txs.len(), 1); assert!(snapshot_txs.contains(&tx2)); - let stats = mempool.stats().await.unwrap(); + let stats = mempool.stats().await; assert_eq!(stats.total_txs, 1); assert_eq!(stats.unconfirmed_txs, 1); assert_eq!(stats.reorg_txs, 0); @@ -177,44 +164,31 @@ async fn test_insert_and_process_published_block() { assert_eq!( mempool .has_tx_with_excess_sig(&orphan.body.kernels()[0].excess_sig) - .await - .unwrap(), + .await, TxStorageResponse::NotStored ); assert_eq!( - mempool - .has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig) - .await - .unwrap(), + mempool.has_tx_with_excess_sig(&tx2.body.kernels()[0].excess_sig).await, TxStorageResponse::ReorgPool ); assert_eq!( - mempool - .has_tx_with_excess_sig(&tx3.body.kernels()[0].excess_sig) - .await - .unwrap(), + mempool.has_tx_with_excess_sig(&tx3.body.kernels()[0].excess_sig).await, TxStorageResponse::NotStored ); assert_eq!( - mempool - .has_tx_with_excess_sig(&tx5.body.kernels()[0].excess_sig) - .await - .unwrap(), + 
mempool.has_tx_with_excess_sig(&tx5.body.kernels()[0].excess_sig).await, TxStorageResponse::NotStored ); assert_eq!( - mempool - .has_tx_with_excess_sig(&tx6.body.kernels()[0].excess_sig) - .await - .unwrap(), + mempool.has_tx_with_excess_sig(&tx6.body.kernels()[0].excess_sig).await, TxStorageResponse::NotStored ); - let snapshot_txs = mempool.snapshot().await.unwrap(); + let snapshot_txs = mempool.snapshot().await; assert_eq!(snapshot_txs.len(), 0); - let stats = mempool.stats().await.unwrap(); - assert_eq!(stats.total_txs, 0); + let stats = mempool.stats().await; + assert_eq!(stats.total_txs, 1); assert_eq!(stats.unconfirmed_txs, 0); assert_eq!(stats.reorg_txs, 1); assert_eq!(stats.total_weight, 0); @@ -317,7 +291,7 @@ async fn test_retrieve() { assert!(retrieved_txs.contains(&tx[6])); assert!(retrieved_txs.contains(&tx[2])); assert!(retrieved_txs.contains(&tx[3])); - let stats = mempool.stats().await.unwrap(); + let stats = mempool.stats().await; assert_eq!(stats.unconfirmed_txs, 7); // assert_eq!(stats.timelocked_txs, 1); assert_eq!(stats.reorg_txs, 0); @@ -334,7 +308,7 @@ async fn test_retrieve() { outputs.push(utxos); mempool.process_published_block(blocks[2].block()).await.unwrap(); // 2-blocks, 2 unconfirmed txs in mempool - let stats = mempool.stats().await.unwrap(); + let stats = mempool.stats().await; assert_eq!(stats.unconfirmed_txs, 2); // assert_eq!(stats.timelocked_txs, 0); assert_eq!(stats.reorg_txs, 5); @@ -353,7 +327,7 @@ async fn test_retrieve() { // Top 2 txs are tx[3] (fee/g = 50) and tx2[1] (fee/g = 40). tx2[0] (fee/g = 80) is still not matured. let weight = tx[3].calculate_weight(weighting) + tx2[1].calculate_weight(weighting); let retrieved_txs = mempool.retrieve(weight).await.unwrap(); - let stats = mempool.stats().await.unwrap(); + let stats = mempool.stats().await; assert_eq!(stats.unconfirmed_txs, 3); // assert_eq!(stats.timelocked_txs, 1); @@ -577,10 +551,7 @@ async fn test_zero_conf() { ); // Try to retrieve all transactions in the mempool (a couple of our transactions should be missing from retrieved) - let retrieved_txs = mempool - .retrieve(mempool.stats().await.unwrap().total_weight) - .await - .unwrap(); + let retrieved_txs = mempool.retrieve(mempool.stats().await.total_weight).await.unwrap(); assert_eq!(retrieved_txs.len(), 10); assert!(retrieved_txs.contains(&Arc::new(tx01.clone()))); assert!(!retrieved_txs.contains(&Arc::new(tx02.clone()))); // Missing @@ -629,10 +600,7 @@ async fn test_zero_conf() { ); // Try to retrieve all transactions in the mempool (all transactions should be retrieved) - let retrieved_txs = mempool - .retrieve(mempool.stats().await.unwrap().total_weight) - .await - .unwrap(); + let retrieved_txs = mempool.retrieve(mempool.stats().await.total_weight).await.unwrap(); assert_eq!(retrieved_txs.len(), 16); assert!(retrieved_txs.contains(&Arc::new(tx01.clone()))); assert!(retrieved_txs.contains(&Arc::new(tx02.clone()))); @@ -653,7 +621,7 @@ async fn test_zero_conf() { // Verify that a higher priority transaction is not retrieved due to its zero-conf dependency instead of the lowest // priority transaction - let weight = mempool.stats().await.unwrap().total_weight - 1; + let weight = mempool.stats().await.total_weight - 1; let retrieved_txs = mempool.retrieve(weight).await.unwrap(); assert_eq!(retrieved_txs.len(), 15); assert!(retrieved_txs.contains(&Arc::new(tx01))); @@ -704,7 +672,7 @@ async fn test_reorg() { for tx in &txns2 { mempool.insert(tx.clone()).await.unwrap(); } - let stats = mempool.stats().await.unwrap(); + let stats = 
mempool.stats().await; assert_eq!(stats.unconfirmed_txs, 3); let txns2 = txns2.iter().map(|t| t.deref().clone()).collect(); generate_block(&db, &mut blocks, txns2, &consensus_manager).unwrap(); @@ -732,7 +700,7 @@ async fn test_reorg() { .unwrap(); mempool.process_published_block(blocks[3].block()).await.unwrap(); - let stats = mempool.stats().await.unwrap(); + let stats = mempool.stats().await; assert_eq!(stats.unconfirmed_txs, 0); // assert_eq!(stats.timelocked_txs, 1); assert_eq!(stats.reorg_txs, 5); @@ -746,7 +714,7 @@ async fn test_reorg() { .process_reorg(vec![blocks[3].to_arc_block()], vec![reorg_block3.into()]) .await .unwrap(); - let stats = mempool.stats().await.unwrap(); + let stats = mempool.stats().await; assert_eq!(stats.unconfirmed_txs, 2); // assert_eq!(stats.timelocked_txs, 1); assert_eq!(stats.reorg_txs, 3); @@ -760,126 +728,6 @@ async fn test_reorg() { mempool.process_reorg(vec![], vec![reorg_block4.into()]).await.unwrap(); } -// TODO: This test returns 0 in the unconfirmed pool, so might not catch errors. It should be updated to return better -// data -#[allow(clippy::identity_op)] -#[tokio::test] -async fn request_response_get_stats() { - let factories = CryptoFactories::default(); - let temp_dir = tempdir().unwrap(); - let network = Network::LocalNet; - let consensus_constants = ConsensusConstantsBuilder::new(network) - .with_coinbase_lockheight(100) - .with_emission_amounts(100_000_000.into(), &EMISSION, 100.into()) - .build(); - let (block0, utxo) = create_genesis_block(&factories, &consensus_constants); - let consensus_manager = ConsensusManager::builder(network) - .add_consensus_constants(consensus_constants) - .with_block(block0) - .build(); - let (mut alice, bob, _consensus_manager) = create_network_with_2_base_nodes_with_config( - BaseNodeServiceConfig::default(), - MempoolServiceConfig::default(), - LivenessConfig::default(), - consensus_manager, - temp_dir.path(), - ) - .await; - - // Create a tx spending the genesis output. Then create 2 orphan txs - let (tx1, _) = spend_utxos(txn_schema!(from: vec![utxo], to: vec![2 * T, 2 * T, 2 * T])); - let tx1 = Arc::new(tx1); - let (orphan1, _, _) = tx!(1*T, fee: 100*uT); - let orphan1 = Arc::new(orphan1); - let (orphan2, _, _) = tx!(2*T, fee: 200*uT); - let orphan2 = Arc::new(orphan2); - - bob.mempool.insert(tx1).await.unwrap(); - bob.mempool.insert(orphan1).await.unwrap(); - bob.mempool.insert(orphan2).await.unwrap(); - - // The coinbase tx cannot be spent until maturity, so txn1 will be in the timelocked pool. The other 2 txns are - // orphans. 
- let stats = bob.mempool.stats().await.unwrap(); - assert_eq!(stats.total_txs, 0); - assert_eq!(stats.unconfirmed_txs, 0); - assert_eq!(stats.reorg_txs, 0); - assert_eq!(stats.total_weight, 0); - - // Alice will request mempool stats from Bob, and thus should be identical - let received_stats = alice.outbound_mp_interface.get_stats().await.unwrap(); - assert_eq!(received_stats.total_txs, 0); - assert_eq!(received_stats.unconfirmed_txs, 0); - assert_eq!(received_stats.reorg_txs, 0); - assert_eq!(received_stats.total_weight, 0); -} - -#[tokio::test] -#[allow(clippy::identity_op)] -async fn request_response_get_tx_state_by_excess_sig() { - let factories = CryptoFactories::default(); - let temp_dir = tempdir().unwrap(); - let network = Network::LocalNet; - let consensus_constants = ConsensusConstantsBuilder::new(network) - .with_coinbase_lockheight(100) - .with_emission_amounts(100_000_000.into(), &EMISSION, 100.into()) - .build(); - let (block0, utxo) = create_genesis_block(&factories, &consensus_constants); - let consensus_manager = ConsensusManager::builder(network) - .add_consensus_constants(consensus_constants) - .with_block(block0) - .build(); - let (mut alice_node, bob_node, carol_node, _consensus_manager) = create_network_with_3_base_nodes_with_config( - BaseNodeServiceConfig::default(), - MempoolServiceConfig::default(), - LivenessConfig::default(), - consensus_manager, - temp_dir.path().to_str().unwrap(), - ) - .await; - - let (tx, _) = spend_utxos(txn_schema!(from: vec![utxo.clone()], to: vec![2 * T, 2 * T, 2 * T])); - let (unpublished_tx, _) = spend_utxos(txn_schema!(from: vec![utxo], to: vec![3 * T])); - let (orphan_tx, _, _) = tx!(1*T, fee: 100*uT); - let tx = Arc::new(tx); - let orphan_tx = Arc::new(orphan_tx); - bob_node.mempool.insert(tx.clone()).await.unwrap(); - carol_node.mempool.insert(tx.clone()).await.unwrap(); - bob_node.mempool.insert(orphan_tx.clone()).await.unwrap(); - carol_node.mempool.insert(orphan_tx.clone()).await.unwrap(); - - // Check that the transactions are in the expected pools. - // Spending the coinbase utxo will be in the pending pool, because cb utxos have a maturity. 
- // The orphan tx will be in the orphan pool, while the unadded tx won't be found - let tx_excess_sig = tx.body.kernels()[0].excess_sig.clone(); - let unpublished_tx_excess_sig = unpublished_tx.body.kernels()[0].excess_sig.clone(); - let orphan_tx_excess_sig = orphan_tx.body.kernels()[0].excess_sig.clone(); - assert_eq!( - alice_node - .outbound_mp_interface - .get_tx_state_by_excess_sig(tx_excess_sig) - .await - .unwrap(), - TxStorageResponse::NotStored - ); - assert_eq!( - alice_node - .outbound_mp_interface - .get_tx_state_by_excess_sig(unpublished_tx_excess_sig) - .await - .unwrap(), - TxStorageResponse::NotStored - ); - assert_eq!( - alice_node - .outbound_mp_interface - .get_tx_state_by_excess_sig(orphan_tx_excess_sig) - .await - .unwrap(), - TxStorageResponse::NotStored - ); -} - static EMISSION: [u64; 2] = [10, 10]; #[tokio::test] #[allow(clippy::identity_op)] @@ -955,24 +803,20 @@ async fn receive_and_propagate_transaction() { .unwrap(); async_assert_eventually!( - bob_node.mempool.has_tx_with_excess_sig(&tx_excess_sig).await.unwrap(), + bob_node.mempool.has_tx_with_excess_sig(&tx_excess_sig).await, expect = TxStorageResponse::NotStored, max_attempts = 20, interval = Duration::from_millis(1000) ); async_assert_eventually!( - carol_node.mempool.has_tx_with_excess_sig(&tx_excess_sig).await.unwrap(), + carol_node.mempool.has_tx_with_excess_sig(&tx_excess_sig).await, expect = TxStorageResponse::NotStored, max_attempts = 10, interval = Duration::from_millis(1000) ); // Carol got sent the orphan tx directly, so it will be in her mempool async_assert_eventually!( - carol_node - .mempool - .has_tx_with_excess_sig(&orphan_excess_sig) - .await - .unwrap(), + carol_node.mempool.has_tx_with_excess_sig(&orphan_excess_sig).await, expect = TxStorageResponse::NotStored, max_attempts = 10, interval = Duration::from_millis(1000) @@ -980,11 +824,7 @@ async fn receive_and_propagate_transaction() { // It's difficult to test a negative here, but let's at least make sure that the orphan TX was not propagated // by the time we check it async_assert_eventually!( - bob_node - .mempool - .has_tx_with_excess_sig(&orphan_excess_sig) - .await - .unwrap(), + bob_node.mempool.has_tx_with_excess_sig(&orphan_excess_sig).await, expect = TxStorageResponse::NotStored, ); } @@ -1216,32 +1056,6 @@ async fn consensus_validation_unique_id() { assert!(matches!(response, TxStorageResponse::NotStoredConsensus)); } -#[tokio::test] -async fn service_request_timeout() { - let network = Network::LocalNet; - let consensus_manager = ConsensusManager::builder(network).build(); - let mempool_service_config = MempoolServiceConfig { - request_timeout: Duration::from_millis(1), - ..Default::default() - }; - let temp_dir = tempdir().unwrap(); - let (mut alice_node, bob_node, _consensus_manager) = create_network_with_2_base_nodes_with_config( - BaseNodeServiceConfig::default(), - mempool_service_config, - LivenessConfig::default(), - consensus_manager, - temp_dir.path().to_str().unwrap(), - ) - .await; - - bob_node.shutdown().await; - - match alice_node.outbound_mp_interface.get_stats().await { - Err(MempoolServiceError::RequestTimedOut) => {}, - _ => panic!(), - } -} - #[tokio::test] #[allow(clippy::identity_op)] async fn block_event_and_reorg_event_handling() { @@ -1320,7 +1134,7 @@ async fn block_event_and_reorg_event_handling() { // Add Block1 - tx1 will be moved to the ReorgPool. 
assert!(bob.local_nci.submit_block(block1.clone(),).await.is_ok()); async_assert_eventually!( - alice.mempool.has_tx_with_excess_sig(&tx1_excess_sig).await.unwrap(), + alice.mempool.has_tx_with_excess_sig(&tx1_excess_sig).await, expect = TxStorageResponse::ReorgPool, max_attempts = 20, interval = Duration::from_millis(1000) @@ -1350,27 +1164,27 @@ async fn block_event_and_reorg_event_handling() { assert!(bob.local_nci.submit_block(block2a.clone(),).await.is_ok()); async_assert_eventually!( - bob.mempool.has_tx_with_excess_sig(&tx2a_excess_sig).await.unwrap(), + bob.mempool.has_tx_with_excess_sig(&tx2a_excess_sig).await, expect = TxStorageResponse::ReorgPool, max_attempts = 20, interval = Duration::from_millis(1000) ); async_assert_eventually!( - alice.mempool.has_tx_with_excess_sig(&tx2a_excess_sig).await.unwrap(), + alice.mempool.has_tx_with_excess_sig(&tx2a_excess_sig).await, expect = TxStorageResponse::ReorgPool, max_attempts = 20, interval = Duration::from_millis(1000) ); assert_eq!( - alice.mempool.has_tx_with_excess_sig(&tx3a_excess_sig).await.unwrap(), + alice.mempool.has_tx_with_excess_sig(&tx3a_excess_sig).await, TxStorageResponse::ReorgPool ); assert_eq!( - alice.mempool.has_tx_with_excess_sig(&tx2b_excess_sig).await.unwrap(), + alice.mempool.has_tx_with_excess_sig(&tx2b_excess_sig).await, TxStorageResponse::ReorgPool ); assert_eq!( - alice.mempool.has_tx_with_excess_sig(&tx3b_excess_sig).await.unwrap(), + alice.mempool.has_tx_with_excess_sig(&tx3b_excess_sig).await, TxStorageResponse::ReorgPool ); } diff --git a/base_layer/core/tests/node_comms_interface.rs b/base_layer/core/tests/node_comms_interface.rs index 0c74711e15..cec81080b5 100644 --- a/base_layer/core/tests/node_comms_interface.rs +++ b/base_layer/core/tests/node_comms_interface.rs @@ -23,6 +23,7 @@ use helpers::block_builders::append_block; use tari_common::configuration::Network; use tari_common_types::types::PublicKey; +use tari_comms::test_utils::mocks::create_connectivity_mock; use tari_core::{ base_node::comms_interface::{ InboundNodeCommsHandlers, @@ -77,12 +78,15 @@ async fn inbound_get_metadata() { let (request_sender, _) = reply_channel::unbounded(); let (block_sender, _) = mpsc::unbounded_channel(); let outbound_nci = OutboundNodeCommsInterface::new(request_sender, block_sender.clone()); + + let (connectivity, _) = create_connectivity_mock(); let inbound_nch = InboundNodeCommsHandlers::new( block_event_sender, store.clone().into(), mempool, consensus_manager, outbound_nci, + connectivity, ); let block = store.fetch_block(0).unwrap().block().clone(); @@ -108,12 +112,14 @@ async fn inbound_fetch_kernel_by_excess_sig() { let (request_sender, _) = reply_channel::unbounded(); let (block_sender, _) = mpsc::unbounded_channel(); let outbound_nci = OutboundNodeCommsInterface::new(request_sender, block_sender.clone()); + let (connectivity, _) = create_connectivity_mock(); let inbound_nch = InboundNodeCommsHandlers::new( block_event_sender, store.clone().into(), mempool, consensus_manager, outbound_nci, + connectivity, ); let block = store.fetch_block(0).unwrap().block().clone(); let sig = block.body.kernels()[0].excess_sig.clone(); @@ -139,12 +145,14 @@ async fn inbound_fetch_headers() { let (request_sender, _) = reply_channel::unbounded(); let (block_sender, _) = mpsc::unbounded_channel(); let outbound_nci = OutboundNodeCommsInterface::new(request_sender, block_sender); + let (connectivity, _) = create_connectivity_mock(); let inbound_nch = InboundNodeCommsHandlers::new( block_event_sender, store.clone().into(), 
mempool, consensus_manager, outbound_nci, + connectivity, ); let header = store.fetch_block(0).unwrap().header().clone(); @@ -170,12 +178,14 @@ async fn inbound_fetch_utxos() { let (request_sender, _) = reply_channel::unbounded(); let (block_sender, _) = mpsc::unbounded_channel(); let outbound_nci = OutboundNodeCommsInterface::new(request_sender, block_sender); + let (connectivity, _) = create_connectivity_mock(); let inbound_nch = InboundNodeCommsHandlers::new( block_event_sender, store.clone().into(), mempool, consensus_manager, outbound_nci, + connectivity, ); let block = store.fetch_block(0).unwrap().block().clone(); let utxo_1 = block.body.outputs()[0].clone(); @@ -213,12 +223,14 @@ async fn inbound_fetch_txos() { let (request_sender, _) = reply_channel::unbounded(); let (block_sender, _) = mpsc::unbounded_channel(); let outbound_nci = OutboundNodeCommsInterface::new(request_sender, block_sender); + let (connectivity, _) = create_connectivity_mock(); let inbound_nch = InboundNodeCommsHandlers::new( block_event_sender, store.clone().into(), mempool, consensus_manager, outbound_nci, + connectivity, ); let (utxo, _, _) = create_utxo( @@ -285,12 +297,14 @@ async fn inbound_fetch_blocks() { let (request_sender, _) = reply_channel::unbounded(); let (block_sender, _) = mpsc::unbounded_channel(); let outbound_nci = OutboundNodeCommsInterface::new(request_sender, block_sender); + let (connectivity, _) = create_connectivity_mock(); let inbound_nch = InboundNodeCommsHandlers::new( block_event_sender, store.clone().into(), mempool, consensus_manager, outbound_nci, + connectivity, ); let block = store.fetch_block(0).unwrap().block().clone(); @@ -333,12 +347,14 @@ async fn inbound_fetch_blocks_before_horizon_height() { let (request_sender, _) = reply_channel::unbounded(); let (block_sender, _) = mpsc::unbounded_channel(); let outbound_nci = OutboundNodeCommsInterface::new(request_sender, block_sender); + let (connectivity, _) = create_connectivity_mock(); let inbound_nch = InboundNodeCommsHandlers::new( block_event_sender, store.clone().into(), mempool, consensus_manager.clone(), outbound_nci, + connectivity, ); let script = script!(Nop); let (utxo, key, offset) = create_utxo( diff --git a/base_layer/core/tests/node_service.rs b/base_layer/core/tests/node_service.rs index 7766dafd7c..bd23b961c1 100644 --- a/base_layer/core/tests/node_service.rs +++ b/base_layer/core/tests/node_service.rs @@ -29,7 +29,7 @@ use helpers::{ }; use randomx_rs::RandomXFlag; use tari_common::configuration::Network; -use tari_comms::protocol::messaging::MessagingEvent; +use tari_comms::{connectivity::ConnectivityEvent, protocol::messaging::MessagingEvent}; use tari_core::{ base_node::{ comms_interface::{BlockEvent, CommsInterfaceError}, @@ -198,7 +198,7 @@ async fn propagate_and_forward_invalid_block_hash() { let consensus_constants = ConsensusConstantsBuilder::new(network) .with_emission_amounts(100_000_000.into(), &EMISSION, 100.into()) .build(); - let (block0, _) = create_genesis_block(&factories, &consensus_constants); + let (block0, genesis_coinbase) = create_genesis_block(&factories, &consensus_constants); let rules = ConsensusManager::builder(network) .add_consensus_constants(consensus_constants) .with_block(block0.clone()) .build(); @@ -241,11 +241,14 @@ randomx_vm_flags: RandomXFlag::FLAG_DEFAULT, }); - let block1 = append_block(&alice_node.blockchain_db, &block0, vec![], &rules, 1.into()).unwrap(); + // Add a transaction that Bob does not have, to force a transaction request + let
(txs, _) = schema_to_transaction(&[txn_schema!(from: vec![genesis_coinbase], to: vec![5 * T], fee: 5.into())]); + let txs = txs.into_iter().map(|tx| (*tx).clone()).collect(); + let block1 = append_block(&alice_node.blockchain_db, &block0, txs, &rules, 1.into()).unwrap(); let block1 = { // Create unknown block hash let mut block = block1.block().clone(); - block.header.height = 0; + block.header.version += 1; let mut accum_data = block1.accumulated_data().clone(); accum_data.hash = block.hash(); ChainBlock::try_construct(block.into(), accum_data).unwrap() }; @@ -266,16 +269,17 @@ .await .unwrap(); unpack_enum!(MessagingEvent::MessageReceived(_a, _b) = &*msg_event); - // Sent the request for the block to Alice - // Bob received a response from Alice + + // Bob asks Alice for the missing transaction let msg_event = event_stream_next(&mut bob_message_events, Duration::from_secs(10)) .await .unwrap(); unpack_enum!(MessagingEvent::MessageReceived(node_id, _a) = &*msg_event); - assert_eq!(&*node_id, alice_node.node_identity.node_id()); + assert_eq!(node_id, alice_node.node_identity.node_id()); + // Checking a negative: Bob should not have propagated this hash to Carol. If Bob does, this assertion will be // flaky. - let msg_event = event_stream_next(&mut carol_message_events, Duration::from_millis(500)).await; + let msg_event = event_stream_next(&mut carol_message_events, Duration::from_secs(1)).await; assert!(msg_event.is_none()); alice_node.shutdown().await; @@ -340,6 +344,12 @@ async fn propagate_and_forward_invalid_block() { .start(temp_dir.path().join("alice").to_str().unwrap()) .await; + alice_node + .comms + .connectivity() + .dial_peer(bob_node.node_identity.node_id().clone()) + .await + .unwrap(); wait_until_online(&[&alice_node, &bob_node, &carol_node, &dan_node]).await; alice_node.mock_base_node_state_machine.publish_status(StatusInfo { @@ -372,33 +382,34 @@ let block1 = append_block(&alice_node.blockchain_db, &block0, vec![], &rules, 1.into()).unwrap(); let block1_hash = block1.hash(); - let mut bob_block_event_stream = bob_node.local_nci.get_block_event_stream(); - let mut carol_block_event_stream = carol_node.local_nci.get_block_event_stream(); - let mut dan_block_event_stream = dan_node.local_nci.get_block_event_stream(); - + let mut bob_connectivity_events = bob_node.comms.connectivity().get_event_subscription(); assert!(alice_node .outbound_nci .propagate_block(NewBlock::from(block1.block()), vec![]) .await .is_ok()); - let bob_block_event_fut = event_stream_next(&mut bob_block_event_stream, Duration::from_millis(20000)); - let carol_block_event_fut = event_stream_next(&mut carol_block_event_stream, Duration::from_millis(20000)); - let dan_block_event_fut = event_stream_next(&mut dan_block_event_stream, Duration::from_millis(5000)); - let (bob_block_event, carol_block_event, dan_block_event) = - tokio::join!(bob_block_event_fut, carol_block_event_fut, dan_block_event_fut); - - if let BlockEvent::AddBlockFailed(received_block) = &*bob_block_event.unwrap() { - assert_eq!(&received_block.hash(), block1_hash); - } else { - panic!("Bob's node should have detected an invalid block"); - } - if let BlockEvent::AddBlockFailed(received_block) = &*carol_block_event.unwrap() { - assert_eq!(&received_block.hash(), block1_hash); - } else { - panic!("Carol's node should have detected an invalid block"); + #[allow(unused_assignments)] + let mut has_banned = false; + loop { + let event = 
event_stream_next(&mut bob_connectivity_events, Duration::from_secs(10)) + .await + .unwrap(); + #[allow(clippy::single_match)] + match event { + ConnectivityEvent::PeerBanned(node_id) => { + assert_eq!(node_id, *alice_node.node_identity.node_id()); + has_banned = true; + break; + }, + _ => {}, + } } - assert!(dan_block_event.is_none()); + assert!(has_banned); + + assert!(!bob_node.blockchain_db.block_exists(block1_hash.clone()).unwrap()); + assert!(!carol_node.blockchain_db.block_exists(block1_hash.clone()).unwrap()); + assert!(!dan_node.blockchain_db.block_exists(block1_hash.clone()).unwrap()); alice_node.shutdown().await; bob_node.shutdown().await; @@ -432,7 +443,7 @@ async fn service_request_timeout() { unpack_enum!( CommsInterfaceError::RequestTimedOut = alice_node .outbound_nci - .request_blocks_with_hashes_from_peer(vec![], Some(bob_node_id)) + .request_blocks_by_hashes_from_peer(vec![], Some(bob_node_id)) .await .unwrap_err() ); diff --git a/comms/dht/src/connectivity/mod.rs b/comms/dht/src/connectivity/mod.rs index c396e6155b..a3d8042521 100644 --- a/comms/dht/src/connectivity/mod.rs +++ b/comms/dht/src/connectivity/mod.rs @@ -277,7 +277,7 @@ impl DhtConnectivity { async fn refresh_peer_pools(&mut self) -> Result<(), DhtConnectivityError> { info!( target: LOG_TARGET, - "Reinitializing neighbour pool. Draining neighbour list (len={})", + "Reinitializing neighbour pool (size={})", self.neighbours.len(), ); @@ -311,6 +311,15 @@ impl DhtConnectivity { .fetch_neighbouring_peers(self.config.num_neighbouring_nodes, &[]) .await?; + if new_neighbours.is_empty() { + info!( + target: LOG_TARGET, + "Unable to refresh neighbouring peer pool because there are insufficient known/online peers", + ); + self.redial_neighbours_as_required().await?; + return Ok(()); + } + let (intersection, difference) = self .neighbours .iter() @@ -465,13 +474,15 @@ impl DhtConnectivity { conn.peer_node_id().short_str() ); - if let Some(node_id) = self.insert_neighbour(conn.peer_node_id().clone()) { + let peer_to_insert = conn.peer_node_id().clone(); + self.insert_connection_handle(conn); + if let Some(node_id) = self.insert_neighbour(peer_to_insert.clone()) { // If we kicked a neighbour out of our neighbour pool but the random pool is not full. // Add the neighbour to the random pool, otherwise remove the handle from the connection pool if self.random_pool.len() < self.config.num_random_nodes { debug!( target: LOG_TARGET, - "Moving peer '{}' from neighbouring pool to random pool", conn + "Moving peer '{}' from neighbouring pool to random pool", peer_to_insert ); self.random_pool.push(node_id); } else {
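Stepping back from the hunks: the new NewBlock message carries the header, the coinbase kernel/output, and the excess signatures of the remaining kernels, and UnconfirmedPool::retrieve_by_excess_sigs splits those signatures into transactions the node already holds versus ones it still needs, which is what the new FetchMempoolTransactionsByExcessSigs request is for. A rough sketch of that receive-side flow, using simplified stand-in types rather than the real tari_core API (handle_new_block is a hypothetical helper, not the handler added by this PR):

use std::{collections::HashMap, sync::Arc};

// Stand-in types: the real code uses tari PrivateKey scalars and Transactions.
type ExcessSig = [u8; 32];
struct Transaction;

struct Mempool {
    by_excess_sig: HashMap<ExcessSig, Arc<Transaction>>,
}

impl Mempool {
    // Mirrors the shape of UnconfirmedPool::retrieve_by_excess_sigs from this
    // diff: returns the transactions we hold plus the sigs we still need.
    fn retrieve_by_excess_sigs(&self, sigs: &[ExcessSig]) -> (Vec<Arc<Transaction>>, Vec<ExcessSig>) {
        let mut found = Vec::new();
        let mut missing = Vec::new();
        for sig in sigs {
            match self.by_excess_sig.get(sig) {
                Some(tx) => found.push(Arc::clone(tx)),
                None => missing.push(*sig),
            }
        }
        (found, missing)
    }
}

fn handle_new_block(mempool: &Mempool, kernel_excess_sigs: &[ExcessSig]) {
    let (local, missing) = mempool.retrieve_by_excess_sigs(kernel_excess_sigs);
    if missing.is_empty() {
        // Every non-coinbase transaction is already in our mempool: rebuild the
        // block locally from header + coinbase + local txs and validate it.
        println!("reconstructed block from {} mempool transactions", local.len());
    } else {
        // Otherwise ask the propagating peer for just the missing transactions
        // (the new FetchMempoolTransactionsByExcessSigs request), instead of
        // re-downloading the whole block.
        println!("requesting {} missing transactions", missing.len());
    }
}

fn main() {
    let mempool = Mempool { by_excess_sig: HashMap::new() };
    handle_new_block(&mempool, &[[0u8; 32]]);
}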