Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merge dev into tari-script, update peer seeds #2974

Merged
merged 10 commits into from
May 28, 2021
7 changes: 2 additions & 5 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -33,12 +33,9 @@ report
*.mdb
/data/
*.sqlite3
base_layer/wallet_ffi/build.config
/base_layer/wallet_ffi/build.config
/base_layer/wallet_ffi/logs/
base_layer/wallet_ffi/.cargo/config
/config
/stibbons
/wallet
/base_layer/wallet_ffi/.cargo/config
keys.json
node_modules
/integration_tests/temp
Expand Down
62 changes: 60 additions & 2 deletions applications/tari_app_utilities/src/initialization.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
use crate::{consts, utilities::ExitCodes};
use config::Config;
use std::path::PathBuf;
use structopt::StructOpt;
use tari_common::{configuration::bootstrap::ApplicationType, ConfigBootstrap, GlobalConfig};
use tari_common::{configuration::bootstrap::ApplicationType, ConfigBootstrap, DatabaseType, GlobalConfig};

pub const LOG_TARGET: &str = "tari::application";

Expand All @@ -23,7 +24,64 @@ pub fn init_configuration(
log::info!(target: LOG_TARGET, "{} ({})", application_type, consts::APP_VERSION);

// Populate the configuration struct
let global_config =
let mut global_config =
GlobalConfig::convert_from(cfg.clone()).map_err(|err| ExitCodes::ConfigError(err.to_string()))?;
check_file_paths(&mut global_config, &bootstrap);
Ok((bootstrap, global_config, cfg))
}

fn check_file_paths(config: &mut GlobalConfig, bootstrap: &ConfigBootstrap) {
let prepend = bootstrap.base_path.clone();
if !config.data_dir.is_absolute() {
config.data_dir = concatenate_paths_normalized(prepend.clone(), config.data_dir.clone());
if let DatabaseType::LMDB(_) = config.db_type {
config.db_type = DatabaseType::LMDB(config.data_dir.join("db"));
}
}
if !config.peer_db_path.is_absolute() {
config.peer_db_path = concatenate_paths_normalized(prepend.clone(), config.peer_db_path.clone());
}
if !config.base_node_identity_file.is_absolute() {
config.base_node_identity_file =
concatenate_paths_normalized(prepend.clone(), config.base_node_identity_file.clone());
}
if !config.base_node_tor_identity_file.is_absolute() {
config.base_node_tor_identity_file =
concatenate_paths_normalized(prepend.clone(), config.base_node_tor_identity_file.clone());
}
if !config.console_wallet_db_file.is_absolute() {
config.console_wallet_db_file =
concatenate_paths_normalized(prepend.clone(), config.console_wallet_db_file.clone());
}
if !config.console_wallet_peer_db_path.is_absolute() {
config.console_wallet_peer_db_path =
concatenate_paths_normalized(prepend.clone(), config.console_wallet_peer_db_path.clone());
}
if !config.console_wallet_identity_file.is_absolute() {
config.console_wallet_identity_file =
concatenate_paths_normalized(prepend.clone(), config.console_wallet_identity_file.clone());
}
if !config.console_wallet_tor_identity_file.is_absolute() {
config.console_wallet_tor_identity_file =
concatenate_paths_normalized(prepend.clone(), config.console_wallet_tor_identity_file.clone());
}
if !config.wallet_db_file.is_absolute() {
config.wallet_db_file = concatenate_paths_normalized(prepend.clone(), config.wallet_db_file.clone());
}
if !config.wallet_peer_db_path.is_absolute() {
config.wallet_peer_db_path = concatenate_paths_normalized(prepend.clone(), config.wallet_peer_db_path.clone());
}
if let Some(file_path) = config.console_wallet_notify_file.clone() {
if file_path.is_absolute() {
config.console_wallet_notify_file = Some(concatenate_paths_normalized(prepend, file_path));
}
}
}

/// Appends `extension_path` onto `prepend` one component at a time.
/// Note that pushing a `RootDir` component replaces the accumulated path, so an
/// absolute `extension_path` yields itself — matching `PathBuf::push` semantics.
fn concatenate_paths_normalized(prepend: PathBuf, extension_path: PathBuf) -> PathBuf {
    extension_path.components().fold(prepend, |mut joined, part| {
        joined.push(part);
        joined
    })
}
4 changes: 2 additions & 2 deletions applications/tari_base_node/src/command_handler.rs
Original file line number Diff line number Diff line change
Expand Up @@ -141,16 +141,16 @@ impl CommandHandler {
),
);

let banned_peers = fetch_banned_peers(&peer_manager).await.unwrap();
let conns = connectivity.get_active_connections().await.unwrap();
status_line.add_field("Connections", conns.len());
let banned_peers = fetch_banned_peers(&peer_manager).await.unwrap();
status_line.add_field("Banned", banned_peers.len());

let num_messages = metrics
.get_total_message_count_in_timespan(Duration::from_secs(60))
.await
.unwrap();
status_line.add_field("Messages (last 60s)", num_messages);
status_line.add_field("Banned", banned_peers.len());

let num_active_rpc_sessions = rpc_server.get_num_active_sessions().await.unwrap();
status_line.add_field(
Expand Down
4 changes: 2 additions & 2 deletions applications/tari_base_node/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -125,10 +125,10 @@ const LOG_TARGET: &str = "base_node::app";
/// Application entry point
fn main() {
if let Err(exit_code) = main_inner() {
eprintln!("{}", exit_code);
eprintln!("{:?}", exit_code);
error!(
target: LOG_TARGET,
"Exiting with code ({}): {}",
"Exiting with code ({}): {:?}",
exit_code.as_i32(),
exit_code
);
Expand Down
4 changes: 2 additions & 2 deletions applications/tari_console_wallet/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -43,10 +43,10 @@ fn main() {
match main_inner() {
Ok(_) => process::exit(0),
Err(exit_code) => {
eprintln!("{}", exit_code);
eprintln!("{:?}", exit_code);
error!(
target: LOG_TARGET,
"Exiting with code ({}): {}",
"Exiting with code ({}): {:?}",
exit_code.as_i32(),
exit_code
);
Expand Down
16 changes: 8 additions & 8 deletions base_layer/core/src/chain_storage/blockchain_backend.rs
Original file line number Diff line number Diff line change
Expand Up @@ -22,25 +22,25 @@ use croaring::Bitmap;
use tari_common_types::chain_metadata::ChainMetadata;
use tari_mmr::Hash;

/// Identify behaviour for Blockchain database back ends. Implementations must support `Send` and `Sync` so that
/// Identify behaviour for Blockchain database backends. Implementations must support `Send` and `Sync` so that
/// `BlockchainDatabase` can be thread-safe. The backend *must* also execute transactions atomically; i.e., every
/// operation within it must succeed, or they all fail. Failure to support this contract could lead to
/// synchronisation issues in your database backend.
///
/// Data is passed to and from the backend via the [DbKey], [DbValue], and [DbValueKey] enums. This strategy allows
/// us to keep the reading and writing API extremely simple. Extending the types of data that the back ends can handle
/// will entail adding to those enums, and the back ends, while this trait can remain unchanged.
/// us to keep the reading and writing API extremely simple. Extending the types of data that the backends can handle
/// will entail adding to those enums, and the backends, while this trait can remain unchanged.
#[allow(clippy::ptr_arg)]
pub trait BlockchainBackend: Send + Sync {
/// Commit the transaction given to the backend. If there is an error, the transaction must be rolled back, and
/// the error condition returned. On success, every operation in the transaction will have been committed, and
/// the function will return `Ok(())`.
fn write(&mut self, tx: DbTransaction) -> Result<(), ChainStorageError>;
/// Fetch a value from the back end corresponding to the given key. If the value is not found, `get` must return
/// `Ok(None)`. It should only error if there is an access or integrity issue with the underlying back end.
/// Fetch a value from the backend corresponding to the given key. If the value is not found, `get` must return
/// `Ok(None)`. It should only error if there is an access or integrity issue with the underlying backend.
fn fetch(&self, key: &DbKey) -> Result<Option<DbValue>, ChainStorageError>;
/// Checks to see whether the given key exists in the back end. This function should only fail if there is an
/// access or integrity issue with the back end.
/// Checks to see whether the given key exists in the backend. This function should only fail if there is an
/// access or integrity issue with the backend.
fn contains(&self, key: &DbKey) -> Result<bool, ChainStorageError>;

/// Fetches data that is calculated and accumulated for blocks that have been
Expand Down Expand Up @@ -132,7 +132,7 @@ pub trait BlockchainBackend: Send + Sync {
/// Returns the kernel count
fn kernel_count(&self) -> Result<usize, ChainStorageError>;

/// Fetches an current tip orphan by hash or returns None if the prohan is not found or is not a tip of any
/// Fetches a current tip orphan by hash or returns None if the orphan is not found or is not a tip of any
/// alternate chain
fn fetch_orphan_chain_tip_by_hash(&self, hash: &HashOutput) -> Result<Option<ChainHeader>, ChainStorageError>;
/// Fetch all orphans that have `hash` as a previous hash
Expand Down
17 changes: 14 additions & 3 deletions base_layer/core/src/chain_storage/blockchain_database.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1243,6 +1243,8 @@ fn check_for_valid_height<T: BlockchainBackend>(db: &T, height: u64) -> Result<(
Ok((tip_height, height < pruned_height))
}

/// Removes blocks from the db from current tip to specified height.
/// Returns the blocks removed, ordered from tip to height.
fn rewind_to_height<T: BlockchainBackend>(
db: &mut T,
mut height: u64,
Expand Down Expand Up @@ -1487,7 +1489,6 @@ fn handle_possible_reorg<T: BlockchainBackend>(

let num_added_blocks = reorg_chain.len();
let removed_blocks = reorganize_chain(db, block_validator, fork_height, &reorg_chain)?;

let num_removed_blocks = removed_blocks.len();

// reorg is required when any blocks are removed or more than one are added
Expand Down Expand Up @@ -1523,7 +1524,8 @@ fn handle_possible_reorg<T: BlockchainBackend>(
}
}

// Reorganize the main chain with the provided fork chain, starting at the specified height.
/// Reorganize the main chain with the provided fork chain, starting at the specified height.
/// Returns the blocks that were removed (if any), ordered from tip to fork (ie. height desc).
fn reorganize_chain<T: BlockchainBackend>(
backend: &mut T,
block_validator: &dyn PostOrphanBodyValidation<T>,
Expand Down Expand Up @@ -1577,6 +1579,15 @@ fn reorganize_chain<T: BlockchainBackend>(
}
}

if let Some(block) = removed_blocks.first() {
// insert the new orphan chain tip
let mut txn = DbTransaction::new();
let hash = block.hash().clone();
debug!(target: LOG_TARGET, "Inserting new orphan chain tip: {}", hash.to_hex());
txn.insert_orphan_chain_tip(hash);
backend.write(txn)?;
}

Ok(removed_blocks)
}

Expand Down Expand Up @@ -1605,7 +1616,7 @@ fn restore_reorged_chain<T: BlockchainBackend>(
Ok(())
}

// Insert the provided block into the orphan pool and returns any new tips that were created
/// Inserts the provided block into the orphan pool and returns any new tips that were created.
fn insert_orphan_and_find_new_tips<T: BlockchainBackend>(
db: &mut T,
block: Arc<Block>,
Expand Down
8 changes: 4 additions & 4 deletions base_layer/core/src/mempool/sync_protocol/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -44,18 +44,18 @@
//! +-------+ +-----+
//! | |
//! | Txn Inventory |
//! |------------------------------->|
//! |------------------------------->|
//! | |
//! | TransactionItem(tx_b1) |
//! |<-------------------------------|
//! |<-------------------------------|
//! | ...streaming... |
//! | TransactionItem(empty) |
//! |<-------------------------------|
//! | Inventory missing txn indexes |
//! |<-------------------------------|
//! | |
//! | TransactionItem(tx_a1) |
//! |------------------------------->|
//! |------------------------------->|
//! | ...streaming... |
//! | TransactionItem(empty) |
//! |------------------------------->|
Expand Down Expand Up @@ -146,7 +146,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + Sync + 'static
if (*status_watch.borrow()).bootstrapped {
break;
}
debug!(
trace!(
target: LOG_TARGET,
"Mempool sync still on hold, waiting for bootstrap to finish",
);
Expand Down
Loading