Commit
combine reset pow and add block
SWvheerden committed Nov 25, 2022
1 parent 0efd77f commit 740166e
Showing 2 changed files with 81 additions and 255 deletions.
250 changes: 81 additions & 169 deletions base_layer/core/src/chain_storage/blockchain_database.rs
@@ -1120,9 +1120,11 @@ where B: BlockchainBackend
let mut db = self.db_write_access()?;
swap_to_highest_pow_chain(
&mut *db,
&self.config,
&*self.validators.block,
self.consensus_manager.chain_strength_comparer(),
)
)?;
Ok(())
}
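
The hunk above rewires the public wrapper so that swap_to_highest_pow_chain also receives &self.config, and the BlockAddResult it now returns is discarded so the wrapper keeps returning Ok(()). A minimal sketch of the updated wrapper under that reading; the method name (reset_to_highest_pow_chain here) is hypothetical because it sits outside the visible diff context:

    pub fn reset_to_highest_pow_chain(&self) -> Result<(), ChainStorageError> {
        // Take a write handle on the backend, then let the shared routine decide
        // whether the main chain should be swapped to a stronger PoW fork.
        let mut db = self.db_write_access()?;
        swap_to_highest_pow_chain(
            &mut *db,
            &self.config,
            &*self.validators.block,
            self.consensus_manager.chain_strength_comparer(),
        )?;
        Ok(())
    }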

pub fn fetch_horizon_data(&self) -> Result<HorizonData, ChainStorageError> {
@@ -1864,151 +1866,9 @@ fn handle_possible_reorg<T: BlockchainBackend>(
new_block: Arc<Block>,
) -> Result<BlockAddResult, ChainStorageError> {
let db_height = db.fetch_chain_metadata()?.height_of_longest_chain();
let new_block_hash = new_block.hash();

let new_tips = insert_orphan_and_find_new_tips(db, new_block.clone(), header_validator, difficulty_calculator)?;
debug!(
target: LOG_TARGET,
"Added candidate block #{} ({}) to the orphan database. Best height is {}. New tips found: {} ",
new_block.header.height,
new_block_hash.to_hex(),
db_height,
new_tips.len()
);

if new_tips.is_empty() {
debug!(
target: LOG_TARGET,
"No reorg required, could not construct complete chain using block #{} ({}).",
new_block.header.height,
new_block_hash.to_hex()
);
return Ok(BlockAddResult::OrphanBlock);
}

// Check the accumulated difficulty of the best fork chain compared to the main chain.
let fork_header = find_strongest_orphan_tip(new_tips, chain_strength_comparer).ok_or_else(|| {
// This should never happen because a block is always added to the orphan pool before
// checking, but just in case
warn!(
target: LOG_TARGET,
"Unable to find strongest orphan tip when adding block `{}`. This should never happen.",
new_block_hash.to_hex()
);
ChainStorageError::InvalidOperation("No chain tips found in orphan pool".to_string())
})?;

let tip_header = db.fetch_tip_header()?;
if fork_header.hash() == &new_block_hash {
debug!(
target: LOG_TARGET,
"Comparing candidate block #{} (accum_diff:{}, hash:{}) to main chain #{} (accum_diff: {}, hash: ({})).",
new_block.header.height,
fork_header.accumulated_data().total_accumulated_difficulty,
fork_header.accumulated_data().hash.to_hex(),
tip_header.header().height,
tip_header.accumulated_data().total_accumulated_difficulty,
tip_header.accumulated_data().hash.to_hex()
);
} else {
debug!(
target: LOG_TARGET,
"Comparing fork (accum_diff:{}, hash:{}) with block #{} ({}) to main chain #{} (accum_diff: {}, hash: \
({})).",
fork_header.accumulated_data().total_accumulated_difficulty,
fork_header.accumulated_data().hash.to_hex(),
new_block.header.height,
new_block_hash.to_hex(),
tip_header.header().height,
tip_header.accumulated_data().total_accumulated_difficulty,
tip_header.accumulated_data().hash.to_hex()
);
}

match chain_strength_comparer.compare(&fork_header, &tip_header) {
Ordering::Greater => {
debug!(
target: LOG_TARGET,
"Fork chain (accum_diff:{}, hash:{}) is stronger than the current tip (#{} ({})).",
fork_header.accumulated_data().total_accumulated_difficulty,
fork_header.accumulated_data().hash.to_hex(),
tip_header.height(),
tip_header.hash().to_hex()
);
},
Ordering::Less | Ordering::Equal => {
debug!(
target: LOG_TARGET,
"Fork chain (accum_diff:{}, hash:{}) with block {} ({}) has a weaker difficulty.",
fork_header.accumulated_data().total_accumulated_difficulty,
fork_header.accumulated_data().hash.to_hex(),
new_block.header.height,
new_block_hash.to_hex(),
);
debug!(
target: LOG_TARGET,
"Orphan block received: #{} ", new_block.header.height
);
return Ok(BlockAddResult::OrphanBlock);
},
}
insert_orphan_and_find_new_tips(db, new_block.clone(), header_validator, difficulty_calculator)?;

// TODO: We already have the first link in this chain, can be optimized to exclude it
let reorg_chain = get_orphan_link_main_chain(db, fork_header.hash())?;

let fork_hash = reorg_chain
.front()
.expect("The new orphan block should be in the queue")
.header()
.prev_hash;

let num_added_blocks = reorg_chain.len();
let removed_blocks = reorganize_chain(db, block_validator, fork_hash, &reorg_chain)?;
let num_removed_blocks = removed_blocks.len();

// reorg is required when any blocks are removed or more than one are added
// see https://github.com/tari-project/tari/issues/2101
if num_removed_blocks > 0 || num_added_blocks > 1 {
if config.track_reorgs {
let mut txn = DbTransaction::new();
txn.insert_reorg(Reorg::from_reorged_blocks(&reorg_chain, &removed_blocks));
if let Err(e) = db.write(txn) {
error!(target: LOG_TARGET, "Failed to track reorg: {}", e);
}
}

log!(
target: LOG_TARGET,
if num_removed_blocks > 1 {
Level::Warn
} else {
Level::Info
}, // We want a warning if the number of removed blocks is at least 2.
"Chain reorg required from {} to {} (accum_diff:{}, hash:{}) to (accum_diff:{}, hash:{}). Number of \
blocks to remove: {}, to add: {}.",
tip_header.header().height,
fork_header.header().height,
tip_header.accumulated_data().total_accumulated_difficulty,
tip_header.accumulated_data().hash.to_hex(),
fork_header.accumulated_data().total_accumulated_difficulty,
fork_header.accumulated_data().hash.to_hex(),
num_removed_blocks,
num_added_blocks,
);
Ok(BlockAddResult::ChainReorg {
removed: removed_blocks,
added: reorg_chain.into(),
})
} else {
trace!(
target: LOG_TARGET,
"No reorg required. Number of blocks to remove: {}, to add: {}.",
num_removed_blocks,
num_added_blocks,
);
// NOTE: panic is not possible because get_orphan_link_main_chain cannot return an empty Vec (reorg_chain)
Ok(BlockAddResult::Ok(reorg_chain.front().unwrap().clone()))
}
swap_to_highest_pow_chain(db, config, block_validator, chain_strength_comparer)
}
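
With the tip comparison and reorg bookkeeping moved out, handle_possible_reorg reduces to inserting the candidate block into the orphan pool and delegating. A sketch of the resulting shape, assembled from the lines visible in this diff; the parameter names and order ahead of new_block are assumed, and the removed logging is omitted:

    fn handle_possible_reorg<T: BlockchainBackend>(
        db: &mut T,
        config: &BlockchainDatabaseConfig,
        block_validator: &dyn PostOrphanBodyValidation<T>,
        header_validator: &dyn HeaderValidation<T>,
        difficulty_calculator: &DifficultyCalculator,
        chain_strength_comparer: &dyn ChainStrengthComparer,
        new_block: Arc<Block>,
    ) -> Result<BlockAddResult, ChainStorageError> {
        // Record the candidate in the orphan pool and update the orphan chain tips.
        insert_orphan_and_find_new_tips(db, new_block, header_validator, difficulty_calculator)?;
        // Compare the strongest orphan tip against the current tip and swap the
        // main chain over (tracking the reorg) only if the fork is stronger.
        swap_to_highest_pow_chain(db, config, block_validator, chain_strength_comparer)
    }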

/// Reorganize the main chain with the provided fork chain, starting at the specified height.
@@ -2072,19 +1932,20 @@ fn reorganize_chain<T: BlockchainBackend>(

fn swap_to_highest_pow_chain<T: BlockchainBackend>(
db: &mut T,
config: &BlockchainDatabaseConfig,
block_validator: &dyn PostOrphanBodyValidation<T>,
chain_strength_comparer: &dyn ChainStrengthComparer,
) -> Result<(), ChainStorageError> {
) -> Result<BlockAddResult, ChainStorageError> {
let metadata = db.fetch_chain_metadata()?;
// lets clear out all remaining headers that done have a matching block
// lets clear out all remaining headers that dont have a matching block
// rewind to height will first delete the headers, then try delete from blocks, if we call this to the current
// height it will only trim the extra headers with no blocks
rewind_to_height(db, metadata.height_of_longest_chain())?;
let all_orphan_tips = db.fetch_all_orphan_chain_tips()?;
if all_orphan_tips.is_empty() {
// we have no orphan chain tips, we have trimmed remaining headers, we are on the best tip we have, so lets
// return ok
return Ok(());
return Ok(BlockAddResult::OrphanBlock);
}
// Check the accumulated difficulty of the best fork chain compared to the main chain.
let best_fork_header = find_strongest_orphan_tip(all_orphan_tips, chain_strength_comparer).ok_or_else(|| {
@@ -2109,7 +1970,15 @@ fn swap_to_highest_pow_chain<T: BlockchainBackend>(
);
},
Ordering::Less | Ordering::Equal => {
return Ok(());
debug!(
target: LOG_TARGET,
"Fork chain (accum_diff:{}, hash:{}) with block {} ({}) has a weaker difficulty.",
best_fork_header.accumulated_data().total_accumulated_difficulty,
best_fork_header.accumulated_data().hash.to_hex(),
tip_header.header().height,
tip_header.hash().to_hex(),
);
return Ok(BlockAddResult::OrphanBlock);
},
}

@@ -2119,8 +1988,54 @@ fn swap_to_highest_pow_chain<T: BlockchainBackend>(
.expect("The new orphan block should be in the queue")
.header()
.prev_hash;
reorganize_chain(db, block_validator, fork_hash, &reorg_chain)?;
Ok(())

let num_added_blocks = reorg_chain.len();
let removed_blocks = reorganize_chain(db, block_validator, fork_hash, &reorg_chain)?;
let num_removed_blocks = removed_blocks.len();

// reorg is required when any blocks are removed or more than one are added
// see https://github.com/tari-project/tari/issues/2101
if num_removed_blocks > 0 || num_added_blocks > 1 {
if config.track_reorgs {
let mut txn = DbTransaction::new();
txn.insert_reorg(Reorg::from_reorged_blocks(&reorg_chain, &removed_blocks));
if let Err(e) = db.write(txn) {
error!(target: LOG_TARGET, "Failed to track reorg: {}", e);
}
}

log!(
target: LOG_TARGET,
if num_removed_blocks > 1 {
Level::Warn
} else {
Level::Info
}, // We want a warning if the number of removed blocks is at least 2.
"Chain reorg required from {} to {} (accum_diff:{}, hash:{}) to (accum_diff:{}, hash:{}). Number of \
blocks to remove: {}, to add: {}.",
tip_header.header().height,
best_fork_header.header().height,
tip_header.accumulated_data().total_accumulated_difficulty,
tip_header.accumulated_data().hash.to_hex(),
best_fork_header.accumulated_data().total_accumulated_difficulty,
best_fork_header.accumulated_data().hash.to_hex(),
num_removed_blocks,
num_added_blocks,
);
Ok(BlockAddResult::ChainReorg {
removed: removed_blocks,
added: reorg_chain.into(),
})
} else {
trace!(
target: LOG_TARGET,
"No reorg required. Number of blocks to remove: {}, to add: {}.",
num_removed_blocks,
num_added_blocks,
);
// NOTE: panic is not possible because get_orphan_link_main_chain cannot return an empty Vec (reorg_chain)
Ok(BlockAddResult::Ok(reorg_chain.front().unwrap().clone()))
}
}
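
Because swap_to_highest_pow_chain now returns a BlockAddResult rather than (), callers can branch on the outcome of a swap directly. A hypothetical usage sketch in which the db, config, validators and comparer bindings are assumed to be in scope; only the variants that appear in this diff are matched by name, and anything else falls through to the wildcard arm:

    match swap_to_highest_pow_chain(&mut *db, &config, &*validators.block, comparer)? {
        BlockAddResult::ChainReorg { removed, added } => {
            // The fork was stronger: `removed` holds the blocks popped off the old
            // best chain, `added` the fork blocks that replaced them.
            info!(target: LOG_TARGET, "Reorg: removed {}, added {}", removed.len(), added.len());
        },
        BlockAddResult::Ok(_new_tip) => {
            // Exactly one block extended the current best chain; nothing was removed.
        },
        BlockAddResult::OrphanBlock => {
            // No complete, stronger fork exists yet; the current tip stays as-is.
        },
        _ => {},
    }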

fn restore_reorged_chain<T: BlockchainBackend>(
@@ -2154,12 +2069,12 @@ fn insert_orphan_and_find_new_tips<T: BlockchainBackend>(
block: Arc<Block>,
validator: &dyn HeaderValidation<T>,
difficulty_calculator: &DifficultyCalculator,
) -> Result<Vec<ChainHeader>, ChainStorageError> {
) -> Result<(), ChainStorageError> {
let hash = block.hash();

// There cannot be any _new_ tips if we've seen this orphan block before
if db.contains(&DbKey::OrphanBlock(hash))? {
return Ok(vec![]);
return Ok(());
}

let parent = match db.fetch_orphan_chain_tip_by_hash(&block.header.prev_hash)? {
@@ -2206,7 +2121,7 @@ fn insert_orphan_and_find_new_tips<T: BlockchainBackend>(
txn.insert_orphan(block);
db.write(txn)?;
}
return Ok(vec![]);
return Ok(());
},
},
};
@@ -2239,7 +2154,7 @@ fn insert_orphan_and_find_new_tips<T: BlockchainBackend>(
}

db.write(txn)?;
Ok(tips)
Ok(())
}
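
insert_orphan_and_find_new_tips now returns Ok(()) instead of the list of new tips, so the tests below read the tip back from the backend rather than inspecting a returned Vec<ChainHeader>. A condensed sketch of that pattern, using the same calls the updated tests make:

    insert_orphan_and_find_new_tips(&mut *access, block.to_arc_block(), &validator, &db.difficulty_calculator).unwrap();
    // The freshly inserted block should now be reachable as an orphan chain tip.
    let fork_tip = access
        .fetch_orphan_chain_tip_by_hash(block.hash())
        .unwrap()
        .expect("tip was just inserted");
    assert_eq!(fork_tip.header(), block.header());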

// Find the tip set of any orphans that have hash as an ancestor
@@ -2653,15 +2568,13 @@ mod test {
let (_, chain) = create_chained_blocks(&[("A->GB", 1u64, 120u64)], genesis_block);
let block = chain.get("A").unwrap().clone();
let mut access = db.db_write_access().unwrap();
let chain = insert_orphan_and_find_new_tips(
insert_orphan_and_find_new_tips(
&mut *access,
block.to_arc_block(),
&validator,
&db.difficulty_calculator,
)
.unwrap();
assert_eq!(chain.len(), 1);
assert_eq!(chain[0].hash(), block.hash());

let maybe_block = access.fetch_orphan_chain_tip_by_hash(block.hash()).unwrap();
assert_eq!(maybe_block.unwrap().header(), block.header());
@@ -2679,24 +2592,22 @@ let mut access = db.db_write_access().unwrap();
let mut access = db.db_write_access().unwrap();

let block_d2 = orphan_chain.get("D2").unwrap().clone();
let chain = insert_orphan_and_find_new_tips(
insert_orphan_and_find_new_tips(
&mut *access,
block_d2.to_arc_block(),
&validator,
&db.difficulty_calculator,
)
.unwrap();
assert!(chain.is_empty());

let block_e2 = orphan_chain.get("E2").unwrap().clone();
let chain = insert_orphan_and_find_new_tips(
insert_orphan_and_find_new_tips(
&mut *access,
block_e2.to_arc_block(),
&validator,
&db.difficulty_calculator,
)
.unwrap();
assert!(chain.is_empty());

let maybe_block = access.fetch_orphan_children_of(*block_d2.hash()).unwrap();
assert_eq!(maybe_block[0], *block_e2.to_arc_block());
@@ -2713,28 +2624,29 @@ mod test {
let mut access = db.db_write_access().unwrap();

let block = orphan_chain.get("B2").unwrap().clone();
let chain = insert_orphan_and_find_new_tips(
insert_orphan_and_find_new_tips(
&mut *access,
block.to_arc_block(),
&validator,
&db.difficulty_calculator,
)
.unwrap();
assert_eq!(chain.len(), 1);
assert_eq!(chain[0].header(), block.header());
assert_eq!(chain[0].accumulated_data().total_accumulated_difficulty, 4);
let fork_tip = access.fetch_orphan_chain_tip_by_hash(chain[0].hash()).unwrap().unwrap();
let fork_tip = access.fetch_orphan_chain_tip_by_hash(block.hash()).unwrap().unwrap();
assert_eq!(fork_tip, block.to_chain_header());
assert_eq!(fork_tip.accumulated_data().total_accumulated_difficulty, 4);
let all_tips = access.fetch_all_orphan_chain_tips().unwrap().len();
assert_eq!(all_tips, 1);

// Insert again (block was received more than once), no new tips
let chain = insert_orphan_and_find_new_tips(
insert_orphan_and_find_new_tips(
&mut *access,
block.to_arc_block(),
&validator,
&db.difficulty_calculator,
)
.unwrap();
assert_eq!(chain.len(), 0);
let all_tips = access.fetch_all_orphan_chain_tips().unwrap().len();
assert_eq!(all_tips, 1);
}
}
