Skip to content

Commit

Permalink
Do not run forced_canonicalization for archive nodes (paritytech#13051
Browse files Browse the repository at this point in the history
)

We don't canonicalize on archive nodes and thus `best_canonical` always returned `None`. So, the
moment such a node tried to force canonicalize, it was trapped in some endless loop.

This PR solves this by renaming `best_canonical` to `last_canonicalized` and also making the return
value clearer by introducing a custom enum, `LastCanonicalized`.
  • Loading branch information
bkchr authored and ark0f committed Feb 27, 2023
1 parent 3dad32c commit f547f59
Show file tree
Hide file tree
Showing 2 changed files with 216 additions and 90 deletions.
270 changes: 186 additions & 84 deletions client/db/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ use sc_client_api::{
utils::is_descendent_of,
IoInfo, MemoryInfo, MemorySize, UsageInfo,
};
use sc_state_db::{IsPruned, StateDb};
use sc_state_db::{IsPruned, LastCanonicalized, StateDb};
use sp_arithmetic::traits::Saturating;
use sp_blockchain::{
well_known_cache_keys, Backend as _, CachedHeaderMetadata, Error as ClientError, HeaderBackend,
Expand Down Expand Up @@ -1315,19 +1315,25 @@ impl<Block: BlockT> Backend<Block> {
&self,
transaction: &mut Transaction<DbHash>,
) -> ClientResult<()> {
let best_canonical = || self.storage.state_db.best_canonical().unwrap_or(0);
let best_canonical = match self.storage.state_db.last_canonicalized() {
LastCanonicalized::None => 0,
LastCanonicalized::Block(b) => b,
// Nothing needs to be done when canonicalization is not happening.
LastCanonicalized::NotCanonicalizing => return Ok(()),
};

let info = self.blockchain.info();
let best_number: u64 = self.blockchain.info().best_number.saturated_into();

while best_number.saturating_sub(best_canonical()) > self.canonicalization_delay {
let to_canonicalize = best_canonical() + 1;

for to_canonicalize in
best_canonical + 1..=best_number.saturating_sub(self.canonicalization_delay)
{
let hash_to_canonicalize = sc_client_api::blockchain::HeaderBackend::hash(
&self.blockchain,
to_canonicalize.saturated_into(),
)?
.ok_or_else(|| {
let best_hash = info.best_hash;
let best_hash = info.best_hash;

sp_blockchain::Error::Backend(format!(
"Can't canonicalize missing block number #{to_canonicalize} when for best block {best_hash:?} (#{best_number})",
Expand Down Expand Up @@ -1697,13 +1703,13 @@ impl<Block: BlockT> Backend<Block> {
}
transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key);

if sc_client_api::Backend::have_state_at(self, f_hash, f_num) &&
self.storage
.state_db
.best_canonical()
.map(|c| f_num.saturated_into::<u64>() > c)
.unwrap_or(true)
{
let requires_canonicalization = match self.storage.state_db.last_canonicalized() {
LastCanonicalized::None => true,
LastCanonicalized::Block(b) => f_num.saturated_into::<u64>() > b,
LastCanonicalized::NotCanonicalizing => false,
};

if requires_canonicalization && sc_client_api::Backend::have_state_at(self, f_hash, f_num) {
let commit = self.storage.state_db.canonicalize_block(&f_hash).map_err(
sp_blockchain::Error::from_state_db::<
sc_state_db::Error<sp_database::error::DatabaseError>,
Expand Down Expand Up @@ -3824,102 +3830,198 @@ pub(crate) mod tests {

#[test]
fn force_delayed_canonicalize_waiting_for_blocks_to_be_finalized() {
let backend = Backend::<Block>::new_test(10, 1);
let pruning_modes = [BlocksPruning::Some(10), BlocksPruning::KeepAll];

let genesis =
insert_block(&backend, 0, Default::default(), None, Default::default(), vec![], None)
for pruning_mode in pruning_modes {
eprintln!("Running with pruning mode: {:?}", pruning_mode);

let backend = Backend::<Block>::new_test_with_tx_storage(pruning_mode, 1);

let genesis = insert_block(
&backend,
0,
Default::default(),
None,
Default::default(),
vec![],
None,
)
.unwrap();

let block1 = {
let mut op = backend.begin_operation().unwrap();
backend.begin_state_operation(&mut op, genesis).unwrap();
let mut header = Header {
number: 1,
parent_hash: genesis,
state_root: Default::default(),
digest: Default::default(),
extrinsics_root: Default::default(),
};

let storage = vec![(vec![1, 3, 5], None), (vec![5, 5, 5], Some(vec![4, 5, 6]))];

let (root, overlay) = op.old_state.storage_root(
storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
StateVersion::V1,
);
op.update_db_storage(overlay).unwrap();
header.state_root = root.into();

op.update_storage(storage, Vec::new()).unwrap();

op.set_block_data(
header.clone(),
Some(Vec::new()),
None,
None,
NewBlockState::Normal,
)
.unwrap();

let block1 = {
let mut op = backend.begin_operation().unwrap();
backend.begin_state_operation(&mut op, genesis).unwrap();
let header = Header {
number: 1,
parent_hash: genesis,
state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1),
digest: Default::default(),
extrinsics_root: Default::default(),
backend.commit_operation(op).unwrap();

header.hash()
};

op.set_block_data(header.clone(), Some(Vec::new()), None, None, NewBlockState::Normal)
.unwrap();
if matches!(pruning_mode, BlocksPruning::Some(_)) {
assert_eq!(
LastCanonicalized::Block(0),
backend.storage.state_db.last_canonicalized()
);
}

backend.commit_operation(op).unwrap();
// This should not trigger any forced canonicalization as we didn't have imported any
// best block by now.
let block2 = {
let mut op = backend.begin_operation().unwrap();
backend.begin_state_operation(&mut op, block1).unwrap();
let mut header = Header {
number: 2,
parent_hash: block1,
state_root: Default::default(),
digest: Default::default(),
extrinsics_root: Default::default(),
};

header.hash()
};
let storage = vec![(vec![5, 5, 5], Some(vec![4, 5, 6, 2]))];

assert_eq!(0, backend.storage.state_db.best_canonical().unwrap());
let (root, overlay) = op.old_state.storage_root(
storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
StateVersion::V1,
);
op.update_db_storage(overlay).unwrap();
header.state_root = root.into();

// This should not trigger any forced canonicalization as we didn't have imported any best
// block by now.
let block2 = {
let mut op = backend.begin_operation().unwrap();
backend.begin_state_operation(&mut op, block1).unwrap();
let header = Header {
number: 2,
parent_hash: block1,
state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1),
digest: Default::default(),
extrinsics_root: Default::default(),
};
op.update_storage(storage, Vec::new()).unwrap();

op.set_block_data(header.clone(), Some(Vec::new()), None, None, NewBlockState::Normal)
op.set_block_data(
header.clone(),
Some(Vec::new()),
None,
None,
NewBlockState::Normal,
)
.unwrap();

backend.commit_operation(op).unwrap();
backend.commit_operation(op).unwrap();

header.hash()
};
header.hash()
};

assert_eq!(0, backend.storage.state_db.best_canonical().unwrap());
if matches!(pruning_mode, BlocksPruning::Some(_)) {
assert_eq!(
LastCanonicalized::Block(0),
backend.storage.state_db.last_canonicalized()
);
}

// This should also not trigger it yet, because we import a best block, but the best block
// from the POV of the db is still at `0`.
let block3 = {
let mut op = backend.begin_operation().unwrap();
backend.begin_state_operation(&mut op, block2).unwrap();
let header = Header {
number: 3,
parent_hash: block2,
state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1),
digest: Default::default(),
extrinsics_root: Default::default(),
};
// This should also not trigger it yet, because we import a best block, but the best
// block from the POV of the db is still at `0`.
let block3 = {
let mut op = backend.begin_operation().unwrap();
backend.begin_state_operation(&mut op, block2).unwrap();
let mut header = Header {
number: 3,
parent_hash: block2,
state_root: Default::default(),
digest: Default::default(),
extrinsics_root: Default::default(),
};

op.set_block_data(header.clone(), Some(Vec::new()), None, None, NewBlockState::Best)
.unwrap();
let storage = vec![(vec![5, 5, 5], Some(vec![4, 5, 6, 3]))];

backend.commit_operation(op).unwrap();
let (root, overlay) = op.old_state.storage_root(
storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
StateVersion::V1,
);
op.update_db_storage(overlay).unwrap();
header.state_root = root.into();

header.hash()
};
op.update_storage(storage, Vec::new()).unwrap();

// Now it should kick in.
let block4 = {
let mut op = backend.begin_operation().unwrap();
backend.begin_state_operation(&mut op, block3).unwrap();
let header = Header {
number: 4,
parent_hash: block3,
state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1),
digest: Default::default(),
extrinsics_root: Default::default(),
op.set_block_data(
header.clone(),
Some(Vec::new()),
None,
None,
NewBlockState::Best,
)
.unwrap();

backend.commit_operation(op).unwrap();

header.hash()
};

op.set_block_data(header.clone(), Some(Vec::new()), None, None, NewBlockState::Best)
// Now it should kick in.
let block4 = {
let mut op = backend.begin_operation().unwrap();
backend.begin_state_operation(&mut op, block3).unwrap();
let mut header = Header {
number: 4,
parent_hash: block3,
state_root: Default::default(),
digest: Default::default(),
extrinsics_root: Default::default(),
};

let storage = vec![(vec![5, 5, 5], Some(vec![4, 5, 6, 4]))];

let (root, overlay) = op.old_state.storage_root(
storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
StateVersion::V1,
);
op.update_db_storage(overlay).unwrap();
header.state_root = root.into();

op.update_storage(storage, Vec::new()).unwrap();

op.set_block_data(
header.clone(),
Some(Vec::new()),
None,
None,
NewBlockState::Best,
)
.unwrap();

backend.commit_operation(op).unwrap();
backend.commit_operation(op).unwrap();

header.hash()
};
header.hash()
};

assert_eq!(2, backend.storage.state_db.best_canonical().unwrap());
if matches!(pruning_mode, BlocksPruning::Some(_)) {
assert_eq!(
LastCanonicalized::Block(2),
backend.storage.state_db.last_canonicalized()
);
}

assert_eq!(block1, backend.blockchain().hash(1).unwrap().unwrap());
assert_eq!(block2, backend.blockchain().hash(2).unwrap().unwrap());
assert_eq!(block3, backend.blockchain().hash(3).unwrap().unwrap());
assert_eq!(block4, backend.blockchain().hash(4).unwrap().unwrap());
assert_eq!(block1, backend.blockchain().hash(1).unwrap().unwrap());
assert_eq!(block2, backend.blockchain().hash(2).unwrap().unwrap());
assert_eq!(block3, backend.blockchain().hash(3).unwrap().unwrap());
assert_eq!(block4, backend.blockchain().hash(4).unwrap().unwrap());
}
}
}
36 changes: 30 additions & 6 deletions client/state-db/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -283,6 +283,17 @@ fn to_meta_key<S: Codec>(suffix: &[u8], data: &S) -> Vec<u8> {
buffer
}

/// Status information about the last canonicalized block.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum LastCanonicalized {
	/// No block has been canonicalized yet.
	None,
	/// The given block number is the last block that was canonicalized.
	Block(u64),
	/// No canonicalization is happening at all, because the pruning mode is
	/// archive-all (every state is kept, so there is nothing to canonicalize).
	NotCanonicalizing,
}

pub struct StateDbSync<BlockHash: Hash, Key: Hash, D: MetaDb> {
mode: PruningMode,
non_canonical: NonCanonicalOverlay<BlockHash, Key>,
Expand Down Expand Up @@ -349,15 +360,28 @@ impl<BlockHash: Hash, Key: Hash, D: MetaDb> StateDbSync<BlockHash, Key, D> {
Ok(commit)
}

fn best_canonical(&self) -> Option<u64> {
self.non_canonical.last_canonicalized_block_number()
/// Returns the status of canonicalization: the number of the last
/// canonicalized block, or the fact that canonicalization never happens
/// (archive-all pruning).
fn last_canonicalized(&self) -> LastCanonicalized {
	// Archive-all nodes never canonicalize; report that explicitly instead
	// of pretending that nothing has been canonicalized yet.
	if matches!(self.mode, PruningMode::ArchiveAll) {
		return LastCanonicalized::NotCanonicalizing
	}

	self.non_canonical
		.last_canonicalized_block_number()
		.map_or(LastCanonicalized::None, LastCanonicalized::Block)
}

fn is_pruned(&self, hash: &BlockHash, number: u64) -> IsPruned {
match self.mode {
PruningMode::ArchiveAll => IsPruned::NotPruned,
PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => {
if self.best_canonical().map(|c| number > c).unwrap_or(true) {
if self
.non_canonical
.last_canonicalized_block_number()
.map(|c| number > c)
.unwrap_or(true)
{
if self.non_canonical.have_block(hash) {
IsPruned::NotPruned
} else {
Expand Down Expand Up @@ -611,9 +635,9 @@ impl<BlockHash: Hash, Key: Hash, D: MetaDb> StateDb<BlockHash, Key, D> {
self.db.write().remove(hash)
}

/// Returns last finalized block number.
pub fn best_canonical(&self) -> Option<u64> {
return self.db.read().best_canonical()
/// Returns the last canonicalized block, or whether canonicalization is
/// disabled entirely (see [`LastCanonicalized`]).
pub fn last_canonicalized(&self) -> LastCanonicalized {
	self.db.read().last_canonicalized()
}

/// Check if block is pruned away.
Expand Down

0 comments on commit f547f59

Please sign in to comment.