From 3529426a3a241fbd3362b0aa4889b402139201b5 Mon Sep 17 00:00:00 2001 From: Volker Mische Date: Wed, 13 Sep 2023 11:57:42 +0200 Subject: [PATCH] fix: hard-code rows to discard Only rows of the TreeR get discarded. Hence hard-code all other cases to 0 to make it clear that they don't discard any rows. This makes the code easier to follow and reason about. --- fil-proofs-tooling/src/shared.rs | 11 +++------- filecoin-proofs/src/api/mod.rs | 12 +--------- filecoin-proofs/src/api/seal.rs | 20 +++++------------ .../src/stacked/vanilla/create_label/mod.rs | 5 ++--- .../src/stacked/vanilla/proof.rs | 7 +++--- storage-proofs-porep/tests/stacked_vanilla.rs | 22 +++++-------------- storage-proofs-update/tests/circuit.rs | 1 - .../tests/circuit_poseidon.rs | 1 - storage-proofs-update/tests/compound.rs | 3 +-- 9 files changed, 20 insertions(+), 62 deletions(-) diff --git a/fil-proofs-tooling/src/shared.rs b/fil-proofs-tooling/src/shared.rs index 4dda17129..a43ac363c 100644 --- a/fil-proofs-tooling/src/shared.rs +++ b/fil-proofs-tooling/src/shared.rs @@ -9,7 +9,6 @@ use filecoin_proofs::{ PublicReplicaInfo, SealPreCommitOutput, SealPreCommitPhase1Output, SectorSize, UnpaddedBytesAmount, }; -use generic_array::typenum::Unsigned; use log::info; use merkletree::store::StoreConfig; use rand::{random, thread_rng, RngCore}; @@ -19,7 +18,7 @@ use rayon::prelude::{ use storage_proofs_core::{ api_version::{ApiFeature, ApiVersion}, sector::SectorId, - util::{default_rows_to_discard, NODE_SIZE}, + util::NODE_SIZE, }; use storage_proofs_porep::stacked::Labels; use tempfile::{tempdir, NamedTempFile}; @@ -193,12 +192,8 @@ pub fn create_replicas( .par_iter() .zip(sector_ids.par_iter()) .map(|(cache_dir, sector_id)| { - let nodes = sector_size.0 as usize / NODE_SIZE; - let mut tmp_store_config = StoreConfig::new( - cache_dir.path(), - format!("tmp-config-{}", sector_id), - default_rows_to_discard(nodes, Tree::Arity::to_usize()), - ); + let mut tmp_store_config = 
StoreConfig::new(cache_dir.path(), format!("tmp-config-{}", sector_id), 0); tmp_store_config.size = Some(u64::from(sector_size) as usize / NODE_SIZE); let f = File::create(StoreConfig::data_path( &tmp_store_config.path, diff --git a/filecoin-proofs/src/api/mod.rs b/filecoin-proofs/src/api/mod.rs index 5f541feba..1cad7544c 100644 --- a/filecoin-proofs/src/api/mod.rs +++ b/filecoin-proofs/src/api/mod.rs @@ -16,7 +16,6 @@ use storage_proofs_core::{ merkle::get_base_tree_count, pieces::generate_piece_commitment_bytes_from_source, sector::SectorId, - util::default_rows_to_discard, }; use storage_proofs_porep::stacked::{ generate_replica_id, PersistentAux, PublicParams, StackedDrg, TemporaryAux, @@ -356,16 +355,7 @@ where { trace!("unseal_range_inner:start"); - let base_tree_size = get_base_tree_size::(porep_config.sector_size)?; - let base_tree_leafs = get_base_tree_leafs::(base_tree_size)?; - let config = StoreConfig::new( - cache_path.as_ref(), - CacheKey::CommDTree.to_string(), - default_rows_to_discard( - base_tree_leafs, - ::Arity::to_usize(), - ), - ); + let config = StoreConfig::new(cache_path.as_ref(), CacheKey::CommDTree.to_string(), 0); let pp: PublicParams = public_params(porep_config)?; let offset_padded: PaddedBytesAmount = UnpaddedBytesAmount::from(offset).into(); diff --git a/filecoin-proofs/src/api/seal.rs b/filecoin-proofs/src/api/seal.rs index 69dd3932e..59fc1481b 100644 --- a/filecoin-proofs/src/api/seal.rs +++ b/filecoin-proofs/src/api/seal.rs @@ -158,11 +158,7 @@ where base_tree_leafs, ); - let mut config = StoreConfig::new( - cache_path.as_ref(), - CacheKey::CommDTree.to_string(), - default_rows_to_discard(base_tree_leafs, BINARY_ARITY), - ); + let mut config = StoreConfig::new(cache_path.as_ref(), CacheKey::CommDTree.to_string(), 0); let data_tree = create_base_merkle_tree::>( Some(config.clone()), @@ -273,11 +269,7 @@ where "seal phase 2: base tree size {}, base tree leafs {}, rows to discard {}", base_tree_size, base_tree_leafs, - 
default_rows_to_discard(base_tree_leafs, BINARY_ARITY) - ); - ensure!( - config.rows_to_discard == default_rows_to_discard(base_tree_leafs, BINARY_ARITY), - "Invalid cache size specified" + 0 ); let store: DiskStore = @@ -1202,7 +1194,6 @@ where let base_tree_count = get_base_tree_count::(); let base_tree_leafs = leaf_count / base_tree_count; - let rows_to_discard = default_rows_to_discard(base_tree_leafs, TreeR::Arity::to_usize()); let size = get_base_tree_size::(SectorSize(sector_size))?; let tree_r_last_config = StoreConfig { path: PathBuf::from(output_dir.as_ref()), @@ -1214,7 +1205,7 @@ where // configuration. *Use with caution*. It must be noted that if/when this unchecked // value is passed through merkle_light, merkle_light now does a check that does not // allow us to discard more rows than is possible to discard. - rows_to_discard, + rows_to_discard: default_rows_to_discard(base_tree_leafs, TreeR::Arity::to_usize()), }; let replica_base_tree_size = get_base_tree_size::(sector_size.into())?; @@ -1257,13 +1248,12 @@ where let base_tree_count = get_base_tree_count::(); let base_tree_leafs = leaf_count / base_tree_count; - let rows_to_discard = default_rows_to_discard(base_tree_leafs, Tree::Arity::to_usize()); let size = get_base_tree_size::(SectorSize(sector_size))?; let tree_c_config = StoreConfig { path: PathBuf::from(output_dir.as_ref()), id: CacheKey::CommCTree.to_string(), size: Some(size), - rows_to_discard, + rows_to_discard: 0, }; let configs = split_config(tree_c_config, base_tree_count)?; @@ -1275,7 +1265,7 @@ where path: PathBuf::from(input_dir.as_ref()), id: CacheKey::label_layer(layer), size: Some(label_base_tree_leafs), - rows_to_discard: default_rows_to_discard(label_base_tree_leafs, BINARY_ARITY), + rows_to_discard: 0, }) .collect(); let labels = Labels::new(label_configs); diff --git a/storage-proofs-porep/src/stacked/vanilla/create_label/mod.rs b/storage-proofs-porep/src/stacked/vanilla/create_label/mod.rs index cd5a2b382..d14afe0cf 100644 
--- a/storage-proofs-porep/src/stacked/vanilla/create_label/mod.rs +++ b/storage-proofs-porep/src/stacked/vanilla/create_label/mod.rs @@ -8,10 +8,9 @@ use log::{info, warn}; use merkletree::{merkle::Element, store::StoreConfig}; use storage_proofs_core::{ cache_key::CacheKey, drgraph::Graph, error::Result, merkle::MerkleTreeTrait, - util::default_rows_to_discard, }; -use crate::stacked::vanilla::{proof::LayerState, StackedBucketGraph, BINARY_ARITY}; +use crate::stacked::vanilla::{proof::LayerState, StackedBucketGraph}; #[cfg(feature = "multicore-sdr")] pub mod multi; @@ -31,7 +30,7 @@ where path: cache_path.as_ref().to_path_buf(), id: CacheKey::label_layer(layer), size: Some(graph.size()), - rows_to_discard: default_rows_to_discard(graph.size(), BINARY_ARITY), + rows_to_discard: 0, }); let mut states = Vec::with_capacity(layers); diff --git a/storage-proofs-porep/src/stacked/vanilla/proof.rs b/storage-proofs-porep/src/stacked/vanilla/proof.rs index ac3a4a951..2118c88a0 100644 --- a/storage-proofs-porep/src/stacked/vanilla/proof.rs +++ b/storage-proofs-porep/src/stacked/vanilla/proof.rs @@ -1515,10 +1515,9 @@ impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tr path: cache_path.clone(), id: CacheKey::CommDTree.to_string(), size: Some(get_merkle_tree_len(total_nodes_count, BINARY_ARITY)?), - rows_to_discard: default_rows_to_discard(total_nodes_count, BINARY_ARITY), + rows_to_discard: 0, }; - let rows_to_discard = default_rows_to_discard(nodes_count, Tree::Arity::to_usize()); let size = Some(get_merkle_tree_len(nodes_count, Tree::Arity::to_usize())?); let tree_r_last_config = StoreConfig { @@ -1531,7 +1530,7 @@ impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tr // configuration. *Use with caution*. It must be noted that if/when this unchecked // value is passed through merkle_light, merkle_light now does a check that does not // allow us to discard more rows than is possible to discard. 
- rows_to_discard, + rows_to_discard: default_rows_to_discard(nodes_count, Tree::Arity::to_usize()), }; trace!( "tree_r_last using rows_to_discard={}", @@ -1542,7 +1541,7 @@ impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tr path: cache_path, id: CacheKey::CommCTree.to_string(), size, - rows_to_discard, + rows_to_discard: 0, }; let labels = diff --git a/storage-proofs-porep/tests/stacked_vanilla.rs b/storage-proofs-porep/tests/stacked_vanilla.rs index 3e5b10352..212f45d66 100644 --- a/storage-proofs-porep/tests/stacked_vanilla.rs +++ b/storage-proofs-porep/tests/stacked_vanilla.rs @@ -19,12 +19,12 @@ use storage_proofs_core::{ proof::ProofScheme, table_tests, test_helper::setup_replica, - util::{default_rows_to_discard, NODE_SIZE}, + util::NODE_SIZE, TEST_SEED, }; use storage_proofs_porep::stacked::{ LayerChallenges, PrivateInputs, PublicInputs, SetupParams, StackedBucketGraph, StackedDrg, - TemporaryAux, TemporaryAuxCache, BINARY_ARITY, EXP_DEGREE, + TemporaryAux, TemporaryAuxCache, EXP_DEGREE, }; use tempfile::tempdir; @@ -95,11 +95,7 @@ fn test_extract_all() { // MT for original data is always named tree-d, and it will be // referenced later in the process as such. let cache_dir = tempdir().expect("tempdir failure"); - let config = StoreConfig::new( - cache_dir.path(), - CacheKey::CommDTree.to_string(), - default_rows_to_discard(nodes, BINARY_ARITY), - ); + let config = StoreConfig::new(cache_dir.path(), CacheKey::CommDTree.to_string(), 0); // Generate a replica path. let replica_path = cache_dir.path().join("replica-path"); @@ -190,11 +186,7 @@ fn test_stacked_porep_resume_seal() { // MT for original data is always named tree-d, and it will be // referenced later in the process as such. 
let cache_dir = tempdir().expect("tempdir failure"); - let config = StoreConfig::new( - cache_dir.path(), - CacheKey::CommDTree.to_string(), - default_rows_to_discard(nodes, BINARY_ARITY), - ); + let config = StoreConfig::new(cache_dir.path(), CacheKey::CommDTree.to_string(), 0); // Generate a replica path. let replica_path1 = cache_dir.path().join("replica-path-1"); @@ -344,11 +336,7 @@ fn test_prove_verify(n: usize, challenges: Laye // MT for original data is always named tree-d, and it will be // referenced later in the process as such. let cache_dir = tempdir().expect("tempdir failure"); - let config = StoreConfig::new( - cache_dir.path(), - CacheKey::CommDTree.to_string(), - default_rows_to_discard(nodes, BINARY_ARITY), - ); + let config = StoreConfig::new(cache_dir.path(), CacheKey::CommDTree.to_string(), 0); // Generate a replica path. let replica_path = cache_dir.path().join("replica-path"); diff --git a/storage-proofs-update/tests/circuit.rs b/storage-proofs-update/tests/circuit.rs index d42d0942a..5f5cbf592 100644 --- a/storage-proofs-update/tests/circuit.rs +++ b/storage-proofs-update/tests/circuit.rs @@ -12,7 +12,6 @@ use rand::SeedableRng; use rand_xorshift::XorShiftRng; use storage_proofs_core::{ merkle::{MerkleTreeTrait, MerkleTreeWrapper}, - util::default_rows_to_discard, TEST_SEED, }; use storage_proofs_update::{ diff --git a/storage-proofs-update/tests/circuit_poseidon.rs b/storage-proofs-update/tests/circuit_poseidon.rs index 95e0acc3d..673e12a28 100644 --- a/storage-proofs-update/tests/circuit_poseidon.rs +++ b/storage-proofs-update/tests/circuit_poseidon.rs @@ -11,7 +11,6 @@ use rand::SeedableRng; use rand_xorshift::XorShiftRng; use storage_proofs_core::{ merkle::{MerkleTreeTrait, MerkleTreeWrapper}, - util::default_rows_to_discard, TEST_SEED, }; use storage_proofs_update::{ diff --git a/storage-proofs-update/tests/compound.rs b/storage-proofs-update/tests/compound.rs index 0035ebb4c..63187b592 100644 --- 
a/storage-proofs-update/tests/compound.rs +++ b/storage-proofs-update/tests/compound.rs @@ -108,13 +108,12 @@ where let labels_d_new: Vec = (0..sector_nodes) .map(|_| TreeDDomain::random(&mut rng)) .collect(); - let tree_d_rows_to_discard = default_rows_to_discard(sector_nodes, TreeDArity::to_usize()); let tree_d_nodes = get_merkle_tree_len(sector_nodes, TreeDArity::to_usize()).unwrap(); let tree_d_new_config = StoreConfig { path: tmp_path.into(), id: "tree-d-new".to_string(), size: Some(tree_d_nodes), - rows_to_discard: tree_d_rows_to_discard, + rows_to_discard: 0, }; let tree_d_new = TreeD::try_from_iter_with_config( labels_d_new.iter().copied().map(Ok),