diff --git a/fil-proofs-tooling/src/bin/settings/main.rs b/fil-proofs-tooling/src/bin/settings/main.rs
index 85c50c4df..932aa320c 100644
--- a/fil-proofs-tooling/src/bin/settings/main.rs
+++ b/fil-proofs-tooling/src/bin/settings/main.rs
@@ -3,6 +3,6 @@ use anyhow::Result;
 use storage_proofs::settings::SETTINGS;
 
 fn main() -> Result<()> {
-    println!("{:#?}", *SETTINGS.lock().unwrap());
+    println!("{:#?}", *SETTINGS);
     Ok(())
 }
diff --git a/filecoin-proofs/src/api/post.rs b/filecoin-proofs/src/api/post.rs
index c72574fe4..258120c4b 100644
--- a/filecoin-proofs/src/api/post.rs
+++ b/filecoin-proofs/src/api/post.rs
@@ -624,10 +624,7 @@ pub fn verify_winning_post(
         k: None,
     };
 
-    let use_fil_blst = settings::SETTINGS
-        .lock()
-        .expect("use_fil_blst settings lock failure")
-        .use_fil_blst;
+    let use_fil_blst = settings::SETTINGS.use_fil_blst;
 
     let is_valid = if use_fil_blst {
         info!("verify_winning_post: use_fil_blst=true");
@@ -1000,10 +997,7 @@ pub fn verify_window_post(
         k: None,
     };
 
-    let use_fil_blst = settings::SETTINGS
-        .lock()
-        .expect("use_fil_blst settings lock failure")
-        .use_fil_blst;
+    let use_fil_blst = settings::SETTINGS.use_fil_blst;
 
     let is_valid = if use_fil_blst {
         info!("verify_window_post: use_fil_blst=true");
diff --git a/filecoin-proofs/src/api/seal.rs b/filecoin-proofs/src/api/seal.rs
index 9b17b68aa..1a4a46969 100644
--- a/filecoin-proofs/src/api/seal.rs
+++ b/filecoin-proofs/src/api/seal.rs
@@ -609,10 +609,7 @@ pub fn verify_seal(
         k: None,
     };
 
-    let use_fil_blst = settings::SETTINGS
-        .lock()
-        .expect("use_fil_blst settings lock failure")
-        .use_fil_blst;
+    let use_fil_blst = settings::SETTINGS.use_fil_blst;
 
     let result = if use_fil_blst {
         info!("verify_seal: use_fil_blst=true");
diff --git a/storage-proofs/core/src/crypto/pedersen.rs b/storage-proofs/core/src/crypto/pedersen.rs
index 07f0d9f2f..3f4acff39 100644
--- a/storage-proofs/core/src/crypto/pedersen.rs
+++ b/storage-proofs/core/src/crypto/pedersen.rs
@@ -10,12 +10,8 @@ use crate::hasher::pedersen::pedersen_hash;
 use crate::settings;
 
 lazy_static! {
-    pub static ref JJ_PARAMS: JubjubBls12 = JubjubBls12::new_with_window_size(
-        settings::SETTINGS
-            .lock()
-            .expect("settings lock failure")
-            .pedersen_hash_exp_window_size
-    );
+    pub static ref JJ_PARAMS: JubjubBls12 =
+        JubjubBls12::new_with_window_size(settings::SETTINGS.pedersen_hash_exp_window_size);
 }
 
 pub const PEDERSEN_BLOCK_SIZE: usize = 256;
diff --git a/storage-proofs/core/src/parameter_cache.rs b/storage-proofs/core/src/parameter_cache.rs
index 6218d23c8..b1e34864a 100644
--- a/storage-proofs/core/src/parameter_cache.rs
+++ b/storage-proofs/core/src/parameter_cache.rs
@@ -142,11 +142,7 @@ impl Drop for LockedFile {
 }
 
 pub fn parameter_cache_dir_name() -> String {
-    settings::SETTINGS
-        .lock()
-        .expect("parameter_cache_dir_name settings lock failure")
-        .parameter_cache
-        .clone()
+    settings::SETTINGS.parameter_cache.clone()
 }
 
 pub fn parameter_cache_dir() -> PathBuf {
@@ -329,10 +325,7 @@ fn ensure_parent(path: &PathBuf) -> Result<()> {
 pub fn read_cached_params(cache_entry_path: &PathBuf) -> Result<groth16::MappedParameters<Bls12>> {
     info!("checking cache_path: {:?} for parameters", cache_entry_path);
 
-    let verify_production_params = settings::SETTINGS
-        .lock()
-        .expect("verify_production_params settings lock failure")
-        .verify_production_params;
+    let verify_production_params = settings::SETTINGS.verify_production_params;
 
     // If the verify production params is set, we make sure that the path being accessed matches a
     // production cache key, found in the 'parameters.json' file.  The parameter data file is also
diff --git a/storage-proofs/core/src/settings.rs b/storage-proofs/core/src/settings.rs
index 1dd3df039..222547c03 100644
--- a/storage-proofs/core/src/settings.rs
+++ b/storage-proofs/core/src/settings.rs
@@ -1,13 +1,11 @@
 use std::env;
-use std::sync::Mutex;
 
 use config::{Config, ConfigError, Environment, File};
 use lazy_static::lazy_static;
 use serde::{Deserialize, Serialize};
 
 lazy_static! {
-    pub static ref SETTINGS: Mutex<Settings> =
-        Mutex::new(Settings::new().expect("invalid configuration"));
+    pub static ref SETTINGS: Settings = Settings::new().expect("invalid configuration");
 }
 
 const SETTINGS_PATH: &str = "./rust-fil-proofs.config.toml";
diff --git a/storage-proofs/core/src/util.rs b/storage-proofs/core/src/util.rs
index 3386ce66e..94cabac00 100644
--- a/storage-proofs/core/src/util.rs
+++ b/storage-proofs/core/src/util.rs
@@ -163,10 +163,7 @@ pub fn default_rows_to_discard(leafs: usize, arity: usize) -> usize {
 
     // This configurable setting is for a default oct-tree
     // rows_to_discard value, which defaults to 2.
-    let rows_to_discard = settings::SETTINGS
-        .lock()
-        .expect("rows_to_discard settings lock failure")
-        .rows_to_discard as usize;
+    let rows_to_discard = settings::SETTINGS.rows_to_discard as usize;
 
     // Discard at most 'constant value' rows (coded below,
     // differing by arity) while respecting the max number that
diff --git a/storage-proofs/porep/src/stacked/vanilla/cache.rs b/storage-proofs/porep/src/stacked/vanilla/cache.rs
index 470e85c56..12af5b609 100644
--- a/storage-proofs/porep/src/stacked/vanilla/cache.rs
+++ b/storage-proofs/porep/src/stacked/vanilla/cache.rs
@@ -186,10 +186,7 @@ impl ParentCache {
             }
             Some(pcd) => (
                 Some(pcd),
-                settings::SETTINGS
-                    .lock()
-                    .expect("verify_cache settings lock failure")
-                    .verify_cache,
+                settings::SETTINGS.verify_cache,
                 pcd.digest.clone(),
             ),
         };
@@ -364,11 +361,7 @@ impl ParentCache {
 }
 
 fn parent_cache_dir_name() -> String {
-    settings::SETTINGS
-        .lock()
-        .expect("parent_cache settings lock failure")
-        .parent_cache
-        .clone()
+    settings::SETTINGS.parent_cache.clone()
 }
 
 fn parent_cache_id(path: &PathBuf) -> String {
diff --git a/storage-proofs/porep/src/stacked/vanilla/cores.rs b/storage-proofs/porep/src/stacked/vanilla/cores.rs
index 351ea34f9..42507c431 100644
--- a/storage-proofs/porep/src/stacked/vanilla/cores.rs
+++ b/storage-proofs/porep/src/stacked/vanilla/cores.rs
@@ -11,7 +11,7 @@ type CoreGroup = Vec<CoreIndex>;
 lazy_static! {
     pub static ref TOPOLOGY: Mutex<Topology> = Mutex::new(Topology::new());
     pub static ref CORE_GROUPS: Option<Vec<Mutex<CoreGroup>>> = {
-        let settings = settings::SETTINGS.lock().expect("settings lock failure");
+        let settings = &settings::SETTINGS;
         let num_producers = settings.multicore_sdr_producers;
         let cores_per_unit = num_producers + 1;
 
diff --git a/storage-proofs/porep/src/stacked/vanilla/create_label/multi.rs b/storage-proofs/porep/src/stacked/vanilla/create_label/multi.rs
index ef6d4ad18..17fa51ec5 100644
--- a/storage-proofs/porep/src/stacked/vanilla/create_label/multi.rs
+++ b/storage-proofs/porep/src/stacked/vanilla/create_label/multi.rs
@@ -210,7 +210,7 @@ fn create_layer_labels(
     info!("Creating labels for layer {}", cur_layer);
     // num_producers is the number of producer threads
     let (lookahead, num_producers, producer_stride) = {
-        let settings = settings::SETTINGS.lock().expect("settings lock failure");
+        let settings = &settings::SETTINGS;
         let lookahead = settings.multicore_sdr_lookahead;
         let num_producers = settings.multicore_sdr_producers;
         // NOTE: Stride must not exceed the number of nodes in parents_cache's window. If it does, the process will deadlock
@@ -432,10 +432,7 @@ pub fn create_labels_for_encoding
 
     pub fn parent_cache(&self) -> Result<ParentCache> {
         // Number of nodes to be cached in memory
-        let default_cache_size = settings::SETTINGS
-            .lock()
-            .expect("sdr_parents_cache_size settings lock failure")
-            .sdr_parents_cache_size;
+        let default_cache_size = settings::SETTINGS.sdr_parents_cache_size;
 
         let cache_entries = self.size() as u32;
         let cache_size = cache_entries.min(default_cache_size);
diff --git a/storage-proofs/porep/src/stacked/vanilla/proof.rs b/storage-proofs/porep/src/stacked/vanilla/proof.rs
index 6b9937dde..d84cc7824 100644
--- a/storage-proofs/porep/src/stacked/vanilla/proof.rs
+++ b/storage-proofs/porep/src/stacked/vanilla/proof.rs
@@ -302,11 +302,7 @@ impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tr
     ) -> Result<(Labels<Tree>, Vec<LayerState>)> {
         let mut parent_cache = graph.parent_cache()?;
 
-        if settings::SETTINGS
-            .lock()
-            .expect("use_multicore_sdr settings lock failure")
-            .use_multicore_sdr
-        {
+        if settings::SETTINGS.use_multicore_sdr {
             info!("multi core replication");
             create_label::multi::create_labels_for_encoding(
                 graph,
@@ -336,11 +332,7 @@ impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tr
     ) -> Result<Labels<Tree>> {
         let mut parent_cache = graph.parent_cache()?;
 
-        if settings::SETTINGS
-            .lock()
-            .expect("use_multicore_sdr settings lock failure")
-            .use_multicore_sdr
-        {
+        if settings::SETTINGS.use_multicore_sdr {
             info!("multi core replication");
             create_label::multi::create_labels_for_decoding(
                 graph,
@@ -391,11 +383,7 @@ impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tr
         ColumnArity: 'static + PoseidonArity,
         TreeArity: PoseidonArity,
     {
-        if settings::SETTINGS
-            .lock()
-            .expect("use_gpu_column_builder settings lock failure")
-            .use_gpu_column_builder
-        {
+        if settings::SETTINGS.use_gpu_column_builder {
             Self::generate_tree_c_gpu::<ColumnArity, TreeArity>(
                 layers,
                 nodes_count,
@@ -440,18 +428,9 @@ impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tr
         // Override these values with care using environment variables:
        // FIL_PROOFS_MAX_GPU_COLUMN_BATCH_SIZE, FIL_PROOFS_MAX_GPU_TREE_BATCH_SIZE, and
        // FIL_PROOFS_COLUMN_WRITE_BATCH_SIZE respectively.
-        let max_gpu_column_batch_size = settings::SETTINGS
-            .lock()
-            .expect("max_gpu_column_batch_size settings lock failure")
-            .max_gpu_column_batch_size as usize;
-        let max_gpu_tree_batch_size = settings::SETTINGS
-            .lock()
-            .expect("max_gpu_tree_batch_size settings lock failure")
-            .max_gpu_tree_batch_size as usize;
-        let column_write_batch_size = settings::SETTINGS
-            .lock()
-            .expect("column_write_batch_size settings lock failure")
-            .column_write_batch_size as usize;
+        let max_gpu_column_batch_size = settings::SETTINGS.max_gpu_column_batch_size as usize;
+        let max_gpu_tree_batch_size = settings::SETTINGS.max_gpu_tree_batch_size as usize;
+        let column_write_batch_size = settings::SETTINGS.column_write_batch_size as usize;
 
         // This channel will receive batches of columns and add them to the ColumnTreeBuilder.
         let (builder_tx, builder_rx) = mpsc::sync_channel(0);
@@ -719,16 +698,9 @@ impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tr
         data.ensure_data()?;
         let last_layer_labels = labels.labels_for_last_layer()?;
 
-        if settings::SETTINGS
-            .lock()
-            .expect("use_gpu_tree_builder settings lock failure")
-            .use_gpu_tree_builder
-        {
+        if settings::SETTINGS.use_gpu_tree_builder {
             info!("generating tree r last using the GPU");
-            let max_gpu_tree_batch_size = settings::SETTINGS
-                .lock()
-                .expect("max_gpu_tree_batch_size settings lock failure")
-                .max_gpu_tree_batch_size as usize;
+            let max_gpu_tree_batch_size = settings::SETTINGS.max_gpu_tree_batch_size as usize;
 
             // This channel will receive batches of leaf nodes and add them to the TreeBuilder.
             let (builder_tx, builder_rx) = mpsc::sync_channel::<(Vec<Fr>, bool)>(0);
@@ -1190,16 +1162,9 @@ impl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tr
             tree_count,
         )?;
 
-        if settings::SETTINGS
-            .lock()
-            .expect("use_gpu_tree_builder settings lock failure")
-            .use_gpu_tree_builder
-        {
+        if settings::SETTINGS.use_gpu_tree_builder {
             info!("generating tree r last using the GPU");
-            let max_gpu_tree_batch_size = settings::SETTINGS
-                .lock()
-                .expect("max_gpu_tree_batch_size settings lock failure")
-                .max_gpu_tree_batch_size as usize;
+            let max_gpu_tree_batch_size = settings::SETTINGS.max_gpu_tree_batch_size as usize;
 
             let mut tree_builder = TreeBuilder::<Tree::Arity>::new(
                 Some(BatcherType::GPU),
diff --git a/storage-proofs/post/src/fallback/circuit.rs b/storage-proofs/post/src/fallback/circuit.rs
index c58dff3d0..4bc377c87 100644
--- a/storage-proofs/post/src/fallback/circuit.rs
+++ b/storage-proofs/post/src/fallback/circuit.rs
@@ -186,10 +186,7 @@ impl FallbackPoStCircuit {
     ) -> Result<(), SynthesisError> {
         let FallbackPoStCircuit { sectors, .. } = self;
 
-        let num_chunks = settings::SETTINGS
-            .lock()
-            .expect("window_post_synthesis_num_cpus settings lock failure")
-            .window_post_synthesis_num_cpus as usize;
+        let num_chunks = settings::SETTINGS.window_post_synthesis_num_cpus as usize;
 
         let chunk_size = (sectors.len() / num_chunks).max(1);
         let css = sectors
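
The change above is mechanical: `SETTINGS` is no longer wrapped in a `Mutex`, so every `settings::SETTINGS.lock().expect("... settings lock failure")` chain collapses to a plain field access. For reference, below is a minimal sketch of the pattern this refactor lands on. It assumes only the `config`, `lazy_static`, and `serde` crates already imported in `storage-proofs/core/src/settings.rs`; the field names, field types, and the exact loading logic shown here are illustrative, not a copy of the real `Settings` struct.

```rust
use config::{Config, ConfigError, Environment, File};
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};

const SETTINGS_PATH: &str = "./rust-fil-proofs.config.toml";

// Illustrative subset of the settings; the real struct has many more fields.
#[derive(Debug, Default, Serialize, Deserialize)]
#[serde(default)]
pub struct Settings {
    pub use_fil_blst: bool,
    pub rows_to_discard: u64,
    pub parameter_cache: String,
}

impl Settings {
    fn new() -> Result<Settings, ConfigError> {
        let mut s = Config::new();
        // The TOML file is optional; FIL_PROOFS_* environment variables
        // override whatever it contains.
        s.merge(File::with_name(SETTINGS_PATH).required(false))?;
        s.merge(Environment::with_prefix("FIL_PROOFS"))?;
        s.try_into()
    }
}

lazy_static! {
    // No Mutex: the configuration is resolved once, on first access, and is
    // read-only afterwards, so callers read fields directly.
    pub static ref SETTINGS: Settings = Settings::new().expect("invalid configuration");
}

fn main() {
    // Before this change: SETTINGS.lock().expect("settings lock failure").rows_to_discard
    // After this change:
    println!("rows_to_discard = {}", SETTINGS.rows_to_discard);
}
```

Since the settings were only ever read after initialization, dropping the `Mutex` removes the lock-poisoning failure mode behind all of the `expect("... settings lock failure")` calls and avoids repeated locking in hot paths (label creation, tree building, PoSt synthesis) while keeping the configuration values themselves unchanged.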