Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

ci: parallelize test runs across packages #1358

Merged
merged 3 commits into from
Nov 19, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
334 changes: 241 additions & 93 deletions .circleci/config.yml

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion fil-proofs-tooling/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ structopt = "0.3.12"
humansize = "1.1.0"

[features]
default = ["gpu", "measurements"]
default = ["gpu", "measurements", "pairing"]
gpu = ["storage-proofs/gpu", "filecoin-proofs/gpu", "bellperson/gpu", "filecoin-hashers/gpu"]
measurements = ["storage-proofs/measurements"]
profile = ["storage-proofs/profile", "measurements"]
Expand Down
6 changes: 4 additions & 2 deletions filecoin-proofs/tests/api.rs
Original file line number Diff line number Diff line change
Expand Up @@ -144,7 +144,7 @@ fn seal_lifecycle<Tree: 'static + MerkleTreeTrait>(
}

fn get_layer_file_paths(cache_dir: &tempfile::TempDir) -> Vec<PathBuf> {
fs::read_dir(&cache_dir)
let mut list: Vec<_> = fs::read_dir(&cache_dir)
.expect("failed to read read directory ")
.filter_map(|entry| {
let cur = entry.expect("reading directory failed");
Expand All @@ -156,7 +156,9 @@ fn get_layer_file_paths(cache_dir: &tempfile::TempDir) -> Vec<PathBuf> {
None
}
})
.collect()
.collect();
list.sort();
list
}

fn clear_cache_dir_keep_data_layer(cache_dir: &tempfile::TempDir) {
Expand Down
8 changes: 0 additions & 8 deletions phase2/tests/large.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,14 +13,6 @@ use mimc::{mimc as mimc_hash, MiMCDemo, MIMC_ROUNDS};
#[test]
#[ignore]
fn test_large_params() {
//test_large_params_inner()
}

// This test is marked as ignore because we haven't checked-in the phase1 file required for this
// test to pass when run via CI. To run this test you must have the correct phase1 params file in
// the top level directory of this crate.
#[allow(dead_code)]
fn test_large_params_inner() {
assert!(
Path::new("./phase1radix2m10").exists(),
"the phase1 file `phase1radix2m10` must be in the crate's top level directory"
Expand Down
8 changes: 0 additions & 8 deletions phase2/tests/small.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,14 +19,6 @@ use mimc::{mimc as mimc_hash, MiMCDemo, MIMC_ROUNDS};
#[test]
#[ignore]
fn test_mimc_small_params() {
//test_mimc_small_params_inner()
}

// This test is marked as ignore because we haven't checked-in the phase1 file required for this
// test to pass when run via CI. To run this test you must have the correct phase1 params file in
// the top level directory of this crate.
#[allow(dead_code)]
fn test_mimc_small_params_inner() {
assert!(
Path::new("./phase1radix2m10").exists(),
"the phase1 file `phase1radix2m10` must be in the crate's top level directory"
Expand Down
10 changes: 6 additions & 4 deletions storage-proofs/core/src/fr32.rs
Original file line number Diff line number Diff line change
@@ -1,9 +1,8 @@
use crate::error::*;

use anyhow::{ensure, Context};
use anyhow::ensure;
use bellperson::bls::{Fr, FrRepr};
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
use ff::{PrimeField, PrimeFieldRepr};

// Contains 32 bytes whose little-endian value represents an Fr.
// Invariants:
Expand All @@ -27,6 +26,9 @@ pub type Fr32Ary = [u8; 32];
// Otherwise, returns a BadFrBytesError.
#[cfg(feature = "pairing")]
pub fn bytes_into_fr(bytes: &[u8]) -> Result<Fr> {
use anyhow::Context;
use ff::{PrimeField, PrimeFieldRepr};

ensure!(bytes.len() == 32, Error::BadFrBytes);

let mut fr_repr = <<Fr as PrimeField>::Repr as Default>::default();
Expand Down Expand Up @@ -76,15 +78,15 @@ pub fn bytes_into_fr_repr_safe(r: &[u8]) -> FrRepr {
// Takes an Fr and returns a vector of exactly 32 bytes guaranteed to contain a valid Fr.
#[cfg(feature = "pairing")]
pub fn fr_into_bytes(fr: &Fr) -> Fr32Vec {
use ff::{PrimeField, PrimeFieldRepr};

let mut out = Vec::with_capacity(32);
fr.into_repr().write_le(&mut out).expect("write_le failure");
out
}

#[cfg(feature = "blst")]
pub fn fr_into_bytes(fr: &Fr) -> Fr32Vec {
use std::convert::TryInto;

fr.to_bytes_le().to_vec()
}

Expand Down
100 changes: 63 additions & 37 deletions storage-proofs/core/src/parameter_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ use sha2::{Digest, Sha256};

use std::collections::{BTreeMap, HashSet};
use std::fs::{self, create_dir_all, File};
use std::io::{self, SeekFrom};
use std::io::{self, SeekFrom, Write};
use std::path::{Path, PathBuf};
use std::sync::Mutex;

Expand Down Expand Up @@ -80,7 +80,7 @@ pub fn get_verifying_key_data(cache_id: &str) -> Option<&ParameterData> {

impl LockedFile {
pub fn open_exclusive_read<P: AsRef<Path>>(p: P) -> io::Result<Self> {
let f = fs::OpenOptions::new().read(true).open(p)?;
let f = fs::OpenOptions::new().read(true).create(false).open(p)?;
f.lock_exclusive()?;

Ok(LockedFile(f))
Expand All @@ -90,15 +90,15 @@ impl LockedFile {
let f = fs::OpenOptions::new()
.read(true)
.write(true)
.create(true)
.create_new(true)
.open(p)?;
f.lock_exclusive()?;

Ok(LockedFile(f))
}

pub fn open_shared_read<P: AsRef<Path>>(p: P) -> io::Result<Self> {
let f = fs::OpenOptions::new().read(true).open(p)?;
let f = fs::OpenOptions::new().read(true).create(false).open(p)?;
f.lock_shared()?;

Ok(LockedFile(f))
Expand Down Expand Up @@ -236,6 +236,7 @@ where
let meta_path = ensure_ancestor_dirs_exist(parameter_cache_metadata_path(&id))?;
read_cached_metadata(&meta_path)
.or_else(|_| write_cached_metadata(&meta_path, Self::cache_meta(pub_params)))
.map_err(Into::into)
}

/// If the rng option argument is set, parameters will be
Expand Down Expand Up @@ -277,9 +278,16 @@ where
read_cached_params(&cache_path).or_else(|err| match err.downcast::<Error>() {
Ok(error @ Error::InvalidParameters(_)) => Err(error.into()),
_ => {
write_cached_params(&cache_path, generate()?).unwrap_or_else(|e| {
panic!("{}: failed to write generated parameters to cache", e)
});
// if the file already exists, another process is already trying to generate these.
if !cache_path.exists() {
match write_cached_params(&cache_path, generate()?) {
Ok(_) => {}
Err(e) if e.kind() == io::ErrorKind::AlreadyExists => {
// other thread just wrote it, do nothing
}
Err(e) => panic!("{}: failed to write generated parameters to cache", e),
}
}
Ok(read_cached_params(&cache_path)?)
}
})
Expand All @@ -305,12 +313,14 @@ where

// generate (or load) verifying key
let cache_path = ensure_ancestor_dirs_exist(parameter_cache_verifying_key_path(&id))?;
read_cached_verifying_key(&cache_path)
.or_else(|_| write_cached_verifying_key(&cache_path, generate()?))
match read_cached_verifying_key(&cache_path) {
vmx marked this conversation as resolved.
Show resolved Hide resolved
Ok(key) => Ok(key),
Err(_) => write_cached_verifying_key(&cache_path, generate()?).map_err(Into::into),
}
}
}

fn ensure_parent(path: &PathBuf) -> Result<()> {
fn ensure_parent(path: &PathBuf) -> io::Result<()> {
match path.parent() {
Some(dir) => {
create_dir_all(dir)?;
Expand Down Expand Up @@ -348,11 +358,15 @@ pub fn read_cached_params(cache_entry_path: &PathBuf) -> Result<groth16::MappedP
.is_none();
if not_yet_verified {
info!("generating consistency digest for parameters");
let hash = with_exclusive_read_lock(cache_entry_path, |mut file| {
let mut hasher = Blake2bParams::new().to_state();
io::copy(&mut file, &mut hasher).expect("copying file into hasher failed");
Ok(hasher.finalize())
})?;
let hash = with_exclusive_read_lock::<_, io::Error, _>(
cache_entry_path,
|mut file| {
let mut hasher = Blake2bParams::new().to_state();
io::copy(&mut file, &mut hasher)
.expect("copying file into hasher failed");
Ok(hasher.finalize())
},
)?;
info!("generated consistency digest for parameters");

// The hash in the parameters file is truncated to 256 bits.
Expand All @@ -377,16 +391,19 @@ pub fn read_cached_params(cache_entry_path: &PathBuf) -> Result<groth16::MappedP
}
}

with_exclusive_read_lock(cache_entry_path, |_| {
with_exclusive_read_lock::<_, io::Error, _>(cache_entry_path, |_file| {
let mapped_params =
Parameters::build_mapped_parameters(cache_entry_path.to_path_buf(), false)?;
info!("read parameters from cache {:?} ", cache_entry_path);

Ok(mapped_params)
})
.map_err(Into::into)
}

fn read_cached_verifying_key(cache_entry_path: &PathBuf) -> Result<groth16::VerifyingKey<Bls12>> {
fn read_cached_verifying_key(
cache_entry_path: &PathBuf,
) -> io::Result<groth16::VerifyingKey<Bls12>> {
info!(
"checking cache_path: {:?} for verifying key",
cache_entry_path
Expand All @@ -399,7 +416,7 @@ fn read_cached_verifying_key(cache_entry_path: &PathBuf) -> Result<groth16::Veri
})
}

fn read_cached_metadata(cache_entry_path: &PathBuf) -> Result<CacheEntryMetadata> {
fn read_cached_metadata(cache_entry_path: &PathBuf) -> io::Result<CacheEntryMetadata> {
info!("checking cache_path: {:?} for metadata", cache_entry_path);
with_exclusive_read_lock(cache_entry_path, |file| {
let value = serde_json::from_reader(file)?;
Expand All @@ -412,7 +429,7 @@ fn read_cached_metadata(cache_entry_path: &PathBuf) -> Result<CacheEntryMetadata
fn write_cached_metadata(
cache_entry_path: &PathBuf,
value: CacheEntryMetadata,
) -> Result<CacheEntryMetadata> {
) -> io::Result<CacheEntryMetadata> {
with_exclusive_lock(cache_entry_path, |file| {
serde_json::to_writer(file, &value)?;
info!("wrote metadata to cache {:?} ", cache_entry_path);
Expand All @@ -424,9 +441,10 @@ fn write_cached_metadata(
fn write_cached_verifying_key(
cache_entry_path: &PathBuf,
value: groth16::VerifyingKey<Bls12>,
) -> Result<groth16::VerifyingKey<Bls12>> {
with_exclusive_lock(cache_entry_path, |file| {
value.write(file)?;
) -> io::Result<groth16::VerifyingKey<Bls12>> {
with_exclusive_lock(cache_entry_path, |mut file| {
value.write(&mut file)?;
file.flush()?;
info!("wrote verifying key to cache {:?} ", cache_entry_path);

Ok(value)
Expand All @@ -436,34 +454,42 @@ fn write_cached_verifying_key(
fn write_cached_params(
cache_entry_path: &PathBuf,
value: groth16::Parameters<Bls12>,
) -> Result<groth16::Parameters<Bls12>> {
with_exclusive_lock(cache_entry_path, |file| {
value.write(file)?;
) -> io::Result<groth16::Parameters<Bls12>> {
with_exclusive_lock(cache_entry_path, |mut file| {
value.write(&mut file)?;
file.flush()?;
info!("wrote groth parameters to cache {:?} ", cache_entry_path);

Ok(value)
})
}

pub fn with_exclusive_lock<T>(
file_path: &PathBuf,
f: impl FnOnce(&mut LockedFile) -> Result<T>,
) -> Result<T> {
pub fn with_exclusive_lock<T, E, F>(file_path: &PathBuf, f: F) -> std::result::Result<T, E>
where
F: FnOnce(&mut LockedFile) -> std::result::Result<T, E>,
E: From<io::Error>,
{
with_open_file(file_path, LockedFile::open_exclusive, f)
}

pub fn with_exclusive_read_lock<T>(
file_path: &PathBuf,
f: impl FnOnce(&mut LockedFile) -> Result<T>,
) -> Result<T> {
pub fn with_exclusive_read_lock<T, E, F>(file_path: &PathBuf, f: F) -> std::result::Result<T, E>
where
F: FnOnce(&mut LockedFile) -> std::result::Result<T, E>,
E: From<io::Error>,
{
with_open_file(file_path, LockedFile::open_exclusive_read, f)
}

pub fn with_open_file<'a, T>(
pub fn with_open_file<'a, T, E, F, G>(
file_path: &'a PathBuf,
open_file: impl FnOnce(&'a PathBuf) -> io::Result<LockedFile>,
f: impl FnOnce(&mut LockedFile) -> Result<T>,
) -> Result<T> {
open_file: G,
f: F,
) -> std::result::Result<T, E>
where
F: FnOnce(&mut LockedFile) -> std::result::Result<T, E>,
G: FnOnce(&'a PathBuf) -> io::Result<LockedFile>,
E: From<io::Error>,
{
ensure_parent(&file_path)?;
f(&mut open_file(&file_path)?)
}
28 changes: 21 additions & 7 deletions storage-proofs/porep/src/stacked/vanilla/cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -157,9 +157,21 @@ impl ParentCache {
{
let path = cache_path(cache_entries, graph);
if path.exists() {
Self::open(len, cache_entries, graph, path)
Self::open(len, cache_entries, graph, &path)
} else {
Self::generate(len, cache_entries, graph, path)
match Self::generate(len, cache_entries, graph, &path) {
Ok(c) => Ok(c),
vmx marked this conversation as resolved.
Show resolved Hide resolved
Err(err) => {
match err.downcast::<std::io::Error>() {
Ok(error) if error.kind() == std::io::ErrorKind::AlreadyExists => {
// cache was written from another process, just read it
Self::open(len, cache_entries, graph, &path)
}
Ok(error) => Err(error.into()),
Err(error) => Err(error),
}
}
}
}
}

Expand All @@ -171,7 +183,7 @@ impl ParentCache {
len: u32,
cache_entries: u32,
graph: &StackedGraph<H, G>,
path: PathBuf,
path: &PathBuf,
) -> Result<Self>
where
H: Hasher,
Expand Down Expand Up @@ -247,8 +259,10 @@ impl ParentCache {
"[!!!] Parent cache digest mismatch detected. Regenerating {}",
path.display()
);
// delete invalid cache
std::fs::remove_file(path)?;
ensure!(
Self::generate(len, graph.size() as u32, graph, path.clone()).is_ok(),
Self::generate(len, graph.size() as u32, graph, path).is_ok(),
"Failed to generate parent cache"
);

Expand All @@ -260,7 +274,7 @@ impl ParentCache {

Ok(ParentCache {
cache: CacheData::open(0, len, &path)?,
path,
path: path.clone(),
num_cache_entries: cache_entries,
sector_size: graph.size() * NODE_SIZE,
digest: digest_hex,
Expand All @@ -272,7 +286,7 @@ impl ParentCache {
len: u32,
cache_entries: u32,
graph: &StackedGraph<H, G>,
path: PathBuf,
path: &PathBuf,
) -> Result<Self>
where
H: Hasher,
Expand Down Expand Up @@ -343,7 +357,7 @@ impl ParentCache {

Ok(ParentCache {
cache: CacheData::open(0, len, &path)?,
path,
path: path.clone(),
num_cache_entries: cache_entries,
sector_size,
digest: digest_hex,
Expand Down
Loading