diff --git a/compiler/rustc_data_structures/src/lib.rs b/compiler/rustc_data_structures/src/lib.rs
index 93b4032c31089..253c96de85bbc 100644
--- a/compiler/rustc_data_structures/src/lib.rs
+++ b/compiler/rustc_data_structures/src/lib.rs
@@ -46,6 +46,9 @@ extern crate tracing;
 #[macro_use]
 extern crate rustc_macros;
 
+#[cfg(parallel_compiler)]
+extern crate hashbrown;
+
 use std::fmt;
 
 pub use rustc_index::static_assert_size;
diff --git a/compiler/rustc_data_structures/src/marker.rs b/compiler/rustc_data_structures/src/marker.rs
index 266e54604a6b4..48b534a31f63f 100644
--- a/compiler/rustc_data_structures/src/marker.rs
+++ b/compiler/rustc_data_structures/src/marker.rs
@@ -85,6 +85,7 @@ cfg_match! {
             [std::sync::mpsc::Sender<T> where T: DynSend]
             [std::sync::Arc<T> where T: ?Sized + DynSync + DynSend]
             [std::sync::LazyLock<T, F> where T: DynSend, F: DynSend]
+            [hashbrown::HashTable<T> where T: DynSend]
             [std::collections::HashSet<K, S> where K: DynSend, S: DynSend]
             [std::collections::HashMap<K, V, S> where K: DynSend, V: DynSend, S: DynSend]
             [std::collections::BTreeMap<K, V, A> where K: DynSend, V: DynSend, A: std::alloc::Allocator + Clone + DynSend]
diff --git a/compiler/rustc_query_system/src/lib.rs b/compiler/rustc_query_system/src/lib.rs
index 9b66b9a48d905..e31feedb17d11 100644
--- a/compiler/rustc_query_system/src/lib.rs
+++ b/compiler/rustc_query_system/src/lib.rs
@@ -14,6 +14,9 @@ extern crate rustc_data_structures;
 #[macro_use]
 extern crate rustc_macros;
 
+#[allow(unused_extern_crates)]
+extern crate hashbrown;
+
 pub mod cache;
 pub mod dep_graph;
 mod error;
diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
index 3bb2cc5634fe8..ae9f3f577992b 100644
--- a/compiler/rustc_query_system/src/query/plumbing.rs
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -12,9 +12,9 @@ use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobI
 use crate::query::SerializedDepNodeIndex;
 use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
 use crate::HandleCycleError;
+use hashbrown::hash_table::Entry;
 use rustc_data_structures::fingerprint::Fingerprint;
-use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::sharded::Sharded;
+use rustc_data_structures::sharded::{self, Sharded};
 use rustc_data_structures::stack::ensure_sufficient_stack;
 use rustc_data_structures::sync::Lock;
 #[cfg(parallel_compiler)]
@@ -22,7 +22,6 @@ use rustc_data_structures::{outline, sync};
 use rustc_errors::{DiagnosticBuilder, FatalError, StashKey};
 use rustc_span::{Span, DUMMY_SP};
 use std::cell::Cell;
-use std::collections::hash_map::Entry;
 use std::fmt::Debug;
 use std::hash::Hash;
 use std::mem;
@@ -30,8 +29,13 @@ use thin_vec::ThinVec;
 
 use super::QueryConfig;
 
+#[inline]
+fn equivalent_key<K: Eq, V>(k: &K) -> impl Fn(&(K, V)) -> bool + '_ {
+    move |x| x.0 == *k
+}
+
 pub struct QueryState<K> {
-    active: Sharded<FxHashMap<K, QueryResult>>,
+    active: Sharded<hashbrown::HashTable<(K, QueryResult)>>,
 }
 
 /// Indicates the state of a query for a given key in a query map.
@@ -165,7 +169,7 @@ where
 {
     /// Completes the query by updating the query cache with the `result`,
     /// signals the waiter and forgets the JobOwner, so it won't poison the query
-    fn complete<C>(self, cache: &C, result: C::Value, dep_node_index: DepNodeIndex)
+    fn complete<C>(self, cache: &C, key_hash: u64, result: C::Value, dep_node_index: DepNodeIndex)
     where
         C: QueryCache<Key = K>,
     {
@@ -180,8 +184,11 @@ where
         cache.complete(key, result, dep_node_index);
 
         let job = {
-            let mut lock = state.active.lock_shard_by_value(&key);
-            lock.remove(&key).unwrap().expect_job()
+            let mut shard = state.active.lock_shard_by_hash(key_hash);
+            match shard.find_entry(key_hash, equivalent_key(&key)) {
+                Err(_) => panic!(),
+                Ok(occupied) => occupied.remove().0.1.expect_job(),
+            }
         };
         job.signal_complete();
     }
@@ -198,11 +205,16 @@ where
         // Poison the query so jobs waiting on it panic.
         let state = self.state;
         let job = {
-            let mut shard = state.active.lock_shard_by_value(&self.key);
-            let job = shard.remove(&self.key).unwrap().expect_job();
-
-            shard.insert(self.key, QueryResult::Poisoned);
-            job
+            let key_hash = sharded::make_hash(&self.key);
+            let mut shard = state.active.lock_shard_by_hash(key_hash);
+            match shard.find_entry(key_hash, equivalent_key(&self.key)) {
+                Err(_) => panic!(),
+                Ok(occupied) => {
+                    let ((key, value), vacant) = occupied.remove();
+                    vacant.insert((key, QueryResult::Poisoned));
+                    value.expect_job()
+                }
+            }
         };
         // Also signal the completion of the job, so waiters
         // will continue execution.
@@ -283,12 +295,11 @@ where
                 outline(|| {
                     // We didn't find the query result in the query cache. Check if it was
                     // poisoned due to a panic instead.
-                    let lock = query.query_state(qcx).active.get_shard_by_value(&key).lock();
-
-                    match lock.get(&key) {
-                        Some(QueryResult::Poisoned) => {
-                            panic!("query '{}' not cached due to poisoning", query.name())
-                        }
+                    let key_hash = sharded::make_hash(&key);
+                    let shard = query.query_state(qcx).active.lock_shard_by_hash(key_hash);
+                    match shard.find(key_hash, equivalent_key(&key)) {
+                        // The query we waited on panicked. Continue unwinding here.
+                        Some((_, QueryResult::Poisoned)) => FatalError.raise(),
                         _ => panic!(
                             "query '{}' result must be in the cache or the query must be poisoned after a wait",
                             query.name()
@@ -319,7 +330,8 @@ where
     Qcx: QueryContext,
 {
     let state = query.query_state(qcx);
-    let mut state_lock = state.active.lock_shard_by_value(&key);
+    let key_hash = sharded::make_hash(&key);
+    let mut state_lock = state.active.lock_shard_by_hash(key_hash);
 
     // For the parallel compiler we need to check both the query cache and query state structures
     // while holding the state lock to ensure that 1) the query has not yet completed and 2) the
@@ -336,21 +348,21 @@ where
 
     let current_job_id = qcx.current_query_job();
 
-    match state_lock.entry(key) {
+    match state_lock.entry(key_hash, equivalent_key(&key), |(k, _)| sharded::make_hash(k)) {
         Entry::Vacant(entry) => {
             // Nothing has computed or is computing the query, so we start a new job and insert it in the
             // state map.
             let id = qcx.next_job_id();
             let job = QueryJob::new(id, span, current_job_id);
-            entry.insert(QueryResult::Started(job));
+            entry.insert((key, QueryResult::Started(job)));
 
             // Drop the lock before we start executing the query
             drop(state_lock);
 
-            execute_job::<_, _, INCR>(query, qcx, state, key, id, dep_node)
+            execute_job::<_, _, INCR>(query, qcx, state, key, key_hash, id, dep_node)
         }
         Entry::Occupied(mut entry) => {
-            match entry.get_mut() {
+            match &mut entry.get_mut().1 {
                 QueryResult::Started(job) => {
                     #[cfg(parallel_compiler)]
                     if sync::is_dyn_thread_safe() {
@@ -382,6 +394,7 @@ fn execute_job<Q, Qcx, const INCR: bool>(
     qcx: Qcx,
     state: &QueryState<Q::Key>,
     key: Q::Key,
+    key_hash: u64,
     id: QueryJobId,
     dep_node: Option<DepNode>,
 ) -> (Q::Value, Option<DepNodeIndex>)
@@ -442,7 +455,7 @@ where
             }
         }
     }
-    job_owner.complete(cache, result, dep_node_index);
+    job_owner.complete(cache, key_hash, result, dep_node_index);
 
     (result, Some(dep_node_index))
 }
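Note on the `hashbrown::hash_table` API the patch switches to: unlike `FxHashMap`, a `HashTable` never hashes keys itself. Every operation takes a caller-supplied `u64` hash plus an equality closure, which is what lets the query state hash a key once with `sharded::make_hash`, pick a shard by that hash, and then probe the shard's table without rehashing. Below is a standalone sketch of those calls, not part of the patch: it assumes hashbrown 0.14+ (where the `hash_table` module exists), and `eq_key`, the `RandomState` build-hasher, and the `(&str, u32)` entries are illustrative stand-ins for rustc's `equivalent_key`, `sharded::make_hash`, and `(Key, QueryResult)` pairs.

// Standalone sketch, not rustc code: shows HashTable's externally hashed
// entry / find / find_entry calls used by the patch.
use std::collections::hash_map::RandomState;
use std::hash::BuildHasher;

use hashbrown::hash_table::{Entry, HashTable};

// Matches a stored (key, value) pair against a borrowed key, analogous to the
// `equivalent_key` helper added by the patch.
fn eq_key<K: Eq, V>(k: &K) -> impl Fn(&(K, V)) -> bool + '_ {
    move |entry| entry.0 == *k
}

fn main() {
    let hasher = RandomState::new();
    let mut table: HashTable<(&str, u32)> = HashTable::new();

    let key = "typeck";
    let hash = hasher.hash_one(key);

    // Insert through the entry API; the third closure re-hashes existing
    // entries if the table has to grow, since the table stores no hasher.
    match table.entry(hash, eq_key(&key), |(k, _)| hasher.hash_one(k)) {
        Entry::Vacant(vacant) => {
            vacant.insert((key, 1));
        }
        Entry::Occupied(mut occupied) => occupied.get_mut().1 += 1,
    }

    // Probe with the same precomputed hash and equality closure.
    assert_eq!(table.find(hash, eq_key(&key)).map(|(_, v)| *v), Some(1));

    // `find_entry` hands back the pair plus the now-vacant slot, mirroring how
    // the patch swaps a finished job for `QueryResult::Poisoned` in `drop`.
    if let Ok(occupied) = table.find_entry(hash, eq_key(&key)) {
        let ((k, v), _vacant) = occupied.remove();
        assert_eq!((k, v), ("typeck", 1));
    }
}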