Optimize hash map operations in the query system #115747

Open · wants to merge 1 commit into base: master
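
In brief, as reconstructed from the diff below: the query system's `QueryState::active` map changes from `Sharded<FxHashMap<K, QueryResult>>` to `Sharded<hashbrown::HashTable<(K, QueryResult)>>`. `HashTable` is hashbrown's lower-level, hasher-free table, so each call site now computes the key's hash once with `sharded::make_hash`, uses it to pick the shard, and then reuses the same hash for every lookup, insertion, and removal inside that shard. Previously the key was hashed once to choose the shard and again inside each `FxHashMap` operation; the precomputed `key_hash` is also threaded through `execute_job` into `complete`, so the completion path avoids rehashing as well.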
compiler/rustc_data_structures/src/lib.rs (3 additions, 0 deletions)

```diff
@@ -45,6 +45,9 @@ extern crate tracing;
 #[macro_use]
 extern crate rustc_macros;
 
+#[cfg(parallel_compiler)]
+extern crate hashbrown;
+
 use std::fmt;
 
 pub use rustc_index::static_assert_size;
```
compiler/rustc_data_structures/src/marker.rs (1 addition, 0 deletions)

```diff
@@ -85,6 +85,7 @@ cfg_match! {
     [std::sync::mpsc::Sender<T> where T: DynSend]
     [std::sync::Arc<T> where T: ?Sized + DynSync + DynSend]
     [std::sync::LazyLock<T, F> where T: DynSend, F: DynSend]
+    [hashbrown::HashTable<T> where T: DynSend]
     [std::collections::HashSet<K, S> where K: DynSend, S: DynSend]
     [std::collections::HashMap<K, V, S> where K: DynSend, V: DynSend, S: DynSend]
     [std::collections::BTreeMap<K, V, A> where K: DynSend, V: DynSend, A: std::alloc::Allocator + Clone + DynSend]
```
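
`DynSend` is rustc's marker trait for values that may cross threads when the parallel compiler is active; the `cfg_match!` list above enumerates foreign types that receive the impl, and this hunk adds `hashbrown::HashTable` so the new shard contents remain usable under `parallel_compiler`. A simplified standalone sketch of what the added line amounts to (the real trait and the macro generating these impls live in `rustc_data_structures::marker`; this version is only for illustration):

```rust
// Standalone sketch: rustc's actual DynSend is an unsafe marker trait and
// the impls are generated by a macro from the bracketed list above.
unsafe trait DynSend {}

// SAFETY (illustrative): a HashTable owns its elements, so sending the
// whole table to another thread is sound when the elements may be sent.
unsafe impl<T: DynSend> DynSend for hashbrown::HashTable<T> {}
```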
compiler/rustc_query_system/src/lib.rs (3 additions, 0 deletions)

```diff
@@ -13,6 +13,9 @@ extern crate rustc_data_structures;
 #[macro_use]
 extern crate rustc_macros;
 
+#[allow(unused_extern_crates)]
+extern crate hashbrown;
+
 pub mod cache;
 pub mod dep_graph;
 mod error;
```
compiler/rustc_query_system/src/query/plumbing.rs (36 additions, 22 deletions)

```diff
@@ -12,26 +12,30 @@ use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobI
 use crate::query::SerializedDepNodeIndex;
 use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
 use crate::HandleCycleError;
+use hashbrown::hash_table::Entry;
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::sharded::Sharded;
+use rustc_data_structures::sharded::{self, Sharded};
 use rustc_data_structures::stack::ensure_sufficient_stack;
 use rustc_data_structures::sync::Lock;
 #[cfg(parallel_compiler)]
 use rustc_data_structures::{outline, sync};
 use rustc_errors::{Diag, FatalError, StashKey};
 use rustc_span::{Span, DUMMY_SP};
 use std::cell::Cell;
-use std::collections::hash_map::Entry;
 use std::fmt::Debug;
 use std::hash::Hash;
 use std::mem;
 use thin_vec::ThinVec;
 
 use super::QueryConfig;
 
+#[inline]
+fn equivalent_key<K: Eq, V>(k: &K) -> impl Fn(&(K, V)) -> bool + '_ {
+    move |x| x.0 == *k
+}
+
 pub struct QueryState<K> {
-    active: Sharded<FxHashMap<K, QueryResult>>,
+    active: Sharded<hashbrown::HashTable<(K, QueryResult)>>,
 }
 
 /// Indicates the state of a query for a given key in a query map.
```
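
`hashbrown::HashTable` is the building block underneath `HashMap`: it stores bare entries and makes the caller supply the hash and an equality predicate on every operation, which is exactly what lets the query system hash a key once and reuse the result. The new `equivalent_key` helper adapts a key comparison to the `(K, V)` tuples the table now stores. A minimal standalone sketch of the pattern (the `RandomState` hasher and string key are illustrative, not from the PR):

```rust
use hashbrown::HashTable;
use std::collections::hash_map::RandomState;
use std::hash::BuildHasher;

// Analogue of the PR's `equivalent_key`: compare a probe key against the
// key half of the stored (key, value) tuples.
fn equivalent_key<K: Eq, V>(k: &K) -> impl Fn(&(K, V)) -> bool + '_ {
    move |x| x.0 == *k
}

fn main() {
    let hasher = RandomState::new();
    let mut active: HashTable<(String, u32)> = HashTable::new();

    let key = "typeck".to_string();
    let key_hash = hasher.hash_one(&key); // hash once, reuse everywhere

    // The table owns no hasher, so inserts take a re-hash closure that is
    // only called if the table must grow and re-bucket existing entries.
    active.insert_unique(key_hash, (key.clone(), 7), |(k, _)| hasher.hash_one(k));

    // Look up by the precomputed hash plus the equality closure.
    assert_eq!(active.find(key_hash, equivalent_key(&key)), Some(&(key.clone(), 7)));
}
```

The re-hash closure plays the same role as the PR's `|(k, _)| sharded::make_hash(k)` argument later in `try_execute_query`: it is invoked only on resize, never on ordinary lookups.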
```diff
@@ -164,7 +168,7 @@ where
 {
     /// Completes the query by updating the query cache with the `result`,
     /// signals the waiter and forgets the JobOwner, so it won't poison the query
-    fn complete<C>(self, cache: &C, result: C::Value, dep_node_index: DepNodeIndex)
+    fn complete<C>(self, cache: &C, key_hash: u64, result: C::Value, dep_node_index: DepNodeIndex)
     where
         C: QueryCache<Key = K>,
     {
@@ -179,8 +183,11 @@
         cache.complete(key, result, dep_node_index);
 
         let job = {
-            let mut lock = state.active.lock_shard_by_value(&key);
-            lock.remove(&key).unwrap().expect_job()
+            let mut shard = state.active.lock_shard_by_hash(key_hash);
+            match shard.find_entry(key_hash, equivalent_key(&key)) {
+                Err(_) => panic!(),
+                Ok(occupied) => occupied.remove().0.1.expect_job(),
+            }
         };
 
         job.signal_complete();
```
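
`complete` now removes the finished job with `find_entry`, which returns `Ok(OccupiedEntry)` when a matching entry exists and `Err(AbsentEntry)` otherwise; since a completing query must still be registered, the `Err` arm is an outright `panic!()`. A small standalone sketch of the removal pattern (key and value types are illustrative):

```rust
use hashbrown::HashTable;
use std::collections::hash_map::RandomState;
use std::hash::BuildHasher;

fn main() {
    let hasher = RandomState::new();
    let mut active: HashTable<(u32, &str)> = HashTable::new();
    let key_hash = hasher.hash_one(&7u32);
    active.insert_unique(key_hash, (7, "job"), |(k, _)| hasher.hash_one(k));

    // find_entry: Ok(occupied) if present, Err(absent) otherwise. remove()
    // hands back the stored tuple *and* a VacantEntry for the freed slot.
    match active.find_entry(key_hash, |(k, _)| *k == 7) {
        Ok(occupied) => {
            let ((_key, job), _vacant) = occupied.remove();
            assert_eq!(job, "job");
        }
        Err(_) => panic!("query job must be registered"),
    }
}
```

The PR's `occupied.remove().0.1` projects the same way: `.0` is the removed `(K, QueryResult)` tuple and `.1` is its `QueryResult` half.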
```diff
@@ -197,11 +204,16 @@
         // Poison the query so jobs waiting on it panic.
         let state = self.state;
         let job = {
-            let mut shard = state.active.lock_shard_by_value(&self.key);
-            let job = shard.remove(&self.key).unwrap().expect_job();
-
-            shard.insert(self.key, QueryResult::Poisoned);
-            job
+            let key_hash = sharded::make_hash(&self.key);
+            let mut shard = state.active.lock_shard_by_hash(key_hash);
+            match shard.find_entry(key_hash, equivalent_key(&self.key)) {
+                Err(_) => panic!(),
+                Ok(occupied) => {
+                    let ((key, value), vacant) = occupied.remove();
+                    vacant.insert((key, QueryResult::Poisoned));
+                    value.expect_job()
+                }
+            }
         };
         // Also signal the completion of the job, so waiters
         // will continue execution.
```
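
The poisoning path shows the other payoff of `find_entry`: the old code did a `remove` followed by a separate `insert`, paying for a second hash-and-probe, while the new code reinserts through the `VacantEntry` that `remove` hands back, refilling the same slot directly. A standalone sketch (keys and values are illustrative):

```rust
use hashbrown::HashTable;
use std::collections::hash_map::RandomState;
use std::hash::BuildHasher;

fn main() {
    let hasher = RandomState::new();
    let mut active: HashTable<(u32, &str)> = HashTable::new();
    let key_hash = hasher.hash_one(&7u32);
    active.insert_unique(key_hash, (7, "started"), |(k, _)| hasher.hash_one(k));

    // Swap the value without a second hash-and-probe: remove() yields a
    // VacantEntry for the same slot, and insert() refills it directly.
    if let Ok(occupied) = active.find_entry(key_hash, |(k, _)| *k == 7) {
        let ((key, _old), vacant) = occupied.remove();
        vacant.insert((key, "poisoned"));
    }
    assert_eq!(active.find(key_hash, |(k, _)| *k == 7), Some(&(7, "poisoned")));
}
```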
```diff
@@ -282,11 +294,11 @@
         outline(|| {
             // We didn't find the query result in the query cache. Check if it was
             // poisoned due to a panic instead.
-            let lock = query.query_state(qcx).active.get_shard_by_value(&key).lock();
-
-            match lock.get(&key) {
+            let key_hash = sharded::make_hash(&key);
+            let shard = query.query_state(qcx).active.lock_shard_by_hash(key_hash);
+            match shard.find(key_hash, equivalent_key(&key)) {
                 // The query we waited on panicked. Continue unwinding here.
-                Some(QueryResult::Poisoned) => FatalError.raise(),
+                Some((_, QueryResult::Poisoned)) => FatalError.raise(),
                 _ => panic!(
                     "query '{}' result must be in the cache or the query must be poisoned after a wait",
                     query.name()
```
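
Note the sharding change that recurs throughout this file: `lock_shard_by_value` hashed the key internally before picking a shard, while `lock_shard_by_hash` accepts the caller's precomputed hash, so shard selection and the in-shard table operations share one hashing pass. The sketch below shows one plausible scheme of this kind, using the high bits for shard choice so the table's low-bit bucket indexing stays independent; the constants and bit-slicing in `rustc_data_structures::sharded` may differ:

```rust
const SHARD_BITS: u32 = 5;             // illustrative; not rustc's exact value
const SHARDS: usize = 1 << SHARD_BITS; // one lock + table per shard

// Derive the shard from the high bits of the precomputed hash, leaving
// the low bits for bucket selection inside the shard's HashTable.
fn shard_index_by_hash(hash: u64) -> usize {
    (hash >> (64 - SHARD_BITS)) as usize % SHARDS
}
```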
```diff
@@ -317,7 +329,8 @@
     Qcx: QueryContext,
 {
     let state = query.query_state(qcx);
-    let mut state_lock = state.active.lock_shard_by_value(&key);
+    let key_hash = sharded::make_hash(&key);
+    let mut state_lock = state.active.lock_shard_by_hash(key_hash);
 
     // For the parallel compiler we need to check both the query cache and query state structures
     // while holding the state lock to ensure that 1) the query has not yet completed and 2) the
@@ -334,21 +347,21 @@
 
     let current_job_id = qcx.current_query_job();
 
-    match state_lock.entry(key) {
+    match state_lock.entry(key_hash, equivalent_key(&key), |(k, _)| sharded::make_hash(k)) {
         Entry::Vacant(entry) => {
             // Nothing has computed or is computing the query, so we start a new job and insert it in the
             // state map.
             let id = qcx.next_job_id();
             let job = QueryJob::new(id, span, current_job_id);
-            entry.insert(QueryResult::Started(job));
+            entry.insert((key, QueryResult::Started(job)));
 
             // Drop the lock before we start executing the query
             drop(state_lock);
 
-            execute_job::<_, _, INCR>(query, qcx, state, key, id, dep_node)
+            execute_job::<_, _, INCR>(query, qcx, state, key, key_hash, id, dep_node)
         }
         Entry::Occupied(mut entry) => {
-            match entry.get_mut() {
+            match &mut entry.get_mut().1 {
                 QueryResult::Started(job) => {
                     #[cfg(parallel_compiler)]
                     if sync::is_dyn_thread_safe() {
```
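
Unlike `std::collections::hash_map::Entry`, which this code used before, `HashTable::entry` takes three arguments: the precomputed hash, the equality closure, and a re-hash closure used only if the insertion forces a resize. Because the table stores `(key, value)` tuples, the vacant arm must store the key alongside the `QueryResult`, and the occupied arm projects `.1` to reach the value. A standalone sketch of that match shape (the `u32` key and toy state enum are assumptions):

```rust
use hashbrown::hash_table::Entry;
use hashbrown::HashTable;
use std::collections::hash_map::RandomState;
use std::hash::BuildHasher;

enum JobState { Started, Poisoned }

fn main() {
    let hasher = RandomState::new();
    let mut active: HashTable<(u32, JobState)> = HashTable::new();
    let key = 42u32;
    let key_hash = hasher.hash_one(&key);

    match active.entry(key_hash, |(k, _)| *k == key, |(k, _)| hasher.hash_one(k)) {
        // Nothing is running this query yet: register it under the key.
        Entry::Vacant(entry) => {
            entry.insert((key, JobState::Started));
        }
        // Already registered: reach the value half of the stored tuple.
        Entry::Occupied(mut entry) => {
            entry.get_mut().1 = JobState::Poisoned;
        }
    }

    assert!(active.find(key_hash, |(k, _)| *k == key).is_some());
}
```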
```diff
@@ -380,6 +393,7 @@ fn execute_job<Q, Qcx, const INCR: bool>(
     qcx: Qcx,
     state: &QueryState<Q::Key>,
     key: Q::Key,
+    key_hash: u64,
     id: QueryJobId,
     dep_node: Option<DepNode>,
 ) -> (Q::Value, Option<DepNodeIndex>)
@@ -440,7 +454,7 @@ where
             }
         }
     }
-    job_owner.complete(cache, result, dep_node_index);
+    job_owner.complete(cache, key_hash, result, dep_node_index);
 
     (result, Some(dep_node_index))
 }
```
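
Taken together, the hash for a query key is now computed exactly once per `try_execute_query`: it selects the shard, drives the `entry` probe, is threaded through `execute_job` as the new `key_hash` parameter, and is finally reused by `complete` to remove the finished job without rehashing.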