diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs
index 59e0c35974559..8a9a12386064f 100644
--- a/compiler/rustc_query_system/src/dep_graph/graph.rs
+++ b/compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -70,7 +70,7 @@ impl DepNodeColor {
     }
 }
 
-struct DepGraphData<K: DepKind> {
+pub struct DepGraphData<K: DepKind> {
     /// The new encoding of the dependency graph, optimized for red/green
     /// tracking. The `current` field is the dependency graph of only the
     /// current compilation session: We don't merge the previous dep-graph into
@@ -168,6 +168,11 @@ impl<K: DepKind> DepGraph<K> {
         DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) }
     }
 
+    #[inline]
+    pub fn data(&self) -> Option<&DepGraphData<K>> {
+        self.data.as_deref()
+    }
+
     /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
     #[inline]
     pub fn is_fully_enabled(&self) -> bool {
@@ -252,6 +257,38 @@ impl<K: DepKind> DepGraph<K> {
         K::with_deps(TaskDepsRef::Forbid, op)
     }
 
+    #[inline(always)]
+    pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
+        &self,
+        key: DepNode<K>,
+        cx: Ctxt,
+        arg: A,
+        task: fn(Ctxt, A) -> R,
+        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
+    ) -> (R, DepNodeIndex) {
+        match self.data() {
+            Some(data) => data.with_task(key, cx, arg, task, hash_result),
+            None => (task(cx, arg), self.next_virtual_depnode_index()),
+        }
+    }
+
+    pub fn with_anon_task<Tcx: DepContext<DepKind = K>, OP, R>(
+        &self,
+        cx: Tcx,
+        dep_kind: K,
+        op: OP,
+    ) -> (R, DepNodeIndex)
+    where
+        OP: FnOnce() -> R,
+    {
+        match self.data() {
+            Some(data) => data.with_anon_task(cx, dep_kind, op),
+            None => (op(), self.next_virtual_depnode_index()),
+        }
+    }
+}
+
+impl<K: DepKind> DepGraphData<K> {
     /// Starts a new dep-graph task. Dep-graph tasks are specified
     /// using a free function (`task`) and **not** a closure -- this
     /// is intentional because we want to exercise tight control over
@@ -288,29 +325,6 @@ impl<K: DepKind> DepGraph<K> {
         task: fn(Ctxt, A) -> R,
         hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
     ) -> (R, DepNodeIndex) {
-        if self.is_fully_enabled() {
-            self.with_task_impl(key, cx, arg, task, hash_result)
-        } else {
-            // Incremental compilation is turned off. We just execute the task
-            // without tracking. We still provide a dep-node index that uniquely
-            // identifies the task so that we have a cheap way of referring to
-            // the query for self-profiling.
-            (task(cx, arg), self.next_virtual_depnode_index())
-        }
-    }
-
-    #[inline(always)]
-    fn with_task_impl<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
-        &self,
-        key: DepNode<K>,
-        cx: Ctxt,
-        arg: A,
-        task: fn(Ctxt, A) -> R,
-        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
-    ) -> (R, DepNodeIndex) {
-        // This function is only called when the graph is enabled.
-        let data = self.data.as_ref().unwrap();
-
         // If the following assertion triggers, it can have two reasons:
         // 1. Something is wrong with DepNode creation, either here or
         //    in `DepGraph::try_mark_green()`.
@@ -351,9 +365,9 @@ impl<K: DepKind> DepGraph<K> {
             let print_status = cfg!(debug_assertions) && dcx.sess().opts.unstable_opts.dep_tasks;
 
             // Intern the new `DepNode`.
-            let (dep_node_index, prev_and_color) = data.current.intern_node(
+            let (dep_node_index, prev_and_color) = self.current.intern_node(
                 dcx.profiler(),
-                &data.previous,
+                &self.previous,
                 key,
                 edges,
                 current_fingerprint,
@@ -364,12 +378,12 @@ impl<K: DepKind> DepGraph<K> {
 
             if let Some((prev_index, color)) = prev_and_color {
                 debug_assert!(
-                    data.colors.get(prev_index).is_none(),
+                    self.colors.get(prev_index).is_none(),
                     "DepGraph::with_task() - Duplicate DepNodeColor \
                     insertion for {key:?}"
                 );
 
-                data.colors.insert(prev_index, color);
+                self.colors.insert(prev_index, color);
             }
 
             (result, dep_node_index)
@@ -388,57 +402,55 @@ impl<K: DepKind> DepGraph<K> {
     {
         debug_assert!(!cx.is_eval_always(dep_kind));
 
-        if let Some(ref data) = self.data {
-            let task_deps = Lock::new(TaskDeps::default());
-            let result = K::with_deps(TaskDepsRef::Allow(&task_deps), op);
-            let task_deps = task_deps.into_inner();
-            let task_deps = task_deps.reads;
-
-            let dep_node_index = match task_deps.len() {
-                0 => {
-                    // Because the dep-node id of anon nodes is computed from the sets of its
-                    // dependencies we already know what the ID of this dependency-less node is
-                    // going to be (i.e. equal to the precomputed
-                    // `SINGLETON_DEPENDENCYLESS_ANON_NODE`). As a consequence we can skip creating
-                    // a `StableHasher` and sending the node through interning.
-                    DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE
-                }
-                1 => {
-                    // When there is only one dependency, don't bother creating a node.
-                    task_deps[0]
-                }
-                _ => {
-                    // The dep node indices are hashed here instead of hashing the dep nodes of the
-                    // dependencies. These indices may refer to different nodes per session, but this isn't
-                    // a problem here because we that ensure the final dep node hash is per session only by
-                    // combining it with the per session random number `anon_id_seed`. This hash only need
-                    // to map the dependencies to a single value on a per session basis.
-                    let mut hasher = StableHasher::new();
-                    task_deps.hash(&mut hasher);
-
-                    let target_dep_node = DepNode {
-                        kind: dep_kind,
-                        // Fingerprint::combine() is faster than sending Fingerprint
-                        // through the StableHasher (at least as long as StableHasher
-                        // is so slow).
-                        hash: data.current.anon_id_seed.combine(hasher.finish()).into(),
-                    };
+        let task_deps = Lock::new(TaskDeps::default());
+        let result = K::with_deps(TaskDepsRef::Allow(&task_deps), op);
+        let task_deps = task_deps.into_inner();
+        let task_deps = task_deps.reads;
+
+        let dep_node_index = match task_deps.len() {
+            0 => {
+                // Because the dep-node id of anon nodes is computed from the sets of its
+                // dependencies we already know what the ID of this dependency-less node is
+                // going to be (i.e. equal to the precomputed
+                // `SINGLETON_DEPENDENCYLESS_ANON_NODE`). As a consequence we can skip creating
+                // a `StableHasher` and sending the node through interning.
+                DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE
+            }
+            1 => {
+                // When there is only one dependency, don't bother creating a node.
+                task_deps[0]
+            }
+            _ => {
+                // The dep node indices are hashed here instead of hashing the dep nodes of the
+                // dependencies. These indices may refer to different nodes per session, but this isn't
+                // a problem here because we ensure that the final dep node hash is per session only by
+                // combining it with the per session random number `anon_id_seed`. This hash only needs
+                // to map the dependencies to a single value on a per session basis.
+                let mut hasher = StableHasher::new();
+                task_deps.hash(&mut hasher);
+
+                let target_dep_node = DepNode {
+                    kind: dep_kind,
+                    // Fingerprint::combine() is faster than sending Fingerprint
+                    // through the StableHasher (at least as long as StableHasher
+                    // is so slow).
+                    hash: self.current.anon_id_seed.combine(hasher.finish()).into(),
+                };
 
-                    data.current.intern_new_node(
-                        cx.profiler(),
-                        target_dep_node,
-                        task_deps,
-                        Fingerprint::ZERO,
-                    )
-                }
-            };
+                self.current.intern_new_node(
+                    cx.profiler(),
+                    target_dep_node,
+                    task_deps,
+                    Fingerprint::ZERO,
+                )
+            }
+        };
 
-            (result, dep_node_index)
-        } else {
-            (op(), self.next_virtual_depnode_index())
-        }
+        (result, dep_node_index)
     }
+}
 
+impl<K: DepKind> DepGraph<K> {
     #[inline]
     pub fn read_index(&self, dep_node_index: DepNodeIndex) {
         if let Some(ref data) = self.data {
@@ -519,9 +531,9 @@ impl<K: DepKind> DepGraph<K> {
         // value to an existing node.
         //
         // For sanity, we still check that the loaded stable hash and the new one match.
-        if let Some(dep_node_index) = self.dep_node_index_of_opt(&node) {
+        if let Some(dep_node_index) = data.dep_node_index_of_opt(&node) {
             let _current_fingerprint =
-                crate::query::incremental_verify_ich(cx, result, &node, hash_result);
+                crate::query::incremental_verify_ich(cx, data, result, &node, hash_result);
 
             #[cfg(debug_assertions)]
             if hash_result.is_some() {
@@ -577,32 +589,57 @@ impl<K: DepKind> DepGraph<K> {
             self.next_virtual_depnode_index()
         }
     }
+}
 
+impl<K: DepKind> DepGraphData<K> {
     #[inline]
-    pub fn dep_node_index_of(&self, dep_node: &DepNode<K>) -> DepNodeIndex {
-        self.dep_node_index_of_opt(dep_node).unwrap()
+    pub fn dep_node_index_of_opt(&self, dep_node: &DepNode<K>) -> Option<DepNodeIndex> {
+        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
+            self.current.prev_index_to_index.lock()[prev_index]
+        } else {
+            self.current
+                .new_node_to_index
+                .get_shard_by_value(dep_node)
+                .lock()
+                .get(dep_node)
+                .copied()
+        }
     }
 
     #[inline]
-    pub fn dep_node_index_of_opt(&self, dep_node: &DepNode<K>) -> Option<DepNodeIndex> {
-        let data = self.data.as_ref().unwrap();
-        let current = &data.current;
+    pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
+        self.dep_node_index_of_opt(dep_node).is_some()
+    }
 
-        if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
-            current.prev_index_to_index.lock()[prev_index]
+    fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
+        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
+            self.colors.get(prev_index)
         } else {
-            current.new_node_to_index.get_shard_by_value(dep_node).lock().get(dep_node).copied()
+            // This is a node that did not exist in the previous compilation session.
+            None
         }
     }
 
-    #[inline]
-    pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
-        self.data.is_some() && self.dep_node_index_of_opt(dep_node).is_some()
+    /// Returns true if the given node has been marked as green during the
+    /// current compilation session. Used in various assertions
+    pub fn is_green(&self, dep_node: &DepNode<K>) -> bool {
+        self.node_color(dep_node).map_or(false, |c| c.is_green())
     }
 
     #[inline]
     pub fn prev_fingerprint_of(&self, dep_node: &DepNode<K>) -> Option<Fingerprint> {
-        self.data.as_ref().unwrap().previous.fingerprint_of(dep_node)
+        self.previous.fingerprint_of(dep_node)
+    }
+
+    pub fn mark_debug_loaded_from_disk(&self, dep_node: DepNode<K>) {
+        self.debug_loaded_from_disk.lock().insert(dep_node);
+    }
+}
+
+impl<K: DepKind> DepGraph<K> {
+    #[inline]
+    pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
+        self.data.as_ref().and_then(|data| data.dep_node_index_of_opt(dep_node)).is_some()
     }
 
     /// Checks whether a previous work product exists for `v` and, if
@@ -617,10 +654,6 @@ impl<K: DepKind> DepGraph<K> {
         &self.data.as_ref().unwrap().previous_work_products
     }
 
-    pub fn mark_debug_loaded_from_disk(&self, dep_node: DepNode<K>) {
-        self.data.as_ref().unwrap().debug_loaded_from_disk.lock().insert(dep_node);
-    }
-
     pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode<K>) -> bool {
         self.data.as_ref().unwrap().debug_loaded_from_disk.lock().contains(&dep_node)
     }
@@ -645,17 +678,22 @@ impl<K: DepKind> DepGraph<K> {
 
     fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
         if let Some(ref data) = self.data {
-            if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
-                return data.colors.get(prev_index);
-            } else {
-                // This is a node that did not exist in the previous compilation session.
-                return None;
-            }
+            return data.node_color(dep_node);
         }
 
         None
     }
 
+    pub fn try_mark_green<Qcx: QueryContext<DepKind = K>>(
+        &self,
+        qcx: Qcx,
+        dep_node: &DepNode<K>,
+    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
+        self.data().and_then(|data| data.try_mark_green(qcx, dep_node))
+    }
+}
+
+impl<K: DepKind> DepGraphData<K> {
     /// Try to mark a node index for the node dep_node.
     ///
     /// A node will have an index, when it's already been marked green, or when we can mark it
@@ -668,26 +706,23 @@
     ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
         debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));
 
-        // Return None if the dep graph is disabled
-        let data = self.data.as_ref()?;
-
         // Return None if the dep node didn't exist in the previous session
-        let prev_index = data.previous.node_to_index_opt(dep_node)?;
+        let prev_index = self.previous.node_to_index_opt(dep_node)?;
 
-        match data.colors.get(prev_index) {
+        match self.colors.get(prev_index) {
             Some(DepNodeColor::Green(dep_node_index)) => return Some((prev_index, dep_node_index)),
             Some(DepNodeColor::Red) => return None,
             None => {}
         }
 
-        let backtrace = backtrace_printer(qcx.dep_context().sess(), data, prev_index);
+        let backtrace = backtrace_printer(qcx.dep_context().sess(), self, prev_index);
 
         // This DepNode and the corresponding query invocation existed
         // in the previous compilation session too, so we can try to
         // mark it as green by recursively marking all of its
         // dependencies green.
         let ret = self
-            .try_mark_previous_green(qcx, data, prev_index, &dep_node)
+            .try_mark_previous_green(qcx, prev_index, &dep_node)
            .map(|dep_node_index| (prev_index, dep_node_index));
 
         // We succeeded, no backtrace.
@@ -695,16 +730,15 @@ impl<K: DepKind> DepGraph<K> {
         return ret;
     }
 
-    #[instrument(skip(self, qcx, data, parent_dep_node_index), level = "debug")]
+    #[instrument(skip(self, qcx, parent_dep_node_index), level = "debug")]
     fn try_mark_parent_green<Qcx: QueryContext<DepKind = K>>(
         &self,
         qcx: Qcx,
-        data: &DepGraphData<K>,
         parent_dep_node_index: SerializedDepNodeIndex,
         dep_node: &DepNode<K>,
     ) -> Option<()> {
-        let dep_dep_node_color = data.colors.get(parent_dep_node_index);
-        let dep_dep_node = &data.previous.index_to_node(parent_dep_node_index);
+        let dep_dep_node_color = self.colors.get(parent_dep_node_index);
+        let dep_dep_node = &self.previous.index_to_node(parent_dep_node_index);
 
         match dep_dep_node_color {
             Some(DepNodeColor::Green(_)) => {
@@ -733,8 +767,7 @@ impl<K: DepKind> DepGraph<K> {
                     dep_dep_node,
                     dep_dep_node.hash,
                 );
-                let node_index =
-                    self.try_mark_previous_green(qcx, data, parent_dep_node_index, dep_dep_node);
+                let node_index = self.try_mark_previous_green(qcx, parent_dep_node_index, dep_dep_node);
 
                 if node_index.is_some() {
                     debug!("managed to MARK dependency {dep_dep_node:?} as green",);
@@ -750,7 +783,7 @@ impl<K: DepKind> DepGraph<K> {
             return None;
         }
 
-        let dep_dep_node_color = data.colors.get(parent_dep_node_index);
+        let dep_dep_node_color = self.colors.get(parent_dep_node_index);
 
         match dep_dep_node_color {
             Some(DepNodeColor::Green(_)) => {
@@ -783,30 +816,29 @@ impl<K: DepKind> DepGraph<K> {
     }
 
     /// Try to mark a dep-node which existed in the previous compilation session as green.
-    #[instrument(skip(self, qcx, data, prev_dep_node_index), level = "debug")]
+    #[instrument(skip(self, qcx, prev_dep_node_index), level = "debug")]
     fn try_mark_previous_green<Qcx: QueryContext<DepKind = K>>(
         &self,
         qcx: Qcx,
-        data: &DepGraphData<K>,
         prev_dep_node_index: SerializedDepNodeIndex,
         dep_node: &DepNode<K>,
     ) -> Option<DepNodeIndex> {
         #[cfg(not(parallel_compiler))]
         {
             debug_assert!(!self.dep_node_exists(dep_node));
-            debug_assert!(data.colors.get(prev_dep_node_index).is_none());
+            debug_assert!(self.colors.get(prev_dep_node_index).is_none());
         }
 
         // We never try to mark eval_always nodes as green
         debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));
 
-        debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);
+        debug_assert_eq!(self.previous.index_to_node(prev_dep_node_index), *dep_node);
 
-        let prev_deps = data.previous.edge_targets_from(prev_dep_node_index);
+        let prev_deps = self.previous.edge_targets_from(prev_dep_node_index);
 
         for &dep_dep_node_index in prev_deps {
-            let backtrace = backtrace_printer(qcx.dep_context().sess(), data, dep_dep_node_index);
-            let success = self.try_mark_parent_green(qcx, data, dep_dep_node_index, dep_node);
+            let backtrace = backtrace_printer(qcx.dep_context().sess(), self, dep_dep_node_index);
+            let success = self.try_mark_parent_green(qcx, dep_dep_node_index, dep_node);
             backtrace.disable();
             success?;
         }
@@ -819,9 +851,9 @@ impl<K: DepKind> DepGraph<K> {
 
         // We allocating an entry for the node in the current dependency graph and
         // adding all the appropriate edges imported from the previous graph
-        let dep_node_index = data.current.promote_node_and_deps_to_current(
+        let dep_node_index = self.current.promote_node_and_deps_to_current(
             qcx.dep_context().profiler(),
-            &data.previous,
+            &self.previous,
             prev_dep_node_index,
         );
@@ -833,20 +865,20 @@ impl<K: DepKind> DepGraph<K> {
 
         #[cfg(not(parallel_compiler))]
         debug_assert!(
-            data.colors.get(prev_dep_node_index).is_none(),
+            self.colors.get(prev_dep_node_index).is_none(),
             "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
             insertion for {dep_node:?}"
         );
 
         if !side_effects.is_empty() {
-            self.with_query_deserialization(|| {
-                self.emit_side_effects(qcx, data, dep_node_index, side_effects)
+            qcx.dep_context().dep_graph().with_query_deserialization(|| {
+                self.emit_side_effects(qcx, dep_node_index, side_effects)
             });
         }
 
         // ... and finally storing a "Green" entry in the color map.
         // Multiple threads can all write the same color here
-        data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));
+        self.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));
 
         debug!("successfully marked {dep_node:?} as green");
         Some(dep_node_index)
@@ -859,11 +891,10 @@ impl<K: DepKind> DepGraph<K> {
     fn emit_side_effects<Qcx: QueryContext<DepKind = K>>(
         &self,
         qcx: Qcx,
-        data: &DepGraphData<K>,
         dep_node_index: DepNodeIndex,
         side_effects: QuerySideEffects,
     ) {
-        let mut processed = data.processed_side_effects.lock();
+        let mut processed = self.processed_side_effects.lock();
 
         if processed.insert(dep_node_index) {
             // We were the first to insert the node in the set so this thread
@@ -879,7 +910,9 @@ impl<K: DepKind> DepGraph<K> {
             }
         }
     }
+}
+
+impl<K: DepKind> DepGraph<K> {
     /// Returns true if the given node has been marked as red during the
     /// current compilation session. Used in various assertions
     pub fn is_red(&self, dep_node: &DepNode<K>) -> bool {
diff --git a/compiler/rustc_query_system/src/dep_graph/mod.rs b/compiler/rustc_query_system/src/dep_graph/mod.rs
index ba83b77563165..5a7b9ae2ab436 100644
--- a/compiler/rustc_query_system/src/dep_graph/mod.rs
+++ b/compiler/rustc_query_system/src/dep_graph/mod.rs
@@ -6,7 +6,8 @@ mod serialized;
 
 pub use dep_node::{DepKindStruct, DepNode, DepNodeParams, WorkProductId};
 pub use graph::{
-    hash_result, DepGraph, DepNodeColor, DepNodeIndex, TaskDeps, TaskDepsRef, WorkProduct,
+    hash_result, DepGraph, DepGraphData, DepNodeColor, DepNodeIndex, TaskDeps, TaskDepsRef,
+    WorkProduct,
 };
 pub use query::DepGraphQuery;
 pub use serialized::{SerializedDepGraph, SerializedDepNodeIndex};
diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
index 005fcd8c4cc9d..7b9e0c3a0a677 100644
--- a/compiler/rustc_query_system/src/query/plumbing.rs
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -2,8 +2,8 @@
 //! generate the actual methods on tcx which find and execute the provider,
 //! manage the caches, and so forth.
 
-use crate::dep_graph::HasDepContext;
 use crate::dep_graph::{DepContext, DepKind, DepNode, DepNodeIndex, DepNodeParams};
+use crate::dep_graph::{DepGraphData, HasDepContext};
 use crate::ich::StableHashingContext;
 use crate::query::caches::QueryCache;
 use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo};
@@ -411,32 +411,34 @@ where
     Qcx: QueryContext,
 {
     let dep_graph = qcx.dep_context().dep_graph();
+    let dep_graph_data = match dep_graph.data() {
+        // Fast path for when incr. comp. is off.
+        None => {
+            // Fingerprint the key, just to assert that it doesn't
+            // have anything we don't consider hashable
+            if cfg!(debug_assertions) {
+                let _ = key.to_fingerprint(*qcx.dep_context());
+            }
-
-    // Fast path for when incr. comp. is off.
-    if !dep_graph.is_fully_enabled() {
-        // Fingerprint the key, just to assert that it doesn't
-        // have anything we don't consider hashable
-        if cfg!(debug_assertions) {
-            let _ = key.to_fingerprint(*qcx.dep_context());
-        }
-
-        let prof_timer = qcx.dep_context().profiler().query_provider();
-        let result = qcx.start_query(job_id, query.depth_limit(), None, || query.compute(qcx, key));
-        let dep_node_index = dep_graph.next_virtual_depnode_index();
-        prof_timer.finish_with_query_invocation_id(dep_node_index.into());
+            let prof_timer = qcx.dep_context().profiler().query_provider();
+            let result =
+                qcx.start_query(job_id, query.depth_limit(), None, || query.compute(qcx, key));
+            let dep_node_index = dep_graph.next_virtual_depnode_index();
+            prof_timer.finish_with_query_invocation_id(dep_node_index.into());
+
+            // Similarly, fingerprint the result to assert that
+            // it doesn't have anything not considered hashable.
+            if cfg!(debug_assertions) && let Some(hash_result) = query.hash_result()
+            {
+                qcx.dep_context().with_stable_hashing_context(|mut hcx| {
+                    hash_result(&mut hcx, &result);
+                });
+            }
 
-        // Similarly, fingerprint the result to assert that
-        // it doesn't have anything not considered hashable.
-        if cfg!(debug_assertions)
-            && let Some(hash_result) = query.hash_result()
-        {
-            qcx.dep_context().with_stable_hashing_context(|mut hcx| {
-                hash_result(&mut hcx, &result);
-            });
+            return (result, dep_node_index);
         }
-
-        return (result, dep_node_index);
-    }
+        Some(data) => data,
+    };
 
     if !query.anon() && !query.eval_always() {
         // `to_dep_node` is expensive for some `DepKind`s.
@@ -446,7 +448,7 @@ where
         // The diagnostics for this query will be promoted to the current session during
         // `try_mark_green()`, so we can ignore them here.
         if let Some(ret) = qcx.start_query(job_id, false, None, || {
-            try_load_from_disk_and_cache_in_memory(query, qcx, &key, &dep_node)
+            try_load_from_disk_and_cache_in_memory(query, dep_graph_data, qcx, &key, &dep_node)
         }) {
             return ret;
         }
@@ -458,7 +460,7 @@ where
     let (result, dep_node_index) =
         qcx.start_query(job_id, query.depth_limit(), Some(&diagnostics), || {
             if query.anon() {
-                return dep_graph.with_anon_task(*qcx.dep_context(), query.dep_kind(), || {
+                return dep_graph_data.with_anon_task(*qcx.dep_context(), query.dep_kind(), || {
                     query.compute(qcx, key)
                 });
             }
@@ -467,7 +469,7 @@ where
             let dep_node =
                 dep_node_opt.unwrap_or_else(|| query.construct_dep_node(*qcx.dep_context(), &key));
 
-            dep_graph.with_task(
+            dep_graph_data.with_task(
                 dep_node,
                 (qcx, query),
                 key,
@@ -495,6 +497,7 @@ where
 #[inline(always)]
 fn try_load_from_disk_and_cache_in_memory<Q, Qcx>(
     query: Q,
+    dep_graph_data: &DepGraphData<Qcx::DepKind>,
     qcx: Qcx,
     key: &Q::Key,
     dep_node: &DepNode<Qcx::DepKind>,
@@ -506,10 +509,9 @@ where
     // Note this function can be called concurrently from the same query
     // We must ensure that this is handled correctly.
 
-    let dep_graph = qcx.dep_context().dep_graph();
-    let (prev_dep_node_index, dep_node_index) = dep_graph.try_mark_green(qcx, &dep_node)?;
+    let (prev_dep_node_index, dep_node_index) = dep_graph_data.try_mark_green(qcx, &dep_node)?;
 
-    debug_assert!(dep_graph.is_green(dep_node));
+    debug_assert!(dep_graph_data.is_green(dep_node));
 
     // First we try to load the result from the on-disk cache.
     // Some things are never cached on disk.
@@ -519,8 +521,10 @@ where
         // The call to `with_query_deserialization` enforces that no new `DepNodes`
         // are created during deserialization. See the docs of that method for more
         // details.
-        let result =
-            dep_graph.with_query_deserialization(|| try_load_from_disk(qcx, prev_dep_node_index));
+        let result = qcx
+            .dep_context()
+            .dep_graph()
+            .with_query_deserialization(|| try_load_from_disk(qcx, prev_dep_node_index));
 
         prof_timer.finish_with_query_invocation_id(dep_node_index.into());
 
@@ -528,14 +532,11 @@ where
         if std::intrinsics::unlikely(
             qcx.dep_context().sess().opts.unstable_opts.query_dep_graph,
         ) {
-            dep_graph.mark_debug_loaded_from_disk(*dep_node)
+            dep_graph_data.mark_debug_loaded_from_disk(*dep_node)
         }
 
-        let prev_fingerprint = qcx
-            .dep_context()
-            .dep_graph()
-            .prev_fingerprint_of(dep_node)
-            .unwrap_or(Fingerprint::ZERO);
+        let prev_fingerprint =
+            dep_graph_data.prev_fingerprint_of(dep_node).unwrap_or(Fingerprint::ZERO);
         // If `-Zincremental-verify-ich` is specified, re-hash results from
         // the cache and make sure that they have the expected fingerprint.
         //
@@ -547,7 +548,13 @@ where
         if std::intrinsics::unlikely(
             try_verify || qcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich,
         ) {
-            incremental_verify_ich(*qcx.dep_context(), &result, dep_node, query.hash_result());
+            incremental_verify_ich(
+                *qcx.dep_context(),
+                dep_graph_data,
+                &result,
+                dep_node,
+                query.hash_result(),
+            );
         }
 
         return Some((result, dep_node_index));
@@ -566,7 +573,7 @@ where
     let prof_timer = qcx.dep_context().profiler().query_provider();
 
     // The dep-graph for this computation is already in-place.
-    let result = dep_graph.with_ignore(|| query.compute(qcx, *key));
+    let result = qcx.dep_context().dep_graph().with_ignore(|| query.compute(qcx, *key));
 
     prof_timer.finish_with_query_invocation_id(dep_node_index.into());
 
@@ -579,15 +586,22 @@ where
     //
     // See issue #82920 for an example of a miscompilation that would get turned into
     // an ICE by this check
-    incremental_verify_ich(*qcx.dep_context(), &result, dep_node, query.hash_result());
+    incremental_verify_ich(
+        *qcx.dep_context(),
+        dep_graph_data,
+        &result,
+        dep_node,
+        query.hash_result(),
+    );
 
     Some((result, dep_node_index))
 }
 
 #[inline]
-#[instrument(skip(tcx, result, hash_result), level = "debug")]
+#[instrument(skip(tcx, dep_graph_data, result, hash_result), level = "debug")]
 pub(crate) fn incremental_verify_ich<Tcx, V: Debug>(
     tcx: Tcx,
+    dep_graph_data: &DepGraphData<Tcx::DepKind>,
     result: &V,
     dep_node: &DepNode<Tcx::DepKind>,
     hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
@@ -596,7 +610,7 @@ where
     Tcx: DepContext,
 {
     assert!(
-        tcx.dep_graph().is_green(dep_node),
+        dep_graph_data.is_green(dep_node),
         "fingerprint for green query instance not loaded from cache: {dep_node:?}",
     );
 
     let new_hash = hash_result.map_or(Fingerprint::ZERO, |f| {
         tcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, result))
     });
 
-    let old_hash = tcx.dep_graph().prev_fingerprint_of(dep_node);
+    let old_hash = dep_graph_data.prev_fingerprint_of(dep_node);
 
     if Some(new_hash) != old_hash {
         incremental_verify_ich_failed(
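
The pattern applied throughout this diff is the same: rather than having every `DepGraph` method re-check `self.data: Option<...>` (and `unwrap()` it on the hot path), the `Option` is matched once at the public entry point, and the incremental-only logic moves to `DepGraphData`, whose methods can assume the graph exists. A minimal self-contained sketch of that idea, using illustrative stand-in types rather than the actual rustc API:

```rust
/// Stand-in for `DepGraphData`: only constructed when tracking is enabled.
struct GraphData {
    nodes: Vec<String>,
}

impl GraphData {
    /// Inner method: no `Option`, no `unwrap()`. Code that already holds
    /// `&mut GraphData` (like the query plumbing after a single `data()`
    /// check) never re-tests whether tracking is enabled.
    fn record(&mut self, node: &str) -> usize {
        self.nodes.push(node.to_string());
        self.nodes.len() - 1
    }
}

/// Stand-in for `DepGraph`: `data` is `None` when incr. comp. is off.
struct Graph {
    data: Option<GraphData>,
}

impl Graph {
    /// Public entry point: the `Option` is inspected exactly once.
    fn record(&mut self, node: &str) -> usize {
        match self.data.as_mut() {
            Some(data) => data.record(node),
            // Tracking disabled: hand out a placeholder index, analogous
            // to `DepGraph::next_virtual_depnode_index`.
            None => usize::MAX,
        }
    }
}

fn main() {
    let mut on = Graph { data: Some(GraphData { nodes: Vec::new() }) };
    assert_eq!(on.record("a"), 0);
    assert_eq!(on.record("b"), 1);

    let mut off = Graph { data: None };
    assert_eq!(off.record("a"), usize::MAX);
}
```

This is also why `try_execute_query` in `plumbing.rs` now binds `dep_graph_data` once at the top and threads `&DepGraphData` into `try_load_from_disk_and_cache_in_memory` and `incremental_verify_ich`: both previously went back through `qcx.dep_context().dep_graph()` and re-tested or unwrapped the `Option` internally.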