// compiler/rustc_middle/src/dep_graph/mod.rs (rustc 1.51.0)
use crate::ich::StableHashingContext;
use crate::ty::{self, TyCtxt};
use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::sync::Lock;
use rustc_data_structures::thin_vec::ThinVec;
use rustc_errors::Diagnostic;
use rustc_hir::def_id::LocalDefId;

mod dep_node;

pub use rustc_query_system::dep_graph::{
    debug, hash_result, DepContext, DepNodeColor, DepNodeIndex, SerializedDepNodeIndex,
    WorkProduct, WorkProductId,
};

crate use dep_node::make_compile_codegen_unit;
pub use dep_node::{label_strs, DepKind, DepNode, DepNodeExt};

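// Aliases that instantiate the generic dep-graph types from
// `rustc_query_system` with this crate's `DepKind`.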
pub type DepGraph = rustc_query_system::dep_graph::DepGraph<DepKind>;
pub type TaskDeps = rustc_query_system::dep_graph::TaskDeps<DepKind>;
pub type DepGraphQuery = rustc_query_system::dep_graph::DepGraphQuery<DepKind>;
pub type PreviousDepGraph = rustc_query_system::dep_graph::PreviousDepGraph<DepKind>;
pub type SerializedDepGraph = rustc_query_system::dep_graph::SerializedDepGraph<DepKind>;

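// Hook the `DepKind` enum (generated in the `dep_node` module) into the
// generic dep-graph machinery of `rustc_query_system`. With this impl in
// place, a tracked task runs roughly like the following sketch (simplified
// from `rustc_query_system`; not code that lives in this file):
//
//     let (result, dep_node_index) =
//         tcx.dep_graph.with_task(dep_node, tcx, key, compute, hash_result);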
impl rustc_query_system::dep_graph::DepKind for DepKind {
    const NULL: Self = DepKind::Null;

    #[inline(always)]
    fn can_reconstruct_query_key(&self) -> bool {
        DepKind::can_reconstruct_query_key(self)
    }

    #[inline(always)]
    fn is_eval_always(&self) -> bool {
        self.is_eval_always
    }

    #[inline(always)]
    fn has_params(&self) -> bool {
        self.has_params
    }

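    // Formats `node` for debug output: the kind, followed by the key rendered
    // as a def-path if it can be recovered, else a registered debug string,
    // else the raw fingerprint hash.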
    fn debug_node(node: &DepNode, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{:?}", node.kind)?;

        if !node.kind.has_params && !node.kind.is_anon {
            return Ok(());
        }

        write!(f, "(")?;

        ty::tls::with_opt(|opt_tcx| {
            if let Some(tcx) = opt_tcx {
                if let Some(def_id) = node.extract_def_id(tcx) {
                    write!(f, "{}", tcx.def_path_debug_str(def_id))?;
                } else if let Some(ref s) = tcx.dep_graph.dep_node_debug_str(*node) {
                    write!(f, "{}", s)?;
                } else {
                    write!(f, "{}", node.hash)?;
                }
            } else {
                write!(f, "{}", node.hash)?;
            }
            Ok(())
        })?;

        write!(f, ")")
    }

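    // Runs `op` with `task_deps` installed in the thread-local `ImplicitCtxt`,
    // so that reads performed while `op` executes are recorded as dependencies
    // of the current task.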
    fn with_deps<OP, R>(task_deps: Option<&Lock<TaskDeps>>, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        ty::tls::with_context(|icx| {
            let icx = ty::tls::ImplicitCtxt { task_deps, ..icx.clone() };

            ty::tls::enter_context(&icx, |_| op())
        })
    }

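    // Hands the currently active `TaskDeps` (if any) to `op`. This is what
    // dependency recording hooks into; a reader would use it roughly like the
    // following sketch (simplified from `rustc_query_system`, not code in this
    // file):
    //
    //     DepKind::read_deps(|task_deps| {
    //         if let Some(deps) = task_deps {
    //             deps.lock().reads.push(dep_node_index);
    //         }
    //     });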
    fn read_deps<OP>(op: OP)
    where
        OP: for<'a> FnOnce(Option<&'a Lock<TaskDeps>>),
    {
        ty::tls::with_context_opt(|icx| {
            let icx = if let Some(icx) = icx { icx } else { return };
            op(icx.task_deps)
        })
    }
}

impl<'tcx> DepContext for TyCtxt<'tcx> {
    type DepKind = DepKind;
    type StableHashingContext = StableHashingContext<'tcx>;

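    // Lets the on-disk cache know that `dep_node` was reused verbatim from the
    // previous session, so the data cached for it remains addressable.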
    fn register_reused_dep_node(&self, dep_node: &DepNode) {
        if let Some(cache) = self.queries.on_disk_cache.as_ref() {
            cache.register_reused_dep_node(*self, dep_node)
        }
    }

    fn create_stable_hashing_context(&self) -> Self::StableHashingContext {
        TyCtxt::create_stable_hashing_context(*self)
    }

    fn debug_dep_tasks(&self) -> bool {
        self.sess.opts.debugging_opts.dep_tasks
    }

    fn debug_dep_node(&self) -> bool {
        self.sess.opts.debugging_opts.incremental_info
            || self.sess.opts.debugging_opts.query_dep_graph
    }

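    // Tries to force the query behind `dep_node` to run, returning `false`
    // when the node cannot be forced (e.g. because it no longer corresponds
    // to an existing item).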
    fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool {
        // FIXME: This match is just a workaround for incremental bugs and should
        // be removed. https://github.com/rust-lang/rust/issues/62649 is one such
        // bug that must be fixed before removing this.
        match dep_node.kind {
            DepKind::hir_owner | DepKind::hir_owner_nodes => {
                if let Some(def_id) = dep_node.extract_def_id(*self) {
                    if !def_id_corresponds_to_hir_dep_node(*self, def_id.expect_local()) {
                        // This `DefPath` does not have a corresponding `DepNode`
                        // (e.g. a struct field), and the `DefPath` collided with
                        // the `DefPath` of a proper item that existed in the
                        // previous compilation session.
                        //
                        // Since the given `DefPath` does not denote the item
                        // that previously existed, we just fail to mark green.
                        return false;
                    }
                } else {
                    // If the node does not exist anymore, we just fail to mark green.
                    return false;
                }
            }
            _ => {
                // For other kinds of nodes it's OK to be forced.
            }
        }

        debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);

        // We must avoid ever having to call `force_from_dep_node()` for a
        // `DepNode::codegen_unit`:
        // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
        // would always end up having to evaluate the first caller of the
        // `codegen_unit` query that *is* reconstructible. This might very well be
        // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
        // to re-trigger calling the `codegen_unit` query with the right key. At
        // that point we would already have re-done all the work we are trying to
        // avoid doing in the first place.
        // The solution is simple: just explicitly call the `codegen_unit` query for
        // each CGU, right after partitioning. This way `try_mark_green` will always
        // hit the cache instead of having to go through `force_from_dep_node`.
        // This assertion makes sure we actually keep applying the solution above.
        debug_assert!(
            dep_node.kind != DepKind::codegen_unit,
            "calling force_from_dep_node() on DepKind::codegen_unit"
        );

        (dep_node.kind.force_from_dep_node)(*self, dep_node)
    }

    fn has_errors_or_delayed_span_bugs(&self) -> bool {
        self.sess.has_errors_or_delayed_span_bugs()
    }

    fn diagnostic(&self) -> &rustc_errors::Handler {
        self.sess.diagnostic()
    }

    // Interactions with on_disk_cache
    fn try_load_from_on_disk_cache(&self, dep_node: &DepNode) {
        (dep_node.kind.try_load_from_on_disk_cache)(*self, dep_node)
    }

    fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic> {
        self.queries
            .on_disk_cache
            .as_ref()
            .map(|c| c.load_diagnostics(*self, prev_dep_node_index))
            .unwrap_or_default()
    }

    fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>) {
        if let Some(c) = self.queries.on_disk_cache.as_ref() {
            c.store_diagnostics(dep_node_index, diagnostics)
        }
    }

    fn store_diagnostics_for_anon_node(
        &self,
        dep_node_index: DepNodeIndex,
        diagnostics: ThinVec<Diagnostic>,
    ) {
        if let Some(c) = self.queries.on_disk_cache.as_ref() {
            c.store_diagnostics_for_anon_node(dep_node_index, diagnostics)
        }
    }

    fn profiler(&self) -> &SelfProfilerRef {
        &self.prof
    }
}

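// A `LocalDefId` has a HIR dep node of its own only if it is a HIR owner;
// e.g. a struct field has its own `DefId` but is owned by the enclosing item.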
fn def_id_corresponds_to_hir_dep_node(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
    let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
    def_id == hir_id.owner
}