]>
Commit | Line | Data |
---|---|---|
29967ef6 XL |
1 | use super::Error; |
2 | ||
5099ac24 | 3 | use itertools::Itertools; |
29967ef6 XL |
4 | use rustc_data_structures::fx::FxHashMap; |
5 | use rustc_data_structures::graph::dominators::{self, Dominators}; | |
6 | use rustc_data_structures::graph::{self, GraphSuccessors, WithNumNodes, WithStartNode}; | |
7 | use rustc_index::bit_set::BitSet; | |
353b0b11 | 8 | use rustc_index::vec::{IndexSlice, IndexVec}; |
29967ef6 XL |
9 | use rustc_middle::mir::coverage::*; |
10 | use rustc_middle::mir::{self, BasicBlock, BasicBlockData, Terminator, TerminatorKind}; | |
11 | ||
12 | use std::ops::{Index, IndexMut}; | |
13 | ||
/// Separator used by `BasicCoverageBlockData::id` when joining basic block indices.
const ID_SEPARATOR: &str = ",";
15 | ||
16 | /// A coverage-specific simplification of the MIR control flow graph (CFG). The `CoverageGraph`s | |
17 | /// nodes are `BasicCoverageBlock`s, which encompass one or more MIR `BasicBlock`s, plus a | |
18 | /// `CoverageKind` counter (to be added by `CoverageCounters::make_bcb_counters`), and an optional | |
19 | /// set of additional counters--if needed--to count incoming edges, if there are more than one. | |
20 | /// (These "edge counters" are eventually converted into new MIR `BasicBlock`s.) | |
fc512014 XL |
21 | #[derive(Debug)] |
22 | pub(super) struct CoverageGraph { | |
29967ef6 XL |
23 | bcbs: IndexVec<BasicCoverageBlock, BasicCoverageBlockData>, |
24 | bb_to_bcb: IndexVec<BasicBlock, Option<BasicCoverageBlock>>, | |
25 | pub successors: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>, | |
26 | pub predecessors: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>, | |
27 | dominators: Option<Dominators<BasicCoverageBlock>>, | |
28 | } | |
29 | ||
30 | impl CoverageGraph { | |
a2a8927a | 31 | pub fn from_mir(mir_body: &mir::Body<'_>) -> Self { |
29967ef6 XL |
32 | let (bcbs, bb_to_bcb) = Self::compute_basic_coverage_blocks(mir_body); |
33 | ||
34 | // Pre-transform MIR `BasicBlock` successors and predecessors into the BasicCoverageBlock | |
35 | // equivalents. Note that since the BasicCoverageBlock graph has been fully simplified, the | |
fc512014 XL |
36 | // each predecessor of a BCB leader_bb should be in a unique BCB. It is possible for a |
37 | // `SwitchInt` to have multiple targets to the same destination `BasicBlock`, so | |
38 | // de-duplication is required. This is done without reordering the successors. | |
29967ef6 | 39 | |
353b0b11 | 40 | let mut seen = IndexVec::from_elem(false, &bcbs); |
29967ef6 XL |
41 | let successors = IndexVec::from_fn_n( |
42 | |bcb| { | |
fc512014 XL |
43 | for b in seen.iter_mut() { |
44 | *b = false; | |
45 | } | |
29967ef6 | 46 | let bcb_data = &bcbs[bcb]; |
fc512014 XL |
47 | let mut bcb_successors = Vec::new(); |
48 | for successor in | |
29967ef6 | 49 | bcb_filtered_successors(&mir_body, &bcb_data.terminator(mir_body).kind) |
5e7ed085 | 50 | .filter_map(|successor_bb| bb_to_bcb[successor_bb]) |
fc512014 XL |
51 | { |
52 | if !seen[successor] { | |
53 | seen[successor] = true; | |
54 | bcb_successors.push(successor); | |
55 | } | |
56 | } | |
29967ef6 XL |
57 | bcb_successors |
58 | }, | |
59 | bcbs.len(), | |
60 | ); | |
61 | ||
353b0b11 | 62 | let mut predecessors = IndexVec::from_elem(Vec::new(), &bcbs); |
29967ef6 XL |
63 | for (bcb, bcb_successors) in successors.iter_enumerated() { |
64 | for &successor in bcb_successors { | |
65 | predecessors[successor].push(bcb); | |
66 | } | |
67 | } | |
68 | ||
69 | let mut basic_coverage_blocks = | |
70 | Self { bcbs, bb_to_bcb, successors, predecessors, dominators: None }; | |
71 | let dominators = dominators::dominators(&basic_coverage_blocks); | |
72 | basic_coverage_blocks.dominators = Some(dominators); | |
73 | basic_coverage_blocks | |
74 | } | |
75 | ||
76 | fn compute_basic_coverage_blocks( | |
a2a8927a | 77 | mir_body: &mir::Body<'_>, |
29967ef6 XL |
78 | ) -> ( |
79 | IndexVec<BasicCoverageBlock, BasicCoverageBlockData>, | |
80 | IndexVec<BasicBlock, Option<BasicCoverageBlock>>, | |
81 | ) { | |
064997fb | 82 | let num_basic_blocks = mir_body.basic_blocks.len(); |
29967ef6 XL |
83 | let mut bcbs = IndexVec::with_capacity(num_basic_blocks); |
84 | let mut bb_to_bcb = IndexVec::from_elem_n(None, num_basic_blocks); | |
85 | ||
86 | // Walk the MIR CFG using a Preorder traversal, which starts from `START_BLOCK` and follows | |
87 | // each block terminator's `successors()`. Coverage spans must map to actual source code, | |
88 | // so compiler generated blocks and paths can be ignored. To that end, the CFG traversal | |
89 | // intentionally omits unwind paths. | |
90 | // FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and | |
91 | // `catch_unwind()` handlers. | |
92 | let mir_cfg_without_unwind = ShortCircuitPreorder::new(&mir_body, bcb_filtered_successors); | |
93 | ||
94 | let mut basic_blocks = Vec::new(); | |
95 | for (bb, data) in mir_cfg_without_unwind { | |
96 | if let Some(last) = basic_blocks.last() { | |
064997fb | 97 | let predecessors = &mir_body.basic_blocks.predecessors()[bb]; |
29967ef6 XL |
98 | if predecessors.len() > 1 || !predecessors.contains(last) { |
99 | // The `bb` has more than one _incoming_ edge, and should start its own | |
100 | // `BasicCoverageBlockData`. (Note, the `basic_blocks` vector does not yet | |
101 | // include `bb`; it contains a sequence of one or more sequential basic_blocks | |
102 | // with no intermediate branches in or out. Save these as a new | |
103 | // `BasicCoverageBlockData` before starting the new one.) | |
104 | Self::add_basic_coverage_block( | |
105 | &mut bcbs, | |
106 | &mut bb_to_bcb, | |
107 | basic_blocks.split_off(0), | |
108 | ); | |
109 | debug!( | |
110 | " because {}", | |
111 | if predecessors.len() > 1 { | |
112 | "predecessors.len() > 1".to_owned() | |
113 | } else { | |
114 | format!("bb {} is not in precessors: {:?}", bb.index(), predecessors) | |
115 | } | |
116 | ); | |
117 | } | |
118 | } | |
119 | basic_blocks.push(bb); | |
120 | ||
121 | let term = data.terminator(); | |
122 | ||
123 | match term.kind { | |
124 | TerminatorKind::Return { .. } | |
353b0b11 | 125 | | TerminatorKind::Terminate |
29967ef6 | 126 | | TerminatorKind::Yield { .. } |
29967ef6 XL |
127 | | TerminatorKind::SwitchInt { .. } => { |
128 | // The `bb` has more than one _outgoing_ edge, or exits the function. Save the | |
129 | // current sequence of `basic_blocks` gathered to this point, as a new | |
130 | // `BasicCoverageBlockData`. | |
131 | Self::add_basic_coverage_block( | |
132 | &mut bcbs, | |
133 | &mut bb_to_bcb, | |
134 | basic_blocks.split_off(0), | |
135 | ); | |
136 | debug!(" because term.kind = {:?}", term.kind); | |
137 | // Note that this condition is based on `TerminatorKind`, even though it | |
138 | // theoretically boils down to `successors().len() != 1`; that is, either zero | |
353b0b11 | 139 | // (e.g., `Return`, `Terminate`) or multiple successors (e.g., `SwitchInt`), but |
29967ef6 XL |
140 | // since the BCB CFG ignores things like unwind branches (which exist in the |
141 | // `Terminator`s `successors()` list) checking the number of successors won't | |
142 | // work. | |
143 | } | |
fc512014 XL |
144 | |
145 | // The following `TerminatorKind`s are either not expected outside an unwind branch, | |
146 | // or they should not (under normal circumstances) branch. Coverage graphs are | |
147 | // simplified by assuring coverage results are accurate for program executions that | |
148 | // don't panic. | |
149 | // | |
150 | // Programs that panic and unwind may record slightly inaccurate coverage results | |
151 | // for a coverage region containing the `Terminator` that began the panic. This | |
152 | // is as intended. (See Issue #78544 for a possible future option to support | |
153 | // coverage in test programs that panic.) | |
29967ef6 XL |
154 | TerminatorKind::Goto { .. } |
155 | | TerminatorKind::Resume | |
156 | | TerminatorKind::Unreachable | |
157 | | TerminatorKind::Drop { .. } | |
29967ef6 XL |
158 | | TerminatorKind::Call { .. } |
159 | | TerminatorKind::GeneratorDrop | |
fc512014 | 160 | | TerminatorKind::Assert { .. } |
29967ef6 XL |
161 | | TerminatorKind::FalseEdge { .. } |
162 | | TerminatorKind::FalseUnwind { .. } | |
163 | | TerminatorKind::InlineAsm { .. } => {} | |
164 | } | |
165 | } | |
166 | ||
167 | if !basic_blocks.is_empty() { | |
168 | // process any remaining basic_blocks into a final `BasicCoverageBlockData` | |
169 | Self::add_basic_coverage_block(&mut bcbs, &mut bb_to_bcb, basic_blocks.split_off(0)); | |
170 | debug!(" because the end of the MIR CFG was reached while traversing"); | |
171 | } | |
172 | ||
173 | (bcbs, bb_to_bcb) | |
174 | } | |
175 | ||
176 | fn add_basic_coverage_block( | |
177 | bcbs: &mut IndexVec<BasicCoverageBlock, BasicCoverageBlockData>, | |
353b0b11 | 178 | bb_to_bcb: &mut IndexSlice<BasicBlock, Option<BasicCoverageBlock>>, |
29967ef6 XL |
179 | basic_blocks: Vec<BasicBlock>, |
180 | ) { | |
353b0b11 | 181 | let bcb = bcbs.next_index(); |
29967ef6 XL |
182 | for &bb in basic_blocks.iter() { |
183 | bb_to_bcb[bb] = Some(bcb); | |
184 | } | |
185 | let bcb_data = BasicCoverageBlockData::from(basic_blocks); | |
186 | debug!("adding bcb{}: {:?}", bcb.index(), bcb_data); | |
187 | bcbs.push(bcb_data); | |
188 | } | |
189 | ||
190 | #[inline(always)] | |
191 | pub fn iter_enumerated( | |
192 | &self, | |
193 | ) -> impl Iterator<Item = (BasicCoverageBlock, &BasicCoverageBlockData)> { | |
194 | self.bcbs.iter_enumerated() | |
195 | } | |
196 | ||
197 | #[inline(always)] | |
198 | pub fn iter_enumerated_mut( | |
199 | &mut self, | |
200 | ) -> impl Iterator<Item = (BasicCoverageBlock, &mut BasicCoverageBlockData)> { | |
201 | self.bcbs.iter_enumerated_mut() | |
202 | } | |
203 | ||
204 | #[inline(always)] | |
205 | pub fn bcb_from_bb(&self, bb: BasicBlock) -> Option<BasicCoverageBlock> { | |
206 | if bb.index() < self.bb_to_bcb.len() { self.bb_to_bcb[bb] } else { None } | |
207 | } | |
208 | ||
209 | #[inline(always)] | |
9ffffee4 FG |
210 | pub fn dominates(&self, dom: BasicCoverageBlock, node: BasicCoverageBlock) -> bool { |
211 | self.dominators.as_ref().unwrap().dominates(dom, node) | |
29967ef6 XL |
212 | } |
213 | ||
214 | #[inline(always)] | |
215 | pub fn dominators(&self) -> &Dominators<BasicCoverageBlock> { | |
216 | self.dominators.as_ref().unwrap() | |
217 | } | |
218 | } | |
219 | ||
220 | impl Index<BasicCoverageBlock> for CoverageGraph { | |
221 | type Output = BasicCoverageBlockData; | |
222 | ||
223 | #[inline] | |
224 | fn index(&self, index: BasicCoverageBlock) -> &BasicCoverageBlockData { | |
225 | &self.bcbs[index] | |
226 | } | |
227 | } | |
228 | ||
229 | impl IndexMut<BasicCoverageBlock> for CoverageGraph { | |
230 | #[inline] | |
231 | fn index_mut(&mut self, index: BasicCoverageBlock) -> &mut BasicCoverageBlockData { | |
232 | &mut self.bcbs[index] | |
233 | } | |
234 | } | |
235 | ||
236 | impl graph::DirectedGraph for CoverageGraph { | |
237 | type Node = BasicCoverageBlock; | |
238 | } | |
239 | ||
240 | impl graph::WithNumNodes for CoverageGraph { | |
241 | #[inline] | |
242 | fn num_nodes(&self) -> usize { | |
243 | self.bcbs.len() | |
244 | } | |
245 | } | |
246 | ||
247 | impl graph::WithStartNode for CoverageGraph { | |
248 | #[inline] | |
249 | fn start_node(&self) -> Self::Node { | |
250 | self.bcb_from_bb(mir::START_BLOCK) | |
251 | .expect("mir::START_BLOCK should be in a BasicCoverageBlock") | |
252 | } | |
253 | } | |
254 | ||
255 | type BcbSuccessors<'graph> = std::slice::Iter<'graph, BasicCoverageBlock>; | |
256 | ||
257 | impl<'graph> graph::GraphSuccessors<'graph> for CoverageGraph { | |
258 | type Item = BasicCoverageBlock; | |
259 | type Iter = std::iter::Cloned<BcbSuccessors<'graph>>; | |
260 | } | |
261 | ||
262 | impl graph::WithSuccessors for CoverageGraph { | |
263 | #[inline] | |
264 | fn successors(&self, node: Self::Node) -> <Self as GraphSuccessors<'_>>::Iter { | |
265 | self.successors[node].iter().cloned() | |
266 | } | |
267 | } | |
268 | ||
a2a8927a | 269 | impl<'graph> graph::GraphPredecessors<'graph> for CoverageGraph { |
29967ef6 | 270 | type Item = BasicCoverageBlock; |
17df50a5 | 271 | type Iter = std::iter::Copied<std::slice::Iter<'graph, BasicCoverageBlock>>; |
29967ef6 XL |
272 | } |
273 | ||
274 | impl graph::WithPredecessors for CoverageGraph { | |
275 | #[inline] | |
276 | fn predecessors(&self, node: Self::Node) -> <Self as graph::GraphPredecessors<'_>>::Iter { | |
17df50a5 | 277 | self.predecessors[node].iter().copied() |
29967ef6 XL |
278 | } |
279 | } | |
280 | ||
281 | rustc_index::newtype_index! { | |
5e7ed085 | 282 | /// A node in the control-flow graph of CoverageGraph. |
9c376795 | 283 | #[debug_format = "bcb{}"] |
fc512014 | 284 | pub(super) struct BasicCoverageBlock { |
9c376795 | 285 | const START_BCB = 0; |
29967ef6 XL |
286 | } |
287 | } | |
288 | ||
fc512014 XL |
289 | /// `BasicCoverageBlockData` holds the data indexed by a `BasicCoverageBlock`. |
290 | /// | |
291 | /// A `BasicCoverageBlock` (BCB) represents the maximal-length sequence of MIR `BasicBlock`s without | |
29967ef6 XL |
292 | /// conditional branches, and form a new, simplified, coverage-specific Control Flow Graph, without |
293 | /// altering the original MIR CFG. | |
294 | /// | |
295 | /// Note that running the MIR `SimplifyCfg` transform is not sufficient (and therefore not | |
296 | /// necessary). The BCB-based CFG is a more aggressive simplification. For example: | |
297 | /// | |
298 | /// * The BCB CFG ignores (trims) branches not relevant to coverage, such as unwind-related code, | |
299 | /// that is injected by the Rust compiler but has no physical source code to count. This also | |
300 | /// means a BasicBlock with a `Call` terminator can be merged into its primary successor target | |
301 | /// block, in the same BCB. (But, note: Issue #78544: "MIR InstrumentCoverage: Improve coverage | |
302 | /// of `#[should_panic]` tests and `catch_unwind()` handlers") | |
303 | /// * Some BasicBlock terminators support Rust-specific concerns--like borrow-checking--that are | |
304 | /// not relevant to coverage analysis. `FalseUnwind`, for example, can be treated the same as | |
305 | /// a `Goto`, and merged with its successor into the same BCB. | |
306 | /// | |
307 | /// Each BCB with at least one computed `CoverageSpan` will have no more than one `Counter`. | |
308 | /// In some cases, a BCB's execution count can be computed by `Expression`. Additional | |
309 | /// disjoint `CoverageSpan`s in a BCB can also be counted by `Expression` (by adding `ZERO` | |
310 | /// to the BCB's primary counter or expression). | |
311 | /// | |
312 | /// The BCB CFG is critical to simplifying the coverage analysis by ensuring graph path-based | |
9ffffee4 | 313 | /// queries (`dominates()`, `predecessors`, `successors`, etc.) have branch (control flow) |
29967ef6 XL |
314 | /// significance. |
315 | #[derive(Debug, Clone)] | |
fc512014 | 316 | pub(super) struct BasicCoverageBlockData { |
29967ef6 XL |
317 | pub basic_blocks: Vec<BasicBlock>, |
318 | pub counter_kind: Option<CoverageKind>, | |
319 | edge_from_bcbs: Option<FxHashMap<BasicCoverageBlock, CoverageKind>>, | |
320 | } | |
321 | ||
322 | impl BasicCoverageBlockData { | |
323 | pub fn from(basic_blocks: Vec<BasicBlock>) -> Self { | |
324 | assert!(basic_blocks.len() > 0); | |
325 | Self { basic_blocks, counter_kind: None, edge_from_bcbs: None } | |
326 | } | |
327 | ||
328 | #[inline(always)] | |
329 | pub fn leader_bb(&self) -> BasicBlock { | |
330 | self.basic_blocks[0] | |
331 | } | |
332 | ||
333 | #[inline(always)] | |
334 | pub fn last_bb(&self) -> BasicBlock { | |
335 | *self.basic_blocks.last().unwrap() | |
336 | } | |
337 | ||
338 | #[inline(always)] | |
339 | pub fn terminator<'a, 'tcx>(&self, mir_body: &'a mir::Body<'tcx>) -> &'a Terminator<'tcx> { | |
340 | &mir_body[self.last_bb()].terminator() | |
341 | } | |
342 | ||
343 | pub fn set_counter( | |
344 | &mut self, | |
345 | counter_kind: CoverageKind, | |
346 | ) -> Result<ExpressionOperandId, Error> { | |
347 | debug_assert!( | |
348 | // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also | |
349 | // have an expression (to be injected into an existing `BasicBlock` represented by this | |
350 | // `BasicCoverageBlock`). | |
351 | self.edge_from_bcbs.is_none() || counter_kind.is_expression(), | |
352 | "attempt to add a `Counter` to a BCB target with existing incoming edge counters" | |
353 | ); | |
354 | let operand = counter_kind.as_operand_id(); | |
355 | if let Some(replaced) = self.counter_kind.replace(counter_kind) { | |
356 | Error::from_string(format!( | |
357 | "attempt to set a BasicCoverageBlock coverage counter more than once; \ | |
358 | {:?} already had counter {:?}", | |
359 | self, replaced, | |
360 | )) | |
361 | } else { | |
362 | Ok(operand) | |
363 | } | |
364 | } | |
365 | ||
366 | #[inline(always)] | |
367 | pub fn counter(&self) -> Option<&CoverageKind> { | |
368 | self.counter_kind.as_ref() | |
369 | } | |
370 | ||
371 | #[inline(always)] | |
372 | pub fn take_counter(&mut self) -> Option<CoverageKind> { | |
373 | self.counter_kind.take() | |
374 | } | |
375 | ||
376 | pub fn set_edge_counter_from( | |
377 | &mut self, | |
378 | from_bcb: BasicCoverageBlock, | |
379 | counter_kind: CoverageKind, | |
380 | ) -> Result<ExpressionOperandId, Error> { | |
381 | if level_enabled!(tracing::Level::DEBUG) { | |
382 | // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also | |
383 | // have an expression (to be injected into an existing `BasicBlock` represented by this | |
384 | // `BasicCoverageBlock`). | |
385 | if !self.counter_kind.as_ref().map_or(true, |c| c.is_expression()) { | |
386 | return Error::from_string(format!( | |
387 | "attempt to add an incoming edge counter from {:?} when the target BCB already \ | |
388 | has a `Counter`", | |
389 | from_bcb | |
390 | )); | |
391 | } | |
392 | } | |
393 | let operand = counter_kind.as_operand_id(); | |
6a06907d XL |
394 | if let Some(replaced) = |
395 | self.edge_from_bcbs.get_or_insert_default().insert(from_bcb, counter_kind) | |
29967ef6 XL |
396 | { |
397 | Error::from_string(format!( | |
398 | "attempt to set an edge counter more than once; from_bcb: \ | |
399 | {:?} already had counter {:?}", | |
400 | from_bcb, replaced, | |
401 | )) | |
402 | } else { | |
403 | Ok(operand) | |
404 | } | |
405 | } | |
406 | ||
407 | #[inline] | |
408 | pub fn edge_counter_from(&self, from_bcb: BasicCoverageBlock) -> Option<&CoverageKind> { | |
409 | if let Some(edge_from_bcbs) = &self.edge_from_bcbs { | |
410 | edge_from_bcbs.get(&from_bcb) | |
411 | } else { | |
412 | None | |
413 | } | |
414 | } | |
415 | ||
416 | #[inline] | |
417 | pub fn take_edge_counters( | |
418 | &mut self, | |
419 | ) -> Option<impl Iterator<Item = (BasicCoverageBlock, CoverageKind)>> { | |
5099ac24 | 420 | self.edge_from_bcbs.take().map(|m| m.into_iter()) |
29967ef6 XL |
421 | } |
422 | ||
423 | pub fn id(&self) -> String { | |
5099ac24 | 424 | format!("@{}", self.basic_blocks.iter().map(|bb| bb.index().to_string()).join(ID_SEPARATOR)) |
29967ef6 XL |
425 | } |
426 | } | |
427 | ||
428 | /// Represents a successor from a branching BasicCoverageBlock (such as the arms of a `SwitchInt`) | |
429 | /// as either the successor BCB itself, if it has only one incoming edge, or the successor _plus_ | |
430 | /// the specific branching BCB, representing the edge between the two. The latter case | |
431 | /// distinguishes this incoming edge from other incoming edges to the same `target_bcb`. | |
432 | #[derive(Clone, Copy, PartialEq, Eq)] | |
fc512014 | 433 | pub(super) struct BcbBranch { |
29967ef6 XL |
434 | pub edge_from_bcb: Option<BasicCoverageBlock>, |
435 | pub target_bcb: BasicCoverageBlock, | |
436 | } | |
437 | ||
438 | impl BcbBranch { | |
439 | pub fn from_to( | |
440 | from_bcb: BasicCoverageBlock, | |
441 | to_bcb: BasicCoverageBlock, | |
442 | basic_coverage_blocks: &CoverageGraph, | |
443 | ) -> Self { | |
444 | let edge_from_bcb = if basic_coverage_blocks.predecessors[to_bcb].len() > 1 { | |
445 | Some(from_bcb) | |
446 | } else { | |
447 | None | |
448 | }; | |
449 | Self { edge_from_bcb, target_bcb: to_bcb } | |
450 | } | |
451 | ||
452 | pub fn counter<'a>( | |
453 | &self, | |
454 | basic_coverage_blocks: &'a CoverageGraph, | |
455 | ) -> Option<&'a CoverageKind> { | |
456 | if let Some(from_bcb) = self.edge_from_bcb { | |
457 | basic_coverage_blocks[self.target_bcb].edge_counter_from(from_bcb) | |
458 | } else { | |
459 | basic_coverage_blocks[self.target_bcb].counter() | |
460 | } | |
461 | } | |
462 | ||
463 | pub fn is_only_path_to_target(&self) -> bool { | |
464 | self.edge_from_bcb.is_none() | |
465 | } | |
466 | } | |
467 | ||
468 | impl std::fmt::Debug for BcbBranch { | |
469 | fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { | |
470 | if let Some(from_bcb) = self.edge_from_bcb { | |
471 | write!(fmt, "{:?}->{:?}", from_bcb, self.target_bcb) | |
472 | } else { | |
473 | write!(fmt, "{:?}", self.target_bcb) | |
474 | } | |
475 | } | |
476 | } | |
477 | ||
478 | // Returns the `Terminator`s non-unwind successors. | |
479 | // FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and | |
480 | // `catch_unwind()` handlers. | |
481 | fn bcb_filtered_successors<'a, 'tcx>( | |
923072b8 FG |
482 | body: &'a mir::Body<'tcx>, |
483 | term_kind: &'a TerminatorKind<'tcx>, | |
5e7ed085 | 484 | ) -> Box<dyn Iterator<Item = BasicBlock> + 'a> { |
94222f64 XL |
485 | Box::new( |
486 | match &term_kind { | |
487 | // SwitchInt successors are never unwind, and all of them should be traversed. | |
923072b8 FG |
488 | TerminatorKind::SwitchInt { ref targets, .. } => { |
489 | None.into_iter().chain(targets.all_targets().into_iter().copied()) | |
490 | } | |
94222f64 XL |
491 | // For all other kinds, return only the first successor, if any, and ignore unwinds. |
492 | // NOTE: `chain(&[])` is required to coerce the `option::iter` (from | |
493 | // `next().into_iter()`) into the `mir::Successors` aliased type. | |
923072b8 | 494 | _ => term_kind.successors().next().into_iter().chain((&[]).into_iter().copied()), |
94222f64 | 495 | } |
5e7ed085 | 496 | .filter(move |&successor| body[successor].terminator().kind != TerminatorKind::Unreachable), |
94222f64 | 497 | ) |
29967ef6 XL |
498 | } |
499 | ||
500 | /// Maintains separate worklists for each loop in the BasicCoverageBlock CFG, plus one for the | |
501 | /// CoverageGraph outside all loops. This supports traversing the BCB CFG in a way that | |
502 | /// ensures a loop is completely traversed before processing Blocks after the end of the loop. | |
29967ef6 | 503 | #[derive(Debug)] |
fc512014 | 504 | pub(super) struct TraversalContext { |
29967ef6 XL |
505 | /// From one or more backedges returning to a loop header. |
506 | pub loop_backedges: Option<(Vec<BasicCoverageBlock>, BasicCoverageBlock)>, | |
507 | ||
508 | /// worklist, to be traversed, of CoverageGraph in the loop with the given loop | |
509 | /// backedges, such that the loop is the inner inner-most loop containing these | |
510 | /// CoverageGraph | |
511 | pub worklist: Vec<BasicCoverageBlock>, | |
512 | } | |
513 | ||
fc512014 | 514 | pub(super) struct TraverseCoverageGraphWithLoops { |
29967ef6 XL |
515 | pub backedges: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>, |
516 | pub context_stack: Vec<TraversalContext>, | |
517 | visited: BitSet<BasicCoverageBlock>, | |
518 | } | |
519 | ||
520 | impl TraverseCoverageGraphWithLoops { | |
521 | pub fn new(basic_coverage_blocks: &CoverageGraph) -> Self { | |
522 | let start_bcb = basic_coverage_blocks.start_node(); | |
523 | let backedges = find_loop_backedges(basic_coverage_blocks); | |
94222f64 XL |
524 | let context_stack = |
525 | vec![TraversalContext { loop_backedges: None, worklist: vec![start_bcb] }]; | |
29967ef6 XL |
526 | // `context_stack` starts with a `TraversalContext` for the main function context (beginning |
527 | // with the `start` BasicCoverageBlock of the function). New worklists are pushed to the top | |
528 | // of the stack as loops are entered, and popped off of the stack when a loop's worklist is | |
529 | // exhausted. | |
530 | let visited = BitSet::new_empty(basic_coverage_blocks.num_nodes()); | |
531 | Self { backedges, context_stack, visited } | |
532 | } | |
533 | ||
534 | pub fn next(&mut self, basic_coverage_blocks: &CoverageGraph) -> Option<BasicCoverageBlock> { | |
535 | debug!( | |
536 | "TraverseCoverageGraphWithLoops::next - context_stack: {:?}", | |
537 | self.context_stack.iter().rev().collect::<Vec<_>>() | |
538 | ); | |
353b0b11 FG |
539 | |
540 | while let Some(context) = self.context_stack.last_mut() { | |
541 | if let Some(next_bcb) = context.worklist.pop() { | |
542 | if !self.visited.insert(next_bcb) { | |
543 | debug!("Already visited: {:?}", next_bcb); | |
544 | continue; | |
545 | } | |
546 | debug!("Visiting {:?}", next_bcb); | |
547 | if self.backedges[next_bcb].len() > 0 { | |
548 | debug!("{:?} is a loop header! Start a new TraversalContext...", next_bcb); | |
549 | self.context_stack.push(TraversalContext { | |
550 | loop_backedges: Some((self.backedges[next_bcb].clone(), next_bcb)), | |
551 | worklist: Vec::new(), | |
552 | }); | |
553 | } | |
554 | self.extend_worklist(basic_coverage_blocks, next_bcb); | |
555 | return Some(next_bcb); | |
556 | } else { | |
557 | // Strip contexts with empty worklists from the top of the stack | |
29967ef6 XL |
558 | self.context_stack.pop(); |
559 | } | |
29967ef6 | 560 | } |
353b0b11 | 561 | |
29967ef6 XL |
562 | None |
563 | } | |
564 | ||
565 | pub fn extend_worklist( | |
566 | &mut self, | |
567 | basic_coverage_blocks: &CoverageGraph, | |
568 | bcb: BasicCoverageBlock, | |
569 | ) { | |
570 | let successors = &basic_coverage_blocks.successors[bcb]; | |
571 | debug!("{:?} has {} successors:", bcb, successors.len()); | |
572 | for &successor in successors { | |
573 | if successor == bcb { | |
574 | debug!( | |
575 | "{:?} has itself as its own successor. (Note, the compiled code will \ | |
576 | generate an infinite loop.)", | |
577 | bcb | |
578 | ); | |
579 | // Don't re-add this successor to the worklist. We are already processing it. | |
580 | break; | |
581 | } | |
582 | for context in self.context_stack.iter_mut().rev() { | |
583 | // Add successors of the current BCB to the appropriate context. Successors that | |
584 | // stay within a loop are added to the BCBs context worklist. Successors that | |
585 | // exit the loop (they are not dominated by the loop header) must be reachable | |
586 | // from other BCBs outside the loop, and they will be added to a different | |
587 | // worklist. | |
588 | // | |
589 | // Branching blocks (with more than one successor) must be processed before | |
590 | // blocks with only one successor, to prevent unnecessarily complicating | |
591 | // `Expression`s by creating a Counter in a `BasicCoverageBlock` that the | |
592 | // branching block would have given an `Expression` (or vice versa). | |
593 | let (some_successor_to_add, some_loop_header) = | |
594 | if let Some((_, loop_header)) = context.loop_backedges { | |
9ffffee4 | 595 | if basic_coverage_blocks.dominates(loop_header, successor) { |
29967ef6 XL |
596 | (Some(successor), Some(loop_header)) |
597 | } else { | |
598 | (None, None) | |
599 | } | |
600 | } else { | |
601 | (Some(successor), None) | |
602 | }; | |
603 | if let Some(successor_to_add) = some_successor_to_add { | |
604 | if basic_coverage_blocks.successors[successor_to_add].len() > 1 { | |
605 | debug!( | |
606 | "{:?} successor is branching. Prioritize it at the beginning of \ | |
607 | the {}", | |
608 | successor_to_add, | |
609 | if let Some(loop_header) = some_loop_header { | |
610 | format!("worklist for the loop headed by {:?}", loop_header) | |
611 | } else { | |
612 | String::from("non-loop worklist") | |
613 | }, | |
614 | ); | |
615 | context.worklist.insert(0, successor_to_add); | |
616 | } else { | |
617 | debug!( | |
618 | "{:?} successor is non-branching. Defer it to the end of the {}", | |
619 | successor_to_add, | |
620 | if let Some(loop_header) = some_loop_header { | |
621 | format!("worklist for the loop headed by {:?}", loop_header) | |
622 | } else { | |
623 | String::from("non-loop worklist") | |
624 | }, | |
625 | ); | |
626 | context.worklist.push(successor_to_add); | |
627 | } | |
628 | break; | |
629 | } | |
630 | } | |
631 | } | |
632 | } | |
633 | ||
634 | pub fn is_complete(&self) -> bool { | |
635 | self.visited.count() == self.visited.domain_size() | |
636 | } | |
637 | ||
638 | pub fn unvisited(&self) -> Vec<BasicCoverageBlock> { | |
639 | let mut unvisited_set: BitSet<BasicCoverageBlock> = | |
640 | BitSet::new_filled(self.visited.domain_size()); | |
641 | unvisited_set.subtract(&self.visited); | |
642 | unvisited_set.iter().collect::<Vec<_>>() | |
643 | } | |
644 | } | |
645 | ||
fc512014 | 646 | pub(super) fn find_loop_backedges( |
29967ef6 XL |
647 | basic_coverage_blocks: &CoverageGraph, |
648 | ) -> IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>> { | |
649 | let num_bcbs = basic_coverage_blocks.num_nodes(); | |
650 | let mut backedges = IndexVec::from_elem_n(Vec::<BasicCoverageBlock>::new(), num_bcbs); | |
651 | ||
652 | // Identify loops by their backedges. | |
653 | // | |
654 | // The computational complexity is bounded by: n(s) x d where `n` is the number of | |
655 | // `BasicCoverageBlock` nodes (the simplified/reduced representation of the CFG derived from the | |
656 | // MIR); `s` is the average number of successors per node (which is most likely less than 2, and | |
657 | // independent of the size of the function, so it can be treated as a constant); | |
658 | // and `d` is the average number of dominators per node. | |
659 | // | |
660 | // The average number of dominators depends on the size and complexity of the function, and | |
661 | // nodes near the start of the function's control flow graph typically have less dominators | |
662 | // than nodes near the end of the CFG. Without doing a detailed mathematical analysis, I | |
663 | // think the resulting complexity has the characteristics of O(n log n). | |
664 | // | |
665 | // The overall complexity appears to be comparable to many other MIR transform algorithms, and I | |
666 | // don't expect that this function is creating a performance hot spot, but if this becomes an | |
9ffffee4 | 667 | // issue, there may be ways to optimize the `dominates` algorithm (as indicated by an |
29967ef6 XL |
668 | // existing `FIXME` comment in that code), or possibly ways to optimize it's usage here, perhaps |
669 | // by keeping track of results for visited `BasicCoverageBlock`s if they can be used to short | |
9ffffee4 | 670 | // circuit downstream `dominates` checks. |
29967ef6 XL |
671 | // |
672 | // For now, that kind of optimization seems unnecessarily complicated. | |
673 | for (bcb, _) in basic_coverage_blocks.iter_enumerated() { | |
674 | for &successor in &basic_coverage_blocks.successors[bcb] { | |
9ffffee4 | 675 | if basic_coverage_blocks.dominates(successor, bcb) { |
29967ef6 XL |
676 | let loop_header = successor; |
677 | let backedge_from_bcb = bcb; | |
678 | debug!( | |
679 | "Found BCB backedge: {:?} -> loop_header: {:?}", | |
680 | backedge_from_bcb, loop_header | |
681 | ); | |
682 | backedges[loop_header].push(backedge_from_bcb); | |
683 | } | |
684 | } | |
685 | } | |
686 | backedges | |
687 | } | |
688 | ||
689 | pub struct ShortCircuitPreorder< | |
690 | 'a, | |
691 | 'tcx, | |
923072b8 | 692 | F: Fn(&'a mir::Body<'tcx>, &'a TerminatorKind<'tcx>) -> Box<dyn Iterator<Item = BasicBlock> + 'a>, |
29967ef6 | 693 | > { |
923072b8 | 694 | body: &'a mir::Body<'tcx>, |
29967ef6 XL |
695 | visited: BitSet<BasicBlock>, |
696 | worklist: Vec<BasicBlock>, | |
697 | filtered_successors: F, | |
698 | } | |
699 | ||
700 | impl< | |
701 | 'a, | |
702 | 'tcx, | |
923072b8 | 703 | F: Fn(&'a mir::Body<'tcx>, &'a TerminatorKind<'tcx>) -> Box<dyn Iterator<Item = BasicBlock> + 'a>, |
29967ef6 XL |
704 | > ShortCircuitPreorder<'a, 'tcx, F> |
705 | { | |
706 | pub fn new( | |
923072b8 | 707 | body: &'a mir::Body<'tcx>, |
29967ef6 XL |
708 | filtered_successors: F, |
709 | ) -> ShortCircuitPreorder<'a, 'tcx, F> { | |
710 | let worklist = vec![mir::START_BLOCK]; | |
711 | ||
712 | ShortCircuitPreorder { | |
713 | body, | |
f2b60f7d | 714 | visited: BitSet::new_empty(body.basic_blocks.len()), |
29967ef6 XL |
715 | worklist, |
716 | filtered_successors, | |
717 | } | |
718 | } | |
719 | } | |
720 | ||
721 | impl< | |
923072b8 | 722 | 'a, |
29967ef6 | 723 | 'tcx, |
923072b8 | 724 | F: Fn(&'a mir::Body<'tcx>, &'a TerminatorKind<'tcx>) -> Box<dyn Iterator<Item = BasicBlock> + 'a>, |
29967ef6 XL |
725 | > Iterator for ShortCircuitPreorder<'a, 'tcx, F> |
726 | { | |
727 | type Item = (BasicBlock, &'a BasicBlockData<'tcx>); | |
728 | ||
729 | fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> { | |
730 | while let Some(idx) = self.worklist.pop() { | |
731 | if !self.visited.insert(idx) { | |
732 | continue; | |
733 | } | |
734 | ||
735 | let data = &self.body[idx]; | |
736 | ||
737 | if let Some(ref term) = data.terminator { | |
738 | self.worklist.extend((self.filtered_successors)(&self.body, &term.kind)); | |
739 | } | |
740 | ||
741 | return Some((idx, data)); | |
742 | } | |
743 | ||
744 | None | |
745 | } | |
746 | ||
747 | fn size_hint(&self) -> (usize, Option<usize>) { | |
f2b60f7d | 748 | let size = self.body.basic_blocks.len() - self.visited.count(); |
29967ef6 XL |
749 | (size, Some(size)) |
750 | } | |
751 | } |