]>
Commit | Line | Data |
---|---|---|
29967ef6 XL |
1 | use super::Error; |
2 | ||
3 | use rustc_data_structures::fx::FxHashMap; | |
4 | use rustc_data_structures::graph::dominators::{self, Dominators}; | |
5 | use rustc_data_structures::graph::{self, GraphSuccessors, WithNumNodes, WithStartNode}; | |
6 | use rustc_index::bit_set::BitSet; | |
7 | use rustc_index::vec::IndexVec; | |
8 | use rustc_middle::mir::coverage::*; | |
9 | use rustc_middle::mir::{self, BasicBlock, BasicBlockData, Terminator, TerminatorKind}; | |
10 | ||
11 | use std::ops::{Index, IndexMut}; | |
12 | ||
/// Separator used by `BasicCoverageBlockData::id()` when joining the indices of a
/// BCB's MIR `BasicBlock`s into a single identifier string (e.g. "@1,2,3").
const ID_SEPARATOR: &str = ",";
14 | ||
/// A coverage-specific simplification of the MIR control flow graph (CFG). The `CoverageGraph`s
/// nodes are `BasicCoverageBlock`s, which encompass one or more MIR `BasicBlock`s, plus a
/// `CoverageKind` counter (to be added by `CoverageCounters::make_bcb_counters`), and an optional
/// set of additional counters--if needed--to count incoming edges, if there are more than one.
/// (These "edge counters" are eventually converted into new MIR `BasicBlock`s.)
#[derive(Debug)]
pub(super) struct CoverageGraph {
    // The per-node data, indexed by `BasicCoverageBlock`.
    bcbs: IndexVec<BasicCoverageBlock, BasicCoverageBlockData>,
    // Maps each MIR `BasicBlock` to its containing BCB. `None` for MIR blocks that were
    // never added to a BCB (the construction traversal skips unwind-only paths).
    bb_to_bcb: IndexVec<BasicBlock, Option<BasicCoverageBlock>>,
    // De-duplicated BCB successor lists, preserving MIR successor order.
    pub successors: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
    // BCB predecessor lists, derived by inverting `successors`.
    pub predecessors: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
    // Dominator tree over BCBs. Set to `Some` at the end of `from_mir()`, so it is
    // always present on a fully constructed graph (accessors `unwrap()` it).
    dominators: Option<Dominators<BasicCoverageBlock>>,
}
28 | ||
29 | impl CoverageGraph { | |
a2a8927a | 30 | pub fn from_mir(mir_body: &mir::Body<'_>) -> Self { |
29967ef6 XL |
31 | let (bcbs, bb_to_bcb) = Self::compute_basic_coverage_blocks(mir_body); |
32 | ||
33 | // Pre-transform MIR `BasicBlock` successors and predecessors into the BasicCoverageBlock | |
34 | // equivalents. Note that since the BasicCoverageBlock graph has been fully simplified, the | |
fc512014 XL |
35 | // each predecessor of a BCB leader_bb should be in a unique BCB. It is possible for a |
36 | // `SwitchInt` to have multiple targets to the same destination `BasicBlock`, so | |
37 | // de-duplication is required. This is done without reordering the successors. | |
29967ef6 | 38 | |
fc512014 XL |
39 | let bcbs_len = bcbs.len(); |
40 | let mut seen = IndexVec::from_elem_n(false, bcbs_len); | |
29967ef6 XL |
41 | let successors = IndexVec::from_fn_n( |
42 | |bcb| { | |
fc512014 XL |
43 | for b in seen.iter_mut() { |
44 | *b = false; | |
45 | } | |
29967ef6 | 46 | let bcb_data = &bcbs[bcb]; |
fc512014 XL |
47 | let mut bcb_successors = Vec::new(); |
48 | for successor in | |
29967ef6 XL |
49 | bcb_filtered_successors(&mir_body, &bcb_data.terminator(mir_body).kind) |
50 | .filter_map(|&successor_bb| bb_to_bcb[successor_bb]) | |
fc512014 XL |
51 | { |
52 | if !seen[successor] { | |
53 | seen[successor] = true; | |
54 | bcb_successors.push(successor); | |
55 | } | |
56 | } | |
29967ef6 XL |
57 | bcb_successors |
58 | }, | |
59 | bcbs.len(), | |
60 | ); | |
61 | ||
62 | let mut predecessors = IndexVec::from_elem_n(Vec::new(), bcbs.len()); | |
63 | for (bcb, bcb_successors) in successors.iter_enumerated() { | |
64 | for &successor in bcb_successors { | |
65 | predecessors[successor].push(bcb); | |
66 | } | |
67 | } | |
68 | ||
69 | let mut basic_coverage_blocks = | |
70 | Self { bcbs, bb_to_bcb, successors, predecessors, dominators: None }; | |
71 | let dominators = dominators::dominators(&basic_coverage_blocks); | |
72 | basic_coverage_blocks.dominators = Some(dominators); | |
73 | basic_coverage_blocks | |
74 | } | |
75 | ||
76 | fn compute_basic_coverage_blocks( | |
a2a8927a | 77 | mir_body: &mir::Body<'_>, |
29967ef6 XL |
78 | ) -> ( |
79 | IndexVec<BasicCoverageBlock, BasicCoverageBlockData>, | |
80 | IndexVec<BasicBlock, Option<BasicCoverageBlock>>, | |
81 | ) { | |
82 | let num_basic_blocks = mir_body.num_nodes(); | |
83 | let mut bcbs = IndexVec::with_capacity(num_basic_blocks); | |
84 | let mut bb_to_bcb = IndexVec::from_elem_n(None, num_basic_blocks); | |
85 | ||
86 | // Walk the MIR CFG using a Preorder traversal, which starts from `START_BLOCK` and follows | |
87 | // each block terminator's `successors()`. Coverage spans must map to actual source code, | |
88 | // so compiler generated blocks and paths can be ignored. To that end, the CFG traversal | |
89 | // intentionally omits unwind paths. | |
90 | // FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and | |
91 | // `catch_unwind()` handlers. | |
92 | let mir_cfg_without_unwind = ShortCircuitPreorder::new(&mir_body, bcb_filtered_successors); | |
93 | ||
94 | let mut basic_blocks = Vec::new(); | |
95 | for (bb, data) in mir_cfg_without_unwind { | |
96 | if let Some(last) = basic_blocks.last() { | |
97 | let predecessors = &mir_body.predecessors()[bb]; | |
98 | if predecessors.len() > 1 || !predecessors.contains(last) { | |
99 | // The `bb` has more than one _incoming_ edge, and should start its own | |
100 | // `BasicCoverageBlockData`. (Note, the `basic_blocks` vector does not yet | |
101 | // include `bb`; it contains a sequence of one or more sequential basic_blocks | |
102 | // with no intermediate branches in or out. Save these as a new | |
103 | // `BasicCoverageBlockData` before starting the new one.) | |
104 | Self::add_basic_coverage_block( | |
105 | &mut bcbs, | |
106 | &mut bb_to_bcb, | |
107 | basic_blocks.split_off(0), | |
108 | ); | |
109 | debug!( | |
110 | " because {}", | |
111 | if predecessors.len() > 1 { | |
112 | "predecessors.len() > 1".to_owned() | |
113 | } else { | |
114 | format!("bb {} is not in precessors: {:?}", bb.index(), predecessors) | |
115 | } | |
116 | ); | |
117 | } | |
118 | } | |
119 | basic_blocks.push(bb); | |
120 | ||
121 | let term = data.terminator(); | |
122 | ||
123 | match term.kind { | |
124 | TerminatorKind::Return { .. } | |
29967ef6 | 125 | | TerminatorKind::Abort |
29967ef6 | 126 | | TerminatorKind::Yield { .. } |
29967ef6 XL |
127 | | TerminatorKind::SwitchInt { .. } => { |
128 | // The `bb` has more than one _outgoing_ edge, or exits the function. Save the | |
129 | // current sequence of `basic_blocks` gathered to this point, as a new | |
130 | // `BasicCoverageBlockData`. | |
131 | Self::add_basic_coverage_block( | |
132 | &mut bcbs, | |
133 | &mut bb_to_bcb, | |
134 | basic_blocks.split_off(0), | |
135 | ); | |
136 | debug!(" because term.kind = {:?}", term.kind); | |
137 | // Note that this condition is based on `TerminatorKind`, even though it | |
138 | // theoretically boils down to `successors().len() != 1`; that is, either zero | |
139 | // (e.g., `Return`, `Abort`) or multiple successors (e.g., `SwitchInt`), but | |
140 | // since the BCB CFG ignores things like unwind branches (which exist in the | |
141 | // `Terminator`s `successors()` list) checking the number of successors won't | |
142 | // work. | |
143 | } | |
fc512014 XL |
144 | |
145 | // The following `TerminatorKind`s are either not expected outside an unwind branch, | |
146 | // or they should not (under normal circumstances) branch. Coverage graphs are | |
147 | // simplified by assuring coverage results are accurate for program executions that | |
148 | // don't panic. | |
149 | // | |
150 | // Programs that panic and unwind may record slightly inaccurate coverage results | |
151 | // for a coverage region containing the `Terminator` that began the panic. This | |
152 | // is as intended. (See Issue #78544 for a possible future option to support | |
153 | // coverage in test programs that panic.) | |
29967ef6 XL |
154 | TerminatorKind::Goto { .. } |
155 | | TerminatorKind::Resume | |
156 | | TerminatorKind::Unreachable | |
157 | | TerminatorKind::Drop { .. } | |
158 | | TerminatorKind::DropAndReplace { .. } | |
159 | | TerminatorKind::Call { .. } | |
160 | | TerminatorKind::GeneratorDrop | |
fc512014 | 161 | | TerminatorKind::Assert { .. } |
29967ef6 XL |
162 | | TerminatorKind::FalseEdge { .. } |
163 | | TerminatorKind::FalseUnwind { .. } | |
164 | | TerminatorKind::InlineAsm { .. } => {} | |
165 | } | |
166 | } | |
167 | ||
168 | if !basic_blocks.is_empty() { | |
169 | // process any remaining basic_blocks into a final `BasicCoverageBlockData` | |
170 | Self::add_basic_coverage_block(&mut bcbs, &mut bb_to_bcb, basic_blocks.split_off(0)); | |
171 | debug!(" because the end of the MIR CFG was reached while traversing"); | |
172 | } | |
173 | ||
174 | (bcbs, bb_to_bcb) | |
175 | } | |
176 | ||
177 | fn add_basic_coverage_block( | |
178 | bcbs: &mut IndexVec<BasicCoverageBlock, BasicCoverageBlockData>, | |
179 | bb_to_bcb: &mut IndexVec<BasicBlock, Option<BasicCoverageBlock>>, | |
180 | basic_blocks: Vec<BasicBlock>, | |
181 | ) { | |
182 | let bcb = BasicCoverageBlock::from_usize(bcbs.len()); | |
183 | for &bb in basic_blocks.iter() { | |
184 | bb_to_bcb[bb] = Some(bcb); | |
185 | } | |
186 | let bcb_data = BasicCoverageBlockData::from(basic_blocks); | |
187 | debug!("adding bcb{}: {:?}", bcb.index(), bcb_data); | |
188 | bcbs.push(bcb_data); | |
189 | } | |
190 | ||
191 | #[inline(always)] | |
192 | pub fn iter_enumerated( | |
193 | &self, | |
194 | ) -> impl Iterator<Item = (BasicCoverageBlock, &BasicCoverageBlockData)> { | |
195 | self.bcbs.iter_enumerated() | |
196 | } | |
197 | ||
198 | #[inline(always)] | |
199 | pub fn iter_enumerated_mut( | |
200 | &mut self, | |
201 | ) -> impl Iterator<Item = (BasicCoverageBlock, &mut BasicCoverageBlockData)> { | |
202 | self.bcbs.iter_enumerated_mut() | |
203 | } | |
204 | ||
205 | #[inline(always)] | |
206 | pub fn bcb_from_bb(&self, bb: BasicBlock) -> Option<BasicCoverageBlock> { | |
207 | if bb.index() < self.bb_to_bcb.len() { self.bb_to_bcb[bb] } else { None } | |
208 | } | |
209 | ||
210 | #[inline(always)] | |
211 | pub fn is_dominated_by(&self, node: BasicCoverageBlock, dom: BasicCoverageBlock) -> bool { | |
212 | self.dominators.as_ref().unwrap().is_dominated_by(node, dom) | |
213 | } | |
214 | ||
215 | #[inline(always)] | |
216 | pub fn dominators(&self) -> &Dominators<BasicCoverageBlock> { | |
217 | self.dominators.as_ref().unwrap() | |
218 | } | |
219 | } | |
220 | ||
// Allows `graph[bcb]` to access a BCB's data directly, delegating to the `bcbs` vector.
impl Index<BasicCoverageBlock> for CoverageGraph {
    type Output = BasicCoverageBlockData;

    #[inline]
    fn index(&self, index: BasicCoverageBlock) -> &BasicCoverageBlockData {
        &self.bcbs[index]
    }
}
229 | ||
// Mutable counterpart of the `Index` impl: `graph[bcb]` in mutable position.
impl IndexMut<BasicCoverageBlock> for CoverageGraph {
    #[inline]
    fn index_mut(&mut self, index: BasicCoverageBlock) -> &mut BasicCoverageBlockData {
        &mut self.bcbs[index]
    }
}
236 | ||
// Marks `CoverageGraph` as a directed graph over BCB nodes, enabling the generic graph
// algorithms (e.g. dominator computation) in `rustc_data_structures::graph`.
impl graph::DirectedGraph for CoverageGraph {
    type Node = BasicCoverageBlock;
}
240 | ||
// Node count for the generic graph algorithms: one node per BCB.
impl graph::WithNumNodes for CoverageGraph {
    #[inline]
    fn num_nodes(&self) -> usize {
        self.bcbs.len()
    }
}
247 | ||
// The graph's entry node is the BCB containing MIR's `START_BLOCK`. `START_BLOCK` is
// always visited first by the construction traversal, so the lookup cannot fail on a
// fully constructed graph.
impl graph::WithStartNode for CoverageGraph {
    #[inline]
    fn start_node(&self) -> Self::Node {
        self.bcb_from_bb(mir::START_BLOCK)
            .expect("mir::START_BLOCK should be in a BasicCoverageBlock")
    }
}
255 | ||
256 | type BcbSuccessors<'graph> = std::slice::Iter<'graph, BasicCoverageBlock>; | |
257 | ||
258 | impl<'graph> graph::GraphSuccessors<'graph> for CoverageGraph { | |
259 | type Item = BasicCoverageBlock; | |
260 | type Iter = std::iter::Cloned<BcbSuccessors<'graph>>; | |
261 | } | |
262 | ||
263 | impl graph::WithSuccessors for CoverageGraph { | |
264 | #[inline] | |
265 | fn successors(&self, node: Self::Node) -> <Self as GraphSuccessors<'_>>::Iter { | |
266 | self.successors[node].iter().cloned() | |
267 | } | |
268 | } | |
269 | ||
// Predecessor iterator types for the generic graph algorithms: a copying iterator over
// the stored predecessor list.
impl<'graph> graph::GraphPredecessors<'graph> for CoverageGraph {
    type Item = BasicCoverageBlock;
    type Iter = std::iter::Copied<std::slice::Iter<'graph, BasicCoverageBlock>>;
}
274 | ||
// Predecessor iteration for the generic graph algorithms, delegating to the
// precomputed `predecessors` lists.
impl graph::WithPredecessors for CoverageGraph {
    #[inline]
    fn predecessors(&self, node: Self::Node) -> <Self as graph::GraphPredecessors<'_>>::Iter {
        self.predecessors[node].iter().copied()
    }
}
281 | ||
rustc_index::newtype_index! {
    /// A node in the [control-flow graph][CFG] of CoverageGraph.
    pub(super) struct BasicCoverageBlock {
        // Debug-print as `bcb0`, `bcb1`, ...
        DEBUG_FORMAT = "bcb{}",
        // The entry BCB: index 0 always holds the BCB containing `mir::START_BLOCK`,
        // since the construction traversal starts there.
        const START_BCB = 0,
    }
}
289 | ||
fc512014 XL |
/// `BasicCoverageBlockData` holds the data indexed by a `BasicCoverageBlock`.
///
/// A `BasicCoverageBlock` (BCB) represents the maximal-length sequence of MIR `BasicBlock`s without
/// conditional branches, and form a new, simplified, coverage-specific Control Flow Graph, without
/// altering the original MIR CFG.
///
/// Note that running the MIR `SimplifyCfg` transform is not sufficient (and therefore not
/// necessary). The BCB-based CFG is a more aggressive simplification. For example:
///
/// * The BCB CFG ignores (trims) branches not relevant to coverage, such as unwind-related code,
///   that is injected by the Rust compiler but has no physical source code to count. This also
///   means a BasicBlock with a `Call` terminator can be merged into its primary successor target
///   block, in the same BCB. (But, note: Issue #78544: "MIR InstrumentCoverage: Improve coverage
///   of `#[should_panic]` tests and `catch_unwind()` handlers")
/// * Some BasicBlock terminators support Rust-specific concerns--like borrow-checking--that are
///   not relevant to coverage analysis. `FalseUnwind`, for example, can be treated the same as
///   a `Goto`, and merged with its successor into the same BCB.
///
/// Each BCB with at least one computed `CoverageSpan` will have no more than one `Counter`.
/// In some cases, a BCB's execution count can be computed by `Expression`. Additional
/// disjoint `CoverageSpan`s in a BCB can also be counted by `Expression` (by adding `ZERO`
/// to the BCB's primary counter or expression).
///
/// The BCB CFG is critical to simplifying the coverage analysis by ensuring graph path-based
/// queries (`is_dominated_by()`, `predecessors`, `successors`, etc.) have branch (control flow)
/// significance.
#[derive(Debug, Clone)]
pub(super) struct BasicCoverageBlockData {
    // The member MIR blocks, in traversal order; never empty (asserted in `from()`).
    pub basic_blocks: Vec<BasicBlock>,
    // The BCB's primary counter or expression, if one has been assigned via `set_counter()`.
    pub counter_kind: Option<CoverageKind>,
    // Counters for specific incoming edges, keyed by the originating BCB; lazily
    // allocated on first `set_edge_counter_from()`.
    edge_from_bcbs: Option<FxHashMap<BasicCoverageBlock, CoverageKind>>,
}
322 | ||
323 | impl BasicCoverageBlockData { | |
324 | pub fn from(basic_blocks: Vec<BasicBlock>) -> Self { | |
325 | assert!(basic_blocks.len() > 0); | |
326 | Self { basic_blocks, counter_kind: None, edge_from_bcbs: None } | |
327 | } | |
328 | ||
329 | #[inline(always)] | |
330 | pub fn leader_bb(&self) -> BasicBlock { | |
331 | self.basic_blocks[0] | |
332 | } | |
333 | ||
334 | #[inline(always)] | |
335 | pub fn last_bb(&self) -> BasicBlock { | |
336 | *self.basic_blocks.last().unwrap() | |
337 | } | |
338 | ||
339 | #[inline(always)] | |
340 | pub fn terminator<'a, 'tcx>(&self, mir_body: &'a mir::Body<'tcx>) -> &'a Terminator<'tcx> { | |
341 | &mir_body[self.last_bb()].terminator() | |
342 | } | |
343 | ||
344 | pub fn set_counter( | |
345 | &mut self, | |
346 | counter_kind: CoverageKind, | |
347 | ) -> Result<ExpressionOperandId, Error> { | |
348 | debug_assert!( | |
349 | // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also | |
350 | // have an expression (to be injected into an existing `BasicBlock` represented by this | |
351 | // `BasicCoverageBlock`). | |
352 | self.edge_from_bcbs.is_none() || counter_kind.is_expression(), | |
353 | "attempt to add a `Counter` to a BCB target with existing incoming edge counters" | |
354 | ); | |
355 | let operand = counter_kind.as_operand_id(); | |
356 | if let Some(replaced) = self.counter_kind.replace(counter_kind) { | |
357 | Error::from_string(format!( | |
358 | "attempt to set a BasicCoverageBlock coverage counter more than once; \ | |
359 | {:?} already had counter {:?}", | |
360 | self, replaced, | |
361 | )) | |
362 | } else { | |
363 | Ok(operand) | |
364 | } | |
365 | } | |
366 | ||
367 | #[inline(always)] | |
368 | pub fn counter(&self) -> Option<&CoverageKind> { | |
369 | self.counter_kind.as_ref() | |
370 | } | |
371 | ||
372 | #[inline(always)] | |
373 | pub fn take_counter(&mut self) -> Option<CoverageKind> { | |
374 | self.counter_kind.take() | |
375 | } | |
376 | ||
377 | pub fn set_edge_counter_from( | |
378 | &mut self, | |
379 | from_bcb: BasicCoverageBlock, | |
380 | counter_kind: CoverageKind, | |
381 | ) -> Result<ExpressionOperandId, Error> { | |
382 | if level_enabled!(tracing::Level::DEBUG) { | |
383 | // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also | |
384 | // have an expression (to be injected into an existing `BasicBlock` represented by this | |
385 | // `BasicCoverageBlock`). | |
386 | if !self.counter_kind.as_ref().map_or(true, |c| c.is_expression()) { | |
387 | return Error::from_string(format!( | |
388 | "attempt to add an incoming edge counter from {:?} when the target BCB already \ | |
389 | has a `Counter`", | |
390 | from_bcb | |
391 | )); | |
392 | } | |
393 | } | |
394 | let operand = counter_kind.as_operand_id(); | |
6a06907d XL |
395 | if let Some(replaced) = |
396 | self.edge_from_bcbs.get_or_insert_default().insert(from_bcb, counter_kind) | |
29967ef6 XL |
397 | { |
398 | Error::from_string(format!( | |
399 | "attempt to set an edge counter more than once; from_bcb: \ | |
400 | {:?} already had counter {:?}", | |
401 | from_bcb, replaced, | |
402 | )) | |
403 | } else { | |
404 | Ok(operand) | |
405 | } | |
406 | } | |
407 | ||
408 | #[inline] | |
409 | pub fn edge_counter_from(&self, from_bcb: BasicCoverageBlock) -> Option<&CoverageKind> { | |
410 | if let Some(edge_from_bcbs) = &self.edge_from_bcbs { | |
411 | edge_from_bcbs.get(&from_bcb) | |
412 | } else { | |
413 | None | |
414 | } | |
415 | } | |
416 | ||
417 | #[inline] | |
418 | pub fn take_edge_counters( | |
419 | &mut self, | |
420 | ) -> Option<impl Iterator<Item = (BasicCoverageBlock, CoverageKind)>> { | |
421 | self.edge_from_bcbs.take().map_or(None, |m| Some(m.into_iter())) | |
422 | } | |
423 | ||
424 | pub fn id(&self) -> String { | |
425 | format!( | |
426 | "@{}", | |
427 | self.basic_blocks | |
428 | .iter() | |
429 | .map(|bb| bb.index().to_string()) | |
430 | .collect::<Vec<_>>() | |
431 | .join(ID_SEPARATOR) | |
432 | ) | |
433 | } | |
434 | } | |
435 | ||
/// Represents a successor from a branching BasicCoverageBlock (such as the arms of a `SwitchInt`)
/// as either the successor BCB itself, if it has only one incoming edge, or the successor _plus_
/// the specific branching BCB, representing the edge between the two. The latter case
/// distinguishes this incoming edge from other incoming edges to the same `target_bcb`.
#[derive(Clone, Copy, PartialEq, Eq)]
pub(super) struct BcbBranch {
    // `Some(from)` only when `target_bcb` has multiple incoming edges (see `from_to()`).
    pub edge_from_bcb: Option<BasicCoverageBlock>,
    // The successor BCB this branch leads to.
    pub target_bcb: BasicCoverageBlock,
}
445 | ||
446 | impl BcbBranch { | |
447 | pub fn from_to( | |
448 | from_bcb: BasicCoverageBlock, | |
449 | to_bcb: BasicCoverageBlock, | |
450 | basic_coverage_blocks: &CoverageGraph, | |
451 | ) -> Self { | |
452 | let edge_from_bcb = if basic_coverage_blocks.predecessors[to_bcb].len() > 1 { | |
453 | Some(from_bcb) | |
454 | } else { | |
455 | None | |
456 | }; | |
457 | Self { edge_from_bcb, target_bcb: to_bcb } | |
458 | } | |
459 | ||
460 | pub fn counter<'a>( | |
461 | &self, | |
462 | basic_coverage_blocks: &'a CoverageGraph, | |
463 | ) -> Option<&'a CoverageKind> { | |
464 | if let Some(from_bcb) = self.edge_from_bcb { | |
465 | basic_coverage_blocks[self.target_bcb].edge_counter_from(from_bcb) | |
466 | } else { | |
467 | basic_coverage_blocks[self.target_bcb].counter() | |
468 | } | |
469 | } | |
470 | ||
471 | pub fn is_only_path_to_target(&self) -> bool { | |
472 | self.edge_from_bcb.is_none() | |
473 | } | |
474 | } | |
475 | ||
476 | impl std::fmt::Debug for BcbBranch { | |
477 | fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { | |
478 | if let Some(from_bcb) = self.edge_from_bcb { | |
479 | write!(fmt, "{:?}->{:?}", from_bcb, self.target_bcb) | |
480 | } else { | |
481 | write!(fmt, "{:?}", self.target_bcb) | |
482 | } | |
483 | } | |
484 | } | |
485 | ||
// Returns the `Terminator`s non-unwind successors.
// FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
// `catch_unwind()` handlers.
fn bcb_filtered_successors<'a, 'tcx>(
    body: &'tcx &'a mir::Body<'tcx>,
    term_kind: &'tcx TerminatorKind<'tcx>,
) -> Box<dyn Iterator<Item = &'a BasicBlock> + 'a> {
    let mut successors = term_kind.successors();
    Box::new(
        match &term_kind {
            // SwitchInt successors are never unwind, and all of them should be traversed.
            TerminatorKind::SwitchInt { .. } => successors,
            // For all other kinds, return only the first successor, if any, and ignore unwinds.
            // NOTE: `chain(&[])` is required to coerce the `option::iter` (from
            // `next().into_iter()`) into the `mir::Successors` aliased type.
            _ => successors.next().into_iter().chain(&[]),
        }
        // Also drop successors whose own terminator is `Unreachable`, so such blocks are
        // never pulled into a BCB.
        .filter(move |&&successor| {
            body[successor].terminator().kind != TerminatorKind::Unreachable
        }),
    )
}
508 | ||
/// Maintains separate worklists for each loop in the BasicCoverageBlock CFG, plus one for the
/// CoverageGraph outside all loops. This supports traversing the BCB CFG in a way that
/// ensures a loop is completely traversed before processing Blocks after the end of the loop.
#[derive(Debug)]
pub(super) struct TraversalContext {
    /// From one or more backedges returning to a loop header.
    /// `None` for the outermost (non-loop) context.
    pub loop_backedges: Option<(Vec<BasicCoverageBlock>, BasicCoverageBlock)>,

    /// worklist, to be traversed, of CoverageGraph in the loop with the given loop
    /// backedges, such that the loop is the inner inner-most loop containing these
    /// CoverageGraph
    pub worklist: Vec<BasicCoverageBlock>,
}
522 | ||
/// Loop-aware BCB traversal state: see `TraversalContext` for how per-loop worklists are
/// used to finish each loop before moving past it.
pub(super) struct TraverseCoverageGraphWithLoops {
    // For each BCB, the BCBs that have a backedge to it (non-empty only for loop headers);
    // computed once by `find_loop_backedges()`.
    pub backedges: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
    // Stack of worklists: the bottom entry is the whole-function context, and one entry is
    // pushed per loop as its header is visited.
    pub context_stack: Vec<TraversalContext>,
    // BCBs already returned by `next()`; used to skip duplicates and report completeness.
    visited: BitSet<BasicCoverageBlock>,
}
528 | ||
529 | impl TraverseCoverageGraphWithLoops { | |
530 | pub fn new(basic_coverage_blocks: &CoverageGraph) -> Self { | |
531 | let start_bcb = basic_coverage_blocks.start_node(); | |
532 | let backedges = find_loop_backedges(basic_coverage_blocks); | |
94222f64 XL |
533 | let context_stack = |
534 | vec![TraversalContext { loop_backedges: None, worklist: vec![start_bcb] }]; | |
29967ef6 XL |
535 | // `context_stack` starts with a `TraversalContext` for the main function context (beginning |
536 | // with the `start` BasicCoverageBlock of the function). New worklists are pushed to the top | |
537 | // of the stack as loops are entered, and popped off of the stack when a loop's worklist is | |
538 | // exhausted. | |
539 | let visited = BitSet::new_empty(basic_coverage_blocks.num_nodes()); | |
540 | Self { backedges, context_stack, visited } | |
541 | } | |
542 | ||
543 | pub fn next(&mut self, basic_coverage_blocks: &CoverageGraph) -> Option<BasicCoverageBlock> { | |
544 | debug!( | |
545 | "TraverseCoverageGraphWithLoops::next - context_stack: {:?}", | |
546 | self.context_stack.iter().rev().collect::<Vec<_>>() | |
547 | ); | |
548 | while let Some(next_bcb) = { | |
549 | // Strip contexts with empty worklists from the top of the stack | |
550 | while self.context_stack.last().map_or(false, |context| context.worklist.is_empty()) { | |
551 | self.context_stack.pop(); | |
552 | } | |
553 | // Pop the next bcb off of the current context_stack. If none, all BCBs were visited. | |
554 | self.context_stack.last_mut().map_or(None, |context| context.worklist.pop()) | |
555 | } { | |
556 | if !self.visited.insert(next_bcb) { | |
557 | debug!("Already visited: {:?}", next_bcb); | |
558 | continue; | |
559 | } | |
560 | debug!("Visiting {:?}", next_bcb); | |
561 | if self.backedges[next_bcb].len() > 0 { | |
562 | debug!("{:?} is a loop header! Start a new TraversalContext...", next_bcb); | |
563 | self.context_stack.push(TraversalContext { | |
564 | loop_backedges: Some((self.backedges[next_bcb].clone(), next_bcb)), | |
565 | worklist: Vec::new(), | |
566 | }); | |
567 | } | |
568 | self.extend_worklist(basic_coverage_blocks, next_bcb); | |
569 | return Some(next_bcb); | |
570 | } | |
571 | None | |
572 | } | |
573 | ||
574 | pub fn extend_worklist( | |
575 | &mut self, | |
576 | basic_coverage_blocks: &CoverageGraph, | |
577 | bcb: BasicCoverageBlock, | |
578 | ) { | |
579 | let successors = &basic_coverage_blocks.successors[bcb]; | |
580 | debug!("{:?} has {} successors:", bcb, successors.len()); | |
581 | for &successor in successors { | |
582 | if successor == bcb { | |
583 | debug!( | |
584 | "{:?} has itself as its own successor. (Note, the compiled code will \ | |
585 | generate an infinite loop.)", | |
586 | bcb | |
587 | ); | |
588 | // Don't re-add this successor to the worklist. We are already processing it. | |
589 | break; | |
590 | } | |
591 | for context in self.context_stack.iter_mut().rev() { | |
592 | // Add successors of the current BCB to the appropriate context. Successors that | |
593 | // stay within a loop are added to the BCBs context worklist. Successors that | |
594 | // exit the loop (they are not dominated by the loop header) must be reachable | |
595 | // from other BCBs outside the loop, and they will be added to a different | |
596 | // worklist. | |
597 | // | |
598 | // Branching blocks (with more than one successor) must be processed before | |
599 | // blocks with only one successor, to prevent unnecessarily complicating | |
600 | // `Expression`s by creating a Counter in a `BasicCoverageBlock` that the | |
601 | // branching block would have given an `Expression` (or vice versa). | |
602 | let (some_successor_to_add, some_loop_header) = | |
603 | if let Some((_, loop_header)) = context.loop_backedges { | |
604 | if basic_coverage_blocks.is_dominated_by(successor, loop_header) { | |
605 | (Some(successor), Some(loop_header)) | |
606 | } else { | |
607 | (None, None) | |
608 | } | |
609 | } else { | |
610 | (Some(successor), None) | |
611 | }; | |
612 | if let Some(successor_to_add) = some_successor_to_add { | |
613 | if basic_coverage_blocks.successors[successor_to_add].len() > 1 { | |
614 | debug!( | |
615 | "{:?} successor is branching. Prioritize it at the beginning of \ | |
616 | the {}", | |
617 | successor_to_add, | |
618 | if let Some(loop_header) = some_loop_header { | |
619 | format!("worklist for the loop headed by {:?}", loop_header) | |
620 | } else { | |
621 | String::from("non-loop worklist") | |
622 | }, | |
623 | ); | |
624 | context.worklist.insert(0, successor_to_add); | |
625 | } else { | |
626 | debug!( | |
627 | "{:?} successor is non-branching. Defer it to the end of the {}", | |
628 | successor_to_add, | |
629 | if let Some(loop_header) = some_loop_header { | |
630 | format!("worklist for the loop headed by {:?}", loop_header) | |
631 | } else { | |
632 | String::from("non-loop worklist") | |
633 | }, | |
634 | ); | |
635 | context.worklist.push(successor_to_add); | |
636 | } | |
637 | break; | |
638 | } | |
639 | } | |
640 | } | |
641 | } | |
642 | ||
643 | pub fn is_complete(&self) -> bool { | |
644 | self.visited.count() == self.visited.domain_size() | |
645 | } | |
646 | ||
647 | pub fn unvisited(&self) -> Vec<BasicCoverageBlock> { | |
648 | let mut unvisited_set: BitSet<BasicCoverageBlock> = | |
649 | BitSet::new_filled(self.visited.domain_size()); | |
650 | unvisited_set.subtract(&self.visited); | |
651 | unvisited_set.iter().collect::<Vec<_>>() | |
652 | } | |
653 | } | |
654 | ||
fc512014 | 655 | pub(super) fn find_loop_backedges( |
29967ef6 XL |
656 | basic_coverage_blocks: &CoverageGraph, |
657 | ) -> IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>> { | |
658 | let num_bcbs = basic_coverage_blocks.num_nodes(); | |
659 | let mut backedges = IndexVec::from_elem_n(Vec::<BasicCoverageBlock>::new(), num_bcbs); | |
660 | ||
661 | // Identify loops by their backedges. | |
662 | // | |
663 | // The computational complexity is bounded by: n(s) x d where `n` is the number of | |
664 | // `BasicCoverageBlock` nodes (the simplified/reduced representation of the CFG derived from the | |
665 | // MIR); `s` is the average number of successors per node (which is most likely less than 2, and | |
666 | // independent of the size of the function, so it can be treated as a constant); | |
667 | // and `d` is the average number of dominators per node. | |
668 | // | |
669 | // The average number of dominators depends on the size and complexity of the function, and | |
670 | // nodes near the start of the function's control flow graph typically have less dominators | |
671 | // than nodes near the end of the CFG. Without doing a detailed mathematical analysis, I | |
672 | // think the resulting complexity has the characteristics of O(n log n). | |
673 | // | |
674 | // The overall complexity appears to be comparable to many other MIR transform algorithms, and I | |
675 | // don't expect that this function is creating a performance hot spot, but if this becomes an | |
676 | // issue, there may be ways to optimize the `is_dominated_by` algorithm (as indicated by an | |
677 | // existing `FIXME` comment in that code), or possibly ways to optimize it's usage here, perhaps | |
678 | // by keeping track of results for visited `BasicCoverageBlock`s if they can be used to short | |
679 | // circuit downstream `is_dominated_by` checks. | |
680 | // | |
681 | // For now, that kind of optimization seems unnecessarily complicated. | |
682 | for (bcb, _) in basic_coverage_blocks.iter_enumerated() { | |
683 | for &successor in &basic_coverage_blocks.successors[bcb] { | |
684 | if basic_coverage_blocks.is_dominated_by(bcb, successor) { | |
685 | let loop_header = successor; | |
686 | let backedge_from_bcb = bcb; | |
687 | debug!( | |
688 | "Found BCB backedge: {:?} -> loop_header: {:?}", | |
689 | backedge_from_bcb, loop_header | |
690 | ); | |
691 | backedges[loop_header].push(backedge_from_bcb); | |
692 | } | |
693 | } | |
694 | } | |
695 | backedges | |
696 | } | |
697 | ||
/// A preorder traversal over the MIR CFG whose outgoing edges are filtered by a
/// caller-supplied function `F` (here, `bcb_filtered_successors`, which short-circuits
/// non-`SwitchInt` terminators to their first successor and skips unwind paths).
pub struct ShortCircuitPreorder<
    'a,
    'tcx,
    F: Fn(
        &'tcx &'a mir::Body<'tcx>,
        &'tcx TerminatorKind<'tcx>,
    ) -> Box<dyn Iterator<Item = &'a BasicBlock> + 'a>,
> {
    // The MIR body being traversed.
    body: &'tcx &'a mir::Body<'tcx>,
    // Blocks already yielded; prevents revisiting on converging or cyclic paths.
    visited: BitSet<BasicBlock>,
    // Blocks discovered but not yet yielded (stack: depth-first order).
    worklist: Vec<BasicBlock>,
    // Decides which of a terminator's successors are followed.
    filtered_successors: F,
}
711 | ||
712 | impl< | |
713 | 'a, | |
714 | 'tcx, | |
715 | F: Fn( | |
716 | &'tcx &'a mir::Body<'tcx>, | |
717 | &'tcx TerminatorKind<'tcx>, | |
718 | ) -> Box<dyn Iterator<Item = &'a BasicBlock> + 'a>, | |
719 | > ShortCircuitPreorder<'a, 'tcx, F> | |
720 | { | |
721 | pub fn new( | |
722 | body: &'tcx &'a mir::Body<'tcx>, | |
723 | filtered_successors: F, | |
724 | ) -> ShortCircuitPreorder<'a, 'tcx, F> { | |
725 | let worklist = vec![mir::START_BLOCK]; | |
726 | ||
727 | ShortCircuitPreorder { | |
728 | body, | |
729 | visited: BitSet::new_empty(body.basic_blocks().len()), | |
730 | worklist, | |
731 | filtered_successors, | |
732 | } | |
733 | } | |
734 | } | |
735 | ||
736 | impl< | |
737 | 'a: 'tcx, | |
738 | 'tcx, | |
739 | F: Fn( | |
740 | &'tcx &'a mir::Body<'tcx>, | |
741 | &'tcx TerminatorKind<'tcx>, | |
742 | ) -> Box<dyn Iterator<Item = &'a BasicBlock> + 'a>, | |
743 | > Iterator for ShortCircuitPreorder<'a, 'tcx, F> | |
744 | { | |
745 | type Item = (BasicBlock, &'a BasicBlockData<'tcx>); | |
746 | ||
747 | fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> { | |
748 | while let Some(idx) = self.worklist.pop() { | |
749 | if !self.visited.insert(idx) { | |
750 | continue; | |
751 | } | |
752 | ||
753 | let data = &self.body[idx]; | |
754 | ||
755 | if let Some(ref term) = data.terminator { | |
756 | self.worklist.extend((self.filtered_successors)(&self.body, &term.kind)); | |
757 | } | |
758 | ||
759 | return Some((idx, data)); | |
760 | } | |
761 | ||
762 | None | |
763 | } | |
764 | ||
765 | fn size_hint(&self) -> (usize, Option<usize>) { | |
766 | let size = self.body.basic_blocks().len() - self.visited.count(); | |
767 | (size, Some(size)) | |
768 | } | |
769 | } |