]>
Commit | Line | Data |
---|---|---|
29967ef6 XL |
1 | use super::Error; |
2 | ||
5099ac24 | 3 | use itertools::Itertools; |
29967ef6 XL |
4 | use rustc_data_structures::fx::FxHashMap; |
5 | use rustc_data_structures::graph::dominators::{self, Dominators}; | |
6 | use rustc_data_structures::graph::{self, GraphSuccessors, WithNumNodes, WithStartNode}; | |
7 | use rustc_index::bit_set::BitSet; | |
8 | use rustc_index::vec::IndexVec; | |
9 | use rustc_middle::mir::coverage::*; | |
10 | use rustc_middle::mir::{self, BasicBlock, BasicBlockData, Terminator, TerminatorKind}; | |
11 | ||
12 | use std::ops::{Index, IndexMut}; | |
13 | ||
/// Separator used by `BasicCoverageBlockData::id()` when joining the indexes of the
/// MIR `BasicBlock`s that make up a BCB (e.g. `@1,2,3`).
const ID_SEPARATOR: &str = ",";
15 | ||
/// A coverage-specific simplification of the MIR control flow graph (CFG). The `CoverageGraph`s
/// nodes are `BasicCoverageBlock`s, which encompass one or more MIR `BasicBlock`s, plus a
/// `CoverageKind` counter (to be added by `CoverageCounters::make_bcb_counters`), and an optional
/// set of additional counters--if needed--to count incoming edges, if there are more than one.
/// (These "edge counters" are eventually converted into new MIR `BasicBlock`s.)
#[derive(Debug)]
pub(super) struct CoverageGraph {
    /// The BCB nodes, indexed by `BasicCoverageBlock`.
    bcbs: IndexVec<BasicCoverageBlock, BasicCoverageBlockData>,
    /// Maps each MIR `BasicBlock` to the BCB containing it. `None` for blocks that the
    /// coverage traversal never reached (the traversal omits unwind paths).
    bb_to_bcb: IndexVec<BasicBlock, Option<BasicCoverageBlock>>,
    /// De-duplicated BCB successor lists, in MIR successor order.
    pub successors: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
    /// Inverse of `successors`.
    pub predecessors: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
    /// Dominator tree of the BCB graph; `None` only while `from_mir` is constructing the
    /// graph, `Some` afterwards.
    dominators: Option<Dominators<BasicCoverageBlock>>,
}
29 | ||
30 | impl CoverageGraph { | |
a2a8927a | 31 | pub fn from_mir(mir_body: &mir::Body<'_>) -> Self { |
29967ef6 XL |
32 | let (bcbs, bb_to_bcb) = Self::compute_basic_coverage_blocks(mir_body); |
33 | ||
34 | // Pre-transform MIR `BasicBlock` successors and predecessors into the BasicCoverageBlock | |
35 | // equivalents. Note that since the BasicCoverageBlock graph has been fully simplified, the | |
fc512014 XL |
36 | // each predecessor of a BCB leader_bb should be in a unique BCB. It is possible for a |
37 | // `SwitchInt` to have multiple targets to the same destination `BasicBlock`, so | |
38 | // de-duplication is required. This is done without reordering the successors. | |
29967ef6 | 39 | |
fc512014 XL |
40 | let bcbs_len = bcbs.len(); |
41 | let mut seen = IndexVec::from_elem_n(false, bcbs_len); | |
29967ef6 XL |
42 | let successors = IndexVec::from_fn_n( |
43 | |bcb| { | |
fc512014 XL |
44 | for b in seen.iter_mut() { |
45 | *b = false; | |
46 | } | |
29967ef6 | 47 | let bcb_data = &bcbs[bcb]; |
fc512014 XL |
48 | let mut bcb_successors = Vec::new(); |
49 | for successor in | |
29967ef6 | 50 | bcb_filtered_successors(&mir_body, &bcb_data.terminator(mir_body).kind) |
5e7ed085 | 51 | .filter_map(|successor_bb| bb_to_bcb[successor_bb]) |
fc512014 XL |
52 | { |
53 | if !seen[successor] { | |
54 | seen[successor] = true; | |
55 | bcb_successors.push(successor); | |
56 | } | |
57 | } | |
29967ef6 XL |
58 | bcb_successors |
59 | }, | |
60 | bcbs.len(), | |
61 | ); | |
62 | ||
63 | let mut predecessors = IndexVec::from_elem_n(Vec::new(), bcbs.len()); | |
64 | for (bcb, bcb_successors) in successors.iter_enumerated() { | |
65 | for &successor in bcb_successors { | |
66 | predecessors[successor].push(bcb); | |
67 | } | |
68 | } | |
69 | ||
70 | let mut basic_coverage_blocks = | |
71 | Self { bcbs, bb_to_bcb, successors, predecessors, dominators: None }; | |
72 | let dominators = dominators::dominators(&basic_coverage_blocks); | |
73 | basic_coverage_blocks.dominators = Some(dominators); | |
74 | basic_coverage_blocks | |
75 | } | |
76 | ||
77 | fn compute_basic_coverage_blocks( | |
a2a8927a | 78 | mir_body: &mir::Body<'_>, |
29967ef6 XL |
79 | ) -> ( |
80 | IndexVec<BasicCoverageBlock, BasicCoverageBlockData>, | |
81 | IndexVec<BasicBlock, Option<BasicCoverageBlock>>, | |
82 | ) { | |
83 | let num_basic_blocks = mir_body.num_nodes(); | |
84 | let mut bcbs = IndexVec::with_capacity(num_basic_blocks); | |
85 | let mut bb_to_bcb = IndexVec::from_elem_n(None, num_basic_blocks); | |
86 | ||
87 | // Walk the MIR CFG using a Preorder traversal, which starts from `START_BLOCK` and follows | |
88 | // each block terminator's `successors()`. Coverage spans must map to actual source code, | |
89 | // so compiler generated blocks and paths can be ignored. To that end, the CFG traversal | |
90 | // intentionally omits unwind paths. | |
91 | // FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and | |
92 | // `catch_unwind()` handlers. | |
93 | let mir_cfg_without_unwind = ShortCircuitPreorder::new(&mir_body, bcb_filtered_successors); | |
94 | ||
95 | let mut basic_blocks = Vec::new(); | |
96 | for (bb, data) in mir_cfg_without_unwind { | |
97 | if let Some(last) = basic_blocks.last() { | |
98 | let predecessors = &mir_body.predecessors()[bb]; | |
99 | if predecessors.len() > 1 || !predecessors.contains(last) { | |
100 | // The `bb` has more than one _incoming_ edge, and should start its own | |
101 | // `BasicCoverageBlockData`. (Note, the `basic_blocks` vector does not yet | |
102 | // include `bb`; it contains a sequence of one or more sequential basic_blocks | |
103 | // with no intermediate branches in or out. Save these as a new | |
104 | // `BasicCoverageBlockData` before starting the new one.) | |
105 | Self::add_basic_coverage_block( | |
106 | &mut bcbs, | |
107 | &mut bb_to_bcb, | |
108 | basic_blocks.split_off(0), | |
109 | ); | |
110 | debug!( | |
111 | " because {}", | |
112 | if predecessors.len() > 1 { | |
113 | "predecessors.len() > 1".to_owned() | |
114 | } else { | |
115 | format!("bb {} is not in precessors: {:?}", bb.index(), predecessors) | |
116 | } | |
117 | ); | |
118 | } | |
119 | } | |
120 | basic_blocks.push(bb); | |
121 | ||
122 | let term = data.terminator(); | |
123 | ||
124 | match term.kind { | |
125 | TerminatorKind::Return { .. } | |
29967ef6 | 126 | | TerminatorKind::Abort |
29967ef6 | 127 | | TerminatorKind::Yield { .. } |
29967ef6 XL |
128 | | TerminatorKind::SwitchInt { .. } => { |
129 | // The `bb` has more than one _outgoing_ edge, or exits the function. Save the | |
130 | // current sequence of `basic_blocks` gathered to this point, as a new | |
131 | // `BasicCoverageBlockData`. | |
132 | Self::add_basic_coverage_block( | |
133 | &mut bcbs, | |
134 | &mut bb_to_bcb, | |
135 | basic_blocks.split_off(0), | |
136 | ); | |
137 | debug!(" because term.kind = {:?}", term.kind); | |
138 | // Note that this condition is based on `TerminatorKind`, even though it | |
139 | // theoretically boils down to `successors().len() != 1`; that is, either zero | |
140 | // (e.g., `Return`, `Abort`) or multiple successors (e.g., `SwitchInt`), but | |
141 | // since the BCB CFG ignores things like unwind branches (which exist in the | |
142 | // `Terminator`s `successors()` list) checking the number of successors won't | |
143 | // work. | |
144 | } | |
fc512014 XL |
145 | |
146 | // The following `TerminatorKind`s are either not expected outside an unwind branch, | |
147 | // or they should not (under normal circumstances) branch. Coverage graphs are | |
148 | // simplified by assuring coverage results are accurate for program executions that | |
149 | // don't panic. | |
150 | // | |
151 | // Programs that panic and unwind may record slightly inaccurate coverage results | |
152 | // for a coverage region containing the `Terminator` that began the panic. This | |
153 | // is as intended. (See Issue #78544 for a possible future option to support | |
154 | // coverage in test programs that panic.) | |
29967ef6 XL |
155 | TerminatorKind::Goto { .. } |
156 | | TerminatorKind::Resume | |
157 | | TerminatorKind::Unreachable | |
158 | | TerminatorKind::Drop { .. } | |
159 | | TerminatorKind::DropAndReplace { .. } | |
160 | | TerminatorKind::Call { .. } | |
161 | | TerminatorKind::GeneratorDrop | |
fc512014 | 162 | | TerminatorKind::Assert { .. } |
29967ef6 XL |
163 | | TerminatorKind::FalseEdge { .. } |
164 | | TerminatorKind::FalseUnwind { .. } | |
165 | | TerminatorKind::InlineAsm { .. } => {} | |
166 | } | |
167 | } | |
168 | ||
169 | if !basic_blocks.is_empty() { | |
170 | // process any remaining basic_blocks into a final `BasicCoverageBlockData` | |
171 | Self::add_basic_coverage_block(&mut bcbs, &mut bb_to_bcb, basic_blocks.split_off(0)); | |
172 | debug!(" because the end of the MIR CFG was reached while traversing"); | |
173 | } | |
174 | ||
175 | (bcbs, bb_to_bcb) | |
176 | } | |
177 | ||
178 | fn add_basic_coverage_block( | |
179 | bcbs: &mut IndexVec<BasicCoverageBlock, BasicCoverageBlockData>, | |
180 | bb_to_bcb: &mut IndexVec<BasicBlock, Option<BasicCoverageBlock>>, | |
181 | basic_blocks: Vec<BasicBlock>, | |
182 | ) { | |
183 | let bcb = BasicCoverageBlock::from_usize(bcbs.len()); | |
184 | for &bb in basic_blocks.iter() { | |
185 | bb_to_bcb[bb] = Some(bcb); | |
186 | } | |
187 | let bcb_data = BasicCoverageBlockData::from(basic_blocks); | |
188 | debug!("adding bcb{}: {:?}", bcb.index(), bcb_data); | |
189 | bcbs.push(bcb_data); | |
190 | } | |
191 | ||
192 | #[inline(always)] | |
193 | pub fn iter_enumerated( | |
194 | &self, | |
195 | ) -> impl Iterator<Item = (BasicCoverageBlock, &BasicCoverageBlockData)> { | |
196 | self.bcbs.iter_enumerated() | |
197 | } | |
198 | ||
199 | #[inline(always)] | |
200 | pub fn iter_enumerated_mut( | |
201 | &mut self, | |
202 | ) -> impl Iterator<Item = (BasicCoverageBlock, &mut BasicCoverageBlockData)> { | |
203 | self.bcbs.iter_enumerated_mut() | |
204 | } | |
205 | ||
206 | #[inline(always)] | |
207 | pub fn bcb_from_bb(&self, bb: BasicBlock) -> Option<BasicCoverageBlock> { | |
208 | if bb.index() < self.bb_to_bcb.len() { self.bb_to_bcb[bb] } else { None } | |
209 | } | |
210 | ||
211 | #[inline(always)] | |
212 | pub fn is_dominated_by(&self, node: BasicCoverageBlock, dom: BasicCoverageBlock) -> bool { | |
213 | self.dominators.as_ref().unwrap().is_dominated_by(node, dom) | |
214 | } | |
215 | ||
216 | #[inline(always)] | |
217 | pub fn dominators(&self) -> &Dominators<BasicCoverageBlock> { | |
218 | self.dominators.as_ref().unwrap() | |
219 | } | |
220 | } | |
221 | ||
222 | impl Index<BasicCoverageBlock> for CoverageGraph { | |
223 | type Output = BasicCoverageBlockData; | |
224 | ||
225 | #[inline] | |
226 | fn index(&self, index: BasicCoverageBlock) -> &BasicCoverageBlockData { | |
227 | &self.bcbs[index] | |
228 | } | |
229 | } | |
230 | ||
231 | impl IndexMut<BasicCoverageBlock> for CoverageGraph { | |
232 | #[inline] | |
233 | fn index_mut(&mut self, index: BasicCoverageBlock) -> &mut BasicCoverageBlockData { | |
234 | &mut self.bcbs[index] | |
235 | } | |
236 | } | |
237 | ||
impl graph::DirectedGraph for CoverageGraph {
    // Nodes of the directed graph are the BCBs themselves.
    type Node = BasicCoverageBlock;
}
241 | ||
242 | impl graph::WithNumNodes for CoverageGraph { | |
243 | #[inline] | |
244 | fn num_nodes(&self) -> usize { | |
245 | self.bcbs.len() | |
246 | } | |
247 | } | |
248 | ||
249 | impl graph::WithStartNode for CoverageGraph { | |
250 | #[inline] | |
251 | fn start_node(&self) -> Self::Node { | |
252 | self.bcb_from_bb(mir::START_BLOCK) | |
253 | .expect("mir::START_BLOCK should be in a BasicCoverageBlock") | |
254 | } | |
255 | } | |
256 | ||
/// Borrowed iterator over a BCB's stored successor list.
type BcbSuccessors<'graph> = std::slice::Iter<'graph, BasicCoverageBlock>;

impl<'graph> graph::GraphSuccessors<'graph> for CoverageGraph {
    type Item = BasicCoverageBlock;
    // Successors are yielded by value, cloned out of the stored `Vec`.
    type Iter = std::iter::Cloned<BcbSuccessors<'graph>>;
}
263 | ||
264 | impl graph::WithSuccessors for CoverageGraph { | |
265 | #[inline] | |
266 | fn successors(&self, node: Self::Node) -> <Self as GraphSuccessors<'_>>::Iter { | |
267 | self.successors[node].iter().cloned() | |
268 | } | |
269 | } | |
270 | ||
impl<'graph> graph::GraphPredecessors<'graph> for CoverageGraph {
    type Item = BasicCoverageBlock;
    // Predecessors are yielded by value, copied out of the stored `Vec`.
    type Iter = std::iter::Copied<std::slice::Iter<'graph, BasicCoverageBlock>>;
}
275 | ||
276 | impl graph::WithPredecessors for CoverageGraph { | |
277 | #[inline] | |
278 | fn predecessors(&self, node: Self::Node) -> <Self as graph::GraphPredecessors<'_>>::Iter { | |
17df50a5 | 279 | self.predecessors[node].iter().copied() |
29967ef6 XL |
280 | } |
281 | } | |
282 | ||
rustc_index::newtype_index! {
    /// A node in the control-flow graph of CoverageGraph.
    ///
    /// Indexes `CoverageGraph::bcbs` (and the parallel `successors`/`predecessors` vectors).
    pub(super) struct BasicCoverageBlock {
        // Debug-print as `bcb0`, `bcb1`, ...
        DEBUG_FORMAT = "bcb{}",
        // The BCB containing `mir::START_BLOCK` is always created first, so it is index 0.
        const START_BCB = 0,
    }
}
290 | ||
/// `BasicCoverageBlockData` holds the data indexed by a `BasicCoverageBlock`.
///
/// A `BasicCoverageBlock` (BCB) represents the maximal-length sequence of MIR `BasicBlock`s without
/// conditional branches, and form a new, simplified, coverage-specific Control Flow Graph, without
/// altering the original MIR CFG.
///
/// Note that running the MIR `SimplifyCfg` transform is not sufficient (and therefore not
/// necessary). The BCB-based CFG is a more aggressive simplification. For example:
///
/// * The BCB CFG ignores (trims) branches not relevant to coverage, such as unwind-related code,
///   that is injected by the Rust compiler but has no physical source code to count. This also
///   means a BasicBlock with a `Call` terminator can be merged into its primary successor target
///   block, in the same BCB. (But, note: Issue #78544: "MIR InstrumentCoverage: Improve coverage
///   of `#[should_panic]` tests and `catch_unwind()` handlers")
/// * Some BasicBlock terminators support Rust-specific concerns--like borrow-checking--that are
///   not relevant to coverage analysis. `FalseUnwind`, for example, can be treated the same as
///   a `Goto`, and merged with its successor into the same BCB.
///
/// Each BCB with at least one computed `CoverageSpan` will have no more than one `Counter`.
/// In some cases, a BCB's execution count can be computed by `Expression`. Additional
/// disjoint `CoverageSpan`s in a BCB can also be counted by `Expression` (by adding `ZERO`
/// to the BCB's primary counter or expression).
///
/// The BCB CFG is critical to simplifying the coverage analysis by ensuring graph path-based
/// queries (`is_dominated_by()`, `predecessors`, `successors`, etc.) have branch (control flow)
/// significance.
#[derive(Debug, Clone)]
pub(super) struct BasicCoverageBlockData {
    /// The MIR `BasicBlock`s in this BCB, in traversal order; never empty (asserted in
    /// `BasicCoverageBlockData::from`).
    pub basic_blocks: Vec<BasicBlock>,
    /// The counter or expression for this BCB as a whole, once assigned.
    pub counter_kind: Option<CoverageKind>,
    /// Per-incoming-edge counters, keyed by the predecessor BCB; `None` until the first
    /// edge counter is added.
    edge_from_bcbs: Option<FxHashMap<BasicCoverageBlock, CoverageKind>>,
}
323 | ||
324 | impl BasicCoverageBlockData { | |
325 | pub fn from(basic_blocks: Vec<BasicBlock>) -> Self { | |
326 | assert!(basic_blocks.len() > 0); | |
327 | Self { basic_blocks, counter_kind: None, edge_from_bcbs: None } | |
328 | } | |
329 | ||
330 | #[inline(always)] | |
331 | pub fn leader_bb(&self) -> BasicBlock { | |
332 | self.basic_blocks[0] | |
333 | } | |
334 | ||
335 | #[inline(always)] | |
336 | pub fn last_bb(&self) -> BasicBlock { | |
337 | *self.basic_blocks.last().unwrap() | |
338 | } | |
339 | ||
340 | #[inline(always)] | |
341 | pub fn terminator<'a, 'tcx>(&self, mir_body: &'a mir::Body<'tcx>) -> &'a Terminator<'tcx> { | |
342 | &mir_body[self.last_bb()].terminator() | |
343 | } | |
344 | ||
345 | pub fn set_counter( | |
346 | &mut self, | |
347 | counter_kind: CoverageKind, | |
348 | ) -> Result<ExpressionOperandId, Error> { | |
349 | debug_assert!( | |
350 | // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also | |
351 | // have an expression (to be injected into an existing `BasicBlock` represented by this | |
352 | // `BasicCoverageBlock`). | |
353 | self.edge_from_bcbs.is_none() || counter_kind.is_expression(), | |
354 | "attempt to add a `Counter` to a BCB target with existing incoming edge counters" | |
355 | ); | |
356 | let operand = counter_kind.as_operand_id(); | |
357 | if let Some(replaced) = self.counter_kind.replace(counter_kind) { | |
358 | Error::from_string(format!( | |
359 | "attempt to set a BasicCoverageBlock coverage counter more than once; \ | |
360 | {:?} already had counter {:?}", | |
361 | self, replaced, | |
362 | )) | |
363 | } else { | |
364 | Ok(operand) | |
365 | } | |
366 | } | |
367 | ||
368 | #[inline(always)] | |
369 | pub fn counter(&self) -> Option<&CoverageKind> { | |
370 | self.counter_kind.as_ref() | |
371 | } | |
372 | ||
373 | #[inline(always)] | |
374 | pub fn take_counter(&mut self) -> Option<CoverageKind> { | |
375 | self.counter_kind.take() | |
376 | } | |
377 | ||
378 | pub fn set_edge_counter_from( | |
379 | &mut self, | |
380 | from_bcb: BasicCoverageBlock, | |
381 | counter_kind: CoverageKind, | |
382 | ) -> Result<ExpressionOperandId, Error> { | |
383 | if level_enabled!(tracing::Level::DEBUG) { | |
384 | // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also | |
385 | // have an expression (to be injected into an existing `BasicBlock` represented by this | |
386 | // `BasicCoverageBlock`). | |
387 | if !self.counter_kind.as_ref().map_or(true, |c| c.is_expression()) { | |
388 | return Error::from_string(format!( | |
389 | "attempt to add an incoming edge counter from {:?} when the target BCB already \ | |
390 | has a `Counter`", | |
391 | from_bcb | |
392 | )); | |
393 | } | |
394 | } | |
395 | let operand = counter_kind.as_operand_id(); | |
6a06907d XL |
396 | if let Some(replaced) = |
397 | self.edge_from_bcbs.get_or_insert_default().insert(from_bcb, counter_kind) | |
29967ef6 XL |
398 | { |
399 | Error::from_string(format!( | |
400 | "attempt to set an edge counter more than once; from_bcb: \ | |
401 | {:?} already had counter {:?}", | |
402 | from_bcb, replaced, | |
403 | )) | |
404 | } else { | |
405 | Ok(operand) | |
406 | } | |
407 | } | |
408 | ||
409 | #[inline] | |
410 | pub fn edge_counter_from(&self, from_bcb: BasicCoverageBlock) -> Option<&CoverageKind> { | |
411 | if let Some(edge_from_bcbs) = &self.edge_from_bcbs { | |
412 | edge_from_bcbs.get(&from_bcb) | |
413 | } else { | |
414 | None | |
415 | } | |
416 | } | |
417 | ||
418 | #[inline] | |
419 | pub fn take_edge_counters( | |
420 | &mut self, | |
421 | ) -> Option<impl Iterator<Item = (BasicCoverageBlock, CoverageKind)>> { | |
5099ac24 | 422 | self.edge_from_bcbs.take().map(|m| m.into_iter()) |
29967ef6 XL |
423 | } |
424 | ||
425 | pub fn id(&self) -> String { | |
5099ac24 | 426 | format!("@{}", self.basic_blocks.iter().map(|bb| bb.index().to_string()).join(ID_SEPARATOR)) |
29967ef6 XL |
427 | } |
428 | } | |
429 | ||
/// Represents a successor from a branching BasicCoverageBlock (such as the arms of a `SwitchInt`)
/// as either the successor BCB itself, if it has only one incoming edge, or the successor _plus_
/// the specific branching BCB, representing the edge between the two. The latter case
/// distinguishes this incoming edge from other incoming edges to the same `target_bcb`.
#[derive(Clone, Copy, PartialEq, Eq)]
pub(super) struct BcbBranch {
    /// `Some(from)` only when `target_bcb` has multiple incoming edges, so this particular
    /// edge needs its own identity (and potentially its own edge counter).
    pub edge_from_bcb: Option<BasicCoverageBlock>,
    /// The successor BCB this branch leads to.
    pub target_bcb: BasicCoverageBlock,
}
439 | ||
440 | impl BcbBranch { | |
441 | pub fn from_to( | |
442 | from_bcb: BasicCoverageBlock, | |
443 | to_bcb: BasicCoverageBlock, | |
444 | basic_coverage_blocks: &CoverageGraph, | |
445 | ) -> Self { | |
446 | let edge_from_bcb = if basic_coverage_blocks.predecessors[to_bcb].len() > 1 { | |
447 | Some(from_bcb) | |
448 | } else { | |
449 | None | |
450 | }; | |
451 | Self { edge_from_bcb, target_bcb: to_bcb } | |
452 | } | |
453 | ||
454 | pub fn counter<'a>( | |
455 | &self, | |
456 | basic_coverage_blocks: &'a CoverageGraph, | |
457 | ) -> Option<&'a CoverageKind> { | |
458 | if let Some(from_bcb) = self.edge_from_bcb { | |
459 | basic_coverage_blocks[self.target_bcb].edge_counter_from(from_bcb) | |
460 | } else { | |
461 | basic_coverage_blocks[self.target_bcb].counter() | |
462 | } | |
463 | } | |
464 | ||
465 | pub fn is_only_path_to_target(&self) -> bool { | |
466 | self.edge_from_bcb.is_none() | |
467 | } | |
468 | } | |
469 | ||
470 | impl std::fmt::Debug for BcbBranch { | |
471 | fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { | |
472 | if let Some(from_bcb) = self.edge_from_bcb { | |
473 | write!(fmt, "{:?}->{:?}", from_bcb, self.target_bcb) | |
474 | } else { | |
475 | write!(fmt, "{:?}", self.target_bcb) | |
476 | } | |
477 | } | |
478 | } | |
479 | ||
// Returns the `Terminator`s non-unwind successors.
// FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
// `catch_unwind()` handlers.
//
// NOTE: both `match` arms below must evaluate to the *same concrete iterator type* so the
// `match` can be boxed as one expression; that is why the `SwitchInt` arm is prefixed with
// `None.into_iter()` and the fallback arm chains an empty slice.
fn bcb_filtered_successors<'a, 'tcx>(
    body: &'a mir::Body<'tcx>,
    term_kind: &'a TerminatorKind<'tcx>,
) -> Box<dyn Iterator<Item = BasicBlock> + 'a> {
    Box::new(
        match &term_kind {
            // SwitchInt successors are never unwind, and all of them should be traversed.
            TerminatorKind::SwitchInt { ref targets, .. } => {
                None.into_iter().chain(targets.all_targets().into_iter().copied())
            }
            // For all other kinds, return only the first successor, if any, and ignore unwinds.
            // NOTE: `chain(&[])` is required to coerce the `option::iter` (from
            // `next().into_iter()`) into the `mir::Successors` aliased type.
            _ => term_kind.successors().next().into_iter().chain((&[]).into_iter().copied()),
        }
        // Drop successors whose terminator is `Unreachable` — they can never execute.
        .filter(move |&successor| body[successor].terminator().kind != TerminatorKind::Unreachable),
    )
}
501 | ||
/// Maintains separate worklists for each loop in the BasicCoverageBlock CFG, plus one for the
/// CoverageGraph outside all loops. This supports traversing the BCB CFG in a way that
/// ensures a loop is completely traversed before processing Blocks after the end of the loop.
#[derive(Debug)]
pub(super) struct TraversalContext {
    /// From one or more backedges returning to a loop header.
    pub loop_backedges: Option<(Vec<BasicCoverageBlock>, BasicCoverageBlock)>,

    /// Worklist of BCBs still to be traversed within the loop with the given loop
    /// backedges, such that this loop is the innermost loop containing those BCBs.
    pub worklist: Vec<BasicCoverageBlock>,
}
515 | ||
/// A worklist-based traversal of a `CoverageGraph` that keeps one `TraversalContext` per
/// entered loop (see `TraversalContext`), so each loop body is exhausted before the blocks
/// following the loop are visited.
pub(super) struct TraverseCoverageGraphWithLoops {
    /// For each BCB, the BCBs with a backedge to it (non-empty => loop header); computed
    /// once by `find_loop_backedges`.
    pub backedges: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
    /// Stack of worklists: the bottom entry is the function-level context; one entry is
    /// pushed per loop entered and popped when its worklist is exhausted.
    pub context_stack: Vec<TraversalContext>,
    /// BCBs already yielded by `next()`.
    visited: BitSet<BasicCoverageBlock>,
}
521 | ||
522 | impl TraverseCoverageGraphWithLoops { | |
523 | pub fn new(basic_coverage_blocks: &CoverageGraph) -> Self { | |
524 | let start_bcb = basic_coverage_blocks.start_node(); | |
525 | let backedges = find_loop_backedges(basic_coverage_blocks); | |
94222f64 XL |
526 | let context_stack = |
527 | vec![TraversalContext { loop_backedges: None, worklist: vec![start_bcb] }]; | |
29967ef6 XL |
528 | // `context_stack` starts with a `TraversalContext` for the main function context (beginning |
529 | // with the `start` BasicCoverageBlock of the function). New worklists are pushed to the top | |
530 | // of the stack as loops are entered, and popped off of the stack when a loop's worklist is | |
531 | // exhausted. | |
532 | let visited = BitSet::new_empty(basic_coverage_blocks.num_nodes()); | |
533 | Self { backedges, context_stack, visited } | |
534 | } | |
535 | ||
536 | pub fn next(&mut self, basic_coverage_blocks: &CoverageGraph) -> Option<BasicCoverageBlock> { | |
537 | debug!( | |
538 | "TraverseCoverageGraphWithLoops::next - context_stack: {:?}", | |
539 | self.context_stack.iter().rev().collect::<Vec<_>>() | |
540 | ); | |
541 | while let Some(next_bcb) = { | |
542 | // Strip contexts with empty worklists from the top of the stack | |
543 | while self.context_stack.last().map_or(false, |context| context.worklist.is_empty()) { | |
544 | self.context_stack.pop(); | |
545 | } | |
546 | // Pop the next bcb off of the current context_stack. If none, all BCBs were visited. | |
547 | self.context_stack.last_mut().map_or(None, |context| context.worklist.pop()) | |
548 | } { | |
549 | if !self.visited.insert(next_bcb) { | |
550 | debug!("Already visited: {:?}", next_bcb); | |
551 | continue; | |
552 | } | |
553 | debug!("Visiting {:?}", next_bcb); | |
554 | if self.backedges[next_bcb].len() > 0 { | |
555 | debug!("{:?} is a loop header! Start a new TraversalContext...", next_bcb); | |
556 | self.context_stack.push(TraversalContext { | |
557 | loop_backedges: Some((self.backedges[next_bcb].clone(), next_bcb)), | |
558 | worklist: Vec::new(), | |
559 | }); | |
560 | } | |
561 | self.extend_worklist(basic_coverage_blocks, next_bcb); | |
562 | return Some(next_bcb); | |
563 | } | |
564 | None | |
565 | } | |
566 | ||
567 | pub fn extend_worklist( | |
568 | &mut self, | |
569 | basic_coverage_blocks: &CoverageGraph, | |
570 | bcb: BasicCoverageBlock, | |
571 | ) { | |
572 | let successors = &basic_coverage_blocks.successors[bcb]; | |
573 | debug!("{:?} has {} successors:", bcb, successors.len()); | |
574 | for &successor in successors { | |
575 | if successor == bcb { | |
576 | debug!( | |
577 | "{:?} has itself as its own successor. (Note, the compiled code will \ | |
578 | generate an infinite loop.)", | |
579 | bcb | |
580 | ); | |
581 | // Don't re-add this successor to the worklist. We are already processing it. | |
582 | break; | |
583 | } | |
584 | for context in self.context_stack.iter_mut().rev() { | |
585 | // Add successors of the current BCB to the appropriate context. Successors that | |
586 | // stay within a loop are added to the BCBs context worklist. Successors that | |
587 | // exit the loop (they are not dominated by the loop header) must be reachable | |
588 | // from other BCBs outside the loop, and they will be added to a different | |
589 | // worklist. | |
590 | // | |
591 | // Branching blocks (with more than one successor) must be processed before | |
592 | // blocks with only one successor, to prevent unnecessarily complicating | |
593 | // `Expression`s by creating a Counter in a `BasicCoverageBlock` that the | |
594 | // branching block would have given an `Expression` (or vice versa). | |
595 | let (some_successor_to_add, some_loop_header) = | |
596 | if let Some((_, loop_header)) = context.loop_backedges { | |
597 | if basic_coverage_blocks.is_dominated_by(successor, loop_header) { | |
598 | (Some(successor), Some(loop_header)) | |
599 | } else { | |
600 | (None, None) | |
601 | } | |
602 | } else { | |
603 | (Some(successor), None) | |
604 | }; | |
605 | if let Some(successor_to_add) = some_successor_to_add { | |
606 | if basic_coverage_blocks.successors[successor_to_add].len() > 1 { | |
607 | debug!( | |
608 | "{:?} successor is branching. Prioritize it at the beginning of \ | |
609 | the {}", | |
610 | successor_to_add, | |
611 | if let Some(loop_header) = some_loop_header { | |
612 | format!("worklist for the loop headed by {:?}", loop_header) | |
613 | } else { | |
614 | String::from("non-loop worklist") | |
615 | }, | |
616 | ); | |
617 | context.worklist.insert(0, successor_to_add); | |
618 | } else { | |
619 | debug!( | |
620 | "{:?} successor is non-branching. Defer it to the end of the {}", | |
621 | successor_to_add, | |
622 | if let Some(loop_header) = some_loop_header { | |
623 | format!("worklist for the loop headed by {:?}", loop_header) | |
624 | } else { | |
625 | String::from("non-loop worklist") | |
626 | }, | |
627 | ); | |
628 | context.worklist.push(successor_to_add); | |
629 | } | |
630 | break; | |
631 | } | |
632 | } | |
633 | } | |
634 | } | |
635 | ||
636 | pub fn is_complete(&self) -> bool { | |
637 | self.visited.count() == self.visited.domain_size() | |
638 | } | |
639 | ||
640 | pub fn unvisited(&self) -> Vec<BasicCoverageBlock> { | |
641 | let mut unvisited_set: BitSet<BasicCoverageBlock> = | |
642 | BitSet::new_filled(self.visited.domain_size()); | |
643 | unvisited_set.subtract(&self.visited); | |
644 | unvisited_set.iter().collect::<Vec<_>>() | |
645 | } | |
646 | } | |
647 | ||
/// Computes, for each BCB, the list of BCBs that have a backedge to it; a non-empty list
/// marks the BCB as a loop header. An edge `bcb -> successor` is a backedge when
/// `successor` dominates `bcb`.
pub(super) fn find_loop_backedges(
    basic_coverage_blocks: &CoverageGraph,
) -> IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>> {
    let num_bcbs = basic_coverage_blocks.num_nodes();
    let mut backedges = IndexVec::from_elem_n(Vec::<BasicCoverageBlock>::new(), num_bcbs);

    // Identify loops by their backedges.
    //
    // The computational complexity is bounded by: n(s) x d where `n` is the number of
    // `BasicCoverageBlock` nodes (the simplified/reduced representation of the CFG derived from the
    // MIR); `s` is the average number of successors per node (which is most likely less than 2, and
    // independent of the size of the function, so it can be treated as a constant);
    // and `d` is the average number of dominators per node.
    //
    // The average number of dominators depends on the size and complexity of the function, and
    // nodes near the start of the function's control flow graph typically have fewer dominators
    // than nodes near the end of the CFG. Without doing a detailed mathematical analysis, I
    // think the resulting complexity has the characteristics of O(n log n).
    //
    // The overall complexity appears to be comparable to many other MIR transform algorithms, and I
    // don't expect that this function is creating a performance hot spot, but if this becomes an
    // issue, there may be ways to optimize the `is_dominated_by` algorithm (as indicated by an
    // existing `FIXME` comment in that code), or possibly ways to optimize its usage here, perhaps
    // by keeping track of results for visited `BasicCoverageBlock`s if they can be used to short
    // circuit downstream `is_dominated_by` checks.
    //
    // For now, that kind of optimization seems unnecessarily complicated.
    for (bcb, _) in basic_coverage_blocks.iter_enumerated() {
        for &successor in &basic_coverage_blocks.successors[bcb] {
            if basic_coverage_blocks.is_dominated_by(bcb, successor) {
                let loop_header = successor;
                let backedge_from_bcb = bcb;
                debug!(
                    "Found BCB backedge: {:?} -> loop_header: {:?}",
                    backedge_from_bcb, loop_header
                );
                backedges[loop_header].push(backedge_from_bcb);
            }
        }
    }
    backedges
}
690 | ||
/// A preorder traversal of a MIR body that delegates successor selection to a caller-provided
/// filter (here, `bcb_filtered_successors`), allowing unwind and other coverage-irrelevant
/// edges to be skipped ("short-circuited") during the walk.
pub struct ShortCircuitPreorder<
    'a,
    'tcx,
    F: Fn(&'a mir::Body<'tcx>, &'a TerminatorKind<'tcx>) -> Box<dyn Iterator<Item = BasicBlock> + 'a>,
> {
    body: &'a mir::Body<'tcx>,
    // Blocks already yielded; prevents revisiting in cyclic CFGs.
    visited: BitSet<BasicBlock>,
    // Blocks discovered but not yet yielded; popped LIFO (depth-first preorder).
    worklist: Vec<BasicBlock>,
    // Chooses which of a terminator's successors the traversal should follow.
    filtered_successors: F,
}
701 | ||
702 | impl< | |
703 | 'a, | |
704 | 'tcx, | |
923072b8 | 705 | F: Fn(&'a mir::Body<'tcx>, &'a TerminatorKind<'tcx>) -> Box<dyn Iterator<Item = BasicBlock> + 'a>, |
29967ef6 XL |
706 | > ShortCircuitPreorder<'a, 'tcx, F> |
707 | { | |
708 | pub fn new( | |
923072b8 | 709 | body: &'a mir::Body<'tcx>, |
29967ef6 XL |
710 | filtered_successors: F, |
711 | ) -> ShortCircuitPreorder<'a, 'tcx, F> { | |
712 | let worklist = vec![mir::START_BLOCK]; | |
713 | ||
714 | ShortCircuitPreorder { | |
715 | body, | |
716 | visited: BitSet::new_empty(body.basic_blocks().len()), | |
717 | worklist, | |
718 | filtered_successors, | |
719 | } | |
720 | } | |
721 | } | |
722 | ||
723 | impl< | |
923072b8 | 724 | 'a, |
29967ef6 | 725 | 'tcx, |
923072b8 | 726 | F: Fn(&'a mir::Body<'tcx>, &'a TerminatorKind<'tcx>) -> Box<dyn Iterator<Item = BasicBlock> + 'a>, |
29967ef6 XL |
727 | > Iterator for ShortCircuitPreorder<'a, 'tcx, F> |
728 | { | |
729 | type Item = (BasicBlock, &'a BasicBlockData<'tcx>); | |
730 | ||
731 | fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> { | |
732 | while let Some(idx) = self.worklist.pop() { | |
733 | if !self.visited.insert(idx) { | |
734 | continue; | |
735 | } | |
736 | ||
737 | let data = &self.body[idx]; | |
738 | ||
739 | if let Some(ref term) = data.terminator { | |
740 | self.worklist.extend((self.filtered_successors)(&self.body, &term.kind)); | |
741 | } | |
742 | ||
743 | return Some((idx, data)); | |
744 | } | |
745 | ||
746 | None | |
747 | } | |
748 | ||
749 | fn size_hint(&self) -> (usize, Option<usize>) { | |
750 | let size = self.body.basic_blocks().len() - self.visited.count(); | |
751 | (size, Some(size)) | |
752 | } | |
753 | } |