3 use itertools
::Itertools
;
4 use rustc_data_structures
::fx
::FxHashMap
;
5 use rustc_data_structures
::graph
::dominators
::{self, Dominators}
;
6 use rustc_data_structures
::graph
::{self, GraphSuccessors, WithNumNodes, WithStartNode}
;
7 use rustc_index
::bit_set
::BitSet
;
8 use rustc_index
::vec
::IndexVec
;
9 use rustc_middle
::mir
::coverage
::*;
10 use rustc_middle
::mir
::{self, BasicBlock, BasicBlockData, Terminator, TerminatorKind}
;
12 use std
::ops
::{Index, IndexMut}
;
/// Separator used by [`BasicCoverageBlockData::id`] when joining the indices of
/// the member MIR `BasicBlock`s into a single display string (e.g. `@1,2,3`).
const ID_SEPARATOR: &str = ",";
16 /// A coverage-specific simplification of the MIR control flow graph (CFG). The `CoverageGraph`s
17 /// nodes are `BasicCoverageBlock`s, which encompass one or more MIR `BasicBlock`s, plus a
18 /// `CoverageKind` counter (to be added by `CoverageCounters::make_bcb_counters`), and an optional
/// set of additional counters--if needed--to count incoming edges, if there is more than one.
20 /// (These "edge counters" are eventually converted into new MIR `BasicBlock`s.)
pub(super) struct CoverageGraph {
    // Per-node data for each BCB, indexed by `BasicCoverageBlock`.
    bcbs: IndexVec<BasicCoverageBlock, BasicCoverageBlockData>,
    // Reverse lookup from a MIR `BasicBlock` to the BCB containing it.
    // `None` for MIR blocks that were not included in the coverage graph.
    bb_to_bcb: IndexVec<BasicBlock, Option<BasicCoverageBlock>>,
    // Forward adjacency lists of the simplified BCB graph (deduplicated,
    // order-preserving; see `from_mir`).
    pub successors: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
    // Reverse adjacency lists of the simplified BCB graph.
    pub predecessors: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
    // Dominator tree over the BCB graph; `None` only during construction
    // (`from_mir` computes and stores it before returning the graph).
    dominators: Option<Dominators<BasicCoverageBlock>>,
}
31 pub fn from_mir(mir_body
: &mir
::Body
<'_
>) -> Self {
32 let (bcbs
, bb_to_bcb
) = Self::compute_basic_coverage_blocks(mir_body
);
34 // Pre-transform MIR `BasicBlock` successors and predecessors into the BasicCoverageBlock
35 // equivalents. Note that since the BasicCoverageBlock graph has been fully simplified, the
36 // each predecessor of a BCB leader_bb should be in a unique BCB. It is possible for a
37 // `SwitchInt` to have multiple targets to the same destination `BasicBlock`, so
38 // de-duplication is required. This is done without reordering the successors.
40 let bcbs_len
= bcbs
.len();
41 let mut seen
= IndexVec
::from_elem_n(false, bcbs_len
);
42 let successors
= IndexVec
::from_fn_n(
44 for b
in seen
.iter_mut() {
47 let bcb_data
= &bcbs
[bcb
];
48 let mut bcb_successors
= Vec
::new();
50 bcb_filtered_successors(&mir_body
, &bcb_data
.terminator(mir_body
).kind
)
51 .filter_map(|successor_bb
| bb_to_bcb
[successor_bb
])
54 seen
[successor
] = true;
55 bcb_successors
.push(successor
);
63 let mut predecessors
= IndexVec
::from_elem_n(Vec
::new(), bcbs
.len());
64 for (bcb
, bcb_successors
) in successors
.iter_enumerated() {
65 for &successor
in bcb_successors
{
66 predecessors
[successor
].push(bcb
);
70 let mut basic_coverage_blocks
=
71 Self { bcbs, bb_to_bcb, successors, predecessors, dominators: None }
;
72 let dominators
= dominators
::dominators(&basic_coverage_blocks
);
73 basic_coverage_blocks
.dominators
= Some(dominators
);
77 fn compute_basic_coverage_blocks(
78 mir_body
: &mir
::Body
<'_
>,
80 IndexVec
<BasicCoverageBlock
, BasicCoverageBlockData
>,
81 IndexVec
<BasicBlock
, Option
<BasicCoverageBlock
>>,
83 let num_basic_blocks
= mir_body
.num_nodes();
84 let mut bcbs
= IndexVec
::with_capacity(num_basic_blocks
);
85 let mut bb_to_bcb
= IndexVec
::from_elem_n(None
, num_basic_blocks
);
87 // Walk the MIR CFG using a Preorder traversal, which starts from `START_BLOCK` and follows
88 // each block terminator's `successors()`. Coverage spans must map to actual source code,
89 // so compiler generated blocks and paths can be ignored. To that end, the CFG traversal
90 // intentionally omits unwind paths.
91 // FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
92 // `catch_unwind()` handlers.
93 let mir_cfg_without_unwind
= ShortCircuitPreorder
::new(&mir_body
, bcb_filtered_successors
);
95 let mut basic_blocks
= Vec
::new();
96 for (bb
, data
) in mir_cfg_without_unwind
{
97 if let Some(last
) = basic_blocks
.last() {
98 let predecessors
= &mir_body
.predecessors()[bb
];
99 if predecessors
.len() > 1 || !predecessors
.contains(last
) {
100 // The `bb` has more than one _incoming_ edge, and should start its own
101 // `BasicCoverageBlockData`. (Note, the `basic_blocks` vector does not yet
102 // include `bb`; it contains a sequence of one or more sequential basic_blocks
103 // with no intermediate branches in or out. Save these as a new
104 // `BasicCoverageBlockData` before starting the new one.)
105 Self::add_basic_coverage_block(
108 basic_blocks
.split_off(0),
112 if predecessors
.len() > 1 {
113 "predecessors.len() > 1".to_owned()
115 format
!("bb {} is not in precessors: {:?}", bb
.index(), predecessors
)
120 basic_blocks
.push(bb
);
122 let term
= data
.terminator();
125 TerminatorKind
::Return { .. }
126 | TerminatorKind
::Abort
127 | TerminatorKind
::Yield { .. }
128 | TerminatorKind
::SwitchInt { .. }
=> {
129 // The `bb` has more than one _outgoing_ edge, or exits the function. Save the
130 // current sequence of `basic_blocks` gathered to this point, as a new
131 // `BasicCoverageBlockData`.
132 Self::add_basic_coverage_block(
135 basic_blocks
.split_off(0),
137 debug
!(" because term.kind = {:?}", term
.kind
);
138 // Note that this condition is based on `TerminatorKind`, even though it
139 // theoretically boils down to `successors().len() != 1`; that is, either zero
140 // (e.g., `Return`, `Abort`) or multiple successors (e.g., `SwitchInt`), but
141 // since the BCB CFG ignores things like unwind branches (which exist in the
142 // `Terminator`s `successors()` list) checking the number of successors won't
146 // The following `TerminatorKind`s are either not expected outside an unwind branch,
147 // or they should not (under normal circumstances) branch. Coverage graphs are
148 // simplified by assuring coverage results are accurate for program executions that
151 // Programs that panic and unwind may record slightly inaccurate coverage results
152 // for a coverage region containing the `Terminator` that began the panic. This
153 // is as intended. (See Issue #78544 for a possible future option to support
154 // coverage in test programs that panic.)
155 TerminatorKind
::Goto { .. }
156 | TerminatorKind
::Resume
157 | TerminatorKind
::Unreachable
158 | TerminatorKind
::Drop { .. }
159 | TerminatorKind
::DropAndReplace { .. }
160 | TerminatorKind
::Call { .. }
161 | TerminatorKind
::GeneratorDrop
162 | TerminatorKind
::Assert { .. }
163 | TerminatorKind
::FalseEdge { .. }
164 | TerminatorKind
::FalseUnwind { .. }
165 | TerminatorKind
::InlineAsm { .. }
=> {}
169 if !basic_blocks
.is_empty() {
170 // process any remaining basic_blocks into a final `BasicCoverageBlockData`
171 Self::add_basic_coverage_block(&mut bcbs
, &mut bb_to_bcb
, basic_blocks
.split_off(0));
172 debug
!(" because the end of the MIR CFG was reached while traversing");
    /// Registers `basic_blocks` as one new BCB: assigns the next BCB index,
    /// records the reverse `bb -> bcb` mapping for each member MIR block, and
    /// appends the new `BasicCoverageBlockData` to `bcbs`.
    fn add_basic_coverage_block(
        bcbs: &mut IndexVec<BasicCoverageBlock, BasicCoverageBlockData>,
        bb_to_bcb: &mut IndexVec<BasicBlock, Option<BasicCoverageBlock>>,
        basic_blocks: Vec<BasicBlock>,
    ) {
        // The new BCB's index is the current length, i.e. the slot the final
        // `push` below will fill.
        let bcb = BasicCoverageBlock::from_usize(bcbs.len());
        for &bb in basic_blocks.iter() {
            bb_to_bcb[bb] = Some(bcb);
        }
        let bcb_data = BasicCoverageBlockData::from(basic_blocks);
        debug!("adding bcb{}: {:?}", bcb.index(), bcb_data);
        bcbs.push(bcb_data);
    }
    /// Iterates over all BCBs paired with their `BasicCoverageBlock` index, in index order.
    pub fn iter_enumerated(
        &self,
    ) -> impl Iterator<Item = (BasicCoverageBlock, &BasicCoverageBlockData)> {
        self.bcbs.iter_enumerated()
    }
    /// Mutable variant of `iter_enumerated`: yields each BCB index with a
    /// mutable reference to its data.
    pub fn iter_enumerated_mut(
        &mut self,
    ) -> impl Iterator<Item = (BasicCoverageBlock, &mut BasicCoverageBlockData)> {
        self.bcbs.iter_enumerated_mut()
    }
207 pub fn bcb_from_bb(&self, bb
: BasicBlock
) -> Option
<BasicCoverageBlock
> {
208 if bb
.index() < self.bb_to_bcb
.len() { self.bb_to_bcb[bb] }
else { None }
    /// Returns `true` if `node` is dominated by `dom` in the BCB graph.
    ///
    /// Panics if the dominators have not been computed yet; `from_mir` stores
    /// them before the graph is handed out, so callers should never see this.
    pub fn is_dominated_by(&self, node: BasicCoverageBlock, dom: BasicCoverageBlock) -> bool {
        self.dominators.as_ref().unwrap().is_dominated_by(node, dom)
    }
    /// Returns the dominator tree for the BCB graph.
    ///
    /// Panics if called before `from_mir` has computed and stored the dominators.
    pub fn dominators(&self) -> &Dominators<BasicCoverageBlock> {
        self.dominators.as_ref().unwrap()
    }
// Allows `graph[bcb]` as shorthand for looking up a BCB's data.
impl Index<BasicCoverageBlock> for CoverageGraph {
    type Output = BasicCoverageBlockData;

    fn index(&self, index: BasicCoverageBlock) -> &BasicCoverageBlockData {
        &self.bcbs[index]
    }
}
// Mutable counterpart of the `Index` impl: `&mut graph[bcb]`.
impl IndexMut<BasicCoverageBlock> for CoverageGraph {
    fn index_mut(&mut self, index: BasicCoverageBlock) -> &mut BasicCoverageBlockData {
        &mut self.bcbs[index]
    }
}
// Identifies the node type so the generic algorithms in
// `rustc_data_structures::graph` (e.g. dominators) can run on this graph.
impl graph::DirectedGraph for CoverageGraph {
    type Node = BasicCoverageBlock;
}
impl graph::WithNumNodes for CoverageGraph {
    // One graph node per `BasicCoverageBlockData`.
    fn num_nodes(&self) -> usize {
        self.bcbs.len()
    }
}
impl graph::WithStartNode for CoverageGraph {
    fn start_node(&self) -> Self::Node {
        // The BCB graph's entry is whichever BCB contains the MIR entry block.
        self.bcb_from_bb(mir::START_BLOCK)
            .expect("mir::START_BLOCK should be in a BasicCoverageBlock")
    }
}
/// Borrowing iterator over a BCB's successor list; used by the
/// `GraphSuccessors` impl for `CoverageGraph`.
type BcbSuccessors<'graph> = std::slice::Iter<'graph, BasicCoverageBlock>;
impl<'graph> graph::GraphSuccessors<'graph> for CoverageGraph {
    type Item = BasicCoverageBlock;
    // Items are cloned out of the slice; `BasicCoverageBlock` is a cheap index type.
    type Iter = std::iter::Cloned<BcbSuccessors<'graph>>;
}
impl graph::WithSuccessors for CoverageGraph {
    // Successors come straight from the precomputed adjacency lists.
    fn successors(&self, node: Self::Node) -> <Self as GraphSuccessors<'_>>::Iter {
        self.successors[node].iter().cloned()
    }
}
impl<'graph> graph::GraphPredecessors<'graph> for CoverageGraph {
    type Item = BasicCoverageBlock;
    type Iter = std::iter::Copied<std::slice::Iter<'graph, BasicCoverageBlock>>;
}
impl graph::WithPredecessors for CoverageGraph {
    // Predecessors come straight from the precomputed reverse adjacency lists.
    fn predecessors(&self, node: Self::Node) -> <Self as graph::GraphPredecessors<'_>>::Iter {
        self.predecessors[node].iter().copied()
    }
}
rustc_index::newtype_index! {
    /// A node in the control-flow graph of CoverageGraph.
    pub(super) struct BasicCoverageBlock {
        DEBUG_FORMAT = "bcb{}",
    }
}
291 /// `BasicCoverageBlockData` holds the data indexed by a `BasicCoverageBlock`.
293 /// A `BasicCoverageBlock` (BCB) represents the maximal-length sequence of MIR `BasicBlock`s without
294 /// conditional branches, and form a new, simplified, coverage-specific Control Flow Graph, without
295 /// altering the original MIR CFG.
297 /// Note that running the MIR `SimplifyCfg` transform is not sufficient (and therefore not
298 /// necessary). The BCB-based CFG is a more aggressive simplification. For example:
300 /// * The BCB CFG ignores (trims) branches not relevant to coverage, such as unwind-related code,
301 /// that is injected by the Rust compiler but has no physical source code to count. This also
302 /// means a BasicBlock with a `Call` terminator can be merged into its primary successor target
303 /// block, in the same BCB. (But, note: Issue #78544: "MIR InstrumentCoverage: Improve coverage
304 /// of `#[should_panic]` tests and `catch_unwind()` handlers")
305 /// * Some BasicBlock terminators support Rust-specific concerns--like borrow-checking--that are
306 /// not relevant to coverage analysis. `FalseUnwind`, for example, can be treated the same as
307 /// a `Goto`, and merged with its successor into the same BCB.
309 /// Each BCB with at least one computed `CoverageSpan` will have no more than one `Counter`.
310 /// In some cases, a BCB's execution count can be computed by `Expression`. Additional
311 /// disjoint `CoverageSpan`s in a BCB can also be counted by `Expression` (by adding `ZERO`
312 /// to the BCB's primary counter or expression).
314 /// The BCB CFG is critical to simplifying the coverage analysis by ensuring graph path-based
315 /// queries (`is_dominated_by()`, `predecessors`, `successors`, etc.) have branch (control flow)
#[derive(Debug, Clone)]
pub(super) struct BasicCoverageBlockData {
    // The one or more MIR `BasicBlock`s grouped into this BCB.
    pub basic_blocks: Vec<BasicBlock>,
    // The counter or expression measuring executions of this BCB, once assigned.
    pub counter_kind: Option<CoverageKind>,
    // Optional per-incoming-edge counters, keyed by the predecessor BCB.
    // Lazily allocated; `None` when no edge counters have been set.
    edge_from_bcbs: Option<FxHashMap<BasicCoverageBlock, CoverageKind>>,
}
324 impl BasicCoverageBlockData
{
325 pub fn from(basic_blocks
: Vec
<BasicBlock
>) -> Self {
326 assert
!(basic_blocks
.len() > 0);
327 Self { basic_blocks, counter_kind: None, edge_from_bcbs: None }
331 pub fn leader_bb(&self) -> BasicBlock
{
    /// Returns the final MIR `BasicBlock` in this BCB (the block whose
    /// terminator is consulted via `terminator()`).
    ///
    /// The `unwrap` cannot fail: `from` asserts `basic_blocks` is non-empty.
    pub fn last_bb(&self) -> BasicBlock {
        *self.basic_blocks.last().unwrap()
    }
    /// Returns the `Terminator` of this BCB's last MIR block, borrowed from `mir_body`.
    pub fn terminator<'a, 'tcx>(&self, mir_body: &'a mir::Body<'tcx>) -> &'a Terminator<'tcx> {
        &mir_body[self.last_bb()].terminator()
    }
347 counter_kind
: CoverageKind
,
348 ) -> Result
<ExpressionOperandId
, Error
> {
350 // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
351 // have an expression (to be injected into an existing `BasicBlock` represented by this
352 // `BasicCoverageBlock`).
353 self.edge_from_bcbs
.is_none() || counter_kind
.is_expression(),
354 "attempt to add a `Counter` to a BCB target with existing incoming edge counters"
356 let operand
= counter_kind
.as_operand_id();
357 if let Some(replaced
) = self.counter_kind
.replace(counter_kind
) {
358 Error
::from_string(format
!(
359 "attempt to set a BasicCoverageBlock coverage counter more than once; \
360 {:?} already had counter {:?}",
    /// Returns a reference to this BCB's counter or expression, if one has been set.
    pub fn counter(&self) -> Option<&CoverageKind> {
        self.counter_kind.as_ref()
    }
    /// Moves this BCB's counter/expression out, leaving `None` in its place.
    pub fn take_counter(&mut self) -> Option<CoverageKind> {
        self.counter_kind.take()
    }
378 pub fn set_edge_counter_from(
380 from_bcb
: BasicCoverageBlock
,
381 counter_kind
: CoverageKind
,
382 ) -> Result
<ExpressionOperandId
, Error
> {
383 if level_enabled
!(tracing
::Level
::DEBUG
) {
384 // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
385 // have an expression (to be injected into an existing `BasicBlock` represented by this
386 // `BasicCoverageBlock`).
387 if !self.counter_kind
.as_ref().map_or(true, |c
| c
.is_expression()) {
388 return Error
::from_string(format
!(
389 "attempt to add an incoming edge counter from {:?} when the target BCB already \
395 let operand
= counter_kind
.as_operand_id();
396 if let Some(replaced
) =
397 self.edge_from_bcbs
.get_or_insert_default().insert(from_bcb
, counter_kind
)
399 Error
::from_string(format
!(
400 "attempt to set an edge counter more than once; from_bcb: \
401 {:?} already had counter {:?}",
410 pub fn edge_counter_from(&self, from_bcb
: BasicCoverageBlock
) -> Option
<&CoverageKind
> {
411 if let Some(edge_from_bcbs
) = &self.edge_from_bcbs
{
412 edge_from_bcbs
.get(&from_bcb
)
    /// Moves out all incoming-edge counters (if any), leaving `None` behind,
    /// yielding them as `(from_bcb, counter)` pairs. Iteration order follows
    /// the hash map and is therefore unspecified.
    pub fn take_edge_counters(
        &mut self,
    ) -> Option<impl Iterator<Item = (BasicCoverageBlock, CoverageKind)>> {
        self.edge_from_bcbs.take().map(|m| m.into_iter())
    }
425 pub fn id(&self) -> String
{
426 format
!("@{}", self.basic_blocks
.iter().map(|bb
| bb
.index().to_string()).join(ID_SEPARATOR
))
/// Represents a successor from a branching BasicCoverageBlock (such as the arms of a `SwitchInt`)
/// as either the successor BCB itself, if it has only one incoming edge, or the successor _plus_
/// the specific branching BCB, representing the edge between the two. The latter case
/// distinguishes this incoming edge from other incoming edges to the same `target_bcb`.
#[derive(Clone, Copy, PartialEq, Eq)]
pub(super) struct BcbBranch {
    // `Some(branching BCB)` when the edge itself must be distinguished
    // (the target has multiple incoming edges); `None` otherwise.
    pub edge_from_bcb: Option<BasicCoverageBlock>,
    // The successor BCB this branch leads to.
    pub target_bcb: BasicCoverageBlock,
}
442 from_bcb
: BasicCoverageBlock
,
443 to_bcb
: BasicCoverageBlock
,
444 basic_coverage_blocks
: &CoverageGraph
,
446 let edge_from_bcb
= if basic_coverage_blocks
.predecessors
[to_bcb
].len() > 1 {
451 Self { edge_from_bcb, target_bcb: to_bcb }
456 basic_coverage_blocks
: &'a CoverageGraph
,
457 ) -> Option
<&'a CoverageKind
> {
458 if let Some(from_bcb
) = self.edge_from_bcb
{
459 basic_coverage_blocks
[self.target_bcb
].edge_counter_from(from_bcb
)
461 basic_coverage_blocks
[self.target_bcb
].counter()
    /// `true` when this branch is represented by the target BCB alone (no
    /// distinguished edge), i.e. the target has only this one incoming edge.
    pub fn is_only_path_to_target(&self) -> bool {
        self.edge_from_bcb.is_none()
    }
impl std::fmt::Debug for BcbBranch {
    // Formats as `from->target` when the edge is distinguished, otherwise just `target`.
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        if let Some(from_bcb) = self.edge_from_bcb {
            write!(fmt, "{:?}->{:?}", from_bcb, self.target_bcb)
        } else {
            write!(fmt, "{:?}", self.target_bcb)
        }
    }
}
480 // Returns the `Terminator`s non-unwind successors.
481 // FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
482 // `catch_unwind()` handlers.
483 fn bcb_filtered_successors
<'a
, 'tcx
>(
484 body
: &'tcx
&'a mir
::Body
<'tcx
>,
485 term_kind
: &'tcx TerminatorKind
<'tcx
>,
486 ) -> Box
<dyn Iterator
<Item
= BasicBlock
> + 'a
> {
487 let mut successors
= term_kind
.successors();
490 // SwitchInt successors are never unwind, and all of them should be traversed.
491 TerminatorKind
::SwitchInt { .. }
=> successors
,
492 // For all other kinds, return only the first successor, if any, and ignore unwinds.
493 // NOTE: `chain(&[])` is required to coerce the `option::iter` (from
494 // `next().into_iter()`) into the `mir::Successors` aliased type.
495 _
=> successors
.next().into_iter().chain(&[]),
498 .filter(move |&successor
| body
[successor
].terminator().kind
!= TerminatorKind
::Unreachable
),
/// Maintains separate worklists for each loop in the BasicCoverageBlock CFG, plus one for the
/// CoverageGraph outside all loops. This supports traversing the BCB CFG in a way that
/// ensures a loop is completely traversed before processing Blocks after the end of the loop.
pub(super) struct TraversalContext {
    /// From one or more backedges returning to a loop header.
    pub loop_backedges: Option<(Vec<BasicCoverageBlock>, BasicCoverageBlock)>,

    /// Worklist of BCBs still to be traversed within the innermost loop with
    /// the given backedges, or outside all loops when `loop_backedges` is `None`.
    pub worklist: Vec<BasicCoverageBlock>,
}
pub(super) struct TraverseCoverageGraphWithLoops {
    // For each BCB, the BCBs whose edges jump back to it; non-empty only for
    // loop headers (computed by `find_loop_backedges`).
    pub backedges: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
    // Stack of per-loop traversal contexts; the innermost in-progress loop is on top.
    pub context_stack: Vec<TraversalContext>,
    // BCBs already yielded by the traversal.
    visited: BitSet<BasicCoverageBlock>,
}
522 impl TraverseCoverageGraphWithLoops
{
    /// Initializes a loop-aware traversal of `basic_coverage_blocks`, starting
    /// at the graph's start node.
    pub fn new(basic_coverage_blocks: &CoverageGraph) -> Self {
        let start_bcb = basic_coverage_blocks.start_node();
        let backedges = find_loop_backedges(basic_coverage_blocks);
        // `context_stack` starts with a `TraversalContext` for the main function context (beginning
        // with the `start` BasicCoverageBlock of the function). New worklists are pushed to the top
        // of the stack as loops are entered, and popped off of the stack when a loop's worklist is
        // exhausted.
        let context_stack =
            vec![TraversalContext { loop_backedges: None, worklist: vec![start_bcb] }];
        let visited = BitSet::new_empty(basic_coverage_blocks.num_nodes());
        Self { backedges, context_stack, visited }
    }
536 pub fn next(&mut self, basic_coverage_blocks
: &CoverageGraph
) -> Option
<BasicCoverageBlock
> {
538 "TraverseCoverageGraphWithLoops::next - context_stack: {:?}",
539 self.context_stack
.iter().rev().collect
::<Vec
<_
>>()
541 while let Some(next_bcb
) = {
542 // Strip contexts with empty worklists from the top of the stack
543 while self.context_stack
.last().map_or(false, |context
| context
.worklist
.is_empty()) {
544 self.context_stack
.pop();
546 // Pop the next bcb off of the current context_stack. If none, all BCBs were visited.
547 self.context_stack
.last_mut().map_or(None
, |context
| context
.worklist
.pop())
549 if !self.visited
.insert(next_bcb
) {
550 debug
!("Already visited: {:?}", next_bcb
);
553 debug
!("Visiting {:?}", next_bcb
);
554 if self.backedges
[next_bcb
].len() > 0 {
555 debug
!("{:?} is a loop header! Start a new TraversalContext...", next_bcb
);
556 self.context_stack
.push(TraversalContext
{
557 loop_backedges
: Some((self.backedges
[next_bcb
].clone(), next_bcb
)),
558 worklist
: Vec
::new(),
561 self.extend_worklist(basic_coverage_blocks
, next_bcb
);
562 return Some(next_bcb
);
567 pub fn extend_worklist(
569 basic_coverage_blocks
: &CoverageGraph
,
570 bcb
: BasicCoverageBlock
,
572 let successors
= &basic_coverage_blocks
.successors
[bcb
];
573 debug
!("{:?} has {} successors:", bcb
, successors
.len());
574 for &successor
in successors
{
575 if successor
== bcb
{
577 "{:?} has itself as its own successor. (Note, the compiled code will \
578 generate an infinite loop.)",
581 // Don't re-add this successor to the worklist. We are already processing it.
584 for context
in self.context_stack
.iter_mut().rev() {
585 // Add successors of the current BCB to the appropriate context. Successors that
586 // stay within a loop are added to the BCBs context worklist. Successors that
587 // exit the loop (they are not dominated by the loop header) must be reachable
588 // from other BCBs outside the loop, and they will be added to a different
591 // Branching blocks (with more than one successor) must be processed before
592 // blocks with only one successor, to prevent unnecessarily complicating
593 // `Expression`s by creating a Counter in a `BasicCoverageBlock` that the
594 // branching block would have given an `Expression` (or vice versa).
595 let (some_successor_to_add
, some_loop_header
) =
596 if let Some((_
, loop_header
)) = context
.loop_backedges
{
597 if basic_coverage_blocks
.is_dominated_by(successor
, loop_header
) {
598 (Some(successor
), Some(loop_header
))
603 (Some(successor
), None
)
605 if let Some(successor_to_add
) = some_successor_to_add
{
606 if basic_coverage_blocks
.successors
[successor_to_add
].len() > 1 {
608 "{:?} successor is branching. Prioritize it at the beginning of \
611 if let Some(loop_header
) = some_loop_header
{
612 format
!("worklist for the loop headed by {:?}", loop_header
)
614 String
::from("non-loop worklist")
617 context
.worklist
.insert(0, successor_to_add
);
620 "{:?} successor is non-branching. Defer it to the end of the {}",
622 if let Some(loop_header
) = some_loop_header
{
623 format
!("worklist for the loop headed by {:?}", loop_header
)
625 String
::from("non-loop worklist")
628 context
.worklist
.push(successor_to_add
);
    /// Returns `true` once every BCB in the graph's domain has been visited.
    pub fn is_complete(&self) -> bool {
        self.visited.count() == self.visited.domain_size()
    }
640 pub fn unvisited(&self) -> Vec
<BasicCoverageBlock
> {
641 let mut unvisited_set
: BitSet
<BasicCoverageBlock
> =
642 BitSet
::new_filled(self.visited
.domain_size());
643 unvisited_set
.subtract(&self.visited
);
644 unvisited_set
.iter().collect
::<Vec
<_
>>()
/// Computes, for each BCB, the list of its backedge sources: an edge
/// `bcb -> successor` is a backedge when `successor` dominates `bcb`, making
/// `successor` a loop header. The result is indexed by the loop-header BCB,
/// and is empty for non-headers.
pub(super) fn find_loop_backedges(
    basic_coverage_blocks: &CoverageGraph,
) -> IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>> {
    let num_bcbs = basic_coverage_blocks.num_nodes();
    let mut backedges = IndexVec::from_elem_n(Vec::<BasicCoverageBlock>::new(), num_bcbs);

    // Identify loops by their backedges.
    //
    // The computational complexity is bounded by: n(s) x d where `n` is the number of
    // `BasicCoverageBlock` nodes (the simplified/reduced representation of the CFG derived from the
    // MIR); `s` is the average number of successors per node (which is most likely less than 2, and
    // independent of the size of the function, so it can be treated as a constant);
    // and `d` is the average number of dominators per node.
    //
    // The average number of dominators depends on the size and complexity of the function, and
    // nodes near the start of the function's control flow graph typically have less dominators
    // than nodes near the end of the CFG. Without doing a detailed mathematical analysis, I
    // think the resulting complexity has the characteristics of O(n log n).
    //
    // The overall complexity appears to be comparable to many other MIR transform algorithms, and I
    // don't expect that this function is creating a performance hot spot, but if this becomes an
    // issue, there may be ways to optimize the `is_dominated_by` algorithm (as indicated by an
    // existing `FIXME` comment in that code), or possibly ways to optimize it's usage here, perhaps
    // by keeping track of results for visited `BasicCoverageBlock`s if they can be used to short
    // circuit downstream `is_dominated_by` checks.
    //
    // For now, that kind of optimization seems unnecessarily complicated.
    for (bcb, _) in basic_coverage_blocks.iter_enumerated() {
        for &successor in &basic_coverage_blocks.successors[bcb] {
            if basic_coverage_blocks.is_dominated_by(bcb, successor) {
                let loop_header = successor;
                let backedge_from_bcb = bcb;
                debug!(
                    "Found BCB backedge: {:?} -> loop_header: {:?}",
                    backedge_from_bcb, loop_header
                );
                backedges[loop_header].push(backedge_from_bcb);
            }
        }
    }
    backedges
}
691 pub struct ShortCircuitPreorder
<
695 &'tcx
&'a mir
::Body
<'tcx
>,
696 &'tcx TerminatorKind
<'tcx
>,
697 ) -> Box
<dyn Iterator
<Item
= BasicBlock
> + 'a
>,
699 body
: &'tcx
&'a mir
::Body
<'tcx
>,
700 visited
: BitSet
<BasicBlock
>,
701 worklist
: Vec
<BasicBlock
>,
702 filtered_successors
: F
,
709 &'tcx
&'a mir
::Body
<'tcx
>,
710 &'tcx TerminatorKind
<'tcx
>,
711 ) -> Box
<dyn Iterator
<Item
= BasicBlock
> + 'a
>,
712 > ShortCircuitPreorder
<'a
, 'tcx
, F
>
715 body
: &'tcx
&'a mir
::Body
<'tcx
>,
716 filtered_successors
: F
,
717 ) -> ShortCircuitPreorder
<'a
, 'tcx
, F
> {
718 let worklist
= vec
![mir
::START_BLOCK
];
720 ShortCircuitPreorder
{
722 visited
: BitSet
::new_empty(body
.basic_blocks().len()),
733 &'tcx
&'a mir
::Body
<'tcx
>,
734 &'tcx TerminatorKind
<'tcx
>,
735 ) -> Box
<dyn Iterator
<Item
= BasicBlock
> + 'a
>,
736 > Iterator
for ShortCircuitPreorder
<'a
, 'tcx
, F
>
738 type Item
= (BasicBlock
, &'a BasicBlockData
<'tcx
>);
740 fn next(&mut self) -> Option
<(BasicBlock
, &'a BasicBlockData
<'tcx
>)> {
741 while let Some(idx
) = self.worklist
.pop() {
742 if !self.visited
.insert(idx
) {
746 let data
= &self.body
[idx
];
748 if let Some(ref term
) = data
.terminator
{
749 self.worklist
.extend((self.filtered_successors
)(&self.body
, &term
.kind
));
752 return Some((idx
, data
));
758 fn size_hint(&self) -> (usize, Option
<usize>) {
759 let size
= self.body
.basic_blocks().len() - self.visited
.count();