3 use rustc_data_structures
::fx
::FxHashMap
;
4 use rustc_data_structures
::graph
::dominators
::{self, Dominators}
;
5 use rustc_data_structures
::graph
::{self, GraphSuccessors, WithNumNodes, WithStartNode}
;
6 use rustc_index
::bit_set
::BitSet
;
7 use rustc_index
::vec
::IndexVec
;
8 use rustc_middle
::mir
::coverage
::*;
9 use rustc_middle
::mir
::{self, BasicBlock, BasicBlockData, Terminator, TerminatorKind}
;
11 use std
::ops
::{Index, IndexMut}
;
// Separator placed between MIR `BasicBlock` indices when composing a BCB's id string
// (see `BasicCoverageBlockData::id`).
13 const ID_SEPARATOR
: &str = ",";
15 /// A coverage-specific simplification of the MIR control flow graph (CFG). The `CoverageGraph`s
16 /// nodes are `BasicCoverageBlock`s, which encompass one or more MIR `BasicBlock`s, plus a
17 /// `CoverageKind` counter (to be added by `CoverageCounters::make_bcb_counters`), and an optional
18 /// set of additional counters--if needed--to count incoming edges, if there are more than one.
19 /// (These "edge counters" are eventually converted into new MIR `BasicBlock`s.)
21 pub(super) struct CoverageGraph
{
// Per-BCB data, indexed by the BCB itself.
22 bcbs
: IndexVec
<BasicCoverageBlock
, BasicCoverageBlockData
>,
// Maps each MIR `BasicBlock` to the BCB containing it, or `None` if the block
// was never assigned to a BCB by the coverage traversal.
23 bb_to_bcb
: IndexVec
<BasicBlock
, Option
<BasicCoverageBlock
>>,
// De-duplicated successor BCBs per BCB, kept in MIR successor order (built in `from_mir`).
24 pub successors
: IndexVec
<BasicCoverageBlock
, Vec
<BasicCoverageBlock
>>,
// Inverse of `successors`, also built in `from_mir`.
25 pub predecessors
: IndexVec
<BasicCoverageBlock
, Vec
<BasicCoverageBlock
>>,
// `None` only while the graph is under construction; `from_mir` fills this in
// once the node/edge structure is complete.
26 dominators
: Option
<Dominators
<BasicCoverageBlock
>>,
// Builds the coverage graph from a MIR body: groups `BasicBlock`s into BCBs,
// pre-computes de-duplicated successor and predecessor lists, then computes
// dominators over the finished graph.
// NOTE(review): several original source lines are missing from this fragment
// (the embedded line numbers jump); code is left byte-identical.
30 pub fn from_mir(mir_body
: &mir
::Body
<'tcx
>) -> Self {
31 let (bcbs
, bb_to_bcb
) = Self::compute_basic_coverage_blocks(mir_body
);
33 // Pre-transform MIR `BasicBlock` successors and predecessors into the BasicCoverageBlock
34 // equivalents. Note that since the BasicCoverageBlock graph has been fully simplified, the
35 // each predecessor of a BCB leader_bb should be in a unique BCB. It is possible for a
36 // `SwitchInt` to have multiple targets to the same destination `BasicBlock`, so
37 // de-duplication is required. This is done without reordering the successors.
39 let bcbs_len
= bcbs
.len();
40 let mut seen
= IndexVec
::from_elem_n(false, bcbs_len
);
41 let successors
= IndexVec
::from_fn_n(
43 for b
in seen
.iter_mut() {
46 let bcb_data
= &bcbs
[bcb
];
47 let mut bcb_successors
= Vec
::new();
49 bcb_filtered_successors(&mir_body
, &bcb_data
.terminator(mir_body
).kind
)
50 .filter_map(|&successor_bb
| bb_to_bcb
[successor_bb
])
53 seen
[successor
] = true;
54 bcb_successors
.push(successor
);
// Predecessors are derived by inverting the successor lists.
62 let mut predecessors
= IndexVec
::from_elem_n(Vec
::new(), bcbs
.len());
63 for (bcb
, bcb_successors
) in successors
.iter_enumerated() {
64 for &successor
in bcb_successors
{
65 predecessors
[successor
].push(bcb
);
// Dominators need a complete graph, so `Self` is constructed with
// `dominators: None` first, then the field is filled in.
69 let mut basic_coverage_blocks
=
70 Self { bcbs, bb_to_bcb, successors, predecessors, dominators: None }
;
71 let dominators
= dominators
::dominators(&basic_coverage_blocks
);
72 basic_coverage_blocks
.dominators
= Some(dominators
);
// Walks the MIR CFG (unwind paths excluded) and partitions the visited
// `BasicBlock`s into maximal straight-line runs, each becoming one BCB.
// Returns the BCB data vector and the `BasicBlock` -> BCB mapping.
// NOTE(review): many original source lines are missing from this fragment;
// code is left byte-identical.
76 fn compute_basic_coverage_blocks(
77 mir_body
: &mir
::Body
<'tcx
>,
79 IndexVec
<BasicCoverageBlock
, BasicCoverageBlockData
>,
80 IndexVec
<BasicBlock
, Option
<BasicCoverageBlock
>>,
82 let num_basic_blocks
= mir_body
.num_nodes();
83 let mut bcbs
= IndexVec
::with_capacity(num_basic_blocks
);
84 let mut bb_to_bcb
= IndexVec
::from_elem_n(None
, num_basic_blocks
);
86 // Walk the MIR CFG using a Preorder traversal, which starts from `START_BLOCK` and follows
87 // each block terminator's `successors()`. Coverage spans must map to actual source code,
88 // so compiler generated blocks and paths can be ignored. To that end, the CFG traversal
89 // intentionally omits unwind paths.
90 // FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
91 // `catch_unwind()` handlers.
92 let mir_cfg_without_unwind
= ShortCircuitPreorder
::new(&mir_body
, bcb_filtered_successors
);
94 let mut basic_blocks
= Vec
::new();
95 for (bb
, data
) in mir_cfg_without_unwind
{
96 if let Some(last
) = basic_blocks
.last() {
97 let predecessors
= &mir_body
.predecessors()[bb
];
98 if predecessors
.len() > 1 || !predecessors
.contains(last
) {
99 // The `bb` has more than one _incoming_ edge, and should start its own
100 // `BasicCoverageBlockData`. (Note, the `basic_blocks` vector does not yet
101 // include `bb`; it contains a sequence of one or more sequential basic_blocks
102 // with no intermediate branches in or out. Save these as a new
103 // `BasicCoverageBlockData` before starting the new one.)
104 Self::add_basic_coverage_block(
107 basic_blocks
.split_off(0),
111 if predecessors
.len() > 1 {
112 "predecessors.len() > 1".to_owned()
// NOTE(review): "precessors" in the debug string below looks like a typo for
// "predecessors" (debug-only output; left unchanged in this comment-only edit).
114 format
!("bb {} is not in precessors: {:?}", bb
.index(), predecessors
)
119 basic_blocks
.push(bb
);
121 let term
= data
.terminator();
124 TerminatorKind
::Return { .. }
125 | TerminatorKind
::Abort
126 | TerminatorKind
::Yield { .. }
127 | TerminatorKind
::SwitchInt { .. }
=> {
128 // The `bb` has more than one _outgoing_ edge, or exits the function. Save the
129 // current sequence of `basic_blocks` gathered to this point, as a new
130 // `BasicCoverageBlockData`.
131 Self::add_basic_coverage_block(
134 basic_blocks
.split_off(0),
136 debug
!(" because term.kind = {:?}", term
.kind
);
137 // Note that this condition is based on `TerminatorKind`, even though it
138 // theoretically boils down to `successors().len() != 1`; that is, either zero
139 // (e.g., `Return`, `Abort`) or multiple successors (e.g., `SwitchInt`), but
140 // since the BCB CFG ignores things like unwind branches (which exist in the
141 // `Terminator`s `successors()` list) checking the number of successors won't
145 // The following `TerminatorKind`s are either not expected outside an unwind branch,
146 // or they should not (under normal circumstances) branch. Coverage graphs are
147 // simplified by assuring coverage results are accurate for program executions that
150 // Programs that panic and unwind may record slightly inaccurate coverage results
151 // for a coverage region containing the `Terminator` that began the panic. This
152 // is as intended. (See Issue #78544 for a possible future option to support
153 // coverage in test programs that panic.)
154 TerminatorKind
::Goto { .. }
155 | TerminatorKind
::Resume
156 | TerminatorKind
::Unreachable
157 | TerminatorKind
::Drop { .. }
158 | TerminatorKind
::DropAndReplace { .. }
159 | TerminatorKind
::Call { .. }
160 | TerminatorKind
::GeneratorDrop
161 | TerminatorKind
::Assert { .. }
162 | TerminatorKind
::FalseEdge { .. }
163 | TerminatorKind
::FalseUnwind { .. }
164 | TerminatorKind
::InlineAsm { .. }
=> {}
168 if !basic_blocks
.is_empty() {
169 // process any remaining basic_blocks into a final `BasicCoverageBlockData`
170 Self::add_basic_coverage_block(&mut bcbs
, &mut bb_to_bcb
, basic_blocks
.split_off(0));
171 debug
!(" because the end of the MIR CFG was reached while traversing");
// Appends one new BCB covering the given run of MIR basic blocks: registers the
// new BCB index in `bb_to_bcb` for every contained block, then pushes the data.
// NOTE(review): a few original lines (closing braces, `bcbs.push(..)`) are not
// visible in this fragment; code is left byte-identical.
177 fn add_basic_coverage_block(
178 bcbs
: &mut IndexVec
<BasicCoverageBlock
, BasicCoverageBlockData
>,
179 bb_to_bcb
: &mut IndexVec
<BasicBlock
, Option
<BasicCoverageBlock
>>,
180 basic_blocks
: Vec
<BasicBlock
>,
// The next BCB index is the current length of `bcbs`.
182 let bcb
= BasicCoverageBlock
::from_usize(bcbs
.len());
183 for &bb
in basic_blocks
.iter() {
184 bb_to_bcb
[bb
] = Some(bcb
);
186 let bcb_data
= BasicCoverageBlockData
::from(basic_blocks
);
187 debug
!("adding bcb{}: {:?}", bcb
.index(), bcb_data
);
// Iterates over all BCBs with their indices, immutably.
// NOTE(review): the `&self,` parameter line is not visible in this fragment.
192 pub fn iter_enumerated(
194 ) -> impl Iterator
<Item
= (BasicCoverageBlock
, &BasicCoverageBlockData
)> {
195 self.bcbs
.iter_enumerated()
// Iterates over all BCBs with their indices, mutably.
// NOTE(review): the `&mut self,` parameter line is not visible in this fragment.
199 pub fn iter_enumerated_mut(
201 ) -> impl Iterator
<Item
= (BasicCoverageBlock
, &mut BasicCoverageBlockData
)> {
202 self.bcbs
.iter_enumerated_mut()
206 pub fn bcb_from_bb(&self, bb
: BasicBlock
) -> Option
<BasicCoverageBlock
> {
207 if bb
.index() < self.bb_to_bcb
.len() { self.bb_to_bcb[bb] }
else { None }
211 pub fn is_dominated_by(&self, node
: BasicCoverageBlock
, dom
: BasicCoverageBlock
) -> bool
{
212 self.dominators
.as_ref().unwrap().is_dominated_by(node
, dom
)
216 pub fn dominators(&self) -> &Dominators
<BasicCoverageBlock
> {
217 self.dominators
.as_ref().unwrap()
// Enables `coverage_graph[bcb]` as shorthand for the BCB's data.
// NOTE(review): the `index` method body is not visible in this fragment;
// presumably `&self.bcbs[index]`, mirroring `IndexMut` below — confirm.
221 impl Index
<BasicCoverageBlock
> for CoverageGraph
{
222 type Output
= BasicCoverageBlockData
;
225 fn index(&self, index
: BasicCoverageBlock
) -> &BasicCoverageBlockData
{
230 impl IndexMut
<BasicCoverageBlock
> for CoverageGraph
{
232 fn index_mut(&mut self, index
: BasicCoverageBlock
) -> &mut BasicCoverageBlockData
{
233 &mut self.bcbs
[index
]
237 impl graph
::DirectedGraph
for CoverageGraph
{
238 type Node
= BasicCoverageBlock
;
// Reports the node count to the generic graph algorithms.
// NOTE(review): the body of `num_nodes` is not visible in this fragment;
// presumably it returns `self.bcbs.len()` — confirm against the full file.
241 impl graph
::WithNumNodes
for CoverageGraph
{
243 fn num_nodes(&self) -> usize {
248 impl graph
::WithStartNode
for CoverageGraph
{
250 fn start_node(&self) -> Self::Node
{
251 self.bcb_from_bb(mir
::START_BLOCK
)
252 .expect("mir::START_BLOCK should be in a BasicCoverageBlock")
// Borrowing iterator over one BCB's successor list; used by the
// `GraphSuccessors` impl below.
256 type BcbSuccessors
<'graph
> = std
::slice
::Iter
<'graph
, BasicCoverageBlock
>;
258 impl<'graph
> graph
::GraphSuccessors
<'graph
> for CoverageGraph
{
259 type Item
= BasicCoverageBlock
;
260 type Iter
= std
::iter
::Cloned
<BcbSuccessors
<'graph
>>;
263 impl graph
::WithSuccessors
for CoverageGraph
{
265 fn successors(&self, node
: Self::Node
) -> <Self as GraphSuccessors
<'_
>>::Iter
{
266 self.successors
[node
].iter().cloned()
270 impl graph
::GraphPredecessors
<'graph
> for CoverageGraph
{
271 type Item
= BasicCoverageBlock
;
272 type Iter
= std
::vec
::IntoIter
<BasicCoverageBlock
>;
275 impl graph
::WithPredecessors
for CoverageGraph
{
277 fn predecessors(&self, node
: Self::Node
) -> <Self as graph
::GraphPredecessors
<'_
>>::Iter
{
278 self.predecessors
[node
].clone().into_iter()
// Declares the `BasicCoverageBlock` index newtype via rustc's `newtype_index!`
// macro, with a custom `Debug` rendering of "bcbN".
// NOTE(review): the macro invocation's closing lines are not visible in this fragment.
282 rustc_index
::newtype_index
! {
283 /// A node in the [control-flow graph][CFG] of CoverageGraph.
284 pub(super) struct BasicCoverageBlock
{
285 DEBUG_FORMAT
= "bcb{}",
290 /// `BasicCoverageBlockData` holds the data indexed by a `BasicCoverageBlock`.
292 /// A `BasicCoverageBlock` (BCB) represents the maximal-length sequence of MIR `BasicBlock`s without
293 /// conditional branches, and form a new, simplified, coverage-specific Control Flow Graph, without
294 /// altering the original MIR CFG.
296 /// Note that running the MIR `SimplifyCfg` transform is not sufficient (and therefore not
297 /// necessary). The BCB-based CFG is a more aggressive simplification. For example:
299 /// * The BCB CFG ignores (trims) branches not relevant to coverage, such as unwind-related code,
300 /// that is injected by the Rust compiler but has no physical source code to count. This also
301 /// means a BasicBlock with a `Call` terminator can be merged into its primary successor target
302 /// block, in the same BCB. (But, note: Issue #78544: "MIR InstrumentCoverage: Improve coverage
303 /// of `#[should_panic]` tests and `catch_unwind()` handlers")
304 /// * Some BasicBlock terminators support Rust-specific concerns--like borrow-checking--that are
305 /// not relevant to coverage analysis. `FalseUnwind`, for example, can be treated the same as
306 /// a `Goto`, and merged with its successor into the same BCB.
308 /// Each BCB with at least one computed `CoverageSpan` will have no more than one `Counter`.
309 /// In some cases, a BCB's execution count can be computed by `Expression`. Additional
310 /// disjoint `CoverageSpan`s in a BCB can also be counted by `Expression` (by adding `ZERO`
311 /// to the BCB's primary counter or expression).
313 /// The BCB CFG is critical to simplifying the coverage analysis by ensuring graph path-based
314 /// queries (`is_dominated_by()`, `predecessors`, `successors`, etc.) have branch (control flow)
316 #[derive(Debug, Clone)]
317 pub(super) struct BasicCoverageBlockData
{
// The MIR basic blocks this BCB covers; non-empty (asserted in `from`).
318 pub basic_blocks
: Vec
<BasicBlock
>,
// The BCB's primary counter or expression, once assigned via `set_counter`.
319 pub counter_kind
: Option
<CoverageKind
>,
// Counters for specific incoming edges, keyed by the originating BCB;
// lazily allocated in `set_edge_counter_from`.
320 edge_from_bcbs
: Option
<FxHashMap
<BasicCoverageBlock
, CoverageKind
>>,
// Construction, counter management, and identification helpers for BCB data.
323 impl BasicCoverageBlockData
{
324 pub fn from(basic_blocks
: Vec
<BasicBlock
>) -> Self {
325 assert
!(basic_blocks
.len() > 0);
326 Self { basic_blocks, counter_kind: None, edge_from_bcbs: None }
// Returns the first MIR `BasicBlock` in this BCB.
// NOTE(review): the body is not visible in this fragment; presumably
// `self.basic_blocks[0]` — confirm against the full file.
330 pub fn leader_bb(&self) -> BasicBlock
{
335 pub fn last_bb(&self) -> BasicBlock
{
336 *self.basic_blocks
.last().unwrap()
340 pub fn terminator
<'a
, 'tcx
>(&self, mir_body
: &'a mir
::Body
<'tcx
>) -> &'a Terminator
<'tcx
> {
341 &mir_body
[self.last_bb()].terminator()
// Installs `counter_kind` as this BCB's primary counter/expression, returning
// its operand id, or an `Error` if a counter was already set.
// NOTE(review): the function's opening lines (`pub fn set_counter(` / `&mut self,`)
// and several interior lines are missing from this fragment; code left byte-identical.
346 counter_kind
: CoverageKind
,
347 ) -> Result
<ExpressionOperandId
, Error
> {
349 // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
350 // have an expression (to be injected into an existing `BasicBlock` represented by this
351 // `BasicCoverageBlock`).
352 self.edge_from_bcbs
.is_none() || counter_kind
.is_expression(),
353 "attempt to add a `Counter` to a BCB target with existing incoming edge counters"
355 let operand
= counter_kind
.as_operand_id();
// `replace` detects a double assignment, which is reported as an `Error`.
356 if let Some(replaced
) = self.counter_kind
.replace(counter_kind
) {
357 Error
::from_string(format
!(
358 "attempt to set a BasicCoverageBlock coverage counter more than once; \
359 {:?} already had counter {:?}",
368 pub fn counter(&self) -> Option
<&CoverageKind
> {
369 self.counter_kind
.as_ref()
373 pub fn take_counter(&mut self) -> Option
<CoverageKind
> {
374 self.counter_kind
.take()
// Records `counter_kind` as the counter for the incoming edge from `from_bcb`,
// returning its operand id, or an `Error` if that edge already had a counter.
// NOTE(review): several original lines (including `&mut self,` and closing braces)
// are missing from this fragment; code left byte-identical.
377 pub fn set_edge_counter_from(
379 from_bcb
: BasicCoverageBlock
,
380 counter_kind
: CoverageKind
,
381 ) -> Result
<ExpressionOperandId
, Error
> {
// The sanity check below only runs when DEBUG-level tracing is enabled.
382 if level_enabled
!(tracing
::Level
::DEBUG
) {
383 // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
384 // have an expression (to be injected into an existing `BasicBlock` represented by this
385 // `BasicCoverageBlock`).
386 if !self.counter_kind
.as_ref().map_or(true, |c
| c
.is_expression()) {
387 return Error
::from_string(format
!(
388 "attempt to add an incoming edge counter from {:?} when the target BCB already \
394 let operand
= counter_kind
.as_operand_id();
// The edge map is allocated lazily; `insert` returning `Some` means a duplicate.
395 if let Some(replaced
) =
396 self.edge_from_bcbs
.get_or_insert_default().insert(from_bcb
, counter_kind
)
398 Error
::from_string(format
!(
399 "attempt to set an edge counter more than once; from_bcb: \
400 {:?} already had counter {:?}",
409 pub fn edge_counter_from(&self, from_bcb
: BasicCoverageBlock
) -> Option
<&CoverageKind
> {
410 if let Some(edge_from_bcbs
) = &self.edge_from_bcbs
{
411 edge_from_bcbs
.get(&from_bcb
)
418 pub fn take_edge_counters(
420 ) -> Option
<impl Iterator
<Item
= (BasicCoverageBlock
, CoverageKind
)>> {
421 self.edge_from_bcbs
.take().map_or(None
, |m
| Some(m
.into_iter()))
// Renders an id string for this BCB from its MIR basic-block indices.
// NOTE(review): most of this function's lines are missing from this fragment;
// presumably the stringified indices are joined with `ID_SEPARATOR` — confirm.
424 pub fn id(&self) -> String
{
429 .map(|bb
| bb
.index().to_string())
436 /// Represents a successor from a branching BasicCoverageBlock (such as the arms of a `SwitchInt`)
437 /// as either the successor BCB itself, if it has only one incoming edge, or the successor _plus_
438 /// the specific branching BCB, representing the edge between the two. The latter case
439 /// distinguishes this incoming edge from other incoming edges to the same `target_bcb`.
440 #[derive(Clone, Copy, PartialEq, Eq)]
441 pub(super) struct BcbBranch
{
// `Some` only when `target_bcb` has multiple incoming edges (see `from_to`).
442 pub edge_from_bcb
: Option
<BasicCoverageBlock
>,
// The successor BCB this branch leads to.
443 pub target_bcb
: BasicCoverageBlock
,
// Builds a `BcbBranch` for the edge `from_bcb -> to_bcb`: the edge is recorded
// explicitly only when `to_bcb` has more than one predecessor.
// NOTE(review): the function's opening lines and the if/else arms' interiors
// are missing from this fragment; code left byte-identical.
448 from_bcb
: BasicCoverageBlock
,
449 to_bcb
: BasicCoverageBlock
,
450 basic_coverage_blocks
: &CoverageGraph
,
452 let edge_from_bcb
= if basic_coverage_blocks
.predecessors
[to_bcb
].len() > 1 {
457 Self { edge_from_bcb, target_bcb: to_bcb }
// Returns the counter for this branch: the edge-specific counter when the
// branch carries a distinct edge, otherwise the target BCB's own counter.
// NOTE(review): the function's opening lines (name and `&self` parameter) are
// missing from this fragment; code left byte-identical.
462 basic_coverage_blocks
: &'a CoverageGraph
,
463 ) -> Option
<&'a CoverageKind
> {
464 if let Some(from_bcb
) = self.edge_from_bcb
{
465 basic_coverage_blocks
[self.target_bcb
].edge_counter_from(from_bcb
)
467 basic_coverage_blocks
[self.target_bcb
].counter()
471 pub fn is_only_path_to_target(&self) -> bool
{
472 self.edge_from_bcb
.is_none()
476 impl std
::fmt
::Debug
for BcbBranch
{
477 fn fmt(&self, fmt
: &mut std
::fmt
::Formatter
<'_
>) -> std
::fmt
::Result
{
478 if let Some(from_bcb
) = self.edge_from_bcb
{
479 write
!(fmt
, "{:?}->{:?}", from_bcb
, self.target_bcb
)
481 write
!(fmt
, "{:?}", self.target_bcb
)
486 // Returns the `Terminator`s non-unwind successors.
487 // FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
488 // `catch_unwind()` handlers.
// NOTE(review): this uses the nightly `box` expression syntax; a couple of
// original lines (match arm braces) are missing from this fragment.
489 fn bcb_filtered_successors
<'a
, 'tcx
>(
490 body
: &'tcx
&'a mir
::Body
<'tcx
>,
491 term_kind
: &'tcx TerminatorKind
<'tcx
>,
492 ) -> Box
<dyn Iterator
<Item
= &'a BasicBlock
> + 'a
> {
493 let mut successors
= term_kind
.successors();
494 box match &term_kind
{
495 // SwitchInt successors are never unwind, and all of them should be traversed.
496 TerminatorKind
::SwitchInt { .. }
=> successors
,
497 // For all other kinds, return only the first successor, if any, and ignore unwinds.
498 // NOTE: `chain(&[])` is required to coerce the `option::iter` (from
499 // `next().into_iter()`) into the `mir::Successors` aliased type.
500 _
=> successors
.next().into_iter().chain(&[]),
// Successors whose terminator is `Unreachable` are dropped from the traversal.
502 .filter(move |&&successor
| body
[successor
].terminator().kind
!= TerminatorKind
::Unreachable
)
505 /// Maintains separate worklists for each loop in the BasicCoverageBlock CFG, plus one for the
506 /// CoverageGraph outside all loops. This supports traversing the BCB CFG in a way that
507 /// ensures a loop is completely traversed before processing Blocks after the end of the loop.
509 pub(super) struct TraversalContext
{
510 /// From one or more backedges returning to a loop header.
// `None` for the outermost (non-loop) context; otherwise the backedge sources
// paired with the loop header BCB.
511 pub loop_backedges
: Option
<(Vec
<BasicCoverageBlock
>, BasicCoverageBlock
)>,
513 /// worklist, to be traversed, of CoverageGraph in the loop with the given loop
514 /// backedges, such that the loop is the inner inner-most loop containing these
516 pub worklist
: Vec
<BasicCoverageBlock
>,
// Loop-aware BCB traversal state: `next` drains nested per-loop worklists so a
// loop's body is fully visited before blocks after the loop.
519 pub(super) struct TraverseCoverageGraphWithLoops
{
// For each BCB, the BCBs that jump back to it (loop headers have non-empty lists);
// computed by `find_loop_backedges`.
520 pub backedges
: IndexVec
<BasicCoverageBlock
, Vec
<BasicCoverageBlock
>>,
// Innermost context is last; a new context is pushed when a loop header is visited.
521 pub context_stack
: Vec
<TraversalContext
>,
// BCBs already yielded by `next`.
522 visited
: BitSet
<BasicCoverageBlock
>,
// Construction and traversal methods for the loop-aware BCB iterator.
525 impl TraverseCoverageGraphWithLoops
{
526 pub fn new(basic_coverage_blocks
: &CoverageGraph
) -> Self {
527 let start_bcb
= basic_coverage_blocks
.start_node();
528 let backedges
= find_loop_backedges(basic_coverage_blocks
);
529 let mut context_stack
= Vec
::new();
530 context_stack
.push(TraversalContext { loop_backedges: None, worklist: vec![start_bcb] }
);
531 // `context_stack` starts with a `TraversalContext` for the main function context (beginning
532 // with the `start` BasicCoverageBlock of the function). New worklists are pushed to the top
533 // of the stack as loops are entered, and popped off of the stack when a loop's worklist is
535 let visited
= BitSet
::new_empty(basic_coverage_blocks
.num_nodes());
536 Self { backedges, context_stack, visited }
// Yields the next unvisited BCB, maintaining the loop-context stack: empty
// contexts are popped, loop headers push a fresh context, and the visited
// BCB's successors are distributed to the appropriate worklists.
// NOTE(review): several original lines (a debug! header, braces, the final
// `None`) are missing from this fragment; code left byte-identical.
539 pub fn next(&mut self, basic_coverage_blocks
: &CoverageGraph
) -> Option
<BasicCoverageBlock
> {
541 "TraverseCoverageGraphWithLoops::next - context_stack: {:?}",
542 self.context_stack
.iter().rev().collect
::<Vec
<_
>>()
544 while let Some(next_bcb
) = {
545 // Strip contexts with empty worklists from the top of the stack
546 while self.context_stack
.last().map_or(false, |context
| context
.worklist
.is_empty()) {
547 self.context_stack
.pop();
549 // Pop the next bcb off of the current context_stack. If none, all BCBs were visited.
550 self.context_stack
.last_mut().map_or(None
, |context
| context
.worklist
.pop())
552 if !self.visited
.insert(next_bcb
) {
553 debug
!("Already visited: {:?}", next_bcb
);
556 debug
!("Visiting {:?}", next_bcb
);
// A BCB with incoming backedges is a loop header: open a new traversal context
// scoped to that loop.
557 if self.backedges
[next_bcb
].len() > 0 {
558 debug
!("{:?} is a loop header! Start a new TraversalContext...", next_bcb
);
559 self.context_stack
.push(TraversalContext
{
560 loop_backedges
: Some((self.backedges
[next_bcb
].clone(), next_bcb
)),
561 worklist
: Vec
::new(),
564 self.extend_worklist(basic_coverage_blocks
, next_bcb
);
565 return Some(next_bcb
);
// Distributes `bcb`'s successors onto the context stack: each successor goes to
// the innermost loop context whose header dominates it (otherwise an outer
// context), with branching successors prioritized at the front of the worklist.
// NOTE(review): several original lines (`&mut self,`, debug! scaffolding, else
// arms, braces) are missing from this fragment; code left byte-identical.
570 pub fn extend_worklist(
572 basic_coverage_blocks
: &CoverageGraph
,
573 bcb
: BasicCoverageBlock
,
575 let successors
= &basic_coverage_blocks
.successors
[bcb
];
576 debug
!("{:?} has {} successors:", bcb
, successors
.len());
577 for &successor
in successors
{
578 if successor
== bcb
{
580 "{:?} has itself as its own successor. (Note, the compiled code will \
581 generate an infinite loop.)",
584 // Don't re-add this successor to the worklist. We are already processing it.
587 for context
in self.context_stack
.iter_mut().rev() {
588 // Add successors of the current BCB to the appropriate context. Successors that
589 // stay within a loop are added to the BCBs context worklist. Successors that
590 // exit the loop (they are not dominated by the loop header) must be reachable
591 // from other BCBs outside the loop, and they will be added to a different
594 // Branching blocks (with more than one successor) must be processed before
595 // blocks with only one successor, to prevent unnecessarily complicating
596 // `Expression`s by creating a Counter in a `BasicCoverageBlock` that the
597 // branching block would have given an `Expression` (or vice versa).
598 let (some_successor_to_add
, some_loop_header
) =
599 if let Some((_
, loop_header
)) = context
.loop_backedges
{
600 if basic_coverage_blocks
.is_dominated_by(successor
, loop_header
) {
601 (Some(successor
), Some(loop_header
))
606 (Some(successor
), None
)
608 if let Some(successor_to_add
) = some_successor_to_add
{
609 if basic_coverage_blocks
.successors
[successor_to_add
].len() > 1 {
611 "{:?} successor is branching. Prioritize it at the beginning of \
614 if let Some(loop_header
) = some_loop_header
{
615 format
!("worklist for the loop headed by {:?}", loop_header
)
617 String
::from("non-loop worklist")
620 context
.worklist
.insert(0, successor_to_add
);
623 "{:?} successor is non-branching. Defer it to the end of the {}",
625 if let Some(loop_header
) = some_loop_header
{
626 format
!("worklist for the loop headed by {:?}", loop_header
)
628 String
::from("non-loop worklist")
631 context
.worklist
.push(successor_to_add
);
639 pub fn is_complete(&self) -> bool
{
640 self.visited
.count() == self.visited
.domain_size()
643 pub fn unvisited(&self) -> Vec
<BasicCoverageBlock
> {
644 let mut unvisited_set
: BitSet
<BasicCoverageBlock
> =
645 BitSet
::new_filled(self.visited
.domain_size());
646 unvisited_set
.subtract(&self.visited
);
647 unvisited_set
.iter().collect
::<Vec
<_
>>()
// Computes, for every BCB, the list of BCBs that jump back to it: an edge
// `bcb -> successor` is a backedge iff `successor` dominates `bcb`, making
// `successor` a loop header.
// NOTE(review): a few original lines (a debug! opener and closing braces/return)
// are missing from this fragment; code left byte-identical.
651 pub(super) fn find_loop_backedges(
652 basic_coverage_blocks
: &CoverageGraph
,
653 ) -> IndexVec
<BasicCoverageBlock
, Vec
<BasicCoverageBlock
>> {
654 let num_bcbs
= basic_coverage_blocks
.num_nodes();
655 let mut backedges
= IndexVec
::from_elem_n(Vec
::<BasicCoverageBlock
>::new(), num_bcbs
);
657 // Identify loops by their backedges.
659 // The computational complexity is bounded by: n(s) x d where `n` is the number of
660 // `BasicCoverageBlock` nodes (the simplified/reduced representation of the CFG derived from the
661 // MIR); `s` is the average number of successors per node (which is most likely less than 2, and
662 // independent of the size of the function, so it can be treated as a constant);
663 // and `d` is the average number of dominators per node.
665 // The average number of dominators depends on the size and complexity of the function, and
666 // nodes near the start of the function's control flow graph typically have less dominators
667 // than nodes near the end of the CFG. Without doing a detailed mathematical analysis, I
668 // think the resulting complexity has the characteristics of O(n log n).
670 // The overall complexity appears to be comparable to many other MIR transform algorithms, and I
671 // don't expect that this function is creating a performance hot spot, but if this becomes an
672 // issue, there may be ways to optimize the `is_dominated_by` algorithm (as indicated by an
673 // existing `FIXME` comment in that code), or possibly ways to optimize it's usage here, perhaps
674 // by keeping track of results for visited `BasicCoverageBlock`s if they can be used to short
675 // circuit downstream `is_dominated_by` checks.
677 // For now, that kind of optimization seems unnecessarily complicated.
678 for (bcb
, _
) in basic_coverage_blocks
.iter_enumerated() {
679 for &successor
in &basic_coverage_blocks
.successors
[bcb
] {
680 if basic_coverage_blocks
.is_dominated_by(bcb
, successor
) {
681 let loop_header
= successor
;
682 let backedge_from_bcb
= bcb
;
684 "Found BCB backedge: {:?} -> loop_header: {:?}",
685 backedge_from_bcb
, loop_header
687 backedges
[loop_header
].push(backedge_from_bcb
);
// Preorder traversal of a MIR body that asks a caller-supplied filter (`F`) for
// each block's successors, allowing the coverage pass to "short-circuit" paths
// (e.g. unwind edges) it does not want to visit.
// NOTE(review): many original lines are missing from this fragment (generic
// parameter declarations, the `impl` header, struct/initializer fields and
// braces); code left byte-identical.
694 pub struct ShortCircuitPreorder
<
698 &'tcx
&'a mir
::Body
<'tcx
>,
699 &'tcx TerminatorKind
<'tcx
>,
700 ) -> Box
<dyn Iterator
<Item
= &'a BasicBlock
> + 'a
>,
702 body
: &'tcx
&'a mir
::Body
<'tcx
>,
703 visited
: BitSet
<BasicBlock
>,
704 worklist
: Vec
<BasicBlock
>,
705 filtered_successors
: F
,
712 &'tcx
&'a mir
::Body
<'tcx
>,
713 &'tcx TerminatorKind
<'tcx
>,
714 ) -> Box
<dyn Iterator
<Item
= &'a BasicBlock
> + 'a
>,
715 > ShortCircuitPreorder
<'a
, 'tcx
, F
>
718 body
: &'tcx
&'a mir
::Body
<'tcx
>,
719 filtered_successors
: F
,
720 ) -> ShortCircuitPreorder
<'a
, 'tcx
, F
> {
// Traversal begins at MIR's entry block.
721 let worklist
= vec
![mir
::START_BLOCK
];
723 ShortCircuitPreorder
{
725 visited
: BitSet
::new_empty(body
.basic_blocks().len()),
736 &'tcx
&'a mir
::Body
<'tcx
>,
737 &'tcx TerminatorKind
<'tcx
>,
738 ) -> Box
<dyn Iterator
<Item
= &'a BasicBlock
> + 'a
>,
739 > Iterator
for ShortCircuitPreorder
<'a
, 'tcx
, F
>
741 type Item
= (BasicBlock
, &'a BasicBlockData
<'tcx
>);
743 fn next(&mut self) -> Option
<(BasicBlock
, &'a BasicBlockData
<'tcx
>)> {
744 while let Some(idx
) = self.worklist
.pop() {
745 if !self.visited
.insert(idx
) {
749 let data
= &self.body
[idx
];
751 if let Some(ref term
) = data
.terminator
{
752 self.worklist
.extend((self.filtered_successors
)(&self.body
, &term
.kind
));
755 return Some((idx
, data
));
761 fn size_hint(&self) -> (usize, Option
<usize>) {
762 let size
= self.body
.basic_blocks().len() - self.visited
.count();