1 use rustc_ast
::ast
::{self, MetaItem}
;
2 use rustc_ast_pretty
::pprust
;
3 use rustc_span
::symbol
::{sym, Symbol}
;
5 use rustc_data_structures
::work_queue
::WorkQueue
;
6 use rustc_index
::bit_set
::{BitSet, HybridBitSet}
;
7 use rustc_index
::vec
::Idx
;
9 use rustc
::mir
::traversal
;
10 use rustc
::mir
::{self, BasicBlock, BasicBlockData, Body, Location, Statement, Terminator}
;
11 use rustc
::session
::Session
;
12 use rustc
::ty
::{self, TyCtxt}
;
13 use rustc_hir
::def_id
::DefId
;
15 use std
::borrow
::Borrow
;
18 use std
::path
::PathBuf
;
20 pub use self::at_location
::{FlowAtLocation, FlowsAtLocation}
;
21 pub(crate) use self::drop_flag_effects
::*;
22 pub use self::impls
::borrows
::Borrows
;
23 pub use self::impls
::DefinitelyInitializedPlaces
;
24 pub use self::impls
::EverInitializedPlaces
;
25 pub use self::impls
::{MaybeBorrowedLocals, MaybeMutBorrowedLocals}
;
26 pub use self::impls
::{MaybeInitializedPlaces, MaybeUninitializedPlaces}
;
27 pub use self::impls
::{MaybeRequiresStorage, MaybeStorageLive}
;
29 use self::move_paths
::MoveData
;
32 pub mod drop_flag_effects
;
38 pub(crate) mod indexes
{
39 pub(crate) use super::{
40 impls
::borrows
::BorrowIndex
,
41 move_paths
::{InitIndex, MoveOutIndex, MovePathIndex}
,
45 pub(crate) struct DataflowBuilder
<'a
, 'tcx
, BD
>
47 BD
: BitDenotation
<'tcx
>,
50 flow_state
: DataflowAnalysis
<'a
, 'tcx
, BD
>,
51 print_preflow_to
: Option
<String
>,
52 print_postflow_to
: Option
<String
>,
/// `DebugFormatted` encapsulates the `"{:?}"` rendering of some
/// arbitrary value. This way you pay the cost of allocating an extra
/// string (as well as that of rendering up-front); in exchange, you
/// don't have to hand over ownership of your value or deal with
/// borrowing it at every later use site.
pub struct DebugFormatted(String);
63 pub fn new(input
: &dyn fmt
::Debug
) -> DebugFormatted
{
64 DebugFormatted(format
!("{:?}", input
))
68 impl fmt
::Debug
for DebugFormatted
{
69 fn fmt(&self, w
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
70 write
!(w
, "{}", self.0)
74 pub trait Dataflow
<'tcx
, BD
: BitDenotation
<'tcx
>> {
75 /// Sets up and runs the dataflow problem, using `p` to render results if
76 /// implementation so chooses.
77 fn dataflow
<P
>(&mut self, p
: P
)
79 P
: Fn(&BD
, BD
::Idx
) -> DebugFormatted
,
81 let _
= p
; // default implementation does not instrument process.
86 /// Sets up the entry, gen, and kill sets for this instance of a dataflow problem.
87 fn build_sets(&mut self);
89 /// Finds a fixed-point solution to this instance of a dataflow problem.
90 fn propagate(&mut self);
93 impl<'a
, 'tcx
, BD
> Dataflow
<'tcx
, BD
> for DataflowBuilder
<'a
, 'tcx
, BD
>
95 BD
: BitDenotation
<'tcx
>,
97 fn dataflow
<P
>(&mut self, p
: P
)
99 P
: Fn(&BD
, BD
::Idx
) -> DebugFormatted
,
101 self.flow_state
.build_sets();
102 self.pre_dataflow_instrumentation(|c
, i
| p(c
, i
)).unwrap();
103 self.flow_state
.propagate();
104 self.post_dataflow_instrumentation(|c
, i
| p(c
, i
)).unwrap();
107 fn build_sets(&mut self) {
108 self.flow_state
.build_sets();
110 fn propagate(&mut self) {
111 self.flow_state
.propagate();
115 pub(crate) fn has_rustc_mir_with(attrs
: &[ast
::Attribute
], name
: Symbol
) -> Option
<MetaItem
> {
117 if attr
.check_name(sym
::rustc_mir
) {
118 let items
= attr
.meta_item_list();
119 for item
in items
.iter().flat_map(|l
| l
.iter()) {
120 match item
.meta_item() {
121 Some(mi
) if mi
.check_name(name
) => return Some(mi
.clone()),
130 pub struct MoveDataParamEnv
<'tcx
> {
131 pub(crate) move_data
: MoveData
<'tcx
>,
132 pub(crate) param_env
: ty
::ParamEnv
<'tcx
>,
135 pub fn do_dataflow
<'a
, 'tcx
, BD
, P
>(
137 body
: &'a Body
<'tcx
>,
139 attributes
: &[ast
::Attribute
],
140 dead_unwinds
: &BitSet
<BasicBlock
>,
143 ) -> DataflowResults
<'tcx
, BD
>
145 BD
: BitDenotation
<'tcx
>,
146 P
: Fn(&BD
, BD
::Idx
) -> DebugFormatted
,
148 let flow_state
= DataflowAnalysis
::new(body
, dead_unwinds
, bd
);
149 flow_state
.run(tcx
, def_id
, attributes
, p
)
152 impl<'a
, 'tcx
, BD
> DataflowAnalysis
<'a
, 'tcx
, BD
>
154 BD
: BitDenotation
<'tcx
>,
156 pub(crate) fn run
<P
>(
160 attributes
: &[ast
::Attribute
],
162 ) -> DataflowResults
<'tcx
, BD
>
164 P
: Fn(&BD
, BD
::Idx
) -> DebugFormatted
,
166 let name_found
= |sess
: &Session
, attrs
: &[ast
::Attribute
], name
| -> Option
<String
> {
167 if let Some(item
) = has_rustc_mir_with(attrs
, name
) {
168 if let Some(s
) = item
.value_str() {
169 return Some(s
.to_string());
171 let path
= pprust
::path_to_string(&item
.path
);
172 sess
.span_err(item
.span
, &format
!("{} attribute requires a path", path
));
179 let print_preflow_to
= name_found(tcx
.sess
, attributes
, sym
::borrowck_graphviz_preflow
);
180 let print_postflow_to
= name_found(tcx
.sess
, attributes
, sym
::borrowck_graphviz_postflow
);
183 DataflowBuilder { def_id, print_preflow_to, print_postflow_to, flow_state: self }
;
186 mbcx
.flow_state
.results()
190 struct PropagationContext
<'b
, 'a
, 'tcx
, O
>
192 O
: BitDenotation
<'tcx
>,
194 builder
: &'b
mut DataflowAnalysis
<'a
, 'tcx
, O
>,
197 impl<'a
, 'tcx
, BD
> DataflowAnalysis
<'a
, 'tcx
, BD
>
199 BD
: BitDenotation
<'tcx
>,
201 fn propagate(&mut self) {
202 let mut temp
= BitSet
::new_empty(self.flow_state
.sets
.bits_per_block
);
203 let mut propcx
= PropagationContext { builder: self }
;
204 propcx
.walk_cfg(&mut temp
);
207 fn build_sets(&mut self) {
208 // Build the transfer function for each block.
209 for (bb
, data
) in self.body
.basic_blocks().iter_enumerated() {
210 let &mir
::BasicBlockData { ref statements, ref terminator, is_cleanup: _ }
= data
;
212 let trans
= self.flow_state
.sets
.trans_mut_for(bb
.index());
213 for j_stmt
in 0..statements
.len() {
214 let location
= Location { block: bb, statement_index: j_stmt }
;
215 self.flow_state
.operator
.before_statement_effect(trans
, location
);
216 self.flow_state
.operator
.statement_effect(trans
, location
);
219 if terminator
.is_some() {
220 let location
= Location { block: bb, statement_index: statements.len() }
;
221 self.flow_state
.operator
.before_terminator_effect(trans
, location
);
222 self.flow_state
.operator
.terminator_effect(trans
, location
);
226 // Initialize the flow state at entry to the start block.
227 let on_entry
= self.flow_state
.sets
.entry_set_mut_for(mir
::START_BLOCK
.index());
228 self.flow_state
.operator
.start_block_effect(on_entry
);
232 impl<'b
, 'a
, 'tcx
, BD
> PropagationContext
<'b
, 'a
, 'tcx
, BD
>
234 BD
: BitDenotation
<'tcx
>,
236 fn walk_cfg(&mut self, in_out
: &mut BitSet
<BD
::Idx
>) {
237 let body
= self.builder
.body
;
239 // Initialize the dirty queue in reverse post-order. This makes it more likely that the
240 // entry state for each basic block will have the effects of its predecessors applied
241 // before it is processed. In fact, for CFGs without back edges, this guarantees that
242 // dataflow will converge in exactly `N` iterations, where `N` is the number of basic
244 let mut dirty_queue
: WorkQueue
<mir
::BasicBlock
> =
245 WorkQueue
::with_none(body
.basic_blocks().len());
246 for (bb
, _
) in traversal
::reverse_postorder(body
) {
247 dirty_queue
.insert(bb
);
250 // Add blocks which are not reachable from START_BLOCK to the work queue. These blocks will
251 // be processed after the ones added above.
252 for bb
in body
.basic_blocks().indices() {
253 dirty_queue
.insert(bb
);
256 while let Some(bb
) = dirty_queue
.pop() {
257 let (on_entry
, trans
) = self.builder
.flow_state
.sets
.get_mut(bb
.index());
258 debug_assert
!(in_out
.words().len() == on_entry
.words().len());
259 in_out
.overwrite(on_entry
);
262 let bb_data
= &body
[bb
];
263 self.builder
.propagate_bits_into_graph_successors_of(
/// Builds the dump path for a graphviz rendering by prefixing the file
/// name of `path` with `context` and an underscore, e.g.
/// `dataflow_path("pre", "/tmp/foo.dot")` yields `/tmp/pre_foo.dot`.
///
/// Panics if `path` has no final file-name component or if that
/// component is not valid UTF-8.
fn dataflow_path(context: &str, path: &str) -> PathBuf {
    let mut path = PathBuf::from(path);
    let new_file_name = {
        let orig_file_name = path.file_name().unwrap().to_str().unwrap();
        format!("{}_{}", context, orig_file_name)
    };
    path.set_file_name(new_file_name);
    path
}
282 impl<'a
, 'tcx
, BD
> DataflowBuilder
<'a
, 'tcx
, BD
>
284 BD
: BitDenotation
<'tcx
>,
286 fn pre_dataflow_instrumentation
<P
>(&self, p
: P
) -> io
::Result
<()>
288 P
: Fn(&BD
, BD
::Idx
) -> DebugFormatted
,
290 if let Some(ref path_str
) = self.print_preflow_to
{
291 let path
= dataflow_path(BD
::name(), path_str
);
292 graphviz
::print_borrowck_graph_to(self, &path
, p
)
298 fn post_dataflow_instrumentation
<P
>(&self, p
: P
) -> io
::Result
<()>
300 P
: Fn(&BD
, BD
::Idx
) -> DebugFormatted
,
302 if let Some(ref path_str
) = self.print_postflow_to
{
303 let path
= dataflow_path(BD
::name(), path_str
);
304 graphviz
::print_borrowck_graph_to(self, &path
, p
)
311 /// DataflowResultsConsumer abstracts over walking the MIR with some
312 /// already constructed dataflow results.
314 /// It abstracts over the FlowState and also completely hides the
315 /// underlying flow analysis results, because it needs to handle cases
316 /// where we are combining the results of *multiple* flow analyses
317 /// (e.g., borrows + inits + uninits).
318 pub(crate) trait DataflowResultsConsumer
<'a
, 'tcx
: 'a
> {
319 type FlowState
: FlowsAtLocation
;
321 // Observation Hooks: override (at least one of) these to get analysis feedback.
322 fn visit_block_entry(&mut self, _bb
: BasicBlock
, _flow_state
: &Self::FlowState
) {}
324 fn visit_statement_entry(
327 _stmt
: &'a Statement
<'tcx
>,
328 _flow_state
: &Self::FlowState
,
332 fn visit_terminator_entry(
335 _term
: &'a Terminator
<'tcx
>,
336 _flow_state
: &Self::FlowState
,
340 // Main entry point: this drives the processing of results.
342 fn analyze_results(&mut self, flow_uninit
: &mut Self::FlowState
) {
343 let flow
= flow_uninit
;
344 for (bb
, _
) in traversal
::reverse_postorder(self.body()) {
345 flow
.reset_to_entry_of(bb
);
346 self.process_basic_block(bb
, flow
);
350 fn process_basic_block(&mut self, bb
: BasicBlock
, flow_state
: &mut Self::FlowState
) {
351 self.visit_block_entry(bb
, flow_state
);
353 let BasicBlockData { ref statements, ref terminator, is_cleanup: _ }
= self.body()[bb
];
354 let mut location
= Location { block: bb, statement_index: 0 }
;
355 for stmt
in statements
.iter() {
356 flow_state
.reconstruct_statement_effect(location
);
357 self.visit_statement_entry(location
, stmt
, flow_state
);
358 flow_state
.apply_local_effect(location
);
359 location
.statement_index
+= 1;
362 if let Some(ref term
) = *terminator
{
363 flow_state
.reconstruct_terminator_effect(location
);
364 self.visit_terminator_entry(location
, term
, flow_state
);
366 // We don't need to apply the effect of the terminator,
367 // since we are only visiting dataflow state on control
368 // flow entry to the various nodes. (But we still need to
369 // reconstruct the effect, because the visit method might
374 // Delegated Hooks: Provide access to the MIR and process the flow state.
376 fn body(&self) -> &'a Body
<'tcx
>;
379 /// Allows iterating dataflow results in a flexible and reasonably fast way.
380 pub struct DataflowResultsCursor
<'mir
, 'tcx
, BD
, DR
= DataflowResults
<'tcx
, BD
>>
382 BD
: BitDenotation
<'tcx
>,
383 DR
: Borrow
<DataflowResults
<'tcx
, BD
>>,
385 flow_state
: FlowAtLocation
<'tcx
, BD
, DR
>,
387 // The statement (or terminator) whose effect has been reconstructed in
389 curr_loc
: Option
<Location
>,
391 body
: &'mir Body
<'tcx
>,
394 pub type DataflowResultsRefCursor
<'mir
, 'tcx
, BD
> =
395 DataflowResultsCursor
<'mir
, 'tcx
, BD
, &'mir DataflowResults
<'tcx
, BD
>>;
397 impl<'mir
, 'tcx
, BD
, DR
> DataflowResultsCursor
<'mir
, 'tcx
, BD
, DR
>
399 BD
: BitDenotation
<'tcx
>,
400 DR
: Borrow
<DataflowResults
<'tcx
, BD
>>,
402 pub fn new(result
: DR
, body
: &'mir Body
<'tcx
>) -> Self {
403 DataflowResultsCursor { flow_state: FlowAtLocation::new(result), curr_loc: None, body }
406 /// Seek to the given location in MIR. This method is fast if you are
407 /// traversing your MIR statements in order.
409 /// After calling `seek`, the current state will reflect all effects up to
410 /// and including the `before_statement_effect` of the statement at location
411 /// `loc`. The `statement_effect` of the statement at `loc` will be
412 /// available as the current effect (see e.g. `each_gen_bit`).
414 /// If `loc.statement_index` equals the number of statements in the block,
415 /// we will reconstruct the terminator effect in the same way as described
417 pub fn seek(&mut self, loc
: Location
) {
418 if self.curr_loc
.map(|cur
| loc
== cur
).unwrap_or(false) {
423 let should_reset
= match self.curr_loc
{
425 Some(cur
) if loc
.block
!= cur
.block
|| loc
.statement_index
< cur
.statement_index
=> {
431 self.flow_state
.reset_to_entry_of(loc
.block
);
434 let curr_loc
= self.curr_loc
.unwrap();
435 start_index
= curr_loc
.statement_index
;
436 // Apply the effect from the last seek to the current state.
437 self.flow_state
.apply_local_effect(curr_loc
);
440 for stmt
in start_index
..loc
.statement_index
{
441 let mut stmt_loc
= loc
;
442 stmt_loc
.statement_index
= stmt
;
443 self.flow_state
.reconstruct_statement_effect(stmt_loc
);
444 self.flow_state
.apply_local_effect(stmt_loc
);
447 if loc
.statement_index
== self.body
[loc
.block
].statements
.len() {
448 self.flow_state
.reconstruct_terminator_effect(loc
);
450 self.flow_state
.reconstruct_statement_effect(loc
);
452 self.curr_loc
= Some(loc
);
455 /// Return whether the current state contains bit `x`.
456 pub fn contains(&self, x
: BD
::Idx
) -> bool
{
457 self.flow_state
.contains(x
)
460 /// Iterate over each `gen` bit in the current effect (invoke `seek` first).
461 pub fn each_gen_bit
<F
>(&self, f
: F
)
465 self.flow_state
.each_gen_bit(f
)
468 pub fn get(&self) -> &BitSet
<BD
::Idx
> {
469 self.flow_state
.as_dense()
473 pub struct DataflowAnalysis
<'a
, 'tcx
, O
>
475 O
: BitDenotation
<'tcx
>,
477 flow_state
: DataflowState
<'tcx
, O
>,
478 dead_unwinds
: &'a BitSet
<mir
::BasicBlock
>,
479 body
: &'a Body
<'tcx
>,
482 impl<'a
, 'tcx
, O
> DataflowAnalysis
<'a
, 'tcx
, O
>
484 O
: BitDenotation
<'tcx
>,
486 pub fn results(self) -> DataflowResults
<'tcx
, O
> {
487 DataflowResults(self.flow_state
)
490 pub fn body(&self) -> &'a Body
<'tcx
> {
495 pub struct DataflowResults
<'tcx
, O
>(pub(crate) DataflowState
<'tcx
, O
>)
497 O
: BitDenotation
<'tcx
>;
499 impl<'tcx
, O
: BitDenotation
<'tcx
>> DataflowResults
<'tcx
, O
> {
500 pub fn sets(&self) -> &AllSets
<O
::Idx
> {
504 pub fn operator(&self) -> &O
{
509 /// State of a dataflow analysis; couples a collection of bit sets
510 /// with operator used to initialize and merge bits during analysis.
511 pub struct DataflowState
<'tcx
, O
: BitDenotation
<'tcx
>> {
512 /// All the sets for the analysis. (Factored into its
513 /// own structure so that we can borrow it mutably
514 /// on its own separate from other fields.)
515 pub sets
: AllSets
<O
::Idx
>,
517 /// operator used to initialize, combine, and interpret bits.
518 pub(crate) operator
: O
,
521 impl<'tcx
, O
: BitDenotation
<'tcx
>> DataflowState
<'tcx
, O
> {
522 pub(crate) fn interpret_set
<'c
, P
>(
525 set
: &BitSet
<O
::Idx
>,
527 ) -> Vec
<DebugFormatted
>
529 P
: Fn(&O
, O
::Idx
) -> DebugFormatted
,
531 set
.iter().map(|i
| render_idx(o
, i
)).collect()
534 pub(crate) fn interpret_hybrid_set
<'c
, P
>(
537 set
: &HybridBitSet
<O
::Idx
>,
539 ) -> Vec
<DebugFormatted
>
541 P
: Fn(&O
, O
::Idx
) -> DebugFormatted
,
543 set
.iter().map(|i
| render_idx(o
, i
)).collect()
/// A 2-tuple representing the "gen" and "kill" bitsets during
/// dataflow analysis.
///
/// It is best to ensure that the intersection of `gen_set` and
/// `kill_set` is empty; otherwise the results of the dataflow will
/// have a hidden dependency on what order the bits are generated and
/// killed during the iteration.
#[derive(Debug, Clone, Copy)]
pub struct GenKill<T> {
    pub(crate) gen_set: T,
    pub(crate) kill_set: T,
}
562 pub type GenKillSet
<T
> = GenKill
<HybridBitSet
<T
>>;
565 /// Creates a new tuple where `gen_set == kill_set == elem`.
566 pub(crate) fn from_elem(elem
: T
) -> Self
570 GenKill { gen_set: elem.clone(), kill_set: elem }
574 impl<E
: Idx
> GenKillSet
<E
> {
575 pub fn clear(&mut self) {
576 self.gen_set
.clear();
577 self.kill_set
.clear();
580 pub fn gen(&mut self, e
: E
) {
581 self.gen_set
.insert(e
);
582 self.kill_set
.remove(e
);
585 pub fn gen_all(&mut self, i
: impl IntoIterator
<Item
: Borrow
<E
>>) {
587 self.gen(*j
.borrow());
591 pub fn kill(&mut self, e
: E
) {
592 self.gen_set
.remove(e
);
593 self.kill_set
.insert(e
);
596 pub fn kill_all(&mut self, i
: impl IntoIterator
<Item
: Borrow
<E
>>) {
598 self.kill(*j
.borrow());
602 /// Computes `(set ∪ gen) - kill` and assigns the result to `set`.
603 pub(crate) fn apply(&self, set
: &mut BitSet
<E
>) {
604 set
.union(&self.gen_set
);
605 set
.subtract(&self.kill_set
);
610 pub struct AllSets
<E
: Idx
> {
611 /// Analysis bitwidth for each block.
612 bits_per_block
: usize,
614 /// For each block, bits valid on entry to the block.
615 on_entry
: Vec
<BitSet
<E
>>,
617 /// The transfer function of each block expressed as the set of bits
618 /// generated and killed by executing the statements + terminator in the
619 /// block -- with one caveat. In particular, for *call terminators*, the
620 /// effect of storing the destination is not included, since that only takes
621 /// effect on the **success** edge (and not the unwind edge).
622 trans
: Vec
<GenKillSet
<E
>>,
625 impl<E
: Idx
> AllSets
<E
> {
626 pub fn bits_per_block(&self) -> usize {
630 pub fn get_mut(&mut self, block_idx
: usize) -> (&mut BitSet
<E
>, &mut GenKillSet
<E
>) {
631 (&mut self.on_entry
[block_idx
], &mut self.trans
[block_idx
])
634 pub fn trans_for(&self, block_idx
: usize) -> &GenKillSet
<E
> {
635 &self.trans
[block_idx
]
637 pub fn trans_mut_for(&mut self, block_idx
: usize) -> &mut GenKillSet
<E
> {
638 &mut self.trans
[block_idx
]
640 pub fn entry_set_for(&self, block_idx
: usize) -> &BitSet
<E
> {
641 &self.on_entry
[block_idx
]
643 pub fn entry_set_mut_for(&mut self, block_idx
: usize) -> &mut BitSet
<E
> {
644 &mut self.on_entry
[block_idx
]
646 pub fn gen_set_for(&self, block_idx
: usize) -> &HybridBitSet
<E
> {
647 &self.trans_for(block_idx
).gen_set
649 pub fn kill_set_for(&self, block_idx
: usize) -> &HybridBitSet
<E
> {
650 &self.trans_for(block_idx
).kill_set
654 /// Parameterization for the precise form of data flow that is used.
656 /// `BottomValue` determines whether the initial entry set for each basic block is empty or full.
657 /// This also determines the semantics of the lattice `join` operator used to merge dataflow
658 /// results, since dataflow works by starting at the bottom and moving monotonically to a fixed
661 /// This means, for propagation across the graph, that you either want to start at all-zeroes and
662 /// then use Union as your merge when propagating, or you start at all-ones and then use Intersect
663 /// as your merge when propagating.
664 pub trait BottomValue
{
665 /// Specifies the initial value for each bit in the entry set for each basic block.
666 const BOTTOM_VALUE
: bool
;
668 /// Merges `in_set` into `inout_set`, returning `true` if `inout_set` changed.
670 /// It is almost certainly wrong to override this, since it automatically applies
671 /// * `inout_set & in_set` if `BOTTOM_VALUE == true`
672 /// * `inout_set | in_set` if `BOTTOM_VALUE == false`
674 /// This means that if a bit is not `BOTTOM_VALUE`, it is propagated into all target blocks.
675 /// For clarity, the above statement again from a different perspective:
676 /// A bit in the block's entry set is `!BOTTOM_VALUE` if *any* predecessor block's bit value is
679 /// There are situations where you want the opposite behaviour: propagate only if *all*
680 /// predecessor blocks's value is `!BOTTOM_VALUE`.
681 /// E.g. if you want to know whether a bit is *definitely* set at a specific location. This
682 /// means that all code paths leading to the location must have set the bit, instead of any
683 /// code path leading there.
685 /// If you want this kind of "definitely set" analysis, you need to
686 /// 1. Invert `BOTTOM_VALUE`
687 /// 2. Reset the `entry_set` in `start_block_effect` to `!BOTTOM_VALUE`
688 /// 3. Override `join` to do the opposite from what it's doing now.
690 fn join
<T
: Idx
>(&self, inout_set
: &mut BitSet
<T
>, in_set
: &BitSet
<T
>) -> bool
{
691 if !Self::BOTTOM_VALUE { inout_set.union(in_set) }
else { inout_set.intersect(in_set) }
695 /// A specific flavor of dataflow analysis.
697 /// To run a dataflow analysis, one sets up an initial state for the
698 /// `START_BLOCK` via `start_block_effect` and a transfer function (`trans`)
699 /// for each block individually. The entry set for all other basic blocks is
700 /// initialized to `Self::BOTTOM_VALUE`. The dataflow analysis then
701 /// iteratively modifies the various entry sets (but leaves the the transfer
702 /// function unchanged). `BottomValue::join` is used to merge the bitsets from
703 /// two blocks (e.g. when two blocks' terminator jumps to a single block, that
704 /// target block's state is the merged state of both incoming blocks).
705 pub trait BitDenotation
<'tcx
>: BottomValue
{
706 /// Specifies what index type is used to access the bitvector.
709 /// A name describing the dataflow analysis that this
710 /// `BitDenotation` is supporting. The name should be something
711 /// suitable for plugging in as part of a filename (i.e., avoid
712 /// space-characters or other things that tend to look bad on a
713 /// file system, like slashes or periods). It is also better for
714 /// the name to be reasonably short, again because it will be
715 /// plugged into a filename.
716 fn name() -> &'
static str;
718 /// Size of each bitvector allocated for each block in the analysis.
719 fn bits_per_block(&self) -> usize;
721 /// Mutates the entry set according to the effects that
722 /// have been established *prior* to entering the start
723 /// block. This can't access the gen/kill sets, because
724 /// these won't be accounted for correctly.
726 /// (For example, establishing the call arguments.)
727 fn start_block_effect(&self, entry_set
: &mut BitSet
<Self::Idx
>);
729 /// Similar to `statement_effect`, except it applies
730 /// *just before* the statement rather than *just after* it.
732 /// This matters for "dataflow at location" APIs, because the
733 /// before-statement effect is visible while visiting the
734 /// statement, while the after-statement effect only becomes
735 /// visible at the next statement.
737 /// Both the before-statement and after-statement effects are
738 /// applied, in that order, before moving for the next
740 fn before_statement_effect(&self, _trans
: &mut GenKillSet
<Self::Idx
>, _location
: Location
) {}
742 /// Mutates the block-sets (the flow sets for the given
743 /// basic block) according to the effects of evaluating statement.
745 /// This is used, in particular, for building up the
746 /// "transfer-function" representing the overall-effect of the
747 /// block, represented via GEN and KILL sets.
749 /// The statement is identified as `bb_data[idx_stmt]`, where
750 /// `bb_data` is the sequence of statements identified by `bb` in
752 fn statement_effect(&self, trans
: &mut GenKillSet
<Self::Idx
>, location
: Location
);
754 /// Similar to `terminator_effect`, except it applies
755 /// *just before* the terminator rather than *just after* it.
757 /// This matters for "dataflow at location" APIs, because the
758 /// before-terminator effect is visible while visiting the
759 /// terminator, while the after-terminator effect only becomes
760 /// visible at the terminator's successors.
762 /// Both the before-terminator and after-terminator effects are
763 /// applied, in that order, before moving for the next
765 fn before_terminator_effect(&self, _trans
: &mut GenKillSet
<Self::Idx
>, _location
: Location
) {}
767 /// Mutates the block-sets (the flow sets for the given
768 /// basic block) according to the effects of evaluating
771 /// This is used, in particular, for building up the
772 /// "transfer-function" representing the overall-effect of the
773 /// block, represented via GEN and KILL sets.
775 /// The effects applied here cannot depend on which branch the
777 fn terminator_effect(&self, trans
: &mut GenKillSet
<Self::Idx
>, location
: Location
);
779 /// Mutates the block-sets according to the (flow-dependent)
780 /// effect of a successful return from a Call terminator.
782 /// If basic-block BB_x ends with a call-instruction that, upon
783 /// successful return, flows to BB_y, then this method will be
784 /// called on the exit flow-state of BB_x in order to set up the
785 /// entry flow-state of BB_y.
787 /// This is used, in particular, as a special case during the
788 /// "propagate" loop where all of the basic blocks are repeatedly
789 /// visited. Since the effects of a Call terminator are
790 /// flow-dependent, the current MIR cannot encode them via just
791 /// GEN and KILL sets attached to the block, and so instead we add
792 /// this extra machinery to represent the flow-dependent effect.
794 // FIXME: right now this is a bit of a wart in the API. It might
795 // be better to represent this as an additional gen- and
796 // kill-sets associated with each edge coming out of the basic
798 fn propagate_call_return(
800 in_out
: &mut BitSet
<Self::Idx
>,
801 call_bb
: mir
::BasicBlock
,
802 dest_bb
: mir
::BasicBlock
,
803 dest_place
: &mir
::Place
<'tcx
>,
807 impl<'a
, 'tcx
, D
> DataflowAnalysis
<'a
, 'tcx
, D
>
809 D
: BitDenotation
<'tcx
>,
812 body
: &'a Body
<'tcx
>,
813 dead_unwinds
: &'a BitSet
<mir
::BasicBlock
>,
816 let bits_per_block
= denotation
.bits_per_block();
817 let num_blocks
= body
.basic_blocks().len();
819 let on_entry
= if D
::BOTTOM_VALUE
{
820 vec
![BitSet
::new_filled(bits_per_block
); num_blocks
]
822 vec
![BitSet
::new_empty(bits_per_block
); num_blocks
]
824 let nop
= GenKill
::from_elem(HybridBitSet
::new_empty(bits_per_block
));
829 flow_state
: DataflowState
{
830 sets
: AllSets { bits_per_block, on_entry, trans: vec![nop; num_blocks] }
,
831 operator
: denotation
,
837 impl<'a
, 'tcx
, D
> DataflowAnalysis
<'a
, 'tcx
, D
>
839 D
: BitDenotation
<'tcx
>,
841 /// Propagates the bits of `in_out` into all the successors of `bb`,
842 /// using bitwise operator denoted by `self.operator`.
844 /// For most blocks, this is entirely uniform. However, for blocks
845 /// that end with a call terminator, the effect of the call on the
846 /// dataflow state may depend on whether the call returned
847 /// successfully or unwound.
849 /// To reflect this, the `propagate_call_return` method of the
850 /// `BitDenotation` mutates `in_out` when propagating `in_out` via
851 /// a call terminator; such mutation is performed *last*, to
852 /// ensure its side-effects do not leak elsewhere (e.g., into
854 fn propagate_bits_into_graph_successors_of(
856 in_out
: &mut BitSet
<D
::Idx
>,
857 (bb
, bb_data
): (mir
::BasicBlock
, &mir
::BasicBlockData
<'tcx
>),
858 dirty_list
: &mut WorkQueue
<mir
::BasicBlock
>,
860 match bb_data
.terminator().kind
{
861 mir
::TerminatorKind
::Return
862 | mir
::TerminatorKind
::Resume
863 | mir
::TerminatorKind
::Abort
864 | mir
::TerminatorKind
::GeneratorDrop
865 | mir
::TerminatorKind
::Unreachable
=> {}
866 mir
::TerminatorKind
::Goto { target }
867 | mir
::TerminatorKind
::Assert { target, cleanup: None, .. }
868 | mir
::TerminatorKind
::Yield { resume: target, drop: None, .. }
869 | mir
::TerminatorKind
::Drop { target, location: _, unwind: None }
870 | mir
::TerminatorKind
::DropAndReplace { target, value: _, location: _, unwind: None }
=>
872 self.propagate_bits_into_entry_set_for(in_out
, target
, dirty_list
);
874 mir
::TerminatorKind
::Yield { resume: target, drop: Some(drop), .. }
=> {
875 self.propagate_bits_into_entry_set_for(in_out
, target
, dirty_list
);
876 self.propagate_bits_into_entry_set_for(in_out
, drop
, dirty_list
);
878 mir
::TerminatorKind
::Assert { target, cleanup: Some(unwind), .. }
879 | mir
::TerminatorKind
::Drop { target, location: _, unwind: Some(unwind) }
880 | mir
::TerminatorKind
::DropAndReplace
{
884 unwind
: Some(unwind
),
886 self.propagate_bits_into_entry_set_for(in_out
, target
, dirty_list
);
887 if !self.dead_unwinds
.contains(bb
) {
888 self.propagate_bits_into_entry_set_for(in_out
, unwind
, dirty_list
);
891 mir
::TerminatorKind
::SwitchInt { ref targets, .. }
=> {
892 for target
in targets
{
893 self.propagate_bits_into_entry_set_for(in_out
, *target
, dirty_list
);
896 mir
::TerminatorKind
::Call { cleanup, ref destination, .. }
=> {
897 if let Some(unwind
) = cleanup
{
898 if !self.dead_unwinds
.contains(bb
) {
899 self.propagate_bits_into_entry_set_for(in_out
, unwind
, dirty_list
);
902 if let Some((ref dest_place
, dest_bb
)) = *destination
{
903 // N.B.: This must be done *last*, after all other
904 // propagation, as documented in comment above.
905 self.flow_state
.operator
.propagate_call_return(in_out
, bb
, dest_bb
, dest_place
);
906 self.propagate_bits_into_entry_set_for(in_out
, dest_bb
, dirty_list
);
909 mir
::TerminatorKind
::FalseEdges { real_target, imaginary_target }
=> {
910 self.propagate_bits_into_entry_set_for(in_out
, real_target
, dirty_list
);
911 self.propagate_bits_into_entry_set_for(in_out
, imaginary_target
, dirty_list
);
913 mir
::TerminatorKind
::FalseUnwind { real_target, unwind }
=> {
914 self.propagate_bits_into_entry_set_for(in_out
, real_target
, dirty_list
);
915 if let Some(unwind
) = unwind
{
916 if !self.dead_unwinds
.contains(bb
) {
917 self.propagate_bits_into_entry_set_for(in_out
, unwind
, dirty_list
);
924 fn propagate_bits_into_entry_set_for(
926 in_out
: &BitSet
<D
::Idx
>,
928 dirty_queue
: &mut WorkQueue
<mir
::BasicBlock
>,
930 let entry_set
= self.flow_state
.sets
.entry_set_mut_for(bb
.index());
931 let set_changed
= self.flow_state
.operator
.join(entry_set
, &in_out
);
933 dirty_queue
.insert(bb
);