// src/librustc_mir/dataflow/mod.rs (rustc 1.43.0)
use rustc_ast::ast::{self, MetaItem};
use rustc_ast_pretty::pprust;
use rustc_span::symbol::{sym, Symbol};

use rustc_data_structures::work_queue::WorkQueue;
use rustc_index::bit_set::{BitSet, HybridBitSet};
use rustc_index::vec::Idx;

use rustc::mir::traversal;
use rustc::mir::{self, BasicBlock, BasicBlockData, Body, Location, Statement, Terminator};
use rustc::session::Session;
use rustc::ty::{self, TyCtxt};
use rustc_hir::def_id::DefId;

use std::borrow::Borrow;
use std::fmt;
use std::io;
use std::path::PathBuf;

pub use self::at_location::{FlowAtLocation, FlowsAtLocation};
pub(crate) use self::drop_flag_effects::*;
pub use self::impls::borrows::Borrows;
pub use self::impls::DefinitelyInitializedPlaces;
pub use self::impls::EverInitializedPlaces;
pub use self::impls::{MaybeBorrowedLocals, MaybeMutBorrowedLocals};
pub use self::impls::{MaybeInitializedPlaces, MaybeUninitializedPlaces};
pub use self::impls::{MaybeRequiresStorage, MaybeStorageLive};

use self::move_paths::MoveData;

mod at_location;
pub mod drop_flag_effects;
pub mod generic;
mod graphviz;
mod impls;
pub mod move_paths;

pub(crate) mod indexes {
    pub(crate) use super::{
        impls::borrows::BorrowIndex,
        move_paths::{InitIndex, MoveOutIndex, MovePathIndex},
    };
}

pub(crate) struct DataflowBuilder<'a, 'tcx, BD>
where
    BD: BitDenotation<'tcx>,
{
    def_id: DefId,
    flow_state: DataflowAnalysis<'a, 'tcx, BD>,
    print_preflow_to: Option<String>,
    print_postflow_to: Option<String>,
}

/// `DebugFormatted` encapsulates the `"{:?}"` rendering of some
/// arbitrary value. You pay the cost of allocating an extra string
/// (and of rendering it up-front); in exchange, you don't have to
/// hand over ownership of your value or deal with borrowing it.
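///
/// A minimal sketch of the intended use:
///
/// ```ignore (illustrative)
/// let fmt = DebugFormatted::new(&vec![1, 2, 3]);
/// assert_eq!(format!("{:?}", fmt), "[1, 2, 3]");
/// ```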
pub struct DebugFormatted(String);

impl DebugFormatted {
    pub fn new(input: &dyn fmt::Debug) -> DebugFormatted {
        DebugFormatted(format!("{:?}", input))
    }
}

impl fmt::Debug for DebugFormatted {
    fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(w, "{}", self.0)
    }
}

pub trait Dataflow<'tcx, BD: BitDenotation<'tcx>> {
    /// Sets up and runs the dataflow problem, using `p` to render results if
    /// the implementation so chooses.
    fn dataflow<P>(&mut self, p: P)
    where
        P: Fn(&BD, BD::Idx) -> DebugFormatted,
    {
        let _ = p; // default implementation does not instrument the process.
        self.build_sets();
        self.propagate();
    }

    /// Sets up the entry, gen, and kill sets for this instance of a dataflow problem.
    fn build_sets(&mut self);

    /// Finds a fixed-point solution to this instance of a dataflow problem.
    fn propagate(&mut self);
}

impl<'a, 'tcx, BD> Dataflow<'tcx, BD> for DataflowBuilder<'a, 'tcx, BD>
where
    BD: BitDenotation<'tcx>,
{
    fn dataflow<P>(&mut self, p: P)
    where
        P: Fn(&BD, BD::Idx) -> DebugFormatted,
    {
        self.flow_state.build_sets();
        self.pre_dataflow_instrumentation(|c, i| p(c, i)).unwrap();
        self.flow_state.propagate();
        self.post_dataflow_instrumentation(|c, i| p(c, i)).unwrap();
    }

    fn build_sets(&mut self) {
        self.flow_state.build_sets();
    }
    fn propagate(&mut self) {
        self.flow_state.propagate();
    }
}

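/// Scans `attrs` for an attribute of the form `#[rustc_mir(...)]` whose list
/// contains a meta item named `name`, and returns that meta item.
///
/// For example (a sketch of the attribute shape used by rustc's dataflow
/// tests): `#[rustc_mir(borrowck_graphviz_postflow = "suffix.dot")]` makes
/// `has_rustc_mir_with(attrs, sym::borrowck_graphviz_postflow)` return the
/// `borrowck_graphviz_postflow = "suffix.dot"` item.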
pub(crate) fn has_rustc_mir_with(attrs: &[ast::Attribute], name: Symbol) -> Option<MetaItem> {
    for attr in attrs {
        if attr.check_name(sym::rustc_mir) {
            let items = attr.meta_item_list();
            for item in items.iter().flat_map(|l| l.iter()) {
                match item.meta_item() {
                    Some(mi) if mi.check_name(name) => return Some(mi.clone()),
                    _ => continue,
                }
            }
        }
    }
    None
}

pub struct MoveDataParamEnv<'tcx> {
    pub(crate) move_data: MoveData<'tcx>,
    pub(crate) param_env: ty::ParamEnv<'tcx>,
}

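/// Runs the dataflow operator `bd` over `body` to a fixed point, returning
/// the per-block entry sets and transfer functions.
///
/// A hedged usage sketch (`MyAnalysis` is a hypothetical `BitDenotation`
/// implementor; the renderer closure only affects graphviz output):
///
/// ```ignore (illustrative)
/// let results = do_dataflow(tcx, body, def_id, &attrs, &dead_unwinds,
///                           MyAnalysis::new(body), |_bd, i| DebugFormatted::new(&i));
/// ```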
pub fn do_dataflow<'a, 'tcx, BD, P>(
    tcx: TyCtxt<'tcx>,
    body: &'a Body<'tcx>,
    def_id: DefId,
    attributes: &[ast::Attribute],
    dead_unwinds: &BitSet<BasicBlock>,
    bd: BD,
    p: P,
) -> DataflowResults<'tcx, BD>
where
    BD: BitDenotation<'tcx>,
    P: Fn(&BD, BD::Idx) -> DebugFormatted,
{
    let flow_state = DataflowAnalysis::new(body, dead_unwinds, bd);
    flow_state.run(tcx, def_id, attributes, p)
}

impl<'a, 'tcx, BD> DataflowAnalysis<'a, 'tcx, BD>
where
    BD: BitDenotation<'tcx>,
{
    pub(crate) fn run<P>(
        self,
        tcx: TyCtxt<'tcx>,
        def_id: DefId,
        attributes: &[ast::Attribute],
        p: P,
    ) -> DataflowResults<'tcx, BD>
    where
        P: Fn(&BD, BD::Idx) -> DebugFormatted,
    {
        let name_found = |sess: &Session, attrs: &[ast::Attribute], name| -> Option<String> {
            if let Some(item) = has_rustc_mir_with(attrs, name) {
                if let Some(s) = item.value_str() {
                    return Some(s.to_string());
                } else {
                    let path = pprust::path_to_string(&item.path);
                    sess.span_err(item.span, &format!("{} attribute requires a path", path));
                    return None;
                }
            }
            None
        };

        let print_preflow_to = name_found(tcx.sess, attributes, sym::borrowck_graphviz_preflow);
        let print_postflow_to = name_found(tcx.sess, attributes, sym::borrowck_graphviz_postflow);

        let mut mbcx =
            DataflowBuilder { def_id, print_preflow_to, print_postflow_to, flow_state: self };

        mbcx.dataflow(p);
        mbcx.flow_state.results()
    }
}

struct PropagationContext<'b, 'a, 'tcx, O>
where
    O: BitDenotation<'tcx>,
{
    builder: &'b mut DataflowAnalysis<'a, 'tcx, O>,
}

impl<'a, 'tcx, BD> DataflowAnalysis<'a, 'tcx, BD>
where
    BD: BitDenotation<'tcx>,
{
    fn propagate(&mut self) {
        let mut temp = BitSet::new_empty(self.flow_state.sets.bits_per_block);
        let mut propcx = PropagationContext { builder: self };
        propcx.walk_cfg(&mut temp);
    }

    fn build_sets(&mut self) {
        // Build the transfer function for each block.
        for (bb, data) in self.body.basic_blocks().iter_enumerated() {
            let &mir::BasicBlockData { ref statements, ref terminator, is_cleanup: _ } = data;

            let trans = self.flow_state.sets.trans_mut_for(bb.index());
            for j_stmt in 0..statements.len() {
                let location = Location { block: bb, statement_index: j_stmt };
                self.flow_state.operator.before_statement_effect(trans, location);
                self.flow_state.operator.statement_effect(trans, location);
            }

            if terminator.is_some() {
                let location = Location { block: bb, statement_index: statements.len() };
                self.flow_state.operator.before_terminator_effect(trans, location);
                self.flow_state.operator.terminator_effect(trans, location);
            }
        }

        // Initialize the flow state at entry to the start block.
        let on_entry = self.flow_state.sets.entry_set_mut_for(mir::START_BLOCK.index());
        self.flow_state.operator.start_block_effect(on_entry);
    }
}

impl<'b, 'a, 'tcx, BD> PropagationContext<'b, 'a, 'tcx, BD>
where
    BD: BitDenotation<'tcx>,
{
    fn walk_cfg(&mut self, in_out: &mut BitSet<BD::Idx>) {
        let body = self.builder.body;

        // Initialize the dirty queue in reverse post-order. This makes it more likely that the
        // entry state for each basic block will have the effects of its predecessors applied
        // before it is processed. In fact, for CFGs without back edges, this guarantees that
        // dataflow will converge in exactly `N` iterations, where `N` is the number of basic
        // blocks.
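        //
        // (Worked example: in a diamond CFG `A -> {B, C} -> D`, reverse
        // post-order visits `A`, then `B` and `C`, then `D`, so `D` is first
        // processed only after the effects of both predecessors have been
        // joined into its entry set.)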
        let mut dirty_queue: WorkQueue<mir::BasicBlock> =
            WorkQueue::with_none(body.basic_blocks().len());
        for (bb, _) in traversal::reverse_postorder(body) {
            dirty_queue.insert(bb);
        }

        // Add blocks which are not reachable from START_BLOCK to the work queue. These blocks will
        // be processed after the ones added above.
        for bb in body.basic_blocks().indices() {
            dirty_queue.insert(bb);
        }

        while let Some(bb) = dirty_queue.pop() {
            let (on_entry, trans) = self.builder.flow_state.sets.get_mut(bb.index());
            debug_assert!(in_out.words().len() == on_entry.words().len());
            in_out.overwrite(on_entry);
            trans.apply(in_out);

            let bb_data = &body[bb];
            self.builder.propagate_bits_into_graph_successors_of(
                in_out,
                (bb, bb_data),
                &mut dirty_queue,
            );
        }
    }
}

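/// Prefixes the file name of `path` with `context` and an underscore, so that
/// e.g. `dataflow_path("preflow", "flow.dot")` yields `preflow_flow.dot`.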
fn dataflow_path(context: &str, path: &str) -> PathBuf {
    let mut path = PathBuf::from(path);
    let new_file_name = {
        let orig_file_name = path.file_name().unwrap().to_str().unwrap();
        format!("{}_{}", context, orig_file_name)
    };
    path.set_file_name(new_file_name);
    path
}

impl<'a, 'tcx, BD> DataflowBuilder<'a, 'tcx, BD>
where
    BD: BitDenotation<'tcx>,
{
    fn pre_dataflow_instrumentation<P>(&self, p: P) -> io::Result<()>
    where
        P: Fn(&BD, BD::Idx) -> DebugFormatted,
    {
        if let Some(ref path_str) = self.print_preflow_to {
            let path = dataflow_path(BD::name(), path_str);
            graphviz::print_borrowck_graph_to(self, &path, p)
        } else {
            Ok(())
        }
    }

    fn post_dataflow_instrumentation<P>(&self, p: P) -> io::Result<()>
    where
        P: Fn(&BD, BD::Idx) -> DebugFormatted,
    {
        if let Some(ref path_str) = self.print_postflow_to {
            let path = dataflow_path(BD::name(), path_str);
            graphviz::print_borrowck_graph_to(self, &path, p)
        } else {
            Ok(())
        }
    }
}

/// `DataflowResultsConsumer` abstracts over walking the MIR with some
/// already constructed dataflow results.
///
/// It abstracts over the `FlowState` and also completely hides the
/// underlying flow analysis results, because it needs to handle cases
/// where we are combining the results of *multiple* flow analyses
/// (e.g., borrows + inits + uninits).
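///
/// A hedged implementation skeleton (`MyCx` and its chosen flow type are
/// hypothetical, for illustration only):
///
/// ```ignore (illustrative)
/// impl<'a, 'tcx> DataflowResultsConsumer<'a, 'tcx> for MyCx<'a, 'tcx> {
///     type FlowState = FlowAtLocation<'tcx, MaybeInitializedPlaces<'a, 'tcx>>;
///
///     fn body(&self) -> &'a Body<'tcx> {
///         self.body
///     }
///
///     fn visit_statement_entry(
///         &mut self,
///         loc: Location,
///         stmt: &'a Statement<'tcx>,
///         flow: &Self::FlowState,
///     ) {
///         // `flow` reflects the state on entry to `stmt` at `loc`.
///     }
/// }
/// ```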
pub(crate) trait DataflowResultsConsumer<'a, 'tcx: 'a> {
    type FlowState: FlowsAtLocation;

    // Observation hooks: override (at least one of) these to get analysis feedback.
    fn visit_block_entry(&mut self, _bb: BasicBlock, _flow_state: &Self::FlowState) {}

    fn visit_statement_entry(
        &mut self,
        _loc: Location,
        _stmt: &'a Statement<'tcx>,
        _flow_state: &Self::FlowState,
    ) {
    }

    fn visit_terminator_entry(
        &mut self,
        _loc: Location,
        _term: &'a Terminator<'tcx>,
        _flow_state: &Self::FlowState,
    ) {
    }

    // Main entry point: this drives the processing of results.

    fn analyze_results(&mut self, flow_uninit: &mut Self::FlowState) {
        let flow = flow_uninit;
        for (bb, _) in traversal::reverse_postorder(self.body()) {
            flow.reset_to_entry_of(bb);
            self.process_basic_block(bb, flow);
        }
    }

    fn process_basic_block(&mut self, bb: BasicBlock, flow_state: &mut Self::FlowState) {
        self.visit_block_entry(bb, flow_state);

        let BasicBlockData { ref statements, ref terminator, is_cleanup: _ } = self.body()[bb];
        let mut location = Location { block: bb, statement_index: 0 };
        for stmt in statements.iter() {
            flow_state.reconstruct_statement_effect(location);
            self.visit_statement_entry(location, stmt, flow_state);
            flow_state.apply_local_effect(location);
            location.statement_index += 1;
        }

        if let Some(ref term) = *terminator {
            flow_state.reconstruct_terminator_effect(location);
            self.visit_terminator_entry(location, term, flow_state);

            // We don't need to apply the effect of the terminator,
            // since we are only visiting dataflow state on control
            // flow entry to the various nodes. (But we still need to
            // reconstruct the effect, because the visit method might
            // inspect it.)
        }
    }

    // Delegated hooks: provide access to the MIR and process the flow state.

    fn body(&self) -> &'a Body<'tcx>;
}

/// Allows iterating dataflow results in a flexible and reasonably fast way.
pub struct DataflowResultsCursor<'mir, 'tcx, BD, DR = DataflowResults<'tcx, BD>>
where
    BD: BitDenotation<'tcx>,
    DR: Borrow<DataflowResults<'tcx, BD>>,
{
    flow_state: FlowAtLocation<'tcx, BD, DR>,

    // The statement (or terminator) whose effect has been reconstructed in
    // flow_state.
    curr_loc: Option<Location>,

    body: &'mir Body<'tcx>,
}

pub type DataflowResultsRefCursor<'mir, 'tcx, BD> =
    DataflowResultsCursor<'mir, 'tcx, BD, &'mir DataflowResults<'tcx, BD>>;

impl<'mir, 'tcx, BD, DR> DataflowResultsCursor<'mir, 'tcx, BD, DR>
where
    BD: BitDenotation<'tcx>,
    DR: Borrow<DataflowResults<'tcx, BD>>,
{
    pub fn new(result: DR, body: &'mir Body<'tcx>) -> Self {
        DataflowResultsCursor { flow_state: FlowAtLocation::new(result), curr_loc: None, body }
    }

    /// Seek to the given location in MIR. This method is fast if you are
    /// traversing your MIR statements in order.
    ///
    /// After calling `seek`, the current state will reflect all effects up to
    /// and including the `before_statement_effect` of the statement at location
    /// `loc`. The `statement_effect` of the statement at `loc` will be
    /// available as the current effect (see e.g. `each_gen_bit`).
    ///
    /// If `loc.statement_index` equals the number of statements in the block,
    /// we will reconstruct the terminator effect in the same way as described
    /// above.
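    ///
    /// A hedged usage sketch (`results`, `body`, `bb`, and `idx` are assumed
    /// to be in scope):
    ///
    /// ```ignore (illustrative)
    /// let mut cursor = DataflowResultsCursor::new(results, body);
    /// cursor.seek(Location { block: bb, statement_index: 0 });
    /// if cursor.contains(idx) {
    ///     // the bit is set on entry to the first statement of `bb`
    /// }
    /// ```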
    pub fn seek(&mut self, loc: Location) {
        if self.curr_loc.map(|cur| loc == cur).unwrap_or(false) {
            return;
        }

        let start_index;
        let should_reset = match self.curr_loc {
            None => true,
            Some(cur) if loc.block != cur.block || loc.statement_index < cur.statement_index => {
                true
            }
            _ => false,
        };
        if should_reset {
            self.flow_state.reset_to_entry_of(loc.block);
            start_index = 0;
        } else {
            let curr_loc = self.curr_loc.unwrap();
            start_index = curr_loc.statement_index;
            // Apply the effect from the last seek to the current state.
            self.flow_state.apply_local_effect(curr_loc);
        }

        for stmt in start_index..loc.statement_index {
            let mut stmt_loc = loc;
            stmt_loc.statement_index = stmt;
            self.flow_state.reconstruct_statement_effect(stmt_loc);
            self.flow_state.apply_local_effect(stmt_loc);
        }

        if loc.statement_index == self.body[loc.block].statements.len() {
            self.flow_state.reconstruct_terminator_effect(loc);
        } else {
            self.flow_state.reconstruct_statement_effect(loc);
        }
        self.curr_loc = Some(loc);
    }

    /// Returns whether the current state contains bit `x`.
    pub fn contains(&self, x: BD::Idx) -> bool {
        self.flow_state.contains(x)
    }

    /// Iterates over each `gen` bit in the current effect (invoke `seek` first).
    pub fn each_gen_bit<F>(&self, f: F)
    where
        F: FnMut(BD::Idx),
    {
        self.flow_state.each_gen_bit(f)
    }

    pub fn get(&self) -> &BitSet<BD::Idx> {
        self.flow_state.as_dense()
    }
}

pub struct DataflowAnalysis<'a, 'tcx, O>
where
    O: BitDenotation<'tcx>,
{
    flow_state: DataflowState<'tcx, O>,
    dead_unwinds: &'a BitSet<mir::BasicBlock>,
    body: &'a Body<'tcx>,
}

impl<'a, 'tcx, O> DataflowAnalysis<'a, 'tcx, O>
where
    O: BitDenotation<'tcx>,
{
    pub fn results(self) -> DataflowResults<'tcx, O> {
        DataflowResults(self.flow_state)
    }

    pub fn body(&self) -> &'a Body<'tcx> {
        self.body
    }
}

pub struct DataflowResults<'tcx, O>(pub(crate) DataflowState<'tcx, O>)
where
    O: BitDenotation<'tcx>;

impl<'tcx, O: BitDenotation<'tcx>> DataflowResults<'tcx, O> {
    pub fn sets(&self) -> &AllSets<O::Idx> {
        &self.0.sets
    }

    pub fn operator(&self) -> &O {
        &self.0.operator
    }
}

/// State of a dataflow analysis; couples a collection of bit sets
/// with the operator used to initialize and merge bits during analysis.
pub struct DataflowState<'tcx, O: BitDenotation<'tcx>> {
    /// All the sets for the analysis. (Factored into its
    /// own structure so that we can borrow it mutably
    /// on its own separate from other fields.)
    pub sets: AllSets<O::Idx>,

    /// The operator used to initialize, combine, and interpret bits.
    pub(crate) operator: O,
}

impl<'tcx, O: BitDenotation<'tcx>> DataflowState<'tcx, O> {
    pub(crate) fn interpret_set<'c, P>(
        &self,
        o: &'c O,
        set: &BitSet<O::Idx>,
        render_idx: &P,
    ) -> Vec<DebugFormatted>
    where
        P: Fn(&O, O::Idx) -> DebugFormatted,
    {
        set.iter().map(|i| render_idx(o, i)).collect()
    }

    pub(crate) fn interpret_hybrid_set<'c, P>(
        &self,
        o: &'c O,
        set: &HybridBitSet<O::Idx>,
        render_idx: &P,
    ) -> Vec<DebugFormatted>
    where
        P: Fn(&O, O::Idx) -> DebugFormatted,
    {
        set.iter().map(|i| render_idx(o, i)).collect()
    }
}

/// A 2-tuple representing the "gen" and "kill" bitsets during
/// dataflow analysis.
///
/// It is best to ensure that the intersection of `gen_set` and
/// `kill_set` is empty; otherwise the results of the dataflow will
/// have a hidden dependency on the order in which bits are generated
/// and killed during the iteration. (The `gen` and `kill` methods
/// below enforce this invariant for you; see also the illustrative
/// sketch following the `GenKillSet` impl.)
#[derive(Debug, Clone, Copy)]
pub struct GenKill<T> {
    pub(crate) gen_set: T,
    pub(crate) kill_set: T,
}

pub type GenKillSet<T> = GenKill<HybridBitSet<T>>;

impl<T> GenKill<T> {
    /// Creates a new tuple where `gen_set == kill_set == elem`.
    pub(crate) fn from_elem(elem: T) -> Self
    where
        T: Clone,
    {
        GenKill { gen_set: elem.clone(), kill_set: elem }
    }
}

impl<E: Idx> GenKillSet<E> {
    pub fn clear(&mut self) {
        self.gen_set.clear();
        self.kill_set.clear();
    }

    pub fn gen(&mut self, e: E) {
        self.gen_set.insert(e);
        self.kill_set.remove(e);
    }

    pub fn gen_all(&mut self, i: impl IntoIterator<Item: Borrow<E>>) {
        for j in i {
            self.gen(*j.borrow());
        }
    }

    pub fn kill(&mut self, e: E) {
        self.gen_set.remove(e);
        self.kill_set.insert(e);
    }

    pub fn kill_all(&mut self, i: impl IntoIterator<Item: Borrow<E>>) {
        for j in i {
            self.kill(*j.borrow());
        }
    }

    /// Computes `(set ∪ gen) - kill` and assigns the result to `set`.
    pub(crate) fn apply(&self, set: &mut BitSet<E>) {
        set.union(&self.gen_set);
        set.subtract(&self.kill_set);
    }
}
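
// A minimal illustrative sketch of the gen/kill semantics above: `gen` and
// `kill` keep the two sets disjoint, and `apply` computes `(set ∪ gen) - kill`.
#[cfg(test)]
mod gen_kill_sketch {
    use super::*;

    #[test]
    fn apply_is_union_then_subtract() {
        let mut trans: GenKillSet<usize> = GenKill::from_elem(HybridBitSet::new_empty(4));
        trans.gen(1);
        trans.kill(0);

        // Start from {0}: union with gen {1} gives {0, 1}; subtracting
        // kill {0} leaves {1}.
        let mut set = BitSet::new_empty(4);
        set.insert(0);
        trans.apply(&mut set);

        assert!(set.contains(1));
        assert!(!set.contains(0));
    }
}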

#[derive(Debug)]
pub struct AllSets<E: Idx> {
    /// Analysis bitwidth for each block.
    bits_per_block: usize,

    /// For each block, bits valid on entry to the block.
    on_entry: Vec<BitSet<E>>,

    /// The transfer function of each block, expressed as the set of bits
    /// generated and killed by executing the statements + terminator in the
    /// block -- with one caveat. In particular, for *call terminators*, the
    /// effect of storing the destination is not included, since that only takes
    /// effect on the **success** edge (and not the unwind edge).
    trans: Vec<GenKillSet<E>>,
}

impl<E: Idx> AllSets<E> {
    pub fn bits_per_block(&self) -> usize {
        self.bits_per_block
    }

    pub fn get_mut(&mut self, block_idx: usize) -> (&mut BitSet<E>, &mut GenKillSet<E>) {
        (&mut self.on_entry[block_idx], &mut self.trans[block_idx])
    }

    pub fn trans_for(&self, block_idx: usize) -> &GenKillSet<E> {
        &self.trans[block_idx]
    }
    pub fn trans_mut_for(&mut self, block_idx: usize) -> &mut GenKillSet<E> {
        &mut self.trans[block_idx]
    }
    pub fn entry_set_for(&self, block_idx: usize) -> &BitSet<E> {
        &self.on_entry[block_idx]
    }
    pub fn entry_set_mut_for(&mut self, block_idx: usize) -> &mut BitSet<E> {
        &mut self.on_entry[block_idx]
    }
    pub fn gen_set_for(&self, block_idx: usize) -> &HybridBitSet<E> {
        &self.trans_for(block_idx).gen_set
    }
    pub fn kill_set_for(&self, block_idx: usize) -> &HybridBitSet<E> {
        &self.trans_for(block_idx).kill_set
    }
}

/// Parameterization for the precise form of data flow that is used.
///
/// `BottomValue` determines whether the initial entry set for each basic block is empty or full.
/// This also determines the semantics of the lattice `join` operator used to merge dataflow
/// results, since dataflow works by starting at the bottom and moving monotonically to a fixed
/// point.
///
/// This means, for propagation across the graph, that you either start at all-zeroes and then
/// use union as your merge when propagating, or you start at all-ones and then use intersection
/// as your merge when propagating.
pub trait BottomValue {
    /// Specifies the initial value for each bit in the entry set for each basic block.
    const BOTTOM_VALUE: bool;

    /// Merges `in_set` into `inout_set`, returning `true` if `inout_set` changed.
    ///
    /// It is almost certainly wrong to override this, since it automatically applies
    /// * `inout_set & in_set` if `BOTTOM_VALUE == true`
    /// * `inout_set | in_set` if `BOTTOM_VALUE == false`
    ///
    /// This means that if a bit is not `BOTTOM_VALUE`, it is propagated into all target blocks.
    /// To restate this from a different perspective: a bit in a block's entry set is
    /// `!BOTTOM_VALUE` if *any* predecessor block's bit value is `!BOTTOM_VALUE`.
    ///
    /// There are situations where you want the opposite behavior: propagate only if *all*
    /// predecessor blocks' values are `!BOTTOM_VALUE`. For example, if you want to know whether
    /// a bit is *definitely* set at a specific location, then all code paths leading to the
    /// location must have set the bit, not just some code path leading there.
    ///
    /// If you want this kind of "definitely set" analysis, you need to:
    /// 1. Invert `BOTTOM_VALUE`
    /// 2. Reset the `entry_set` in `start_block_effect` to `!BOTTOM_VALUE`
    /// 3. Override `join` to do the opposite of what it does now.
    ///
    /// (An illustrative sketch of both polarities follows this trait.)
    #[inline]
    fn join<T: Idx>(&self, inout_set: &mut BitSet<T>, in_set: &BitSet<T>) -> bool {
        if !Self::BOTTOM_VALUE { inout_set.union(in_set) } else { inout_set.intersect(in_set) }
    }
}
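
// A minimal illustrative sketch of the two polarities described above
// (hypothetical analyses; only the constants matter here):
//
// * a "maybe" property starts all-zeroes, so `join` is union;
// * a "definitely" property starts all-ones, so `join` is intersection.
#[cfg(test)]
mod bottom_value_sketch {
    use super::*;

    struct MaybeReachable;
    impl BottomValue for MaybeReachable {
        const BOTTOM_VALUE: bool = false; // empty entry sets; join = union
    }

    struct DefinitelyInitialized;
    impl BottomValue for DefinitelyInitialized {
        const BOTTOM_VALUE: bool = true; // full entry sets; join = intersection
    }

    #[test]
    fn join_polarity() {
        let mut partial = BitSet::new_empty(2);
        partial.insert(0);

        // Union join: {0} into {} yields {0}, and the set changed.
        let mut acc = BitSet::new_empty(2);
        assert!(MaybeReachable.join(&mut acc, &partial));
        assert!(acc.contains(0));

        // Intersection join: {0} into {0, 1} yields {0}, and the set changed.
        let mut ones = BitSet::new_filled(2);
        assert!(DefinitelyInitialized.join(&mut ones, &partial));
        assert!(ones.contains(0) && !ones.contains(1));
    }
}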

/// A specific flavor of dataflow analysis.
///
/// To run a dataflow analysis, one sets up an initial state for the
/// `START_BLOCK` via `start_block_effect` and a transfer function (`trans`)
/// for each block individually. The entry set for all other basic blocks is
/// initialized to `Self::BOTTOM_VALUE`. The dataflow analysis then
/// iteratively modifies the various entry sets (but leaves the transfer
/// function unchanged). `BottomValue::join` is used to merge the bitsets from
/// two blocks (e.g., when two blocks' terminators jump to a single block, that
/// target block's state is the merged state of both incoming blocks).
///
/// (A minimal illustrative implementor appears after this trait.)
pub trait BitDenotation<'tcx>: BottomValue {
    /// Specifies what index type is used to access the bitvector.
    type Idx: Idx;

    /// A name describing the dataflow analysis that this
    /// `BitDenotation` is supporting. The name should be something
    /// suitable for plugging in as part of a filename (i.e., avoid
    /// space characters or other things that tend to look bad in a
    /// file system, like slashes or periods). It is also better for
    /// the name to be reasonably short, again because it will be
    /// plugged into a filename.
    fn name() -> &'static str;

    /// Size of each bitvector allocated for each block in the analysis.
    fn bits_per_block(&self) -> usize;

    /// Mutates the entry set according to the effects that
    /// have been established *prior* to entering the start
    /// block. This can't access the gen/kill sets, because
    /// these won't be accounted for correctly.
    ///
    /// (For example, establishing the call arguments.)
    fn start_block_effect(&self, entry_set: &mut BitSet<Self::Idx>);

    /// Similar to `statement_effect`, except it applies
    /// *just before* the statement rather than *just after* it.
    ///
    /// This matters for "dataflow at location" APIs, because the
    /// before-statement effect is visible while visiting the
    /// statement, while the after-statement effect only becomes
    /// visible at the next statement.
    ///
    /// Both the before-statement and after-statement effects are
    /// applied, in that order, before moving on to the next
    /// statement.
    fn before_statement_effect(&self, _trans: &mut GenKillSet<Self::Idx>, _location: Location) {}

    /// Mutates the block-sets (the flow sets for the given
    /// basic block) according to the effects of evaluating the statement.
    ///
    /// This is used, in particular, for building up the
    /// "transfer function" representing the overall effect of the
    /// block, represented via GEN and KILL sets.
    ///
    /// The statement is identified as `bb_data[idx_stmt]`, where
    /// `bb_data` is the sequence of statements identified by `bb` in
    /// the MIR.
    fn statement_effect(&self, trans: &mut GenKillSet<Self::Idx>, location: Location);

    /// Similar to `terminator_effect`, except it applies
    /// *just before* the terminator rather than *just after* it.
    ///
    /// This matters for "dataflow at location" APIs, because the
    /// before-terminator effect is visible while visiting the
    /// terminator, while the after-terminator effect only becomes
    /// visible at the terminator's successors.
    ///
    /// Both the before-terminator and after-terminator effects are
    /// applied, in that order, before moving on to the next
    /// terminator.
    fn before_terminator_effect(&self, _trans: &mut GenKillSet<Self::Idx>, _location: Location) {}

    /// Mutates the block-sets (the flow sets for the given
    /// basic block) according to the effects of evaluating
    /// the terminator.
    ///
    /// This is used, in particular, for building up the
    /// "transfer function" representing the overall effect of the
    /// block, represented via GEN and KILL sets.
    ///
    /// The effects applied here cannot depend on which branch the
    /// terminator took.
    fn terminator_effect(&self, trans: &mut GenKillSet<Self::Idx>, location: Location);

    /// Mutates the block-sets according to the (flow-dependent)
    /// effect of a successful return from a Call terminator.
    ///
    /// If basic-block BB_x ends with a call-instruction that, upon
    /// successful return, flows to BB_y, then this method will be
    /// called on the exit flow-state of BB_x in order to set up the
    /// entry flow-state of BB_y.
    ///
    /// This is used, in particular, as a special case during the
    /// "propagate" loop where all of the basic blocks are repeatedly
    /// visited. Since the effects of a Call terminator are
    /// flow-dependent, the current MIR cannot encode them via just
    /// GEN and KILL sets attached to the block, and so instead we add
    /// this extra machinery to represent the flow-dependent effect.
    //
    // FIXME: right now this is a bit of a wart in the API. It might
    // be better to represent this as additional gen- and
    // kill-sets associated with each edge coming out of the basic
    // block.
    fn propagate_call_return(
        &self,
        in_out: &mut BitSet<Self::Idx>,
        call_bb: mir::BasicBlock,
        dest_bb: mir::BasicBlock,
        dest_place: &mir::Place<'tcx>,
    );
}
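
// An end-to-end illustrative sketch of the trait: a hypothetical analysis
// tracking which `Local`s are "maybe assigned". The statement/terminator
// transfer functions are stubbed to keep the sketch short, and the use of
// `Place::local` in `propagate_call_return` assumes the current `Place`
// representation.
#[cfg(test)]
mod bit_denotation_sketch {
    #![allow(dead_code)] // never instantiated; this is only a sketch

    use super::*;

    struct MaybeAssignedLocals<'a, 'tcx> {
        body: &'a Body<'tcx>,
    }

    impl<'a, 'tcx> BottomValue for MaybeAssignedLocals<'a, 'tcx> {
        // "Maybe" property: entry sets start empty and `join` is union.
        const BOTTOM_VALUE: bool = false;
    }

    impl<'a, 'tcx> BitDenotation<'tcx> for MaybeAssignedLocals<'a, 'tcx> {
        type Idx = mir::Local;

        fn name() -> &'static str {
            "maybe_assigned_locals"
        }

        fn bits_per_block(&self) -> usize {
            self.body.local_decls.len()
        }

        fn start_block_effect(&self, entry_set: &mut BitSet<mir::Local>) {
            // Function arguments are assigned on entry.
            for arg in self.body.args_iter() {
                entry_set.insert(arg);
            }
        }

        fn statement_effect(&self, _trans: &mut GenKillSet<mir::Local>, _location: Location) {
            // A real analysis would `gen` the destination of each assignment.
        }

        fn terminator_effect(&self, _trans: &mut GenKillSet<mir::Local>, _location: Location) {
            // A real analysis would handle terminator-specific effects here.
        }

        fn propagate_call_return(
            &self,
            in_out: &mut BitSet<mir::Local>,
            _call_bb: mir::BasicBlock,
            _dest_bb: mir::BasicBlock,
            dest_place: &mir::Place<'tcx>,
        ) {
            // The call destination is assigned only on the success edge.
            in_out.insert(dest_place.local);
        }
    }
}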

impl<'a, 'tcx, D> DataflowAnalysis<'a, 'tcx, D>
where
    D: BitDenotation<'tcx>,
{
    pub fn new(
        body: &'a Body<'tcx>,
        dead_unwinds: &'a BitSet<mir::BasicBlock>,
        denotation: D,
    ) -> Self {
        let bits_per_block = denotation.bits_per_block();
        let num_blocks = body.basic_blocks().len();

        let on_entry = if D::BOTTOM_VALUE {
            vec![BitSet::new_filled(bits_per_block); num_blocks]
        } else {
            vec![BitSet::new_empty(bits_per_block); num_blocks]
        };
        let nop = GenKill::from_elem(HybridBitSet::new_empty(bits_per_block));

        DataflowAnalysis {
            body,
            dead_unwinds,
            flow_state: DataflowState {
                sets: AllSets { bits_per_block, on_entry, trans: vec![nop; num_blocks] },
                operator: denotation,
            },
        }
    }
}

impl<'a, 'tcx, D> DataflowAnalysis<'a, 'tcx, D>
where
    D: BitDenotation<'tcx>,
{
    /// Propagates the bits of `in_out` into all the successors of `bb`,
    /// using the bitwise operator denoted by `self.operator`.
    ///
    /// For most blocks, this is entirely uniform. However, for blocks
    /// that end with a call terminator, the effect of the call on the
    /// dataflow state may depend on whether the call returned
    /// successfully or unwound.
    ///
    /// To reflect this, the `propagate_call_return` method of the
    /// `BitDenotation` mutates `in_out` when propagating `in_out` via
    /// a call terminator; such mutation is performed *last*, to
    /// ensure its side-effects do not leak elsewhere (e.g., into the
    /// unwind target).
    fn propagate_bits_into_graph_successors_of(
        &mut self,
        in_out: &mut BitSet<D::Idx>,
        (bb, bb_data): (mir::BasicBlock, &mir::BasicBlockData<'tcx>),
        dirty_list: &mut WorkQueue<mir::BasicBlock>,
    ) {
        match bb_data.terminator().kind {
            mir::TerminatorKind::Return
            | mir::TerminatorKind::Resume
            | mir::TerminatorKind::Abort
            | mir::TerminatorKind::GeneratorDrop
            | mir::TerminatorKind::Unreachable => {}
            mir::TerminatorKind::Goto { target }
            | mir::TerminatorKind::Assert { target, cleanup: None, .. }
            | mir::TerminatorKind::Yield { resume: target, drop: None, .. }
            | mir::TerminatorKind::Drop { target, location: _, unwind: None }
            | mir::TerminatorKind::DropAndReplace { target, value: _, location: _, unwind: None } =>
            {
                self.propagate_bits_into_entry_set_for(in_out, target, dirty_list);
            }
            mir::TerminatorKind::Yield { resume: target, drop: Some(drop), .. } => {
                self.propagate_bits_into_entry_set_for(in_out, target, dirty_list);
                self.propagate_bits_into_entry_set_for(in_out, drop, dirty_list);
            }
            mir::TerminatorKind::Assert { target, cleanup: Some(unwind), .. }
            | mir::TerminatorKind::Drop { target, location: _, unwind: Some(unwind) }
            | mir::TerminatorKind::DropAndReplace {
                target,
                value: _,
                location: _,
                unwind: Some(unwind),
            } => {
                self.propagate_bits_into_entry_set_for(in_out, target, dirty_list);
                if !self.dead_unwinds.contains(bb) {
                    self.propagate_bits_into_entry_set_for(in_out, unwind, dirty_list);
                }
            }
            mir::TerminatorKind::SwitchInt { ref targets, .. } => {
                for target in targets {
                    self.propagate_bits_into_entry_set_for(in_out, *target, dirty_list);
                }
            }
            mir::TerminatorKind::Call { cleanup, ref destination, .. } => {
                if let Some(unwind) = cleanup {
                    if !self.dead_unwinds.contains(bb) {
                        self.propagate_bits_into_entry_set_for(in_out, unwind, dirty_list);
                    }
                }
                if let Some((ref dest_place, dest_bb)) = *destination {
                    // N.B.: This must be done *last*, after all other
                    // propagation, as documented in the comment above.
                    self.flow_state.operator.propagate_call_return(in_out, bb, dest_bb, dest_place);
                    self.propagate_bits_into_entry_set_for(in_out, dest_bb, dirty_list);
                }
            }
            mir::TerminatorKind::FalseEdges { real_target, imaginary_target } => {
                self.propagate_bits_into_entry_set_for(in_out, real_target, dirty_list);
                self.propagate_bits_into_entry_set_for(in_out, imaginary_target, dirty_list);
            }
            mir::TerminatorKind::FalseUnwind { real_target, unwind } => {
                self.propagate_bits_into_entry_set_for(in_out, real_target, dirty_list);
                if let Some(unwind) = unwind {
                    if !self.dead_unwinds.contains(bb) {
                        self.propagate_bits_into_entry_set_for(in_out, unwind, dirty_list);
                    }
                }
            }
        }
    }

    fn propagate_bits_into_entry_set_for(
        &mut self,
        in_out: &BitSet<D::Idx>,
        bb: mir::BasicBlock,
        dirty_queue: &mut WorkQueue<mir::BasicBlock>,
    ) {
        let entry_set = self.flow_state.sets.entry_set_mut_for(bb.index());
        let set_changed = self.flow_state.operator.join(entry_set, &in_out);
        if set_changed {
            dirty_queue.insert(bb);
        }
    }
}
936 }