// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use syntax::attr::AttrMetaMethods;

use rustc::ty::TyCtxt;
use rustc::mir::repr::{self, Mir};

use std::io;
use std::mem;
use std::usize;

use super::MirBorrowckCtxt;
use super::gather_moves::{Location, MoveData, MovePathData, MovePathIndex, MoveOutIndex, PathMap};
use super::graphviz;
use bitslice::BitSlice; // adds set_bit/get_bit to &[usize] bitvector rep.
pub trait Dataflow {
    fn dataflow(&mut self);
}
impl<'b, 'a: 'b, 'tcx: 'a> Dataflow for MirBorrowckCtxt<'b, 'a, 'tcx> {
    fn dataflow(&mut self) {
        self.build_gen_and_kill_sets();
        self.pre_dataflow_instrumentation().unwrap();
        self.propagate();
        self.post_dataflow_instrumentation().unwrap();
    }
}
struct PropagationContext<'c, 'b: 'c, 'a: 'b, 'tcx: 'a, OnReturn>
    where OnReturn: Fn(&MoveData, &mut [usize], &repr::Lvalue)
{
    mbcx: &'c mut MirBorrowckCtxt<'b, 'a, 'tcx>,
    changed: bool,
    on_return: OnReturn,
}
impl<'b, 'a: 'b, 'tcx: 'a> MirBorrowckCtxt<'b, 'a, 'tcx> {
    fn propagate(&mut self) {
        let mut temp = vec![0; self.flow_state.sets.words_per_block];
        let mut propcx = PropagationContext {
            mbcx: &mut *self,
            changed: true,
            on_return: |move_data, in_out, dest_lval| {
                let move_path_index = move_data.rev_lookup.find(dest_lval);
                on_all_children_bits(in_out,
                                     &move_data.path_map,
                                     &move_data.move_paths,
                                     move_path_index,
                                     &|in_out, mpi| {
                                         in_out.clear_bit(mpi.idx());
                                     });
            },
        };
        while propcx.changed {
            propcx.changed = false;
            propcx.reset(&mut temp);
            propcx.walk_cfg(&mut temp);
        }
    }
    fn build_gen_and_kill_sets(&mut self) {
        // First we need to build the gen- and kill-sets. The
        // gather_moves information provides a high-level mapping from
        // mir-locations to the MoveOuts (and those correspond
        // directly to gen-sets here). But we still need to figure out
        // the kill-sets.
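        //
        // Illustrative example (hypothetical MIR, added here for
        // clarity): a statement such as `tmp0 = move arg0` at location
        // `loc` gen's the bit for that particular MoveOut, while a
        // later assignment `arg0 = ...` kills the MoveOuts of `arg0`
        // and of all of its children, such as `arg0.0`.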

        let move_data = &self.flow_state.operator;
        let move_paths = &move_data.move_paths;
        let loc_map = &move_data.loc_map;
        let path_map = &move_data.path_map;
        let rev_lookup = &move_data.rev_lookup;

        for bb in self.mir.all_basic_blocks() {
            let &repr::BasicBlockData { ref statements,
                                        ref terminator,
                                        is_cleanup: _ } =
                self.mir.basic_block_data(bb);

            let mut sets = self.flow_state.sets.for_block(bb.index());
            for (j, stmt) in statements.iter().enumerate() {
                let loc = Location { block: bb, index: j };
                debug!("stmt {:?} at loc {:?} moves out of move_indexes {:?}",
                       stmt, loc, &loc_map[loc]);
                for move_index in &loc_map[loc] {
                    // Every path deinitialized by a *particular move*
                    // has a corresponding bit, "gen'ed" (i.e. set)
                    // here, in the dataflow vector.
                    zero_to_one(&mut sets.gen_set, *move_index);
                }
                match stmt.kind {
                    repr::StatementKind::Assign(ref lvalue, _) => {
                        // assigning into this `lvalue` kills all
                        // MoveOuts from it, and *also* all MoveOuts
                        // for children and associated fragment sets.
                        let move_path_index = rev_lookup.find(lvalue);
                        on_all_children_bits(sets.kill_set,
                                             path_map,
                                             move_paths,
                                             move_path_index,
                                             &|kill_set, mpi| {
                                                 kill_set.set_bit(mpi.idx());
                                             });
                    }
                }
            }

            let loc = Location { block: bb, index: statements.len() };
            debug!("terminator {:?} at loc {:?} moves out of move_indexes {:?}",
                   terminator, loc, &loc_map[loc]);
            for move_index in &loc_map[loc] {
                zero_to_one(&mut sets.gen_set, *move_index);
            }
        }
    }
}
fn zero_to_one(gen_set: &mut [usize], move_index: MoveOutIndex) {
    let retval = gen_set.set_bit(move_index.idx());
    assert!(retval); // the bit must have gone from zero to one
}
fn on_all_children_bits<Each>(set: &mut [usize],
                              path_map: &PathMap,
                              move_paths: &MovePathData,
                              move_path_index: MovePathIndex,
                              each_child: &Each)
    where Each: Fn(&mut [usize], MoveOutIndex)
{
    // 1. invoke `each_child` callback for all moves that directly
    //    influence path for `move_path_index`
    for move_index in &path_map[move_path_index] {
        each_child(set, *move_index);
    }

    // 2. for each child of the path (that is named in this
    //    structure), recur.
    //
    // (Unnamed children are irrelevant to dataflow; by
    // definition they have no associated moves.)
    let mut next_child_index = move_paths[move_path_index].first_child;
    while let Some(child_index) = next_child_index {
        on_all_children_bits(set, path_map, move_paths, child_index, each_child);
        next_child_index = move_paths[child_index].next_sibling;
    }
}
impl<'c, 'b: 'c, 'a: 'b, 'tcx: 'a, OnReturn> PropagationContext<'c, 'b, 'a, 'tcx, OnReturn>
    where OnReturn: Fn(&MoveData, &mut [usize], &repr::Lvalue)
{
    fn reset(&mut self, bits: &mut [usize]) {
        let e = if self.mbcx.flow_state.operator.initial_value() { usize::MAX } else { 0 };
        for b in bits {
            *b = e;
        }
    }

    fn walk_cfg(&mut self, in_out: &mut [usize]) {
        let &mut MirBorrowckCtxt { ref mir, ref mut flow_state, .. } = self.mbcx;
        for (idx, bb) in mir.basic_blocks.iter().enumerate() {
            {
                // Per-block transfer function:
                //     in_out = (on_entry | gen_set) & !kill_set
                let sets = flow_state.sets.for_block(idx);
                debug_assert!(in_out.len() == sets.on_entry.len());
                in_out.clone_from_slice(sets.on_entry);
                bitwise(in_out, sets.gen_set, &Union);
                bitwise(in_out, sets.kill_set, &Subtract);
            }
            flow_state.propagate_bits_into_graph_successors_of(in_out,
                                                               &mut self.changed,
                                                               bb,
                                                               &self.on_return);
        }
    }
}
impl<'b, 'a: 'b, 'tcx: 'a> MirBorrowckCtxt<'b, 'a, 'tcx> {
    fn pre_dataflow_instrumentation(&self) -> io::Result<()> {
        self.if_attr_meta_name_found(
            "borrowck_graphviz_preflow",
            |this, path: &str| {
                graphviz::print_borrowck_graph_to(this, "preflow", path)
            })
    }

    fn post_dataflow_instrumentation(&self) -> io::Result<()> {
        self.if_attr_meta_name_found(
            "borrowck_graphviz_postflow",
            |this, path: &str| {
                graphviz::print_borrowck_graph_to(this, "postflow", path)
            })
    }

    fn if_attr_meta_name_found<F>(&self,
                                  name: &str,
                                  callback: F) -> io::Result<()>
        where F: for <'aa, 'bb> FnOnce(&'aa Self, &'bb str) -> io::Result<()>
    {
        for attr in self.attributes {
            if attr.check_name("rustc_mir") {
                let items = attr.meta_item_list();
                for item in items.iter().flat_map(|l| l.iter()) {
                    if item.check_name(name) {
                        if let Some(s) = item.value_str() {
                            return callback(self, &s);
                        } else {
                            self.bcx.tcx.sess.span_err(
                                item.span,
                                &format!("{} attribute requires a path", item.name()));
                        }
                    }
                }
            }
        }
        Ok(())
    }
}
/// Maps each block to a set of bits
#[derive(Clone, Debug)]
struct Bits {
    bits: Vec<usize>,
}

impl Bits {
    fn new(init_word: usize, num_words: usize) -> Self {
        Bits { bits: vec![init_word; num_words] }
    }
}
pub struct DataflowState<O: BitDenotation>
{
    /// All the sets for the analysis. (Factored into its
    /// own structure so that we can borrow it mutably
    /// on its own separate from other fields.)
    pub sets: AllSets,

    /// operator used to initialize, combine, and interpret bits.
    operator: O,
}

pub struct AllSets {
    /// Analysis bitwidth for each block.
    bits_per_block: usize,

    /// Number of words associated with each block entry,
    /// equal to bits_per_block / usize::BITS, rounded up.
    words_per_block: usize,

    /// For each block, bits generated by executing the statements in
    /// the block. (For comparison, the Terminator for each block is
    /// handled in a flow-specific manner during propagation.)
    gen_sets: Bits,

    /// For each block, bits killed by executing the statements in the
    /// block. (For comparison, the Terminator for each block is
    /// handled in a flow-specific manner during propagation.)
    kill_sets: Bits,

    /// For each block, bits valid on entry to the block.
    on_entry_sets: Bits,
}

pub struct BlockSets<'a> {
    on_entry: &'a mut [usize],
    gen_set: &'a mut [usize],
    kill_set: &'a mut [usize],
}

impl AllSets {
    pub fn bits_per_block(&self) -> usize { self.bits_per_block }
    pub fn bytes_per_block(&self) -> usize { (self.bits_per_block + 7) / 8 }
    pub fn for_block(&mut self, block_idx: usize) -> BlockSets {
        let offset = self.words_per_block * block_idx;
        let range = offset..(offset + self.words_per_block);
        BlockSets {
            on_entry: &mut self.on_entry_sets.bits[range.clone()],
            gen_set: &mut self.gen_sets.bits[range.clone()],
            kill_set: &mut self.kill_sets.bits[range],
        }
    }

    fn lookup_set_for<'a>(&self, sets: &'a Bits, block_idx: usize) -> &'a [usize] {
        let offset = self.words_per_block * block_idx;
        &sets.bits[offset..(offset + self.words_per_block)]
    }
    pub fn gen_set_for(&self, block_idx: usize) -> &[usize] {
        self.lookup_set_for(&self.gen_sets, block_idx)
    }
    pub fn kill_set_for(&self, block_idx: usize) -> &[usize] {
        self.lookup_set_for(&self.kill_sets, block_idx)
    }
    pub fn on_entry_set_for(&self, block_idx: usize) -> &[usize] {
        self.lookup_set_for(&self.on_entry_sets, block_idx)
    }
}
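
// Illustrative arithmetic (added here for clarity, not part of the
// upstream source): with 64-bit `usize`, an analysis that needs
// `bits_per_block = 100` gets `words_per_block = (100 + 63) / 64 = 2`,
// so block `i`'s gen/kill/on-entry sets occupy the word range
// `[2 * i, 2 * i + 2)` inside each `Bits` vector, as sliced out by
// `for_block` above.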
impl<O: BitDenotation> DataflowState<O> {
    fn each_bit<F>(&self, words: &[usize], mut f: F)
        where F: FnMut(usize) {
        //! Helper for iterating over the bits in a bitvector.

        for (word_index, &word) in words.iter().enumerate() {
            if word != 0 {
                let usize_bits: usize = mem::size_of::<usize>() * 8;
                let base_index = word_index * usize_bits;
                for offset in 0..usize_bits {
                    let bit = 1 << offset;
                    if (word & bit) != 0 {
                        // NB: we round up the total number of bits
                        // that we store in any given bit set so that
                        // it is an even multiple of usize::BITS. This
                        // means that there may be some stray bits at
                        // the end that do not correspond to any
                        // actual value; that's why we first check
                        // that we are in range of bits_per_block.
                        let bit_index = base_index + offset as usize;
                        if bit_index >= self.sets.bits_per_block() {
                            return;
                        } else {
                            f(bit_index);
                        }
                    }
                }
            }
        }
    }

    pub fn interpret_set(&self, words: &[usize]) -> Vec<&O::Bit> {
        let mut v = Vec::new();
        self.each_bit(words, |i| {
            v.push(self.operator.interpret(i));
        });
        v
    }
}
pub trait BitwiseOperator {
    /// Joins two predecessor bits together, typically either `|` or `&`
    fn join(&self, pred1: usize, pred2: usize) -> usize;
}

/// Parameterization for the precise form of data flow that is used.
pub trait DataflowOperator: BitwiseOperator {
    /// Specifies the initial value for each bit in the `on_entry` set
    fn initial_value(&self) -> bool;
}

pub trait BitDenotation: DataflowOperator {
    /// Specifies what is represented by each bit in the dataflow bitvector.
    type Bit;

    /// Size of each bitvector allocated for each block in the analysis.
    fn bits_per_block(&self) -> usize;

    /// Provides the meaning of each entry in the dataflow bitvector.
    /// (Mostly intended for use for better debug instrumentation.)
    fn interpret(&self, idx: usize) -> &Self::Bit;
}
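
// The sketch below (added for illustration; `TempInit` and its field are
// hypothetical, not part of this module) shows how the three traits above
// fit together for a made-up analysis whose bits each track one named
// temporary. It is gated on `cfg(test)` so it never affects real builds.
#[cfg(test)]
mod trait_family_sketch {
    use super::{BitwiseOperator, DataflowOperator, BitDenotation};

    #[allow(dead_code)]
    struct TempInit {
        names: Vec<String>,
    }

    impl BitwiseOperator for TempInit {
        // Union: a temp counts as initialized if any predecessor initialized it.
        fn join(&self, pred1: usize, pred2: usize) -> usize { pred1 | pred2 }
    }

    impl DataflowOperator for TempInit {
        // Nothing is initialized on entry until the analysis proves otherwise.
        fn initial_value(&self) -> bool { false }
    }

    impl BitDenotation for TempInit {
        type Bit = String;
        fn bits_per_block(&self) -> usize { self.names.len() }
        fn interpret(&self, idx: usize) -> &Self::Bit { &self.names[idx] }
    }
}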
impl<D: BitDenotation> DataflowState<D> {
    pub fn new(mir: &Mir, denotation: D) -> Self {
        let bits_per_block = denotation.bits_per_block();
        let usize_bits = mem::size_of::<usize>() * 8;
        let words_per_block = (bits_per_block + usize_bits - 1) / usize_bits;
        let num_blocks = mir.basic_blocks.len();
        let num_words = num_blocks * words_per_block;

        let entry = if denotation.initial_value() { usize::MAX } else { 0 };

        let zeroes = Bits::new(0, num_words);
        let on_entry = Bits::new(entry, num_words);

        DataflowState {
            sets: AllSets {
                bits_per_block: bits_per_block,
                words_per_block: words_per_block,
                gen_sets: zeroes.clone(),
                kill_sets: zeroes,
                on_entry_sets: on_entry,
            },
            operator: denotation,
        }
    }
}
impl<D: BitDenotation> DataflowState<D> {
    /// Propagates the bits of `in_out` into all the successors of `bb`,
    /// using bitwise operator denoted by `self.operator`.
    ///
    /// For most blocks, this is entirely uniform. However, for blocks
    /// that end with a call terminator, the effect of the call on the
    /// dataflow state may depend on whether the call returned
    /// successfully or unwound. To reflect this, the `on_return`
    /// callback mutates `in_out` when propagating `in_out` via a call
    /// terminator; such mutation is performed *last*, to ensure its
    /// side-effects do not leak elsewhere (e.g. into unwind target).
    fn propagate_bits_into_graph_successors_of<OnReturn>(
        &mut self,
        in_out: &mut [usize],
        changed: &mut bool,
        bb: &repr::BasicBlockData,
        on_return: OnReturn) where OnReturn: Fn(&D, &mut [usize], &repr::Lvalue)
    {
        match bb.terminator().kind {
            repr::TerminatorKind::Return |
            repr::TerminatorKind::Resume => {}
            repr::TerminatorKind::Goto { ref target } |
            repr::TerminatorKind::Drop { ref target, value: _, unwind: None } => {
                self.propagate_bits_into_entry_set_for(in_out, changed, target);
            }
            repr::TerminatorKind::Drop { ref target, value: _, unwind: Some(ref unwind) } => {
                self.propagate_bits_into_entry_set_for(in_out, changed, target);
                self.propagate_bits_into_entry_set_for(in_out, changed, unwind);
            }
            repr::TerminatorKind::If { ref targets, .. } => {
                self.propagate_bits_into_entry_set_for(in_out, changed, &targets.0);
                self.propagate_bits_into_entry_set_for(in_out, changed, &targets.1);
            }
            repr::TerminatorKind::Switch { ref targets, .. } |
            repr::TerminatorKind::SwitchInt { ref targets, .. } => {
                for target in targets {
                    self.propagate_bits_into_entry_set_for(in_out, changed, target);
                }
            }
            repr::TerminatorKind::Call { ref cleanup, ref destination, func: _, args: _ } => {
                if let Some(ref unwind) = *cleanup {
                    self.propagate_bits_into_entry_set_for(in_out, changed, unwind);
                }
                if let Some((ref dest_lval, ref dest_bb)) = *destination {
                    // N.B.: This must be done *last*, after all other
                    // propagation, as documented in comment above.
                    on_return(&self.operator, in_out, dest_lval);
                    self.propagate_bits_into_entry_set_for(in_out, changed, dest_bb);
                }
            }
        }
    }

    fn propagate_bits_into_entry_set_for(&mut self,
                                         in_out: &[usize],
                                         changed: &mut bool,
                                         bb: &repr::BasicBlock) {
        let entry_set = self.sets.for_block(bb.index()).on_entry;
        let set_changed = bitwise(entry_set, in_out, &self.operator);
        if set_changed {
            *changed = true;
        }
    }
}
impl<'tcx> DataflowState<MoveData<'tcx>> {
    pub fn new_move_analysis(mir: &Mir<'tcx>, tcx: &TyCtxt<'tcx>) -> Self {
        let move_data = MoveData::gather_moves(mir, tcx);
        DataflowState::new(mir, move_data)
    }
}

impl<'tcx> BitwiseOperator for MoveData<'tcx> {
    #[inline]
    fn join(&self, pred1: usize, pred2: usize) -> usize {
        pred1 | pred2 // moves from both preds are in scope
    }
}

impl<'tcx> DataflowOperator for MoveData<'tcx> {
    #[inline]
    fn initial_value(&self) -> bool {
        false // no moves are in scope by default
    }
}
fn bitwise<Op: BitwiseOperator>(out_vec: &mut [usize],
                                in_vec: &[usize],
                                op: &Op) -> bool {
    assert_eq!(out_vec.len(), in_vec.len());
    let mut changed = false;
    for (out_elt, in_elt) in out_vec.iter_mut().zip(in_vec) {
        let old_val = *out_elt;
        let new_val = op.join(old_val, *in_elt);
        *out_elt = new_val;
        changed |= old_val != new_val;
    }
    changed
}

struct Union;
impl BitwiseOperator for Union {
    fn join(&self, a: usize, b: usize) -> usize { a | b }
}

struct Subtract;
impl BitwiseOperator for Subtract {
    fn join(&self, a: usize, b: usize) -> usize { a & !b }
}
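
// A minimal sketch (added for illustration, not part of the upstream
// source): it exercises `bitwise` with the `Union` and `Subtract`
// operators to show the transfer function that `walk_cfg` applies per
// block, namely `in_out = (on_entry | gen_set) & !kill_set`.
#[cfg(test)]
mod bitwise_sketch {
    use super::{bitwise, Union, Subtract};

    #[test]
    fn union_then_subtract_models_transfer_function() {
        let on_entry = [0b1010usize];
        let gen_set = [0b0101usize];
        let kill_set = [0b0011usize];

        let mut in_out = on_entry;
        bitwise(&mut in_out, &gen_set, &Union);     // 0b1010 | 0b0101 == 0b1111
        bitwise(&mut in_out, &kill_set, &Subtract); // 0b1111 & !0b0011 == 0b1100
        assert_eq!(in_out[0], 0b1100);
    }
}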