// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use syntax::attr::AttrMetaMethods;

use rustc::ty::TyCtxt;
use rustc::mir::repr::{self, Mir};

use std::io;
use std::mem;
use std::usize;

use super::MirBorrowckCtxt;
use super::gather_moves::{Location, MoveData, MovePathData, MovePathIndex, MoveOutIndex, PathMap};
use super::graphviz;
use bitslice::BitSlice; // adds set_bit/get_bit to &[usize] bitvector rep.

pub trait Dataflow {
    fn dataflow(&mut self);
}

impl<'b, 'a: 'b, 'tcx: 'a> Dataflow for MirBorrowckCtxt<'b, 'a, 'tcx> {
    fn dataflow(&mut self) {
        self.build_gen_and_kill_sets();
        self.pre_dataflow_instrumentation().unwrap();
        self.propagate();
        self.post_dataflow_instrumentation().unwrap();
    }
}

struct PropagationContext<'c, 'b: 'c, 'a: 'b, 'tcx: 'a, OnReturn>
    where OnReturn: Fn(&MoveData, &mut [usize], &repr::Lvalue)
{
    mbcx: &'c mut MirBorrowckCtxt<'b, 'a, 'tcx>,
    changed: bool,
    on_return: OnReturn,
}

impl<'b, 'a: 'b, 'tcx: 'a> MirBorrowckCtxt<'b, 'a, 'tcx> {
    fn propagate(&mut self) {
        let mut temp = vec![0; self.flow_state.sets.words_per_block];
        let mut propcx = PropagationContext {
            mbcx: &mut *self,
            changed: true,
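            // When a call returns successfully, its destination lvalue is
            // re-initialized; the closure below clears the move-out bits
            // for that lvalue's path and all of its children.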
            on_return: |move_data, in_out, dest_lval| {
                let move_path_index = move_data.rev_lookup.find(dest_lval);
                on_all_children_bits(in_out,
                                     &move_data.path_map,
                                     &move_data.move_paths,
                                     move_path_index,
                                     &|in_out, mpi| {
                                         in_out.clear_bit(mpi.idx());
                                     });
            },
        };
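        // Iterate to a fixed point: each pass recomputes every block's
        // exit state and pushes it into the block's successors; `changed`
        // records whether any block's entry set was modified.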
        while propcx.changed {
            propcx.changed = false;
            propcx.reset(&mut temp);
            propcx.walk_cfg(&mut temp);
        }
    }

    fn build_gen_and_kill_sets(&mut self) {
        // First we need to build the gen- and kill-sets. The
        // gather_moves information provides a high-level mapping from
        // mir-locations to the MoveOuts (and those correspond
        // directly to gen-sets here). But we still need to figure out
        // the kill-sets.

        let move_data = &self.flow_state.operator;
        let move_paths = &move_data.move_paths;
        let loc_map = &move_data.loc_map;
        let path_map = &move_data.path_map;
        let rev_lookup = &move_data.rev_lookup;

        for bb in self.mir.all_basic_blocks() {
            let &repr::BasicBlockData { ref statements,
                                        ref terminator,
                                        is_cleanup: _ } =
                self.mir.basic_block_data(bb);

            let mut sets = self.flow_state.sets.for_block(bb.index());
            for (j, stmt) in statements.iter().enumerate() {
                let loc = Location { block: bb, index: j };
                debug!("stmt {:?} at loc {:?} moves out of move_indexes {:?}",
                       stmt, loc, &loc_map[loc]);
                for move_index in &loc_map[loc] {
                    // Every path deinitialized by a *particular move*
                    // has a corresponding bit that is "gen'ed" (i.e. set)
                    // here, in the dataflow vector.
                    zero_to_one(&mut sets.gen_set, *move_index);
                }
                match stmt.kind {
                    repr::StatementKind::Assign(ref lvalue, _) => {
                        // assigning into this `lvalue` kills all
                        // MoveOuts from it, and *also* all MoveOuts
                        // for children and associated fragment sets.
                        let move_path_index = rev_lookup.find(lvalue);

                        on_all_children_bits(sets.kill_set,
                                             path_map,
                                             move_paths,
                                             move_path_index,
                                             &|kill_set, mpi| {
                                                 kill_set.set_bit(mpi.idx());
                                             });
                    }
                }
            }

            let loc = Location { block: bb, index: statements.len() };
            debug!("terminator {:?} at loc {:?} moves out of move_indexes {:?}",
                   terminator, loc, &loc_map[loc]);
            for move_index in &loc_map[loc] {
                zero_to_one(&mut sets.gen_set, *move_index);
            }
        }

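        // Each MoveOut is generated at exactly one location, so its bit
        // must not already be set (`set_bit` reports whether the bit
        // actually changed, which the assert below relies on).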
        fn zero_to_one(gen_set: &mut [usize], move_index: MoveOutIndex) {
            let retval = gen_set.set_bit(move_index.idx());
            assert!(retval);
        }
    }
}

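/// Invokes `each_child` on the bit for every move out of the path
/// `move_path_index` itself and, recursively, out of each of its
/// named children.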
fn on_all_children_bits<Each>(set: &mut [usize],
                              path_map: &PathMap,
                              move_paths: &MovePathData,
                              move_path_index: MovePathIndex,
                              each_child: &Each)
    where Each: Fn(&mut [usize], MoveOutIndex)
{
    // 1. Invoke the `each_child` callback for all moves that directly
    //    affect the path `move_path_index`.
    for move_index in &path_map[move_path_index] {
        each_child(set, *move_index);
    }

    // 2. For each child of the path (that is named in this
    //    function), recur.
    //
    // (Unnamed children are irrelevant to dataflow; by
    // definition they have no associated moves.)
    let mut next_child_index = move_paths[move_path_index].first_child;
    while let Some(child_index) = next_child_index {
        on_all_children_bits(set, path_map, move_paths, child_index, each_child);
        next_child_index = move_paths[child_index].next_sibling;
    }
}

impl<'c, 'b: 'c, 'a: 'b, 'tcx: 'a, OnReturn> PropagationContext<'c, 'b, 'a, 'tcx, OnReturn>
    where OnReturn: Fn(&MoveData, &mut [usize], &repr::Lvalue)
{
    fn reset(&mut self, bits: &mut [usize]) {
        let e = if self.mbcx.flow_state.operator.initial_value() { usize::MAX } else { 0 };
        for b in bits {
            *b = e;
        }
    }

    fn walk_cfg(&mut self, in_out: &mut [usize]) {
        let &mut MirBorrowckCtxt { ref mir, ref mut flow_state, .. } = self.mbcx;
        for (idx, bb) in mir.basic_blocks.iter().enumerate() {
            {
                let sets = flow_state.sets.for_block(idx);
                debug_assert!(in_out.len() == sets.on_entry.len());
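                // Per-block transfer function: in_out = (on_entry | gen) & !kill,
                // computed word-by-word via the Union and Subtract operators.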
                in_out.clone_from_slice(sets.on_entry);
                bitwise(in_out, sets.gen_set, &Union);
                bitwise(in_out, sets.kill_set, &Subtract);
            }
            flow_state.propagate_bits_into_graph_successors_of(in_out,
                                                               &mut self.changed,
                                                               bb,
                                                               &self.on_return);
        }
    }
}

impl<'b, 'a: 'b, 'tcx: 'a> MirBorrowckCtxt<'b, 'a, 'tcx> {
    fn pre_dataflow_instrumentation(&self) -> io::Result<()> {
        self.if_attr_meta_name_found(
            "borrowck_graphviz_preflow",
            |this, path: &str| {
                graphviz::print_borrowck_graph_to(this, "preflow", path)
            })
    }

    fn post_dataflow_instrumentation(&self) -> io::Result<()> {
        self.if_attr_meta_name_found(
            "borrowck_graphviz_postflow",
            |this, path: &str| {
                graphviz::print_borrowck_graph_to(this, "postflow", path)
            })
    }

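    /// Searches the item's attributes for `#[rustc_mir(<name> = "<path>")]`
    /// and, if such an entry is present, invokes `callback` with the path
    /// string; an entry without a string value is reported as an error.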
    fn if_attr_meta_name_found<F>(&self,
                                  name: &str,
                                  callback: F) -> io::Result<()>
        where F: for <'aa, 'bb> FnOnce(&'aa Self, &'bb str) -> io::Result<()>
    {
        for attr in self.attributes {
            if attr.check_name("rustc_mir") {
                let items = attr.meta_item_list();
                for item in items.iter().flat_map(|l| l.iter()) {
                    if item.check_name(name) {
                        if let Some(s) = item.value_str() {
                            return callback(self, &s);
                        } else {
                            self.bcx.tcx.sess.span_err(
                                item.span,
                                &format!("{} attribute requires a path", item.name()));
                        }
                    }
                }
            }
        }

        Ok(())
    }
}

/// Maps each block to a set of bits
#[derive(Clone, Debug)]
struct Bits {
    bits: Vec<usize>,
}

impl Bits {
    fn new(init_word: usize, num_words: usize) -> Self {
        Bits { bits: vec![init_word; num_words] }
    }
}

pub struct DataflowState<O: BitDenotation>
{
    /// All the sets for the analysis. (Factored into its
    /// own structure so that we can borrow it mutably
    /// on its own separate from other fields.)
    pub sets: AllSets,

    /// Operator used to initialize, combine, and interpret bits.
    operator: O,
}

pub struct AllSets {
    /// Analysis bitwidth for each block.
    bits_per_block: usize,

    /// Number of words associated with each block entry,
    /// equal to bits_per_block / usize::BITS, rounded up.
    words_per_block: usize,

    /// For each block, bits generated by executing the statements in
    /// the block. (For comparison, the Terminator for each block is
    /// handled in a flow-specific manner during propagation.)
    gen_sets: Bits,

    /// For each block, bits killed by executing the statements in the
    /// block. (For comparison, the Terminator for each block is
    /// handled in a flow-specific manner during propagation.)
    kill_sets: Bits,

    /// For each block, bits valid on entry to the block.
    on_entry_sets: Bits,
}

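/// Mutable views of the gen, kill, and on-entry words for a single block,
/// each sliced out of the corresponding flat bitvector in `AllSets`.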
pub struct BlockSets<'a> {
    on_entry: &'a mut [usize],
    gen_set: &'a mut [usize],
    kill_set: &'a mut [usize],
}

impl AllSets {
    pub fn bits_per_block(&self) -> usize { self.bits_per_block }
    pub fn bytes_per_block(&self) -> usize { (self.bits_per_block + 7) / 8 }
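    /// Each block's sets occupy a contiguous run of `words_per_block` words
    /// in the flat bitvectors; this slices out that run for `block_idx`.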
    pub fn for_block(&mut self, block_idx: usize) -> BlockSets {
        let offset = self.words_per_block * block_idx;
        let range = offset..(offset + self.words_per_block);
        BlockSets {
            on_entry: &mut self.on_entry_sets.bits[range.clone()],
            gen_set: &mut self.gen_sets.bits[range.clone()],
            kill_set: &mut self.kill_sets.bits[range],
        }
    }

    fn lookup_set_for<'a>(&self, sets: &'a Bits, block_idx: usize) -> &'a [usize] {
        let offset = self.words_per_block * block_idx;
        &sets.bits[offset..(offset + self.words_per_block)]
    }
    pub fn gen_set_for(&self, block_idx: usize) -> &[usize] {
        self.lookup_set_for(&self.gen_sets, block_idx)
    }
    pub fn kill_set_for(&self, block_idx: usize) -> &[usize] {
        self.lookup_set_for(&self.kill_sets, block_idx)
    }
    pub fn on_entry_set_for(&self, block_idx: usize) -> &[usize] {
        self.lookup_set_for(&self.on_entry_sets, block_idx)
    }
}

impl<O: BitDenotation> DataflowState<O> {
    fn each_bit<F>(&self, words: &[usize], mut f: F)
        where F: FnMut(usize) {
        //! Helper for iterating over the bits in a bitvector.

        for (word_index, &word) in words.iter().enumerate() {
            if word != 0 {
                let usize_bits: usize = mem::size_of::<usize>() * 8;
                let base_index = word_index * usize_bits;
                for offset in 0..usize_bits {
                    let bit = 1 << offset;
                    if (word & bit) != 0 {
                        // NB: we round up the total number of bits
                        // that we store in any given bit set so that
                        // it is an even multiple of usize::BITS. This
                        // means that there may be some stray bits at
                        // the end that do not correspond to any
                        // actual value; that's why we first check
                        // that we are in range of bits_per_block.
                        let bit_index = base_index + offset as usize;
                        if bit_index >= self.sets.bits_per_block() {
                            return;
                        } else {
                            f(bit_index);
                        }
                    }
                }
            }
        }
    }

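    /// Translates the set bits in `words` back into the values they denote,
    /// via the operator's `interpret` method.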
    pub fn interpret_set(&self, words: &[usize]) -> Vec<&O::Bit> {
        let mut v = Vec::new();
        self.each_bit(words, |i| {
            v.push(self.operator.interpret(i));
        });
        v
    }
}

pub trait BitwiseOperator {
    /// Joins two predecessor bits together, typically either `|` or `&`
    fn join(&self, pred1: usize, pred2: usize) -> usize;
}

/// Parameterization for the precise form of data flow that is used.
pub trait DataflowOperator: BitwiseOperator {
    /// Specifies the initial value for each bit in the `on_entry` set
    fn initial_value(&self) -> bool;
}

pub trait BitDenotation: DataflowOperator {
    /// Specifies what is represented by each bit in the dataflow bitvector.
    type Bit;
    /// Size of each bitvector allocated for each block in the analysis.
    fn bits_per_block(&self) -> usize;
    /// Provides the meaning of each entry in the dataflow bitvector.
    /// (Mostly intended for use for better debug instrumentation.)
    fn interpret(&self, idx: usize) -> &Self::Bit;
}

impl<D: BitDenotation> DataflowState<D> {
    pub fn new(mir: &Mir, denotation: D) -> Self {
        let bits_per_block = denotation.bits_per_block();
        let usize_bits = mem::size_of::<usize>() * 8;
        let words_per_block = (bits_per_block + usize_bits - 1) / usize_bits;
        let num_blocks = mir.basic_blocks.len();
        let num_words = num_blocks * words_per_block;

        let entry = if denotation.initial_value() { usize::MAX } else { 0 };

        let zeroes = Bits::new(0, num_words);
        let on_entry = Bits::new(entry, num_words);

        DataflowState {
            sets: AllSets {
                bits_per_block: bits_per_block,
                words_per_block: words_per_block,
                gen_sets: zeroes.clone(),
                kill_sets: zeroes,
                on_entry_sets: on_entry,
            },
            operator: denotation,
        }
    }
}

impl<D: BitDenotation> DataflowState<D> {
    /// Propagates the bits of `in_out` into all the successors of `bb`,
    /// using the bitwise operator denoted by `self.operator`.
    ///
    /// For most blocks, this is entirely uniform. However, for blocks
    /// that end with a call terminator, the effect of the call on the
    /// dataflow state may depend on whether the call returned
    /// successfully or unwound. To reflect this, the `on_return`
    /// callback mutates `in_out` when propagating `in_out` via a call
    /// terminator; such mutation is performed *last*, to ensure its
    /// side-effects do not leak elsewhere (e.g. into the unwind target).
    fn propagate_bits_into_graph_successors_of<OnReturn>(
        &mut self,
        in_out: &mut [usize],
        changed: &mut bool,
        bb: &repr::BasicBlockData,
        on_return: OnReturn) where OnReturn: Fn(&D, &mut [usize], &repr::Lvalue)
    {
        match bb.terminator().kind {
            repr::TerminatorKind::Return |
            repr::TerminatorKind::Resume => {}
            repr::TerminatorKind::Goto { ref target } |
            repr::TerminatorKind::Drop { ref target, value: _, unwind: None } => {
                self.propagate_bits_into_entry_set_for(in_out, changed, target);
            }
            repr::TerminatorKind::Drop { ref target, value: _, unwind: Some(ref unwind) } => {
                self.propagate_bits_into_entry_set_for(in_out, changed, target);
                self.propagate_bits_into_entry_set_for(in_out, changed, unwind);
            }
            repr::TerminatorKind::If { ref targets, .. } => {
                self.propagate_bits_into_entry_set_for(in_out, changed, &targets.0);
                self.propagate_bits_into_entry_set_for(in_out, changed, &targets.1);
            }
            repr::TerminatorKind::Switch { ref targets, .. } |
            repr::TerminatorKind::SwitchInt { ref targets, .. } => {
                for target in targets {
                    self.propagate_bits_into_entry_set_for(in_out, changed, target);
                }
            }
            repr::TerminatorKind::Call { ref cleanup, ref destination, func: _, args: _ } => {
                if let Some(ref unwind) = *cleanup {
                    self.propagate_bits_into_entry_set_for(in_out, changed, unwind);
                }
                if let Some((ref dest_lval, ref dest_bb)) = *destination {
                    // N.B.: This must be done *last*, after all other
                    // propagation, as documented in comment above.
                    on_return(&self.operator, in_out, dest_lval);
                    self.propagate_bits_into_entry_set_for(in_out, changed, dest_bb);
                }
            }
        }
    }

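    /// Joins `in_out` into the on-entry set of `bb` using `self.operator`,
    /// setting `changed` if the entry set was thereby modified.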
    fn propagate_bits_into_entry_set_for(&mut self,
                                         in_out: &[usize],
                                         changed: &mut bool,
                                         bb: &repr::BasicBlock) {
        let entry_set = self.sets.for_block(bb.index()).on_entry;
        let set_changed = bitwise(entry_set, in_out, &self.operator);
        if set_changed {
            *changed = true;
        }
    }
}


impl<'tcx> DataflowState<MoveData<'tcx>> {
    pub fn new_move_analysis(mir: &Mir<'tcx>, tcx: &TyCtxt<'tcx>) -> Self {
        let move_data = MoveData::gather_moves(mir, tcx);
        DataflowState::new(mir, move_data)
    }
}

impl<'tcx> BitwiseOperator for MoveData<'tcx> {
    #[inline]
    fn join(&self, pred1: usize, pred2: usize) -> usize {
        pred1 | pred2 // moves from both preds are in scope
    }
}

impl<'tcx> DataflowOperator for MoveData<'tcx> {
    #[inline]
    fn initial_value(&self) -> bool {
        false // no loans in scope by default
    }
}

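/// Applies `op.join` word-by-word, writing the result back into `out_vec`,
/// and returns whether any word changed (used to detect a fixed point).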
#[inline]
fn bitwise<Op: BitwiseOperator>(out_vec: &mut [usize],
                                in_vec: &[usize],
                                op: &Op) -> bool {
    assert_eq!(out_vec.len(), in_vec.len());
    let mut changed = false;
    for (out_elt, in_elt) in out_vec.iter_mut().zip(in_vec) {
        let old_val = *out_elt;
        let new_val = op.join(old_val, *in_elt);
        *out_elt = new_val;
        changed |= old_val != new_val;
    }
    changed
}

struct Union;
impl BitwiseOperator for Union {
    fn join(&self, a: usize, b: usize) -> usize { a | b }
}
struct Subtract;
impl BitwiseOperator for Subtract {
    fn join(&self, a: usize, b: usize) -> usize { a & !b }
}