git.proxmox.com Git - rustc.git/blob - src/librustc_mir/build/scope.rs (New upstream version 1.25.0+dfsg1)
1 // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 /*!
12 Managing the scope stack. The scopes are tied to lexical scopes, so as
13 we descend the HAIR, we push a scope on the stack, translate its
14 contents, and then pop it off. Every scope is named by a
15 `region::Scope`.
16
17 ### SEME Regions
18
19 When pushing a new scope, we record the current point in the graph (a
20 basic block); this marks the entry to the scope. We then generate more
21 stuff in the control-flow graph. Whenever the scope is exited, either
22 via a `break` or `return` or just by fallthrough, that marks an exit
23 from the scope. Each lexical scope thus corresponds to a single-entry,
24 multiple-exit (SEME) region in the control-flow graph.
25
26 For now, we keep a mapping from each `region::Scope` to its
27 corresponding SEME region for later reference (see caveat in next
28 paragraph). This is because region scopes are tied to
29 them. Eventually, when we shift to non-lexical lifetimes, there should
30 be no need to remember this mapping.
31
32 There is one additional wrinkle, actually, that I wanted to hide from
33 you but duty compels me to mention. In the course of translating
34 matches, it sometimes happens that certain code (namely guards) gets
35 executed multiple times. This means that a single lexical scope may
36 in fact correspond to multiple, disjoint SEME regions. So in fact our
37 mapping is from one scope to a vector of SEME regions.
38
39 ### Drops
40
41 The primary purpose for scopes is to insert drops: while translating
42 the contents, we also accumulate places that need to be dropped upon
43 exit from each scope. This is done by calling `schedule_drop`. Once a
44 drop is scheduled, whenever we branch out we will insert drops of all
45 those places onto the outgoing edge. Note that we don't know the full
46 set of scheduled drops up front, and so whenever we exit from the
47 scope we only drop the values scheduled thus far. For example, consider
48 the scope S corresponding to this loop:
49
50 ```
51 # let cond = true;
52 loop {
53 let x = ..;
54 if cond { break; }
55 let y = ..;
56 }
57 ```
58
59 When processing the `let x`, we will add one drop to the scope for
60 `x`. The break will then insert a drop for `x`. When we process `let
61 y`, we will add another drop (in fact, to a subscope, but let's ignore
62 that for now); any later drops would also drop `y`.
63
64 ### Early exit
65
66 There are numerous "normal" ways to early exit a scope: `break`,
67 `continue`, `return` (panics are handled separately). Whenever an
68 early exit occurs, the method `exit_scope` is called. It is given the
69 current point in execution where the early exit occurs, as well as the
70 scope you want to branch to (note that all early exits go to some
71 other enclosing scope). `exit_scope` will record this exit point and
72 also add all drops.
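
For example (an illustrative snippet, not code from this module), both the
early `return` and the normal fallthrough below are exits from the scope of
`s`, and a drop of `s` is emitted on each outgoing edge:

```
# fn compute(cond: bool) -> i32 {
let s = String::from("temp");
if cond {
    return 1; // early exit: `exit_scope` adds a drop of `s` on this edge
}
0 // fallthrough exit: `s` is dropped here as well
# }
```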
73
74 Panics are handled in a similar fashion, except that a panic always
75 returns out to the `DIVERGE_BLOCK`. To trigger a panic, simply call
76 `panic(p)` with the current point `p`. Or else you can call
77 `diverge_cleanup`, which will produce a block that you can branch to
78 which does the appropriate cleanup and then diverges. `panic(p)`
79 simply calls `diverge_cleanup()` and adds an edge from `p` to the
80 result.
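
Illustratively (again not code from this module; `may_panic` is a
hypothetical function), a call that can unwind is given the cleanup block
produced by `diverge_cleanup` as its unwind target, so `v` is dropped before
the panic propagates further:

```
# fn may_panic() {}
let v = vec![1, 2, 3];
may_panic(); // on unwind, the cleanup path drops `v` and then resumes unwinding
drop(v);     // on the normal path, `v` is dropped here
```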
81
82 ### Loop scopes
83
84 In addition to the normal scope stack, we track a loop scope stack
85 that contains only loops. It tracks where a `break` and `continue`
86 should go to.
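
For example (illustrative only; `a` and `b` are hypothetical functions), each
loop below pushes its own breakable scope: the plain `continue` is resolved
against the innermost one, while the labeled `break` is resolved against the
outer one:

```
# fn a() -> bool { false }
# fn b() -> bool { true }
'outer: loop {
    loop {
        if a() { continue; }      // resolved against the inner loop's scope
        if b() { break 'outer; }  // resolved against the outer loop's scope
    }
}
```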
87
88 */
89
90 use build::{BlockAnd, BlockAndExtension, Builder, CFG};
91 use hair::LintLevel;
92 use rustc::middle::region;
93 use rustc::ty::{Ty, TyCtxt};
94 use rustc::hir;
95 use rustc::hir::def_id::LOCAL_CRATE;
96 use rustc::mir::*;
97 use syntax_pos::{Span};
98 use rustc_data_structures::indexed_vec::Idx;
99 use rustc_data_structures::fx::FxHashMap;
100
101 #[derive(Debug)]
102 pub struct Scope<'tcx> {
103 /// The visibility scope this scope was created in.
104 visibility_scope: VisibilityScope,
105
106 /// The region scope this scope corresponds to within the source code.
107 region_scope: region::Scope,
108
109 /// the span of that region_scope
110 region_scope_span: Span,
111
112 /// Whether there's anything to do for the cleanup path, that is,
113 /// when unwinding through this scope. This includes destructors,
114 /// but not StorageDead statements, which don't get emitted at all
115 /// for unwinding, for several reasons:
116 /// * clang doesn't emit llvm.lifetime.end for C++ unwinding
117 /// * LLVM's memory dependency analysis can't handle it atm
118 /// * polluting the cleanup MIR with StorageDead creates
119 /// landing pads even though there are no actual destructors
120 /// * freeing up stack space has no effect during unwinding
121 needs_cleanup: bool,
122
123 /// set of places to drop when exiting this scope. This starts
124 /// out empty but grows as variables are declared during the
125 /// building process. This is a stack, so we always drop from the
126 /// end of the vector (top of the stack) first.
127 drops: Vec<DropData<'tcx>>,
128
129 /// The cache for drop chain on “normal” exit into a particular BasicBlock.
130 cached_exits: FxHashMap<(BasicBlock, region::Scope), BasicBlock>,
131
132 /// The cache for drop chain on "generator drop" exit.
133 cached_generator_drop: Option<BasicBlock>,
134
135 /// The cache for drop chain on "unwind" exit.
136 cached_unwind: CachedBlock,
137 }
138
139 #[derive(Debug)]
140 struct DropData<'tcx> {
141 /// span where drop obligation was incurred (typically where place was declared)
142 span: Span,
143
144 /// place to drop
145 location: Place<'tcx>,
146
147 /// Whether this is a full value Drop, or just a StorageDead.
148 kind: DropKind
149 }
150
151 #[derive(Debug, Default, Clone, Copy)]
152 struct CachedBlock {
153 /// The cached block for the cleanups-on-diverge path. This block
154 /// contains code to run the current drop and all the preceding
155 /// drops (i.e. those with a lower index in the scope's `drops`
156 /// vector)
157 unwind: Option<BasicBlock>,
158
159 /// The cached block for unwinds during cleanups-on-generator-drop path
160 ///
161 /// This is split from the standard unwind path here to prevent drop
162 /// elaboration from creating drop flags that would have to be captured
163 /// by the generator. I'm not sure how important this optimization is,
164 /// but it is here.
165 generator_drop: Option<BasicBlock>,
166 }
167
168 #[derive(Debug)]
169 enum DropKind {
170 Value {
171 cached_block: CachedBlock,
172 },
173 Storage
174 }
175
176 #[derive(Clone, Debug)]
177 pub struct BreakableScope<'tcx> {
178 /// Region scope of the loop
179 pub region_scope: region::Scope,
180 /// Where the body of the loop begins. `None` if this is a block rather than a loop.
181 pub continue_block: Option<BasicBlock>,
182 /// Block to branch into when the loop or block terminates (either by being broken out
183 /// of with `break`, or by having its condition become false)
184 pub break_block: BasicBlock,
185 /// The destination of the loop/block expression itself (i.e. where to put the result of a
186 /// `break` expression)
187 pub break_destination: Place<'tcx>,
188 }
189
190 impl CachedBlock {
191 fn invalidate(&mut self) {
192 self.generator_drop = None;
193 self.unwind = None;
194 }
195
196 fn get(&self, generator_drop: bool) -> Option<BasicBlock> {
197 if generator_drop {
198 self.generator_drop
199 } else {
200 self.unwind
201 }
202 }
203
204 fn ref_mut(&mut self, generator_drop: bool) -> &mut Option<BasicBlock> {
205 if generator_drop {
206 &mut self.generator_drop
207 } else {
208 &mut self.unwind
209 }
210 }
211 }
212
213 impl DropKind {
214 fn may_panic(&self) -> bool {
215 match *self {
216 DropKind::Value { .. } => true,
217 DropKind::Storage => false
218 }
219 }
220 }
221
222 impl<'tcx> Scope<'tcx> {
223 /// Invalidate all the cached blocks in the scope.
224 ///
225 /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
226 /// larger extent of code.
227 ///
228 /// `storage_only` controls whether to invalidate only drop paths that run `StorageDead`.
229 /// `this_scope_only` controls whether to invalidate only drop paths that refer to the current
230 /// top-of-scope (as opposed to dependent scopes).
231 fn invalidate_cache(&mut self, storage_only: bool, this_scope_only: bool) {
232 // FIXME: maybe do shared caching of `cached_exits` etc. to handle functions
233 // with lots of `try!`?
234
235 // cached exits drop storage and refer to the top-of-scope
236 self.cached_exits.clear();
237
238 if !storage_only {
239 // the current generator drop and unwind ignore
240 // storage but refer to top-of-scope
241 self.cached_generator_drop = None;
242 self.cached_unwind.invalidate();
243 }
244
245 if !storage_only && !this_scope_only {
246 for dropdata in &mut self.drops {
247 if let DropKind::Value { ref mut cached_block } = dropdata.kind {
248 cached_block.invalidate();
249 }
250 }
251 }
252 }
253
254 /// Given a span and this scope's visibility scope, make a SourceInfo.
255 fn source_info(&self, span: Span) -> SourceInfo {
256 SourceInfo {
257 span,
258 scope: self.visibility_scope
259 }
260 }
261 }
262
263 impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
264 // Adding and removing scopes
265 // ==========================
266 /// Start a breakable scope, which tracks where `continue` and `break`
267 /// should branch to. See module comment for more details.
268 ///
269 /// Returns the result of the closure `f`.
270 pub fn in_breakable_scope<F, R>(&mut self,
271 loop_block: Option<BasicBlock>,
272 break_block: BasicBlock,
273 break_destination: Place<'tcx>,
274 f: F) -> R
275 where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> R
276 {
277 let region_scope = self.topmost_scope();
278 let scope = BreakableScope {
279 region_scope,
280 continue_block: loop_block,
281 break_block,
282 break_destination,
283 };
284 self.breakable_scopes.push(scope);
285 let res = f(self);
286 let breakable_scope = self.breakable_scopes.pop().unwrap();
287 assert!(breakable_scope.region_scope == region_scope);
288 res
289 }
290
291 pub fn in_opt_scope<F, R>(&mut self,
292 opt_scope: Option<(region::Scope, SourceInfo)>,
293 mut block: BasicBlock,
294 f: F)
295 -> BlockAnd<R>
296 where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd<R>
297 {
298 debug!("in_opt_scope(opt_scope={:?}, block={:?})", opt_scope, block);
299 if let Some(region_scope) = opt_scope { self.push_scope(region_scope); }
300 let rv = unpack!(block = f(self));
301 if let Some(region_scope) = opt_scope {
302 unpack!(block = self.pop_scope(region_scope, block));
303 }
304 debug!("in_scope: exiting opt_scope={:?} block={:?}", opt_scope, block);
305 block.and(rv)
306 }
307
308 /// Convenience wrapper that pushes a scope and then executes `f`
309 /// to build its contents, popping the scope afterwards.
310 pub fn in_scope<F, R>(&mut self,
311 region_scope: (region::Scope, SourceInfo),
312 lint_level: LintLevel,
313 mut block: BasicBlock,
314 f: F)
315 -> BlockAnd<R>
316 where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd<R>
317 {
318 debug!("in_scope(region_scope={:?}, block={:?})", region_scope, block);
319 let visibility_scope = self.visibility_scope;
320 let tcx = self.hir.tcx();
321 if let LintLevel::Explicit(node_id) = lint_level {
322 let same_lint_scopes = tcx.dep_graph.with_ignore(|| {
323 let sets = tcx.lint_levels(LOCAL_CRATE);
324 let parent_hir_id =
325 tcx.hir.definitions().node_to_hir_id(
326 self.visibility_scope_info[visibility_scope].lint_root
327 );
328 let current_hir_id =
329 tcx.hir.definitions().node_to_hir_id(node_id);
330 sets.lint_level_set(parent_hir_id) ==
331 sets.lint_level_set(current_hir_id)
332 });
333
334 if !same_lint_scopes {
335 self.visibility_scope =
336 self.new_visibility_scope(region_scope.1.span, lint_level,
337 None);
338 }
339 }
340 self.push_scope(region_scope);
341 let rv = unpack!(block = f(self));
342 unpack!(block = self.pop_scope(region_scope, block));
343 self.visibility_scope = visibility_scope;
344 debug!("in_scope: exiting region_scope={:?} block={:?}", region_scope, block);
345 block.and(rv)
346 }
347
348 /// Push a scope onto the stack. You can then build code in this
349 /// scope and call `pop_scope` afterwards. Note that these two
350 /// calls must be paired; using `in_scope` as a convenience
351 /// wrapper may be preferable.
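///
/// A minimal sketch of the intended pairing (illustrative only; this is the
/// same pattern `in_scope` itself follows):
///
/// ```ignore
/// builder.push_scope(region_scope);
/// let rv = unpack!(block = f(builder));
/// unpack!(block = builder.pop_scope(region_scope, block));
/// ```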
352 pub fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo)) {
353 debug!("push_scope({:?})", region_scope);
354 let vis_scope = self.visibility_scope;
355 self.scopes.push(Scope {
356 visibility_scope: vis_scope,
357 region_scope: region_scope.0,
358 region_scope_span: region_scope.1.span,
359 needs_cleanup: false,
360 drops: vec![],
361 cached_generator_drop: None,
362 cached_exits: FxHashMap(),
363 cached_unwind: CachedBlock::default(),
364 });
365 }
366
367 /// Pops a scope, which should have region scope `region_scope`,
368 /// adding any drops onto the end of `block` that are needed.
369 /// This must match 1-to-1 with `push_scope`.
370 pub fn pop_scope(&mut self,
371 region_scope: (region::Scope, SourceInfo),
372 mut block: BasicBlock)
373 -> BlockAnd<()> {
374 debug!("pop_scope({:?}, {:?})", region_scope, block);
375 // If we are emitting a `drop` statement, we need to have the cached
376 // diverge cleanup pads ready in case that drop panics.
377 let may_panic =
378 self.scopes.last().unwrap().drops.iter().any(|s| s.kind.may_panic());
379 if may_panic {
380 self.diverge_cleanup();
381 }
382 let scope = self.scopes.pop().unwrap();
383 assert_eq!(scope.region_scope, region_scope.0);
384
385 self.cfg.push_end_region(self.hir.tcx(), block, region_scope.1, scope.region_scope);
386 let resume_block = self.resume_block();
387 unpack!(block = build_scope_drops(&mut self.cfg,
388 resume_block,
389 &scope,
390 &self.scopes,
391 block,
392 self.arg_count,
393 false));
394
395 block.unit()
396 }
397
398
399 /// Branch out of `block` to `target`, exiting all scopes up to
400 /// and including `region_scope`. This will insert whatever drops are
401 /// needed, as well as tracking this exit for the SEME region. See
402 /// module comment for details.
403 pub fn exit_scope(&mut self,
404 span: Span,
405 region_scope: (region::Scope, SourceInfo),
406 mut block: BasicBlock,
407 target: BasicBlock) {
408 debug!("exit_scope(region_scope={:?}, block={:?}, target={:?})",
409 region_scope, block, target);
410 let scope_count = 1 + self.scopes.iter().rev()
411 .position(|scope| scope.region_scope == region_scope.0)
412 .unwrap_or_else(|| {
413 span_bug!(span, "region_scope {:?} does not enclose", region_scope)
414 });
415 let len = self.scopes.len();
416 assert!(scope_count < len, "should not use `exit_scope` to pop ALL scopes");
417
418 // If we are emitting a `drop` statement, we need to have the cached
419 // diverge cleanup pads ready in case that drop panics.
420 let may_panic = self.scopes[(len - scope_count)..].iter()
421 .any(|s| s.drops.iter().any(|s| s.kind.may_panic()));
422 if may_panic {
423 self.diverge_cleanup();
424 }
425
426 {
427 let resume_block = self.resume_block();
428 let mut rest = &mut self.scopes[(len - scope_count)..];
429 while let Some((scope, rest_)) = {rest}.split_last_mut() {
430 rest = rest_;
431 block = if let Some(&e) = scope.cached_exits.get(&(target, region_scope.0)) {
432 self.cfg.terminate(block, scope.source_info(span),
433 TerminatorKind::Goto { target: e });
434 return;
435 } else {
436 let b = self.cfg.start_new_block();
437 self.cfg.terminate(block, scope.source_info(span),
438 TerminatorKind::Goto { target: b });
439 scope.cached_exits.insert((target, region_scope.0), b);
440 b
441 };
442
443 // End all regions for scopes out of which we are breaking.
444 self.cfg.push_end_region(self.hir.tcx(), block, region_scope.1, scope.region_scope);
445
446 unpack!(block = build_scope_drops(&mut self.cfg,
447 resume_block,
448 scope,
449 rest,
450 block,
451 self.arg_count,
452 false));
453 }
454 }
455 let scope = &self.scopes[len - scope_count];
456 self.cfg.terminate(block, scope.source_info(span),
457 TerminatorKind::Goto { target: target });
458 }
459
460 /// Creates a path that performs all required cleanup for dropping a generator.
461 ///
462 /// This path terminates in GeneratorDrop. Returns the start of the path.
463 /// None indicates there’s no cleanup to do at this point.
464 pub fn generator_drop_cleanup(&mut self) -> Option<BasicBlock> {
465 if !self.scopes.iter().any(|scope| scope.needs_cleanup) {
466 return None;
467 }
468
469 // Fill in the cache
470 self.diverge_cleanup_gen(true);
471
472 let src_info = self.scopes[0].source_info(self.fn_span);
473 let mut block = self.cfg.start_new_block();
474 let result = block;
475 let resume_block = self.resume_block();
476 let mut rest = &mut self.scopes[..];
477
478 while let Some((scope, rest_)) = {rest}.split_last_mut() {
479 rest = rest_;
480 if !scope.needs_cleanup {
481 continue;
482 }
483 block = if let Some(b) = scope.cached_generator_drop {
484 self.cfg.terminate(block, src_info,
485 TerminatorKind::Goto { target: b });
486 return Some(result);
487 } else {
488 let b = self.cfg.start_new_block();
489 scope.cached_generator_drop = Some(b);
490 self.cfg.terminate(block, src_info,
491 TerminatorKind::Goto { target: b });
492 b
493 };
494
495 // End all regions for scopes out of which we are breaking.
496 self.cfg.push_end_region(self.hir.tcx(), block, src_info, scope.region_scope);
497
498 unpack!(block = build_scope_drops(&mut self.cfg,
499 resume_block,
500 scope,
501 rest,
502 block,
503 self.arg_count,
504 true));
505 }
506
507 self.cfg.terminate(block, src_info, TerminatorKind::GeneratorDrop);
508
509 Some(result)
510 }
511
512 /// Creates a new visibility scope, nested in the current one.
513 pub fn new_visibility_scope(&mut self,
514 span: Span,
515 lint_level: LintLevel,
516 safety: Option<Safety>) -> VisibilityScope {
517 let parent = self.visibility_scope;
518 debug!("new_visibility_scope({:?}, {:?}, {:?}) - parent({:?})={:?}",
519 span, lint_level, safety,
520 parent, self.visibility_scope_info.get(parent));
521 let scope = self.visibility_scopes.push(VisibilityScopeData {
522 span,
523 parent_scope: Some(parent),
524 });
525 let scope_info = VisibilityScopeInfo {
526 lint_root: if let LintLevel::Explicit(lint_root) = lint_level {
527 lint_root
528 } else {
529 self.visibility_scope_info[parent].lint_root
530 },
531 safety: safety.unwrap_or_else(|| {
532 self.visibility_scope_info[parent].safety
533 })
534 };
535 self.visibility_scope_info.push(scope_info);
536 scope
537 }
538
539 // Finding scopes
540 // ==============
541 /// Finds the breakable scope for a given label. This is used for
542 /// resolving `break` and `continue`.
543 pub fn find_breakable_scope(&mut self,
544 span: Span,
545 label: region::Scope)
546 -> &mut BreakableScope<'tcx> {
547 // find the loop-scope with the correct id
548 self.breakable_scopes.iter_mut()
549 .rev()
550 .filter(|breakable_scope| breakable_scope.region_scope == label)
551 .next()
552 .unwrap_or_else(|| span_bug!(span, "no enclosing breakable scope found"))
553 }
554
555 /// Given a span and the current visibility scope, make a SourceInfo.
556 pub fn source_info(&self, span: Span) -> SourceInfo {
557 SourceInfo {
558 span,
559 scope: self.visibility_scope
560 }
561 }
562
563 /// Returns the `region::Scope` of the scope which should be exited by a
564 /// return.
565 pub fn region_scope_of_return_scope(&self) -> region::Scope {
566 // The outermost scope (`scopes[0]`) will be the `CallSiteScope`.
567 // We want `scopes[1]`, which is the `ParameterScope`.
568 assert!(self.scopes.len() >= 2);
569 assert!(match self.scopes[1].region_scope.data() {
570 region::ScopeData::Arguments(_) => true,
571 _ => false,
572 });
573 self.scopes[1].region_scope
574 }
575
576 /// Returns the topmost active scope, which is known to be alive until
577 /// the next scope expression.
578 pub fn topmost_scope(&self) -> region::Scope {
579 self.scopes.last().expect("topmost_scope: no scopes present").region_scope
580 }
581
582 /// Returns the scope that we should use as the lifetime of an
583 /// operand. Basically, an operand must live until it is consumed.
584 /// This is similar to, but not quite the same as, the temporary
585 /// scope (which can be larger or smaller).
586 ///
587 /// Consider:
588 ///
589 /// let x = foo(bar(X, Y));
590 ///
591 /// We wish to pop the storage for X and Y after `bar()` is
592 /// called, not after the whole `let` is completed.
593 ///
594 /// As another example, if the second argument diverges:
595 ///
596 /// foo(Box::new(2), panic!())
597 ///
598 /// We would allocate the box but then free it on the unwinding
599 /// path; we would also emit a free on the 'success' path from
600 /// panic, but that will turn out to be removed as dead-code.
601 ///
602 /// When building statics/constants, returns `None` since
603 /// intermediate values do not have to be dropped in that case.
604 pub fn local_scope(&self) -> Option<region::Scope> {
605 match self.hir.body_owner_kind {
606 hir::BodyOwnerKind::Const |
607 hir::BodyOwnerKind::Static(_) =>
608 // No need to free storage in this context.
609 None,
610 hir::BodyOwnerKind::Fn =>
611 Some(self.topmost_scope()),
612 }
613 }
614
615 // Schedule an abort block - this is used for some ABIs that cannot unwind
616 pub fn schedule_abort(&mut self) -> BasicBlock {
617 self.scopes[0].needs_cleanup = true;
618 let abortblk = self.cfg.start_new_cleanup_block();
619 let source_info = self.scopes[0].source_info(self.fn_span);
620 self.cfg.terminate(abortblk, source_info, TerminatorKind::Abort);
621 self.cached_resume_block = Some(abortblk);
622 abortblk
623 }
624
625 // Scheduling drops
626 // ================
627 /// Indicates that `place` should be dropped on exit from
628 /// `region_scope`.
629 pub fn schedule_drop(&mut self,
630 span: Span,
631 region_scope: region::Scope,
632 place: &Place<'tcx>,
633 place_ty: Ty<'tcx>) {
634 let needs_drop = self.hir.needs_drop(place_ty);
635 let drop_kind = if needs_drop {
636 DropKind::Value { cached_block: CachedBlock::default() }
637 } else {
638 // Only temps and vars need their storage dead.
639 match *place {
640 Place::Local(index) if index.index() > self.arg_count => DropKind::Storage,
641 _ => return
642 }
643 };
644
645 for scope in self.scopes.iter_mut().rev() {
646 let this_scope = scope.region_scope == region_scope;
647 // When building drops, we try to cache chains of drops so that these drops can be
648 // reused by other drops which branch into the cached (already built)
649 // blocks. This, however, means that whenever we add a drop into a scope which already
650 // had some blocks built (and thus, cached) for it, we must invalidate all caches which
651 // might branch into the scope which had a drop just added to it. This is necessary,
652 // because otherwise some other code might use the cache to branch into already built
653 // chain of drops, essentially ignoring the newly added drop.
654 //
655 // For example, consider two scopes with a drop in each. These are built and
656 // thus the caches are filled:
657 //
658 // +--------------------------------------------------------+
659 // | +---------------------------------+ |
660 // | | +--------+ +-------------+ | +---------------+ |
661 // | | | return | <-+ | drop(outer) | <-+ | drop(middle) | |
662 // | | +--------+ +-------------+ | +---------------+ |
663 // | +------------|outer_scope cache|--+ |
664 // +------------------------------|middle_scope cache|------+
665 //
666 // Now, a new, inner-most scope is added along with a new drop into both inner-most and
667 // outer-most scopes:
668 //
669 // +------------------------------------------------------------+
670 // | +----------------------------------+ |
671 // | | +--------+ +-------------+ | +---------------+ | +-------------+
672 // | | | return | <+ | drop(new) | <-+ | drop(middle) | <--+| drop(inner) |
673 // | | +--------+ | | drop(outer) | | +---------------+ | +-------------+
674 // | | +-+ +-------------+ | |
675 // | +---|invalid outer_scope cache|----+ |
676 // +----=----------------|invalid middle_scope cache|-----------+
677 //
678 // If, when adding `drop(new)`, we do not invalidate the cached blocks for both
679 // outer_scope and middle_scope, then, when building drops for the inner (right-most)
680 // scope, the old, cached blocks, without `drop(new)` will get used, producing the
681 // wrong results.
682 //
683 // The cache and its invalidation for the unwind branch are somewhat special. The cache is
684 // per-drop, rather than per scope, which has several different implications. Adding
685 // a new drop into a scope will not invalidate cached blocks of the prior drops in the
686 // scope. That is true, because none of the already existing drops will have an edge
687 // into a block with the newly added drop.
688 //
689 // Note that this code iterates scopes from the inner-most to the outer-most,
690 // invalidating the caches of each scope visited. This way the bare minimum of the
691 // caches gets invalidated; i.e. if a new drop is added into the middle scope, the
692 // cache of the outer scope stays intact.
693 scope.invalidate_cache(!needs_drop, this_scope);
694 if this_scope {
695 if let DropKind::Value { .. } = drop_kind {
696 scope.needs_cleanup = true;
697 }
698
699 let region_scope_span = region_scope.span(self.hir.tcx(),
700 &self.hir.region_scope_tree);
701 // Attribute scope exit drops to scope's closing brace.
702 let scope_end = self.hir.tcx().sess.codemap().end_point(region_scope_span);
703
704 scope.drops.push(DropData {
705 span: scope_end,
706 location: place.clone(),
707 kind: drop_kind
708 });
709 return;
710 }
711 }
712 span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, place);
713 }
714
715 // Other
716 // =====
717 /// Creates a path that performs all required cleanup for unwinding.
718 ///
719 /// This path terminates in Resume. Returns the start of the path.
720 /// See module comment for more details. None indicates there’s no
721 /// cleanup to do at this point.
722 pub fn diverge_cleanup(&mut self) -> BasicBlock {
723 self.diverge_cleanup_gen(false)
724 }
725
726 fn resume_block(&mut self) -> BasicBlock {
727 if let Some(target) = self.cached_resume_block {
728 target
729 } else {
730 let resumeblk = self.cfg.start_new_cleanup_block();
731 self.cfg.terminate(resumeblk,
732 SourceInfo {
733 scope: ARGUMENT_VISIBILITY_SCOPE,
734 span: self.fn_span
735 },
736 TerminatorKind::Resume);
737 self.cached_resume_block = Some(resumeblk);
738 resumeblk
739 }
740 }
741
742 fn diverge_cleanup_gen(&mut self, generator_drop: bool) -> BasicBlock {
743 // To start, create the resume terminator.
744 let mut target = self.resume_block();
745
746 let Builder { ref mut cfg, ref mut scopes, .. } = *self;
747
748 // Build up the drops in **reverse** order. The end result will
749 // look like:
750 //
751 // scopes[n] -> scopes[n-1] -> ... -> scopes[0]
752 //
753 // However, we build this in **reverse order**. That is, we
754 // process scopes[0], then scopes[1], etc, pointing each one at
755 // the result generated by the one before. Along the way, we
756 // store caches. If everything is cached, we'll just walk right
757 // to left, reading the cached results but never creating anything.
758
759 if scopes.iter().any(|scope| scope.needs_cleanup) {
760 for scope in scopes.iter_mut() {
761 target = build_diverge_scope(self.hir.tcx(), cfg, scope.region_scope_span,
762 scope, target, generator_drop);
763 }
764 }
765
766 target
767 }
768
769 /// Utility function for *non*-scope code to build their own drops
770 pub fn build_drop(&mut self,
771 block: BasicBlock,
772 span: Span,
773 location: Place<'tcx>,
774 ty: Ty<'tcx>) -> BlockAnd<()> {
775 if !self.hir.needs_drop(ty) {
776 return block.unit();
777 }
778 let source_info = self.source_info(span);
779 let next_target = self.cfg.start_new_block();
780 let diverge_target = self.diverge_cleanup();
781 self.cfg.terminate(block, source_info,
782 TerminatorKind::Drop {
783 location,
784 target: next_target,
785 unwind: Some(diverge_target),
786 });
787 next_target.unit()
788 }
789
790 /// Utility function for *non*-scope code to build their own drops
791 pub fn build_drop_and_replace(&mut self,
792 block: BasicBlock,
793 span: Span,
794 location: Place<'tcx>,
795 value: Operand<'tcx>) -> BlockAnd<()> {
796 let source_info = self.source_info(span);
797 let next_target = self.cfg.start_new_block();
798 let diverge_target = self.diverge_cleanup();
799 self.cfg.terminate(block, source_info,
800 TerminatorKind::DropAndReplace {
801 location,
802 value,
803 target: next_target,
804 unwind: Some(diverge_target),
805 });
806 next_target.unit()
807 }
808
809 /// Create an Assert terminator and return the success block.
810 /// If the boolean condition operand is not the expected value,
811 /// a runtime panic will be caused with the given message.
812 pub fn assert(&mut self, block: BasicBlock,
813 cond: Operand<'tcx>,
814 expected: bool,
815 msg: AssertMessage<'tcx>,
816 span: Span)
817 -> BasicBlock {
818 let source_info = self.source_info(span);
819
820 let success_block = self.cfg.start_new_block();
821 let cleanup = self.diverge_cleanup();
822
823 self.cfg.terminate(block, source_info,
824 TerminatorKind::Assert {
825 cond,
826 expected,
827 msg,
828 target: success_block,
829 cleanup: Some(cleanup),
830 });
831
832 success_block
833 }
834 }
835
836 /// Builds drops for pop_scope and exit_scope.
837 fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
838 resume_block: BasicBlock,
839 scope: &Scope<'tcx>,
840 earlier_scopes: &[Scope<'tcx>],
841 mut block: BasicBlock,
842 arg_count: usize,
843 generator_drop: bool)
844 -> BlockAnd<()> {
845 debug!("build_scope_drops({:?} -> {:?})", block, scope);
846 let mut iter = scope.drops.iter().rev();
847 while let Some(drop_data) = iter.next() {
848 let source_info = scope.source_info(drop_data.span);
849 match drop_data.kind {
850 DropKind::Value { .. } => {
851 // Try to find the next block with its cached block for us to
852 // diverge into, either a previous block in this current scope or
853 // the top of the previous scope.
854 //
855 // If it wasn't for EndRegion, we could just chain all the DropData
856 // together and pick the first DropKind::Value. Please do that
857 // when we replace EndRegion with NLL.
858 let on_diverge = iter.clone().filter_map(|dd| {
859 match dd.kind {
860 DropKind::Value { cached_block } => Some(cached_block),
861 DropKind::Storage => None
862 }
863 }).next().or_else(|| {
864 if earlier_scopes.iter().any(|scope| scope.needs_cleanup) {
865 // If *any* scope requires cleanup code to be run,
866 // we must use the cached unwind from the *topmost*
867 // scope, to ensure all EndRegions from surrounding
868 // scopes are executed before the drop code runs.
869 Some(earlier_scopes.last().unwrap().cached_unwind)
870 } else {
871 // We don't need any further cleanup, so return None
872 // to avoid creating a landing pad. We can skip
873 // EndRegions because all local regions end anyway
874 // when the function unwinds.
875 //
876 // This is an important optimization because LLVM is
877 // terrible at optimizing landing pads. FIXME: I think
878 // it would be cleaner and better to do this optimization
879 // in SimplifyCfg instead of here.
880 None
881 }
882 });
883
884 let on_diverge = on_diverge.map(|cached_block| {
885 cached_block.get(generator_drop).unwrap_or_else(|| {
886 span_bug!(drop_data.span, "cached block not present?")
887 })
888 });
889
890 let next = cfg.start_new_block();
891 cfg.terminate(block, source_info, TerminatorKind::Drop {
892 location: drop_data.location.clone(),
893 target: next,
894 unwind: Some(on_diverge.unwrap_or(resume_block))
895 });
896 block = next;
897 }
898 DropKind::Storage => {}
899 }
900
901 // We do not need to emit StorageDead for generator drops
902 if generator_drop {
903 continue
904 }
905
906 // Drop the storage for both value and storage drops.
907 // Only temps and vars need their storage dead.
908 match drop_data.location {
909 Place::Local(index) if index.index() > arg_count => {
910 cfg.push(block, Statement {
911 source_info,
912 kind: StatementKind::StorageDead(index)
913 });
914 }
915 _ => continue
916 }
917 }
918 block.unit()
919 }
920
921 fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
922 cfg: &mut CFG<'tcx>,
923 span: Span,
924 scope: &mut Scope<'tcx>,
925 mut target: BasicBlock,
926 generator_drop: bool)
927 -> BasicBlock
928 {
929 // Build up the drops in **reverse** order. The end result will
930 // look like:
931 //
932 // [EndRegion Block] -> [drops[n]] -...-> [drops[0]] -> [Free] -> [target]
933 // | |
934 // +---------------------------------------------------------+
935 // code for scope
936 //
937 // The code in this function reads from right to left. At each
938 // point, we check for cached blocks representing the
939 // remainder. If everything is cached, we'll just walk right to
940 // left reading the cached results but never create anything.
941
942 let visibility_scope = scope.visibility_scope;
943 let source_info = |span| SourceInfo {
944 span,
945 scope: visibility_scope
946 };
947
948 // Next, build up the drops. Here we iterate the vector in
949 // *forward* order, so that we generate drops[0] first (right to
950 // left in diagram above).
951 for (j, drop_data) in scope.drops.iter_mut().enumerate() {
952 debug!("build_diverge_scope drop_data[{}]: {:?}", j, drop_data);
953 // Only full value drops are emitted in the diverging path,
954 // not StorageDead.
955 //
956 // Note: This may not actually be what we desire (are we
957 // "freeing" stack storage as we unwind, or merely observing a
958 // frozen stack)? In particular, the intent may have been to
959 // match the behavior of clang, but on inspection eddyb says
960 // this is not what clang does.
961 let cached_block = match drop_data.kind {
962 DropKind::Value { ref mut cached_block } => cached_block.ref_mut(generator_drop),
963 DropKind::Storage => continue
964 };
965 target = if let Some(cached_block) = *cached_block {
966 cached_block
967 } else {
968 let block = cfg.start_new_cleanup_block();
969 cfg.terminate(block, source_info(drop_data.span),
970 TerminatorKind::Drop {
971 location: drop_data.location.clone(),
972 target,
973 unwind: None
974 });
975 *cached_block = Some(block);
976 block
977 };
978 }
979
980 // Finally, push the EndRegion block, used by mir-borrowck, and set
981 // `cached_unwind` to point to it (the block becomes a trivial goto after the
982 // pass that removes all EndRegions).
983 target = {
984 let cached_block = scope.cached_unwind.ref_mut(generator_drop);
985 if let Some(cached_block) = *cached_block {
986 cached_block
987 } else {
988 let block = cfg.start_new_cleanup_block();
989 cfg.push_end_region(tcx, block, source_info(span), scope.region_scope);
990 cfg.terminate(block, source_info(span), TerminatorKind::Goto { target: target });
991 *cached_block = Some(block);
992 block
993 }
994 };
995
996 debug!("build_diverge_scope({:?}, {:?}) = {:?}", scope, span, target);
997
998 target
999 }