1 // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 /*!
12 Managing the scope stack. The scopes are tied to lexical scopes, so as
13 we descend the HAIR, we push a scope on the stack, translate its
14 contents, and then pop it off. Every scope is named by a
15 `CodeExtent`.
16
17 ### SEME Regions
18
19 When pushing a new scope, we record the current point in the graph (a
20 basic block); this marks the entry to the scope. We then generate more
21 stuff in the control-flow graph. Whenever the scope is exited, either
22 via a `break` or `return` or just by fallthrough, that marks an exit
23 from the scope. Each lexical scope thus corresponds to a single-entry,
24 multiple-exit (SEME) region in the control-flow graph.
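For instance, in the following hypothetical fragment (the names are
illustrative) the block scope has a single entry but two exits: the early
`return` and the normal fallthrough at the end.

```rust,ignore
{
    let x = compute();
    if cond { return; } // first exit from the scope
    use_it(x);
}                       // second exit (fallthrough)
```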
25
26 For now, we keep a mapping from each `CodeExtent` to its
27 corresponding SEME region for later reference (see caveat in next
28 paragraph). This is because region scopes are tied to
29 them. Eventually, when we shift to non-lexical lifetimes, three should
30 be no need to remember this mapping.
31
32 There is one additional wrinkle, actually, that I wanted to hide from
33 you but duty compels me to mention. In the course of translating
34 matches, it sometimes happens that certain code (namely guards) gets
35 executed multiple times. This means that the same lexical scope may
36 in fact correspond to multiple, disjoint SEME regions. So in fact our
37 mapping is from one scope to a vector of SEME regions.
38
39 ### Drops
40
41 The primary purpose for scopes is to insert drops: while translating
42 the contents, we also accumulate lvalues that need to be dropped upon
43 exit from each scope. This is done by calling `schedule_drop`. Once a
44 drop is scheduled, whenever we branch out we will insert drops of all
45 those lvalues onto the outgoing edge. Note that we don't know the full
46 set of scheduled drops up front, and so whenever we exit from the
47 scope we only drop the values scheduled thus far. For example, consider
48 the scope S corresponding to this loop:
49
50 ```rust,ignore
51 loop {
52 let x = ...;
53 if cond { break; }
54 let y = ...;
55 }
56 ```
57
58 When processing the `let x`, we will add one drop to the scope for
59 `x`. The break will then insert a drop for `x`. When we process `let
60 y`, we will add another drop (in fact, to a subscope, but let's ignore
61 that for now); any later drops would also drop `y`.
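As a rough sketch (the `x_lvalue`, `y_lvalue`, type and span variables below
are illustrative placeholders, not real builder code), the calls made while
lowering that loop look roughly like this:

```rust,ignore
// while lowering `let x = ...;`
this.schedule_drop(span, loop_extent, &x_lvalue, x_ty);
// while lowering `break`: emit the drops scheduled so far (just `x`)
// on the edge that leaves the loop
this.exit_scope(span, loop_extent, block, break_target);
// while lowering `let y = ...;`
this.schedule_drop(span, loop_extent, &y_lvalue, y_ty);
```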
62
63 ### Early exit
64
65 There are numerous "normal" ways to early exit a scope: `break`,
66 `continue`, `return` (panics are handled separately). Whenever an
67 early exit occurs, the method `exit_scope` is called. It is given the
68 current point in execution where the early exit occurs, as well as the
69 scope you want to branch to (note that all early exits go to some
70 enclosing scope). `exit_scope` will record this exit point and
71 also add all drops.
72
73 Panics are handled in a similar fashion, except that a panic always
74 returns out to the `DIVERGE_BLOCK`. To trigger a panic, simply call
75 `panic(p)` with the current point `p`. Or else you can call
76 `diverge_cleanup`, which will produce a block that you can branch to
77 which does the appropriate cleanup and then diverges. `panic(p)`
78 simply calls `diverge_cleanup()` and adds an edge from `p` to the
79 result.
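Concretely, this is (slightly abridged) how `build_drop` later in this file
attaches the cleanup path to a `Drop` terminator: the unwind edge points at
the block chain produced by `diverge_cleanup`.

```rust,ignore
let diverge_target = self.diverge_cleanup();
self.cfg.terminate(block, source_info,
                   TerminatorKind::Drop {
                       location: location,
                       target: next_target,
                       unwind: diverge_target,
                   });
```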
80
81 ### Loop scopes
82
83 In addition to the normal scope stack, we track a loop scope stack
84 that contains only loops. It tracks where a `break` and `continue`
85 should go to.
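A hedged sketch of how this stack is driven (the block names are
illustrative):

```rust,ignore
// `in_loop_scope` pushes a `LoopScope` carrying the continue/break
// blocks, runs the closure to build the loop body, then pops it again.
let might_break = this.in_loop_scope(loop_block, exit_block, |this| {
    // `break` and `continue` inside the body are resolved against the
    // stack via `find_loop_scope`.
});
```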
86
87 */
88
89 use build::{BlockAnd, BlockAndExtension, Builder, CFG, ScopeAuxiliary, ScopeId};
90 use rustc::middle::region::{CodeExtent, CodeExtentData};
91 use rustc::middle::lang_items;
92 use rustc::ty::subst::{Kind, Substs, Subst};
93 use rustc::ty::{Ty, TyCtxt};
94 use rustc::mir::repr::*;
95 use syntax_pos::Span;
96 use rustc_data_structures::indexed_vec::Idx;
97 use rustc_data_structures::fnv::FnvHashMap;
98
99 use std::iter;
100
101 pub struct Scope<'tcx> {
102 /// the scope-id within the scope_auxiliary
103 id: ScopeId,
104
105 /// The visibility scope this scope was created in.
106 visibility_scope: VisibilityScope,
107
108 /// the extent of this scope within source code; also stored in
109 /// `ScopeAuxiliary`, but kept here for convenience
110 extent: CodeExtent,
111
112 /// Whether there's anything to do for the cleanup path, that is,
113 /// when unwinding through this scope. This includes destructors,
114 /// but not StorageDead statements, which don't get emitted at all
115 /// for unwinding, for several reasons:
116 /// * clang doesn't emit llvm.lifetime.end for C++ unwinding
117 /// * LLVM's memory dependency analysis can't handle it atm
118 /// * polluting the cleanup MIR with StorageDead creates
119 /// landing pads even though there are no actual destructors
120 /// * freeing up stack space has no effect during unwinding
121 needs_cleanup: bool,
122
123 /// set of lvalues to drop when exiting this scope. This starts
124 /// out empty but grows as variables are declared during the
125 /// building process. This is a stack, so we always drop from the
126 /// end of the vector (top of the stack) first.
127 drops: Vec<DropData<'tcx>>,
128
129 /// A scope may only have one associated free, because:
130 ///
131 /// 1. We require a `free` to only be scheduled in the scope of
132 /// `EXPR` in `box EXPR`;
133 /// 2. It only makes sense to have it translated into the diverge-path.
134 ///
135 /// This kind of drop will be run *after* all the regular drops
136 /// scheduled onto this scope, because drops may have dependencies
137 /// on the allocated memory.
138 ///
139 /// This is expected to go away once `box EXPR` becomes a sugar
140 /// for placement protocol and gets desugared in some earlier
141 /// stage.
142 free: Option<FreeData<'tcx>>,
143
144 /// The cache for drop chain on “normal” exit into a particular BasicBlock.
145 cached_exits: FnvHashMap<(BasicBlock, CodeExtent), BasicBlock>,
146 }
147
148 struct DropData<'tcx> {
149 /// span where drop obligation was incurred (typically where lvalue was declared)
150 span: Span,
151
152 /// lvalue to drop
153 location: Lvalue<'tcx>,
154
155 /// Whether this is a full value Drop, or just a StorageDead.
156 kind: DropKind
157 }
158
159 enum DropKind {
160 Value {
161 /// The cached block for the cleanups-on-diverge path. This block
162 /// contains code to run the current drop and all the preceding
163 /// drops (i.e. those having lower index in Drop’s Scope drop
164 /// array)
165 cached_block: Option<BasicBlock>
166 },
167 Storage
168 }
169
170 struct FreeData<'tcx> {
171 /// span where free obligation was incurred
172 span: Span,
173
174 /// Lvalue containing the allocated box.
175 value: Lvalue<'tcx>,
176
177 /// type of item for which the box was allocated (i.e. the T in Box<T>).
178 item_ty: Ty<'tcx>,
179
180 /// The cached block containing code to run the free. The block will also execute all the drops
181 /// in the scope.
182 cached_block: Option<BasicBlock>
183 }
184
185 #[derive(Clone, Debug)]
186 pub struct LoopScope {
187 /// Extent of the loop
188 pub extent: CodeExtent,
189 /// Where the body of the loop begins
190 pub continue_block: BasicBlock,
191 /// Block to branch into when the loop terminates (either by being broken out of with `break`, or by
192 /// having its condition become false)
193 pub break_block: BasicBlock,
194 /// Indicates the reachability of the break_block for this loop
195 pub might_break: bool
196 }
197
198 impl<'tcx> Scope<'tcx> {
199 /// Invalidate all the cached blocks in the scope.
200 ///
201 /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
202 /// larger extent of code.
203 ///
204 /// `unwind` controls whether caches for the unwind branch are also invalidated.
205 fn invalidate_cache(&mut self, unwind: bool) {
206 self.cached_exits.clear();
207 if !unwind { return; }
208 for dropdata in &mut self.drops {
209 if let DropKind::Value { ref mut cached_block } = dropdata.kind {
210 *cached_block = None;
211 }
212 }
213 if let Some(ref mut freedata) = self.free {
214 freedata.cached_block = None;
215 }
216 }
217
218 /// Returns the cached entrypoint for diverging exit from this scope.
219 ///
220 /// Precondition: the caches must be fully filled (i.e. diverge_cleanup is called) in order for
221 /// this method to work correctly.
222 fn cached_block(&self) -> Option<BasicBlock> {
223 let mut drops = self.drops.iter().rev().filter_map(|data| {
224 match data.kind {
225 DropKind::Value { cached_block } => Some(cached_block),
226 DropKind::Storage => None
227 }
228 });
229 if let Some(cached_block) = drops.next() {
230 Some(cached_block.expect("drop cache is not filled"))
231 } else if let Some(ref data) = self.free {
232 Some(data.cached_block.expect("free cache is not filled"))
233 } else {
234 None
235 }
236 }
237
238 /// Given a span and this scope's visibility scope, make a SourceInfo.
239 fn source_info(&self, span: Span) -> SourceInfo {
240 SourceInfo {
241 span: span,
242 scope: self.visibility_scope
243 }
244 }
245 }
246
247 impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
248 // Adding and removing scopes
249 // ==========================
250 /// Start a loop scope, which tracks where `continue` and `break`
251 /// should branch to. See module comment for more details.
252 ///
253 /// Returns the might_break attribute of the LoopScope used.
254 pub fn in_loop_scope<F>(&mut self,
255 loop_block: BasicBlock,
256 break_block: BasicBlock,
257 f: F)
258 -> bool
259 where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>)
260 {
261 let extent = self.extent_of_innermost_scope();
262 let loop_scope = LoopScope {
263 extent: extent.clone(),
264 continue_block: loop_block,
265 break_block: break_block,
266 might_break: false
267 };
268 self.loop_scopes.push(loop_scope);
269 f(self);
270 let loop_scope = self.loop_scopes.pop().unwrap();
271 assert!(loop_scope.extent == extent);
272 loop_scope.might_break
273 }
274
275 /// Convenience wrapper that pushes a scope and then executes `f`
276 /// to build its contents, popping the scope afterwards.
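/// A hedged usage sketch (the extent, block, and closure body are
/// illustrative, not an actual call site):
///
/// ```rust,ignore
/// let rv = unpack!(block = this.in_scope(extent, block, |this| {
///     // build the scope's contents, returning a BlockAnd<R>; drops
///     // scheduled against `extent` are emitted when the scope is popped
///     block.unit()
/// }));
/// ```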
277 pub fn in_scope<F, R>(&mut self, extent: CodeExtent, mut block: BasicBlock, f: F) -> BlockAnd<R>
278 where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd<R>
279 {
280 debug!("in_scope(extent={:?}, block={:?})", extent, block);
281 self.push_scope(extent, block);
282 let rv = unpack!(block = f(self));
283 unpack!(block = self.pop_scope(extent, block));
284 debug!("in_scope: exiting extent={:?} block={:?}", extent, block);
285 block.and(rv)
286 }
287
288 /// Push a scope onto the stack. You can then build code in this
289 /// scope and call `pop_scope` afterwards. Note that these two
290 /// calls must be paired; using `in_scope` as a convenience
291 /// wrapper may be preferable.
292 pub fn push_scope(&mut self, extent: CodeExtent, entry: BasicBlock) {
293 debug!("push_scope({:?})", extent);
294 let id = ScopeId::new(self.scope_auxiliary.len());
295 let vis_scope = self.visibility_scope;
296 self.scopes.push(Scope {
297 id: id,
298 visibility_scope: vis_scope,
299 extent: extent,
300 needs_cleanup: false,
301 drops: vec![],
302 free: None,
303 cached_exits: FnvHashMap()
304 });
305 self.scope_auxiliary.push(ScopeAuxiliary {
306 extent: extent,
307 dom: self.cfg.current_location(entry),
308 postdoms: vec![]
309 });
310 }
311
312 /// Pops a scope, which should have extent `extent`, adding any
313 /// drops onto the end of `block` that are needed. This must
314 /// match 1-to-1 with `push_scope`.
315 pub fn pop_scope(&mut self,
316 extent: CodeExtent,
317 mut block: BasicBlock)
318 -> BlockAnd<()> {
319 debug!("pop_scope({:?}, {:?})", extent, block);
320 // We need to have `cached_block`s available for all the drops, so we call diverge_cleanup
321 // to make sure all the `cached_block`s are filled in.
322 self.diverge_cleanup();
323 let scope = self.scopes.pop().unwrap();
324 assert_eq!(scope.extent, extent);
325 unpack!(block = build_scope_drops(&mut self.cfg, &scope, &self.scopes, block));
326 self.scope_auxiliary[scope.id]
327 .postdoms
328 .push(self.cfg.current_location(block));
329 block.unit()
330 }
331
332
333 /// Branch out of `block` to `target`, exiting all scopes up to
334 /// and including `extent`. This will insert whatever drops are
335 /// needed, as well as tracking this exit for the SEME region. See
336 /// module comment for details.
337 pub fn exit_scope(&mut self,
338 span: Span,
339 extent: CodeExtent,
340 mut block: BasicBlock,
341 target: BasicBlock) {
342 debug!("exit_scope(extent={:?}, block={:?}, target={:?})", extent, block, target);
343 let scope_count = 1 + self.scopes.iter().rev().position(|scope| scope.extent == extent)
344 .unwrap_or_else(||{
345 span_bug!(span, "extent {:?} does not enclose", extent)
346 });
347 let len = self.scopes.len();
348 assert!(scope_count < len, "should not use `exit_scope` to pop ALL scopes");
349 let tmp = self.get_unit_temp();
350 {
351 let mut rest = &mut self.scopes[(len - scope_count)..];
352 while let Some((scope, rest_)) = {rest}.split_last_mut() {
353 rest = rest_;
354 block = if let Some(&e) = scope.cached_exits.get(&(target, extent)) {
355 self.cfg.terminate(block, scope.source_info(span),
356 TerminatorKind::Goto { target: e });
357 return;
358 } else {
359 let b = self.cfg.start_new_block();
360 self.cfg.terminate(block, scope.source_info(span),
361 TerminatorKind::Goto { target: b });
362 scope.cached_exits.insert((target, extent), b);
363 b
364 };
365 unpack!(block = build_scope_drops(&mut self.cfg, scope, rest, block));
366 if let Some(ref free_data) = scope.free {
367 let next = self.cfg.start_new_block();
368 let free = build_free(self.hir.tcx(), &tmp, free_data, next);
369 self.cfg.terminate(block, scope.source_info(span), free);
370 block = next;
371 }
372 self.scope_auxiliary[scope.id]
373 .postdoms
374 .push(self.cfg.current_location(block));
375 }
376 }
377 let scope = &self.scopes[len - scope_count];
378 self.cfg.terminate(block, scope.source_info(span),
379 TerminatorKind::Goto { target: target });
380 }
381
382 /// Creates a new visibility scope, nested in the current one.
383 pub fn new_visibility_scope(&mut self, span: Span) -> VisibilityScope {
384 let parent = self.visibility_scope;
385 let scope = VisibilityScope::new(self.visibility_scopes.len());
386 self.visibility_scopes.push(VisibilityScopeData {
387 span: span,
388 parent_scope: Some(parent),
389 });
390 scope
391 }
392
393 // Finding scopes
394 // ==============
395 /// Finds the loop scope for a given label. This is used for
396 /// resolving `break` and `continue`.
397 pub fn find_loop_scope(&mut self,
398 span: Span,
399 label: Option<CodeExtent>)
400 -> &mut LoopScope {
401 let loop_scopes = &mut self.loop_scopes;
402 match label {
403 None => {
404 // no label? return the innermost loop scope
405 loop_scopes.iter_mut().rev().next()
406 }
407 Some(label) => {
408 // otherwise, find the loop-scope with the correct id
409 loop_scopes.iter_mut()
410 .rev()
411 .filter(|loop_scope| loop_scope.extent == label)
412 .next()
413 }
414 }.unwrap_or_else(|| span_bug!(span, "no enclosing loop scope found?"))
415 }
416
417 /// Given a span and the current visibility scope, make a SourceInfo.
418 pub fn source_info(&self, span: Span) -> SourceInfo {
419 SourceInfo {
420 span: span,
421 scope: self.visibility_scope
422 }
423 }
424
425 pub fn extent_of_innermost_scope(&self) -> CodeExtent {
426 self.scopes.last().map(|scope| scope.extent).unwrap()
427 }
428
429 /// Returns the extent of the scope which should be exited by a
430 /// return.
431 pub fn extent_of_return_scope(&self) -> CodeExtent {
432 // The outermost scope (`scopes[0]`) will be the `CallSiteScope`.
433 // We want `scopes[1]`, which is the `ParameterScope`.
434 assert!(self.scopes.len() >= 2);
435 assert!(match self.hir.tcx().region_maps.code_extent_data(self.scopes[1].extent) {
436 CodeExtentData::ParameterScope { .. } => true,
437 _ => false,
438 });
439 self.scopes[1].extent
440 }
441
442 // Scheduling drops
443 // ================
444 /// Indicates that `lvalue` should be dropped on exit from
445 /// `extent`.
446 pub fn schedule_drop(&mut self,
447 span: Span,
448 extent: CodeExtent,
449 lvalue: &Lvalue<'tcx>,
450 lvalue_ty: Ty<'tcx>) {
451 let needs_drop = self.hir.needs_drop(lvalue_ty);
452 let drop_kind = if needs_drop {
453 DropKind::Value { cached_block: None }
454 } else {
455 // Only temps and vars need their storage dead.
456 match *lvalue {
457 Lvalue::Temp(_) | Lvalue::Var(_) => DropKind::Storage,
458 _ => return
459 }
460 };
461
462 for scope in self.scopes.iter_mut().rev() {
463 let this_scope = scope.extent == extent;
464 // When building drops, we try to cache chains of drops in such a way that these drops
465 // can be reused by the drops which would branch into the cached (already built)
466 // blocks. This, however, means that whenever we add a drop into a scope which already
467 // had some blocks built (and thus, cached) for it, we must invalidate all caches which
468 // might branch into the scope which had a drop just added to it. This is necessary,
469 // because otherwise some other code might use the cache to branch into an already built
470 // chain of drops, essentially ignoring the newly added drop.
471 //
472 // For example consider there’s two scopes with a drop in each. These are built and
473 // thus the caches are filled:
474 //
475 // +--------------------------------------------------------+
476 // | +---------------------------------+ |
477 // | | +--------+ +-------------+ | +---------------+ |
478 // | | | return | <-+ | drop(outer) | <-+ | drop(middle) | |
479 // | | +--------+ +-------------+ | +---------------+ |
480 // | +------------|outer_scope cache|--+ |
481 // +------------------------------|middle_scope cache|------+
482 //
483 // Now, a new, inner-most scope is added along with a new drop into both inner-most and
484 // outer-most scopes:
485 //
486 // +------------------------------------------------------------+
487 // | +----------------------------------+ |
488 // | | +--------+ +-------------+ | +---------------+ | +-------------+
489 // | | | return | <+ | drop(new) | <-+ | drop(middle) | <--+| drop(inner) |
490 // | | +--------+ | | drop(outer) | | +---------------+ | +-------------+
491 // | | +-+ +-------------+ | |
492 // | +---|invalid outer_scope cache|----+ |
493 // +----=----------------|invalid middle_scope cache|-----------+
494 //
495 // If, when adding `drop(new)` we do not invalidate the cached blocks for both
496 // outer_scope and middle_scope, then, when building drops for the inner (right-most)
497 // scope, the old, cached blocks, without `drop(new)` will get used, producing the
498 // wrong results.
499 //
500 // The cache and its invalidation for the unwind branch are somewhat special. The cache is
501 // per-drop, rather than per-scope, which has several different implications. Adding
502 // a new drop into a scope will not invalidate cached blocks of the prior drops in the
503 // scope. That is true because none of the already existing drops will have an edge
504 // into a block with the newly added drop.
505 //
506 // Note that this code iterates scopes from the inner-most to the outer-most,
507 // invalidating caches of each scope visited. This way only the bare minimum of
508 // caches gets invalidated; i.e. if a new drop is added into the middle scope, the
509 // cache of the outer scope stays intact.
510 let invalidate_unwind = needs_drop && !this_scope;
511 scope.invalidate_cache(invalidate_unwind);
512 if this_scope {
513 if let DropKind::Value { .. } = drop_kind {
514 scope.needs_cleanup = true;
515 }
516 scope.drops.push(DropData {
517 span: span,
518 location: lvalue.clone(),
519 kind: drop_kind
520 });
521 return;
522 }
523 }
524 span_bug!(span, "extent {:?} not in scope to drop {:?}", extent, lvalue);
525 }
526
527 /// Schedule dropping of a not-yet-fully-initialised box.
528 ///
529 /// This cleanup will only be translated into the unwind branch.
530 /// The extent should be for the `EXPR` inside `box EXPR`.
531 /// There may only be one “free” scheduled in any given scope.
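/// A hedged sketch of a call site (the span, extent, and lvalue names are
/// illustrative):
///
/// ```rust,ignore
/// // while lowering `box EXPR`, once storage for the box exists but its
/// // contents are not yet initialised:
/// this.schedule_box_free(expr_span, expr_extent, &box_lvalue, value_ty);
/// ```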
532 pub fn schedule_box_free(&mut self,
533 span: Span,
534 extent: CodeExtent,
535 value: &Lvalue<'tcx>,
536 item_ty: Ty<'tcx>) {
537 for scope in self.scopes.iter_mut().rev() {
538 // See the comment in schedule_drop above. The primary difference is that we invalidate
539 // the unwind blocks unconditionally. That’s because the box free may be considered
540 // the outer-most cleanup within the scope.
541 scope.invalidate_cache(true);
542 if scope.extent == extent {
543 assert!(scope.free.is_none(), "scope already has a scheduled free!");
544 scope.needs_cleanup = true;
545 scope.free = Some(FreeData {
546 span: span,
547 value: value.clone(),
548 item_ty: item_ty,
549 cached_block: None
550 });
551 return;
552 }
553 }
554 span_bug!(span, "extent {:?} not in scope to free {:?}", extent, value);
555 }
556
557 // Other
558 // =====
559 /// Creates a path that performs all required cleanup for unwinding.
560 ///
561 /// This path terminates in Resume. Returns the start of the path.
562 /// See module comment for more details. None indicates there’s no
563 /// cleanup to do at this point.
564 pub fn diverge_cleanup(&mut self) -> Option<BasicBlock> {
565 if !self.scopes.iter().any(|scope| scope.needs_cleanup) {
566 return None;
567 }
568 assert!(!self.scopes.is_empty()); // or `any` above would be false
569
570 let unit_temp = self.get_unit_temp();
571 let Builder { ref mut hir, ref mut cfg, ref mut scopes,
572 ref mut cached_resume_block, .. } = *self;
573
574 // Build up the drops in **reverse** order. The end result will
575 // look like:
576 //
577 // scopes[n] -> scopes[n-1] -> ... -> scopes[0]
578 //
579 // However, we build this in **reverse order**. That is, we
580 // process scopes[0], then scopes[1], etc, pointing each one at
581 // the result generated from the one before. Along the way, we
582 // store caches. If everything is cached, we'll just walk right
583 // to left reading the cached results but never create anything.
584
585 // To start, create the resume terminator.
586 let mut target = if let Some(target) = *cached_resume_block {
587 target
588 } else {
589 let resumeblk = cfg.start_new_cleanup_block();
590 cfg.terminate(resumeblk,
591 scopes[0].source_info(self.fn_span),
592 TerminatorKind::Resume);
593 *cached_resume_block = Some(resumeblk);
594 resumeblk
595 };
596
597 for scope in scopes.iter_mut().filter(|s| s.needs_cleanup) {
598 target = build_diverge_scope(hir.tcx(), cfg, &unit_temp, scope, target);
599 }
600 Some(target)
601 }
602
603 /// Utility function for *non*-scope code to build its own drops
604 pub fn build_drop(&mut self,
605 block: BasicBlock,
606 span: Span,
607 location: Lvalue<'tcx>,
608 ty: Ty<'tcx>) -> BlockAnd<()> {
609 if !self.hir.needs_drop(ty) {
610 return block.unit();
611 }
612 let source_info = self.source_info(span);
613 let next_target = self.cfg.start_new_block();
614 let diverge_target = self.diverge_cleanup();
615 self.cfg.terminate(block, source_info,
616 TerminatorKind::Drop {
617 location: location,
618 target: next_target,
619 unwind: diverge_target,
620 });
621 next_target.unit()
622 }
623
624 /// Utility function for *non*-scope code to build its own drops
625 pub fn build_drop_and_replace(&mut self,
626 block: BasicBlock,
627 span: Span,
628 location: Lvalue<'tcx>,
629 value: Operand<'tcx>) -> BlockAnd<()> {
630 let source_info = self.source_info(span);
631 let next_target = self.cfg.start_new_block();
632 let diverge_target = self.diverge_cleanup();
633 self.cfg.terminate(block, source_info,
634 TerminatorKind::DropAndReplace {
635 location: location,
636 value: value,
637 target: next_target,
638 unwind: diverge_target,
639 });
640 next_target.unit()
641 }
642
643 /// Create an Assert terminator and return the success block.
644 /// If the boolean condition operand is not the expected value,
645 /// a runtime panic will be caused with the given message.
646 pub fn assert(&mut self, block: BasicBlock,
647 cond: Operand<'tcx>,
648 expected: bool,
649 msg: AssertMessage<'tcx>,
650 span: Span)
651 -> BasicBlock {
652 let source_info = self.source_info(span);
653
654 let success_block = self.cfg.start_new_block();
655 let cleanup = self.diverge_cleanup();
656
657 self.cfg.terminate(block, source_info,
658 TerminatorKind::Assert {
659 cond: cond,
660 expected: expected,
661 msg: msg,
662 target: success_block,
663 cleanup: cleanup
664 });
665
666 success_block
667 }
668 }
669
670 /// Builds drops for pop_scope and exit_scope.
671 fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
672 scope: &Scope<'tcx>,
673 earlier_scopes: &[Scope<'tcx>],
674 mut block: BasicBlock)
675 -> BlockAnd<()> {
676 let mut iter = scope.drops.iter().rev().peekable();
677 while let Some(drop_data) = iter.next() {
678 let source_info = scope.source_info(drop_data.span);
679 if let DropKind::Value { .. } = drop_data.kind {
680 // Try to find the next block with its cached block
681 // for us to diverge into in case the drop panics.
682 let on_diverge = iter.peek().iter().filter_map(|dd| {
683 match dd.kind {
684 DropKind::Value { cached_block } => cached_block,
685 DropKind::Storage => None
686 }
687 }).next();
688 // If there are no `cached_block`s within the current scope,
689 // we must look for one in an enclosing scope.
690 let on_diverge = on_diverge.or_else(||{
691 earlier_scopes.iter().rev().flat_map(|s| s.cached_block()).next()
692 });
693 let next = cfg.start_new_block();
694 cfg.terminate(block, source_info, TerminatorKind::Drop {
695 location: drop_data.location.clone(),
696 target: next,
697 unwind: on_diverge
698 });
699 block = next;
700 }
701 match drop_data.kind {
702 DropKind::Value { .. } |
703 DropKind::Storage => {
704 // Only temps and vars need their storage dead.
705 match drop_data.location {
706 Lvalue::Temp(_) | Lvalue::Var(_) => {}
707 _ => continue
708 }
709
710 cfg.push(block, Statement {
711 source_info: source_info,
712 kind: StatementKind::StorageDead(drop_data.location.clone())
713 });
714 }
715 }
716 }
717 block.unit()
718 }
719
720 fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
721 cfg: &mut CFG<'tcx>,
722 unit_temp: &Lvalue<'tcx>,
723 scope: &mut Scope<'tcx>,
724 mut target: BasicBlock)
725 -> BasicBlock
726 {
727 // Build up the drops in **reverse** order. The end result will
728 // look like:
729 //
730 // [drops[n]] -...-> [drops[0]] -> [Free] -> [target]
731 // | |
732 // +------------------------------------+
733 // code for scope
734 //
735 // The code in this function reads from right to left. At each
736 // point, we check for cached blocks representing the
737 // remainder. If everything is cached, we'll just walk right to
738 // left reading the cached results but never create anything.
739
740 let visibility_scope = scope.visibility_scope;
741 let source_info = |span| SourceInfo {
742 span: span,
743 scope: visibility_scope
744 };
745
746 // Next, build up any free.
747 if let Some(ref mut free_data) = scope.free {
748 target = if let Some(cached_block) = free_data.cached_block {
749 cached_block
750 } else {
751 let into = cfg.start_new_cleanup_block();
752 cfg.terminate(into, source_info(free_data.span),
753 build_free(tcx, unit_temp, free_data, target));
754 free_data.cached_block = Some(into);
755 into
756 };
757 }
758
759 // Next, build up the drops. Here we iterate the vector in
760 // *forward* order, so that we generate drops[0] first (right to
761 // left in diagram above).
762 for drop_data in &mut scope.drops {
763 // Only full value drops are emitted in the diverging path,
764 // not StorageDead.
765 let cached_block = match drop_data.kind {
766 DropKind::Value { ref mut cached_block } => cached_block,
767 DropKind::Storage => continue
768 };
769 target = if let Some(cached_block) = *cached_block {
770 cached_block
771 } else {
772 let block = cfg.start_new_cleanup_block();
773 cfg.terminate(block, source_info(drop_data.span),
774 TerminatorKind::Drop {
775 location: drop_data.location.clone(),
776 target: target,
777 unwind: None
778 });
779 *cached_block = Some(block);
780 block
781 };
782 }
783
784 target
785 }
786
787 fn build_free<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
788 unit_temp: &Lvalue<'tcx>,
789 data: &FreeData<'tcx>,
790 target: BasicBlock)
791 -> TerminatorKind<'tcx> {
792 let free_func = tcx.lang_items.require(lang_items::BoxFreeFnLangItem)
793 .unwrap_or_else(|e| tcx.sess.fatal(&e));
794 let substs = Substs::new(tcx, iter::once(Kind::from(data.item_ty)));
795 TerminatorKind::Call {
796 func: Operand::Constant(Constant {
797 span: data.span,
798 ty: tcx.lookup_item_type(free_func).ty.subst(tcx, substs),
799 literal: Literal::Item {
800 def_id: free_func,
801 substs: substs
802 }
803 }),
804 args: vec![Operand::Consume(data.value.clone())],
805 destination: Some((unit_temp.clone(), target)),
806 cleanup: None
807 }
808 }