// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

/*!
Managing the scope stack. The scopes are tied to lexical scopes, so as
we descend the HAIR, we push a scope on the stack, translate its
contents, and then pop it off. Every scope is named by a
`CodeExtent`.

### SEME Regions

When pushing a new scope, we record the current point in the graph (a
basic block); this marks the entry to the scope. We then generate more
stuff in the control-flow graph. Whenever the scope is exited, either
via a `break` or `return` or just by fallthrough, that marks an exit
from the scope. Each lexical scope thus corresponds to a single-entry,
multiple-exit (SEME) region in the control-flow graph.
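
For illustration, here is a small sketch (hypothetical code with made-up
helpers `acquire`, `done` and `process`; it is not taken from this file).
The inner block is entered at a single point but can be left along two
different edges, so its scope maps to a SEME region with two exits:

```rust,ignore
loop {
    {
        let guard = acquire();   // entry point of the inner scope
        if done() { break; }     // exit #1: `break` jumps out of the scope
        process(&guard);
    }                            // exit #2: normal fallthrough
}
```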

For now, we keep a mapping from each `CodeExtent` to its
corresponding SEME region for later reference (see caveat in next
paragraph). This is because region scopes are tied to
them. Eventually, when we shift to non-lexical lifetimes, there should
be no need to remember this mapping.

There is one additional wrinkle, actually, that I wanted to hide from
you but duty compels me to mention. In the course of translating
matches, it sometimes happens that certain code (namely guards) gets
executed multiple times. This means that a single lexical scope may
in fact correspond to multiple, disjoint SEME regions. So in fact our
mapping is from one scope to a vector of SEME regions.
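
For instance, in a match over or-patterns with a guard (a hedged sketch,
not taken from this file), the guard may be compiled once per candidate
pattern, so the guard's single lexical scope can show up in more than one
disjoint region of the CFG:

```rust,ignore
match point {
    (0, x) | (x, 0) if x > 0 => println!("on an axis at {}", x),
    _ => println!("elsewhere"),
}
```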

### Drops

The primary purpose for scopes is to insert drops: while translating
the contents, we also accumulate lvalues that need to be dropped upon
exit from each scope. This is done by calling `schedule_drop`. Once a
drop is scheduled, whenever we branch out we will insert drops of all
those lvalues onto the outgoing edge. Note that we don't know the full
set of scheduled drops up front, and so whenever we exit from the
scope we only drop the values scheduled thus far. For example, consider
the scope S corresponding to this loop:

```rust,ignore
loop {
    let x = ...;
    if cond { break; }
    let y = ...;
}
```

When processing the `let x`, we will add one drop to the scope for
`x`. The break will then insert a drop for `x`. When we process `let
y`, we will add another drop (in fact, to a subscope, but let's ignore
that for now); any later drops would also drop `y`.

### Early exit

There are numerous "normal" ways to early exit a scope: `break`,
`continue`, `return` (panics are handled separately). Whenever an
early exit occurs, the method `exit_scope` is called. It is given the
current point in execution where the early exit occurs, as well as the
scope you want to branch to (note that all early exits go to some
enclosing scope). `exit_scope` will record this exit point and
also add all drops.
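
As a small illustration (hypothetical code, not taken from this file),
each `return` below is an early exit from every scope it is nested in, so
the corresponding outgoing edge has to carry the drops scheduled so far:

```rust,ignore
fn f(flag: bool) -> i32 {
    let a = String::from("a");
    {
        let b = String::from("b");
        if flag { return 1; }   // early exit: drops `b`, then `a`
    }                           // `b` dropped here on fallthrough
    return 0;                   // early exit from the body: drops `a`
}
```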

Panics are handled in a similar fashion, except that a panic always
returns out to the `DIVERGE_BLOCK`. To trigger a panic, simply call
`panic(p)` with the current point `p`. Or else you can call
`diverge_cleanup`, which will produce a block that you can branch to
which does the appropriate cleanup and then diverges. `panic(p)`
simply calls `diverge_cleanup()` and adds an edge from `p` to the
result.

### Loop scopes

In addition to the normal scope stack, we track a loop scope stack
that contains only loops. It tracks where a `break` and `continue`
should go to.
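
For example (an illustrative sketch, not from this file), with nested
loops each `break` and `continue` is resolved against this loop scope
stack; the labelled `break 'outer` consults an outer entry rather than
the topmost one:

```rust,ignore
'outer: for i in 0..10 {
    for j in 0..10 {
        if i * j > 20 { break 'outer; } // targets the outer loop's break block
        if j % 2 == 0 { continue; }     // targets the inner loop's continue block
        println!("{} {}", i, j);
    }
}
```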

*/

use build::{BlockAnd, BlockAndExtension, Builder, CFG};
use rustc::middle::region::{CodeExtent, CodeExtentData};
use rustc::middle::lang_items;
use rustc::ty::subst::{Kind, Subst};
use rustc::ty::{Ty, TyCtxt};
use rustc::mir::*;
use syntax_pos::Span;
use rustc_data_structures::indexed_vec::Idx;
use rustc_data_structures::fx::FxHashMap;

pub struct Scope<'tcx> {
    /// The visibility scope this scope was created in.
    visibility_scope: VisibilityScope,

    /// The extent of this scope within source code.
    extent: CodeExtent,

    /// Whether there's anything to do for the cleanup path, that is,
    /// when unwinding through this scope. This includes destructors,
    /// but not StorageDead statements, which don't get emitted at all
    /// for unwinding, for several reasons:
    ///  * clang doesn't emit llvm.lifetime.end for C++ unwinding
    ///  * LLVM's memory dependency analysis can't handle it atm
    ///  * polluting the cleanup MIR with StorageDead creates
    ///    landing pads even though there are no actual destructors
    ///  * freeing up stack space has no effect during unwinding
    needs_cleanup: bool,

    /// Set of lvalues to drop when exiting this scope. This starts
    /// out empty but grows as variables are declared during the
    /// building process. This is a stack, so we always drop from the
    /// end of the vector (top of the stack) first.
    drops: Vec<DropData<'tcx>>,

    /// A scope may only have one associated free, because:
    ///
    /// 1. We require a `free` to only be scheduled in the scope of
    ///    `EXPR` in `box EXPR`;
    /// 2. It only makes sense to have it translated into the diverge-path.
    ///
    /// This kind of drop will be run *after* all the regular drops
    /// scheduled onto this scope, because drops may have dependencies
    /// on the allocated memory.
    ///
    /// This is expected to go away once `box EXPR` becomes sugar
    /// for the placement protocol and gets desugared at some earlier
    /// stage.
    free: Option<FreeData<'tcx>>,

    /// The cache for the drop chain on “normal” exit into a particular BasicBlock.
    cached_exits: FxHashMap<(BasicBlock, CodeExtent), BasicBlock>,
}

struct DropData<'tcx> {
    /// span where drop obligation was incurred (typically where lvalue was declared)
    span: Span,

    /// lvalue to drop
    location: Lvalue<'tcx>,

    /// Whether this is a full value Drop, or just a StorageDead.
    kind: DropKind
}

enum DropKind {
    Value {
        /// The cached block for the cleanups-on-diverge path. This block
        /// contains code to run the current drop and all the preceding
        /// drops (i.e. those having lower index in Drop’s Scope drop
        /// array)
        cached_block: Option<BasicBlock>
    },
    Storage
}

struct FreeData<'tcx> {
    /// span where free obligation was incurred
    span: Span,

    /// Lvalue containing the allocated box.
    value: Lvalue<'tcx>,

    /// type of item for which the box was allocated (i.e. the T in Box<T>).
    item_ty: Ty<'tcx>,

    /// The cached block containing code to run the free. The block will also execute all the drops
    /// in the scope.
    cached_block: Option<BasicBlock>
}

#[derive(Clone, Debug)]
pub struct LoopScope<'tcx> {
    /// Extent of the loop
    pub extent: CodeExtent,
    /// Where the body of the loop begins
    pub continue_block: BasicBlock,
    /// Block to branch into when the loop terminates (either by being broken out of with
    /// `break`, or by its condition becoming false)
    pub break_block: BasicBlock,
    /// The destination of the loop expression itself (i.e. where to put the result of a `break`
    /// expression)
    pub break_destination: Lvalue<'tcx>,
}

impl<'tcx> Scope<'tcx> {
    /// Invalidate all the cached blocks in the scope.
    ///
    /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
    /// larger extent of code.
    ///
    /// `unwind` controls whether caches for the unwind branch are also invalidated.
    fn invalidate_cache(&mut self, unwind: bool) {
        self.cached_exits.clear();
        if !unwind { return; }
        for dropdata in &mut self.drops {
            if let DropKind::Value { ref mut cached_block } = dropdata.kind {
                *cached_block = None;
            }
        }
        if let Some(ref mut freedata) = self.free {
            freedata.cached_block = None;
        }
    }

    /// Returns the cached entrypoint for diverging exit from this scope.
    ///
    /// Precondition: the caches must be fully filled (i.e. diverge_cleanup is called) in order for
    /// this method to work correctly.
    fn cached_block(&self) -> Option<BasicBlock> {
        let mut drops = self.drops.iter().rev().filter_map(|data| {
            match data.kind {
                DropKind::Value { cached_block } => Some(cached_block),
                DropKind::Storage => None
            }
        });
        if let Some(cached_block) = drops.next() {
            Some(cached_block.expect("drop cache is not filled"))
        } else if let Some(ref data) = self.free {
            Some(data.cached_block.expect("free cache is not filled"))
        } else {
            None
        }
    }

    /// Given a span and this scope's visibility scope, make a SourceInfo.
    fn source_info(&self, span: Span) -> SourceInfo {
        SourceInfo {
            span: span,
            scope: self.visibility_scope
        }
    }
}

impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
    // Adding and removing scopes
    // ==========================
    /// Start a loop scope, which tracks where `continue` and `break`
    /// should branch to. See module comment for more details.
    ///
    /// The scope stays on the loop scope stack only while `f` builds
    /// the loop's contents.
    pub fn in_loop_scope<F>(&mut self,
                            loop_block: BasicBlock,
                            break_block: BasicBlock,
                            break_destination: Lvalue<'tcx>,
                            f: F)
        where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>)
    {
        let extent = self.topmost_scope();
        let loop_scope = LoopScope {
            extent: extent,
            continue_block: loop_block,
            break_block: break_block,
            break_destination: break_destination,
        };
        self.loop_scopes.push(loop_scope);
        f(self);
        let loop_scope = self.loop_scopes.pop().unwrap();
        assert!(loop_scope.extent == extent);
    }

    /// Convenience wrapper that pushes a scope and then executes `f`
    /// to build its contents, popping the scope afterwards.
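    ///
    /// A hypothetical caller sketch (the names `this`, `expr_extent`, `block`,
    /// `some_block` and `some_value` are illustrative, not taken from this crate):
    ///
    /// ```rust,ignore
    /// let value = unpack!(block = this.in_scope(expr_extent, block, |this| {
    ///     // ... build the scope's contents here, scheduling drops as needed,
    ///     // then hand back the current block together with the produced value
    ///     some_block.and(some_value)
    /// }));
    /// ```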
    pub fn in_scope<F, R>(&mut self, extent: CodeExtent, mut block: BasicBlock, f: F) -> BlockAnd<R>
        where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd<R>
    {
        debug!("in_scope(extent={:?}, block={:?})", extent, block);
        self.push_scope(extent);
        let rv = unpack!(block = f(self));
        unpack!(block = self.pop_scope(extent, block));
        debug!("in_scope: exiting extent={:?} block={:?}", extent, block);
        block.and(rv)
    }

    /// Push a scope onto the stack. You can then build code in this
    /// scope and call `pop_scope` afterwards. Note that these two
    /// calls must be paired; using `in_scope` as a convenience
    /// wrapper may be preferable.
    pub fn push_scope(&mut self, extent: CodeExtent) {
        debug!("push_scope({:?})", extent);
        let vis_scope = self.visibility_scope;
        self.scopes.push(Scope {
            visibility_scope: vis_scope,
            extent: extent,
            needs_cleanup: false,
            drops: vec![],
            free: None,
            cached_exits: FxHashMap()
        });
    }

    /// Pops a scope, which should have extent `extent`, adding any
    /// drops onto the end of `block` that are needed. This must
    /// match 1-to-1 with `push_scope`.
    pub fn pop_scope(&mut self,
                     extent: CodeExtent,
                     mut block: BasicBlock)
                     -> BlockAnd<()> {
        debug!("pop_scope({:?}, {:?})", extent, block);
        // We need to have `cached_block`s available for all the drops, so we call diverge_cleanup
        // to make sure all the `cached_block`s are filled in.
        self.diverge_cleanup();
        let scope = self.scopes.pop().unwrap();
        assert_eq!(scope.extent, extent);
        unpack!(block = build_scope_drops(&mut self.cfg,
                                          &scope,
                                          &self.scopes,
                                          block,
                                          self.arg_count));
        block.unit()
    }


    /// Branch out of `block` to `target`, exiting all scopes up to
    /// and including `extent`. This will insert whatever drops are
    /// needed, as well as tracking this exit for the SEME region. See
    /// module comment for details.
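    ///
    /// For instance, translating a `break` boils down to something like the
    /// following sketch (illustrative only; `this`, `span`, `block` and
    /// `loop_extent` are assumed to be in scope at the call site):
    ///
    /// ```rust,ignore
    /// let loop_scope = this.find_loop_scope(span, loop_extent).clone();
    /// this.exit_scope(span, loop_scope.extent, block, loop_scope.break_block);
    /// ```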
    pub fn exit_scope(&mut self,
                      span: Span,
                      extent: CodeExtent,
                      mut block: BasicBlock,
                      target: BasicBlock) {
        debug!("exit_scope(extent={:?}, block={:?}, target={:?})", extent, block, target);
        let scope_count = 1 + self.scopes.iter().rev().position(|scope| scope.extent == extent)
                                                      .unwrap_or_else(||{
            span_bug!(span, "extent {:?} does not enclose", extent)
        });
        let len = self.scopes.len();
        assert!(scope_count < len, "should not use `exit_scope` to pop ALL scopes");
        let tmp = self.get_unit_temp();
        {
            let mut rest = &mut self.scopes[(len - scope_count)..];
            while let Some((scope, rest_)) = {rest}.split_last_mut() {
                rest = rest_;
                block = if let Some(&e) = scope.cached_exits.get(&(target, extent)) {
                    self.cfg.terminate(block, scope.source_info(span),
                                       TerminatorKind::Goto { target: e });
                    return;
                } else {
                    let b = self.cfg.start_new_block();
                    self.cfg.terminate(block, scope.source_info(span),
                                       TerminatorKind::Goto { target: b });
                    scope.cached_exits.insert((target, extent), b);
                    b
                };
                unpack!(block = build_scope_drops(&mut self.cfg,
                                                  scope,
                                                  rest,
                                                  block,
                                                  self.arg_count));
                if let Some(ref free_data) = scope.free {
                    let next = self.cfg.start_new_block();
                    let free = build_free(self.hir.tcx(), &tmp, free_data, next);
                    self.cfg.terminate(block, scope.source_info(span), free);
                    block = next;
                }
            }
        }
        let scope = &self.scopes[len - scope_count];
        self.cfg.terminate(block, scope.source_info(span),
                           TerminatorKind::Goto { target: target });
    }

    /// Creates a new visibility scope, nested in the current one.
    pub fn new_visibility_scope(&mut self, span: Span) -> VisibilityScope {
        let parent = self.visibility_scope;
        let scope = VisibilityScope::new(self.visibility_scopes.len());
        self.visibility_scopes.push(VisibilityScopeData {
            span: span,
            parent_scope: Some(parent),
        });
        scope
    }

    // Finding scopes
    // ==============
    /// Finds the loop scope for a given label. This is used for
    /// resolving `break` and `continue`.
    pub fn find_loop_scope(&mut self,
                           span: Span,
                           label: CodeExtent)
                           -> &mut LoopScope<'tcx> {
        // find the loop-scope with the correct id
        self.loop_scopes.iter_mut()
            .rev()
            .filter(|loop_scope| loop_scope.extent == label)
            .next()
            .unwrap_or_else(|| span_bug!(span, "no enclosing loop scope found?"))
    }

    /// Given a span and the current visibility scope, make a SourceInfo.
    pub fn source_info(&self, span: Span) -> SourceInfo {
        SourceInfo {
            span: span,
            scope: self.visibility_scope
        }
    }

    /// Returns the extent of the scope which should be exited by a
    /// return.
    pub fn extent_of_return_scope(&self) -> CodeExtent {
        // The outermost scope (`scopes[0]`) will be the `CallSiteScope`.
        // We want `scopes[1]`, which is the `ParameterScope`.
        assert!(self.scopes.len() >= 2);
        assert!(match self.hir.tcx().region_maps.code_extent_data(self.scopes[1].extent) {
            CodeExtentData::ParameterScope { .. } => true,
            _ => false,
        });
        self.scopes[1].extent
    }

    /// Returns the topmost active scope, which is known to be alive until
    /// the next scope expression.
    pub fn topmost_scope(&self) -> CodeExtent {
        self.scopes.last().expect("topmost_scope: no scopes present").extent
    }

    // Scheduling drops
    // ================
    /// Indicates that `lvalue` should be dropped on exit from
    /// `extent`.
    pub fn schedule_drop(&mut self,
                         span: Span,
                         extent: CodeExtent,
                         lvalue: &Lvalue<'tcx>,
                         lvalue_ty: Ty<'tcx>) {
        let needs_drop = self.hir.needs_drop(lvalue_ty);
        let drop_kind = if needs_drop {
            DropKind::Value { cached_block: None }
        } else {
            // Only temps and vars need their storage dead.
            match *lvalue {
                Lvalue::Local(index) if index.index() > self.arg_count => DropKind::Storage,
                _ => return
            }
        };

        for scope in self.scopes.iter_mut().rev() {
            let this_scope = scope.extent == extent;
            // When building drops, we try to cache chains of drops in such a way that these
            // drops can be reused by later drops which branch into the cached (already built)
            // blocks. This, however, means that whenever we add a drop into a scope which already
            // had some blocks built (and thus, cached) for it, we must invalidate all caches which
            // might branch into the scope which had a drop just added to it. This is necessary,
            // because otherwise some other code might use the cache to branch into an already
            // built chain of drops, essentially ignoring the newly added drop.
            //
            // For example, consider two scopes with a drop in each. These are built and
            // thus the caches are filled:
            //
            // +--------------------------------------------------------+
            // | +---------------------------------+                    |
            // | | +--------+     +-------------+  |  +---------------+ |
            // | | | return | <-+ | drop(outer) | <-+ |  drop(middle) | |
            // | | +--------+     +-------------+  |  +---------------+ |
            // | +------------|outer_scope cache|--+                    |
            // +------------------------------|middle_scope cache|------+
            //
            // Now, a new, inner-most scope is added along with a new drop into both the inner-most
            // and the outer-most scope:
            //
            // +------------------------------------------------------------+
            // | +----------------------------------+                       |
            // | | +--------+      +-------------+  |   +---------------+   | +-------------+
            // | | | return | <+   | drop(new)   | <-+  |  drop(middle) | <--+| drop(inner) |
            // | | +--------+  |   | drop(outer) |  |   +---------------+   | +-------------+
            // | |             +-+ +-------------+  |                       |
            // | +---|invalid outer_scope cache|----+                       |
            // +-----------------|invalid middle_scope cache|---------------+
            //
            // If, when adding `drop(new)`, we do not invalidate the cached blocks for both
            // outer_scope and middle_scope, then, when building drops for the inner (right-most)
            // scope, the old cached blocks, without `drop(new)`, will get used, producing the
            // wrong results.
            //
            // The cache and its invalidation for the unwind branch is somewhat special. That
            // cache is per-drop, rather than per-scope, which has several different implications.
            // Adding a new drop into a scope will not invalidate cached blocks of the prior drops
            // in the scope. That is true, because none of the already existing drops will have an
            // edge into a block with the newly added drop.
            //
            // Note that this code iterates scopes from the inner-most to the outer-most,
            // invalidating caches of each scope visited. This way the bare minimum of
            // caches gets invalidated, i.e. if a new drop is added into the middle scope, the
            // cache of the outer scope stays intact.
            let invalidate_unwind = needs_drop && !this_scope;
            scope.invalidate_cache(invalidate_unwind);
            if this_scope {
                if let DropKind::Value { .. } = drop_kind {
                    scope.needs_cleanup = true;
                }
                let tcx = self.hir.tcx();
                let extent_span = extent.span(&tcx.region_maps, &tcx.hir).unwrap();
                // Attribute scope exit drops to scope's closing brace
                let scope_end = Span { lo: extent_span.hi, .. extent_span};
                scope.drops.push(DropData {
                    span: scope_end,
                    location: lvalue.clone(),
                    kind: drop_kind
                });
                return;
            }
        }
        span_bug!(span, "extent {:?} not in scope to drop {:?}", extent, lvalue);
    }

    /// Schedule dropping of a not-yet-fully-initialised box.
    ///
    /// This cleanup will only be translated into the unwind branch.
    /// The extent should be for the `EXPR` inside `box EXPR`.
    /// There may only be one “free” scheduled in any given scope.
    pub fn schedule_box_free(&mut self,
                             span: Span,
                             extent: CodeExtent,
                             value: &Lvalue<'tcx>,
                             item_ty: Ty<'tcx>) {
        for scope in self.scopes.iter_mut().rev() {
            // See the comment in schedule_drop above. The primary difference is that we
            // invalidate the unwind blocks unconditionally. That’s because the box free may be
            // considered the outer-most cleanup within the scope.
            scope.invalidate_cache(true);
            if scope.extent == extent {
                assert!(scope.free.is_none(), "scope already has a scheduled free!");
                scope.needs_cleanup = true;
                scope.free = Some(FreeData {
                    span: span,
                    value: value.clone(),
                    item_ty: item_ty,
                    cached_block: None
                });
                return;
            }
        }
        span_bug!(span, "extent {:?} not in scope to free {:?}", extent, value);
    }

    // Other
    // =====
    /// Creates a path that performs all required cleanup for unwinding.
    ///
    /// This path terminates in Resume. Returns the start of the path.
    /// See module comment for more details. None indicates there’s no
    /// cleanup to do at this point.
    pub fn diverge_cleanup(&mut self) -> Option<BasicBlock> {
        if !self.scopes.iter().any(|scope| scope.needs_cleanup) {
            return None;
        }
        assert!(!self.scopes.is_empty()); // or `any` above would be false

        let unit_temp = self.get_unit_temp();
        let Builder { ref mut hir, ref mut cfg, ref mut scopes,
                      ref mut cached_resume_block, .. } = *self;

        // Build up the drops in **reverse** order. The end result will
        // look like:
        //
        //    scopes[n] -> scopes[n-1] -> ... -> scopes[0]
        //
        // However, we build this in **reverse order**. That is, we
        // process scopes[0], then scopes[1], etc, pointing each one at
        // the result generated from the one before. Along the way, we
        // store caches. If everything is cached, we'll just walk right
        // to left reading the cached results but never create anything.

        // To start, create the resume terminator.
        let mut target = if let Some(target) = *cached_resume_block {
            target
        } else {
            let resumeblk = cfg.start_new_cleanup_block();
            cfg.terminate(resumeblk,
                          scopes[0].source_info(self.fn_span),
                          TerminatorKind::Resume);
            *cached_resume_block = Some(resumeblk);
            resumeblk
        };

        for scope in scopes.iter_mut().filter(|s| s.needs_cleanup) {
            target = build_diverge_scope(hir.tcx(), cfg, &unit_temp, scope, target);
        }
        Some(target)
    }

    /// Utility function for *non*-scope code to build their own drops
    pub fn build_drop(&mut self,
                      block: BasicBlock,
                      span: Span,
                      location: Lvalue<'tcx>,
                      ty: Ty<'tcx>) -> BlockAnd<()> {
        if !self.hir.needs_drop(ty) {
            return block.unit();
        }
        let source_info = self.source_info(span);
        let next_target = self.cfg.start_new_block();
        let diverge_target = self.diverge_cleanup();
        self.cfg.terminate(block, source_info,
                           TerminatorKind::Drop {
                               location: location,
                               target: next_target,
                               unwind: diverge_target,
                           });
        next_target.unit()
    }

    /// Utility function for *non*-scope code to build their own drops
    pub fn build_drop_and_replace(&mut self,
                                  block: BasicBlock,
                                  span: Span,
                                  location: Lvalue<'tcx>,
                                  value: Operand<'tcx>) -> BlockAnd<()> {
        let source_info = self.source_info(span);
        let next_target = self.cfg.start_new_block();
        let diverge_target = self.diverge_cleanup();
        self.cfg.terminate(block, source_info,
                           TerminatorKind::DropAndReplace {
                               location: location,
                               value: value,
                               target: next_target,
                               unwind: diverge_target,
                           });
        next_target.unit()
    }

    /// Create an Assert terminator and return the success block.
    /// If the boolean condition operand is not the expected value,
    /// a runtime panic will be caused with the given message.
    pub fn assert(&mut self, block: BasicBlock,
                  cond: Operand<'tcx>,
                  expected: bool,
                  msg: AssertMessage<'tcx>,
                  span: Span)
                  -> BasicBlock {
        let source_info = self.source_info(span);

        let success_block = self.cfg.start_new_block();
        let cleanup = self.diverge_cleanup();

        self.cfg.terminate(block, source_info,
                           TerminatorKind::Assert {
                               cond: cond,
                               expected: expected,
                               msg: msg,
                               target: success_block,
                               cleanup: cleanup
                           });

        success_block
    }
}

/// Builds drops for pop_scope and exit_scope.
fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
                           scope: &Scope<'tcx>,
                           earlier_scopes: &[Scope<'tcx>],
                           mut block: BasicBlock,
                           arg_count: usize)
                           -> BlockAnd<()> {
    let mut iter = scope.drops.iter().rev().peekable();
    while let Some(drop_data) = iter.next() {
        let source_info = scope.source_info(drop_data.span);
        if let DropKind::Value { .. } = drop_data.kind {
            // Try to find the next block with its cached block
            // for us to diverge into in case the drop panics.
            let on_diverge = iter.peek().iter().filter_map(|dd| {
                match dd.kind {
                    DropKind::Value { cached_block } => cached_block,
                    DropKind::Storage => None
                }
            }).next();
            // If there is no `cached_block` within the current scope,
            // we must look for one in the enclosing scope.
            let on_diverge = on_diverge.or_else(||{
                earlier_scopes.iter().rev().flat_map(|s| s.cached_block()).next()
            });
            let next = cfg.start_new_block();
            cfg.terminate(block, source_info, TerminatorKind::Drop {
                location: drop_data.location.clone(),
                target: next,
                unwind: on_diverge
            });
            block = next;
        }
        match drop_data.kind {
            DropKind::Value { .. } |
            DropKind::Storage => {
                // Only temps and vars need their storage dead.
                match drop_data.location {
                    Lvalue::Local(index) if index.index() > arg_count => {}
                    _ => continue
                }

                cfg.push(block, Statement {
                    source_info: source_info,
                    kind: StatementKind::StorageDead(drop_data.location.clone())
                });
            }
        }
    }
    block.unit()
}

fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
                                       cfg: &mut CFG<'tcx>,
                                       unit_temp: &Lvalue<'tcx>,
                                       scope: &mut Scope<'tcx>,
                                       mut target: BasicBlock)
                                       -> BasicBlock
{
    // Build up the drops in **reverse** order. The end result will
    // look like:
    //
    //    [drops[n]] -...-> [drops[0]] -> [Free] -> [target]
    //    |                                       |
    //    +---------------------------------------+
    //                 code for scope
    //
    // The code in this function reads from right to left. At each
    // point, we check for cached blocks representing the
    // remainder. If everything is cached, we'll just walk right to
    // left reading the cached results but never create anything.

    let visibility_scope = scope.visibility_scope;
    let source_info = |span| SourceInfo {
        span: span,
        scope: visibility_scope
    };

    // First, build up any free.
    if let Some(ref mut free_data) = scope.free {
        target = if let Some(cached_block) = free_data.cached_block {
            cached_block
        } else {
            let into = cfg.start_new_cleanup_block();
            cfg.terminate(into, source_info(free_data.span),
                          build_free(tcx, unit_temp, free_data, target));
            free_data.cached_block = Some(into);
            into
        };
    }

    // Next, build up the drops. Here we iterate the vector in
    // *forward* order, so that we generate drops[0] first (right to
    // left in the diagram above).
    for drop_data in &mut scope.drops {
        // Only full value drops are emitted in the diverging path,
        // not StorageDead.
        let cached_block = match drop_data.kind {
            DropKind::Value { ref mut cached_block } => cached_block,
            DropKind::Storage => continue
        };
        target = if let Some(cached_block) = *cached_block {
            cached_block
        } else {
            let block = cfg.start_new_cleanup_block();
            cfg.terminate(block, source_info(drop_data.span),
                          TerminatorKind::Drop {
                              location: drop_data.location.clone(),
                              target: target,
                              unwind: None
                          });
            *cached_block = Some(block);
            block
        };
    }

    target
}

fn build_free<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
                              unit_temp: &Lvalue<'tcx>,
                              data: &FreeData<'tcx>,
                              target: BasicBlock)
                              -> TerminatorKind<'tcx> {
    let free_func = tcx.require_lang_item(lang_items::BoxFreeFnLangItem);
    let substs = tcx.intern_substs(&[Kind::from(data.item_ty)]);
    TerminatorKind::Call {
        func: Operand::Constant(Constant {
            span: data.span,
            ty: tcx.item_type(free_func).subst(tcx, substs),
            literal: Literal::Item {
                def_id: free_func,
                substs: substs
            }
        }),
        args: vec![Operand::Consume(data.value.clone())],
        destination: Some((unit_temp.clone(), target)),
        cleanup: None
    }
}