1 // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
12 Managing the scope stack. The scopes are tied to lexical scopes, so as
13 we descend the HAIR, we push a scope on the stack, translate its
14 contents, and then pop it off. Every scope is named by a `CodeExtent`.
19 When pushing a new scope, we record the current point in the graph (a
20 basic block); this marks the entry to the scope. We then generate more
21 stuff in the control-flow graph. Whenever the scope is exited, either
22 via a `break` or `return` or just by fallthrough, that marks an exit
23 from the scope. Each lexical scope thus corresponds to a single-entry,
24 multiple-exit (SEME) region in the control-flow graph.
26 For now, we keep a mapping from each `CodeExtent` to its
27 corresponding SEME region for later reference (see caveat in next
28 paragraph). This is because region scopes are tied to
29 them. Eventually, when we shift to non-lexical lifetimes, there should
30 be no need to remember this mapping.
32 There is one additional wrinkle, actually, that I wanted to hide from
33 you but duty compels me to mention. In the course of translating
34 matches, it sometimes happens that certain code (namely guards) gets
35 executed multiple times. This means that a single lexical scope may
36 in fact correspond to multiple, disjoint SEME regions. So in fact our
37 mapping is from one scope to a vector of SEME regions.
41 The primary purpose for scopes is to insert drops: while translating
42 the contents, we also accumulate lvalues that need to be dropped upon
43 exit from each scope. This is done by calling `schedule_drop`. Once a
44 drop is scheduled, whenever we branch out we will insert drops of all
45 those lvalues onto the outgoing edge. Note that we don't know the full
46 set of scheduled drops up front, and so whenever we exit from the
47 scope we only drop the values scheduled thus far. For example, consider
48 the scope S corresponding to this loop:
58 When processing the `let x`, we will add one drop to the scope for
59 `x`. The break will then insert a drop for `x`. When we process `let
60 y`, we will add another drop (in fact, to a subscope, but let's ignore
61 that for now); any later drops would also drop `y`.
65 There are numerous "normal" ways to early exit a scope: `break`,
66 `continue`, `return` (panics are handled separately). Whenever an
67 early exit occurs, the method `exit_scope` is called. It is given the
68 current point in execution where the early exit occurs, as well as the
69 scope you want to branch to (note that all early exits go to some
70 other enclosing scope). `exit_scope` will record this exit point and
73 Panics are handled in a similar fashion, except that a panic always
74 returns out to the `DIVERGE_BLOCK`. To trigger a panic, simply call
75 `panic(p)` with the current point `p`. Or else you can call
76 `diverge_cleanup`, which will produce a block that you can branch to
77 which does the appropriate cleanup and then diverges. `panic(p)`
78 simply calls `diverge_cleanup()` and adds an edge from `p` to the
83 In addition to the normal scope stack, we track a loop scope stack
84 that contains only loops. It tracks where a `break` and `continue`
89 use build
::{BlockAnd, BlockAndExtension, Builder, CFG, ScopeAuxiliary, ScopeId}
;
90 use rustc
::middle
::region
::{CodeExtent, CodeExtentData}
;
91 use rustc
::middle
::lang_items
;
92 use rustc
::ty
::subst
::{Substs, Subst, VecPerParamSpace}
;
93 use rustc
::ty
::{Ty, TyCtxt}
;
94 use rustc
::mir
::repr
::*;
96 use rustc_data_structures
::indexed_vec
::Idx
;
97 use rustc_data_structures
::fnv
::FnvHashMap
;
99 pub struct Scope
<'tcx
> {
100 /// the scope-id within the scope_auxiliary
103 /// The visibility scope this scope was created in.
104 visibility_scope
: VisibilityScope
,
106 /// the extent of this scope within source code; also stored in
107 /// `ScopeAuxiliary`, but kept here for convenience
110 /// Whether there's anything to do for the cleanup path, that is,
111 /// when unwinding through this scope. This includes destructors,
112 /// but not StorageDead statements, which don't get emitted at all
113 /// for unwinding, for several reasons:
114 /// * clang doesn't emit llvm.lifetime.end for C++ unwinding
115 /// * LLVM's memory dependency analysis can't handle it atm
116 /// * pollutting the cleanup MIR with StorageDead creates
117 /// landing pads even though there's no actual destructors
118 /// * freeing up stack space has no effect during unwinding
121 /// set of lvalues to drop when exiting this scope. This starts
122 /// out empty but grows as variables are declared during the
123 /// building process. This is a stack, so we always drop from the
124 /// end of the vector (top of the stack) first.
125 drops
: Vec
<DropData
<'tcx
>>,
127 /// A scope may only have one associated free, because:
129 /// 1. We require a `free` to only be scheduled in the scope of
130 /// `EXPR` in `box EXPR`;
131 /// 2. It only makes sense to have it translated into the diverge-path.
133 /// This kind of drop will be run *after* all the regular drops
134 /// scheduled onto this scope, because drops may have dependencies
135 /// on the allocated memory.
137 /// This is expected to go away once `box EXPR` becomes a sugar
138 /// for placement protocol and gets desugared in some earlier
140 free
: Option
<FreeData
<'tcx
>>,
142 /// The cache for drop chain on “normal” exit into a particular BasicBlock.
143 cached_exits
: FnvHashMap
<(BasicBlock
, CodeExtent
), BasicBlock
>,
146 struct DropData
<'tcx
> {
147 /// span where drop obligation was incurred (typically where lvalue was declared)
151 location
: Lvalue
<'tcx
>,
153 /// Whether this is a full value Drop, or just a StorageDead.
159 /// The cached block for the cleanups-on-diverge path. This block
160 /// contains code to run the current drop and all the preceding
161 /// drops (i.e. those having lower index in Drop’s Scope drop
163 cached_block
: Option
<BasicBlock
>
168 struct FreeData
<'tcx
> {
169 /// span where free obligation was incurred
172 /// Lvalue containing the allocated box.
175 /// type of item for which the box was allocated for (i.e. the T in Box<T>).
178 /// The cached block containing code to run the free. The block will also execute all the drops
180 cached_block
: Option
<BasicBlock
>
183 #[derive(Clone, Debug)]
184 pub struct LoopScope
{
185 /// Extent of the loop
186 pub extent
: CodeExtent
,
187 /// Where the body of the loop begins
188 pub continue_block
: BasicBlock
,
189 /// Block to branch into when the loop terminates (either by being `break`-en out from, or by
190 /// having its condition to become false)
191 pub break_block
: BasicBlock
,
192 /// Indicates the reachability of the break_block for this loop
193 pub might_break
: bool
196 impl<'tcx
> Scope
<'tcx
> {
197 /// Invalidate all the cached blocks in the scope.
199 /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
200 /// larger extent of code.
202 /// `unwind` controls whether caches for the unwind branch are also invalidated.
203 fn invalidate_cache(&mut self, unwind
: bool
) {
204 self.cached_exits
.clear();
205 if !unwind { return; }
206 for dropdata
in &mut self.drops
{
207 if let DropKind
::Value { ref mut cached_block }
= dropdata
.kind
{
208 *cached_block
= None
;
211 if let Some(ref mut freedata
) = self.free
{
212 freedata
.cached_block
= None
;
216 /// Returns the cached entrypoint for diverging exit from this scope.
218 /// Precondition: the caches must be fully filled (i.e. diverge_cleanup is called) in order for
219 /// this method to work correctly.
220 fn cached_block(&self) -> Option
<BasicBlock
> {
221 let mut drops
= self.drops
.iter().rev().filter_map(|data
| {
223 DropKind
::Value { cached_block }
=> Some(cached_block
),
224 DropKind
::Storage
=> None
227 if let Some(cached_block
) = drops
.next() {
228 Some(cached_block
.expect("drop cache is not filled"))
229 } else if let Some(ref data
) = self.free
{
230 Some(data
.cached_block
.expect("free cache is not filled"))
236 /// Given a span and this scope's visibility scope, make a SourceInfo.
237 fn source_info(&self, span
: Span
) -> SourceInfo
{
240 scope
: self.visibility_scope
245 impl<'a
, 'gcx
, 'tcx
> Builder
<'a
, 'gcx
, 'tcx
> {
246 // Adding and removing scopes
247 // ==========================
248 /// Start a loop scope, which tracks where `continue` and `break`
249 /// should branch to. See module comment for more details.
251 /// Returns the might_break attribute of the LoopScope used.
252 pub fn in_loop_scope
<F
>(&mut self,
253 loop_block
: BasicBlock
,
254 break_block
: BasicBlock
,
257 where F
: FnOnce(&mut Builder
<'a
, 'gcx
, 'tcx
>)
259 let extent
= self.extent_of_innermost_scope();
260 let loop_scope
= LoopScope
{
261 extent
: extent
.clone(),
262 continue_block
: loop_block
,
263 break_block
: break_block
,
266 self.loop_scopes
.push(loop_scope
);
268 let loop_scope
= self.loop_scopes
.pop().unwrap();
269 assert
!(loop_scope
.extent
== extent
);
270 loop_scope
.might_break
273 /// Convenience wrapper that pushes a scope and then executes `f`
274 /// to build its contents, popping the scope afterwards.
275 pub fn in_scope
<F
, R
>(&mut self, extent
: CodeExtent
, mut block
: BasicBlock
, f
: F
) -> BlockAnd
<R
>
276 where F
: FnOnce(&mut Builder
<'a
, 'gcx
, 'tcx
>) -> BlockAnd
<R
>
278 debug
!("in_scope(extent={:?}, block={:?})", extent
, block
);
279 self.push_scope(extent
, block
);
280 let rv
= unpack
!(block
= f(self));
281 unpack
!(block
= self.pop_scope(extent
, block
));
282 debug
!("in_scope: exiting extent={:?} block={:?}", extent
, block
);
286 /// Push a scope onto the stack. You can then build code in this
287 /// scope and call `pop_scope` afterwards. Note that these two
288 /// calls must be paired; using `in_scope` as a convenience
289 /// wrapper maybe preferable.
290 pub fn push_scope(&mut self, extent
: CodeExtent
, entry
: BasicBlock
) {
291 debug
!("push_scope({:?})", extent
);
292 let id
= ScopeId
::new(self.scope_auxiliary
.len());
293 let vis_scope
= self.visibility_scope
;
294 self.scopes
.push(Scope
{
296 visibility_scope
: vis_scope
,
298 needs_cleanup
: false,
301 cached_exits
: FnvHashMap()
303 self.scope_auxiliary
.push(ScopeAuxiliary
{
305 dom
: self.cfg
.current_location(entry
),
310 /// Pops a scope, which should have extent `extent`, adding any
311 /// drops onto the end of `block` that are needed. This must
312 /// match 1-to-1 with `push_scope`.
313 pub fn pop_scope(&mut self,
315 mut block
: BasicBlock
)
317 debug
!("pop_scope({:?}, {:?})", extent
, block
);
318 // We need to have `cached_block`s available for all the drops, so we call diverge_cleanup
319 // to make sure all the `cached_block`s are filled in.
320 self.diverge_cleanup();
321 let scope
= self.scopes
.pop().unwrap();
322 assert_eq
!(scope
.extent
, extent
);
323 unpack
!(block
= build_scope_drops(&mut self.cfg
, &scope
, &self.scopes
, block
));
324 self.scope_auxiliary
[scope
.id
]
326 .push(self.cfg
.current_location(block
));
331 /// Branch out of `block` to `target`, exiting all scopes up to
332 /// and including `extent`. This will insert whatever drops are
333 /// needed, as well as tracking this exit for the SEME region. See
334 /// module comment for details.
335 pub fn exit_scope(&mut self,
338 mut block
: BasicBlock
,
339 target
: BasicBlock
) {
340 debug
!("exit_scope(extent={:?}, block={:?}, target={:?})", extent
, block
, target
);
341 let scope_count
= 1 + self.scopes
.iter().rev().position(|scope
| scope
.extent
== extent
)
343 span_bug
!(span
, "extent {:?} does not enclose", extent
)
345 let len
= self.scopes
.len();
346 assert
!(scope_count
< len
, "should not use `exit_scope` to pop ALL scopes");
347 let tmp
= self.get_unit_temp();
349 let mut rest
= &mut self.scopes
[(len
- scope_count
)..];
350 while let Some((scope
, rest_
)) = {rest}
.split_last_mut() {
352 block
= if let Some(&e
) = scope
.cached_exits
.get(&(target
, extent
)) {
353 self.cfg
.terminate(block
, scope
.source_info(span
),
354 TerminatorKind
::Goto { target: e }
);
357 let b
= self.cfg
.start_new_block();
358 self.cfg
.terminate(block
, scope
.source_info(span
),
359 TerminatorKind
::Goto { target: b }
);
360 scope
.cached_exits
.insert((target
, extent
), b
);
363 unpack
!(block
= build_scope_drops(&mut self.cfg
, scope
, rest
, block
));
364 if let Some(ref free_data
) = scope
.free
{
365 let next
= self.cfg
.start_new_block();
366 let free
= build_free(self.hir
.tcx(), &tmp
, free_data
, next
);
367 self.cfg
.terminate(block
, scope
.source_info(span
), free
);
370 self.scope_auxiliary
[scope
.id
]
372 .push(self.cfg
.current_location(block
));
375 let scope
= &self.scopes
[len
- scope_count
];
376 self.cfg
.terminate(block
, scope
.source_info(span
),
377 TerminatorKind
::Goto { target: target }
);
380 /// Creates a new visibility scope, nested in the current one.
381 pub fn new_visibility_scope(&mut self, span
: Span
) -> VisibilityScope
{
382 let parent
= self.visibility_scope
;
383 let scope
= VisibilityScope
::new(self.visibility_scopes
.len());
384 self.visibility_scopes
.push(VisibilityScopeData
{
386 parent_scope
: Some(parent
),
393 /// Finds the loop scope for a given label. This is used for
394 /// resolving `break` and `continue`.
395 pub fn find_loop_scope(&mut self,
397 label
: Option
<CodeExtent
>)
399 let loop_scopes
= &mut self.loop_scopes
;
402 // no label? return the innermost loop scope
403 loop_scopes
.iter_mut().rev().next()
406 // otherwise, find the loop-scope with the correct id
407 loop_scopes
.iter_mut()
409 .filter(|loop_scope
| loop_scope
.extent
== label
)
412 }.unwrap_or_else(|| span_bug
!(span
, "no enclosing loop scope found?"))
415 /// Given a span and the current visibility scope, make a SourceInfo.
416 pub fn source_info(&self, span
: Span
) -> SourceInfo
{
419 scope
: self.visibility_scope
423 pub fn extent_of_innermost_scope(&self) -> CodeExtent
{
424 self.scopes
.last().map(|scope
| scope
.extent
).unwrap()
427 /// Returns the extent of the scope which should be exited by a
429 pub fn extent_of_return_scope(&self) -> CodeExtent
{
430 // The outermost scope (`scopes[0]`) will be the `CallSiteScope`.
431 // We want `scopes[1]`, which is the `ParameterScope`.
432 assert
!(self.scopes
.len() >= 2);
433 assert
!(match self.hir
.tcx().region_maps
.code_extent_data(self.scopes
[1].extent
) {
434 CodeExtentData
::ParameterScope { .. }
=> true,
437 self.scopes
[1].extent
442 /// Indicates that `lvalue` should be dropped on exit from
444 pub fn schedule_drop(&mut self,
447 lvalue
: &Lvalue
<'tcx
>,
448 lvalue_ty
: Ty
<'tcx
>) {
449 let needs_drop
= self.hir
.needs_drop(lvalue_ty
);
450 let drop_kind
= if needs_drop
{
451 DropKind
::Value { cached_block: None }
453 // Only temps and vars need their storage dead.
455 Lvalue
::Temp(_
) | Lvalue
::Var(_
) => DropKind
::Storage
,
460 for scope
in self.scopes
.iter_mut().rev() {
461 let this_scope
= scope
.extent
== extent
;
462 // When building drops, we try to cache chains of drops in such a way so these drops
463 // could be reused by the drops which would branch into the cached (already built)
464 // blocks. This, however, means that whenever we add a drop into a scope which already
465 // had some blocks built (and thus, cached) for it, we must invalidate all caches which
466 // might branch into the scope which had a drop just added to it. This is necessary,
467 // because otherwise some other code might use the cache to branch into already built
468 // chain of drops, essentially ignoring the newly added drop.
470 // For example consider there’s two scopes with a drop in each. These are built and
471 // thus the caches are filled:
473 // +--------------------------------------------------------+
474 // | +---------------------------------+ |
475 // | | +--------+ +-------------+ | +---------------+ |
476 // | | | return | <-+ | drop(outer) | <-+ | drop(middle) | |
477 // | | +--------+ +-------------+ | +---------------+ |
478 // | +------------|outer_scope cache|--+ |
479 // +------------------------------|middle_scope cache|------+
481 // Now, a new, inner-most scope is added along with a new drop into both inner-most and
482 // outer-most scopes:
484 // +------------------------------------------------------------+
485 // | +----------------------------------+ |
486 // | | +--------+ +-------------+ | +---------------+ | +-------------+
487 // | | | return | <+ | drop(new) | <-+ | drop(middle) | <--+| drop(inner) |
488 // | | +--------+ | | drop(outer) | | +---------------+ | +-------------+
489 // | | +-+ +-------------+ | |
490 // | +---|invalid outer_scope cache|----+ |
491 // +----=----------------|invalid middle_scope cache|-----------+
493 // If, when adding `drop(new)` we do not invalidate the cached blocks for both
494 // outer_scope and middle_scope, then, when building drops for the inner (right-most)
495 // scope, the old, cached blocks, without `drop(new)` will get used, producing the
498 // The cache and its invalidation for unwind branch is somewhat special. The cache is
499 // per-drop, rather than per scope, which has a several different implications. Adding
500 // a new drop into a scope will not invalidate cached blocks of the prior drops in the
501 // scope. That is true, because none of the already existing drops will have an edge
502 // into a block with the newly added drop.
504 // Note that this code iterates scopes from the inner-most to the outer-most,
505 // invalidating caches of each scope visited. This way bare minimum of the
506 // caches gets invalidated. i.e. if a new drop is added into the middle scope, the
507 // cache of outer scpoe stays intact.
508 let invalidate_unwind
= needs_drop
&& !this_scope
;
509 scope
.invalidate_cache(invalidate_unwind
);
511 if let DropKind
::Value { .. }
= drop_kind
{
512 scope
.needs_cleanup
= true;
514 scope
.drops
.push(DropData
{
516 location
: lvalue
.clone(),
522 span_bug
!(span
, "extent {:?} not in scope to drop {:?}", extent
, lvalue
);
525 /// Schedule dropping of a not-yet-fully-initialised box.
527 /// This cleanup will only be translated into unwind branch.
528 /// The extent should be for the `EXPR` inside `box EXPR`.
529 /// There may only be one “free” scheduled in any given scope.
530 pub fn schedule_box_free(&mut self,
533 value
: &Lvalue
<'tcx
>,
535 for scope
in self.scopes
.iter_mut().rev() {
536 // See the comment in schedule_drop above. The primary difference is that we invalidate
537 // the unwind blocks unconditionally. That’s because the box free may be considered
538 // outer-most cleanup within the scope.
539 scope
.invalidate_cache(true);
540 if scope
.extent
== extent
{
541 assert
!(scope
.free
.is_none(), "scope already has a scheduled free!");
542 scope
.needs_cleanup
= true;
543 scope
.free
= Some(FreeData
{
545 value
: value
.clone(),
552 span_bug
!(span
, "extent {:?} not in scope to free {:?}", extent
, value
);
557 /// Creates a path that performs all required cleanup for unwinding.
559 /// This path terminates in Resume. Returns the start of the path.
560 /// See module comment for more details. None indicates there’s no
561 /// cleanup to do at this point.
562 pub fn diverge_cleanup(&mut self) -> Option
<BasicBlock
> {
563 if !self.scopes
.iter().any(|scope
| scope
.needs_cleanup
) {
566 assert
!(!self.scopes
.is_empty()); // or `any` above would be false
568 let unit_temp
= self.get_unit_temp();
569 let Builder
{ ref mut hir
, ref mut cfg
, ref mut scopes
,
570 ref mut cached_resume_block
, .. } = *self;
572 // Build up the drops in **reverse** order. The end result will
575 // scopes[n] -> scopes[n-1] -> ... -> scopes[0]
577 // However, we build this in **reverse order**. That is, we
578 // process scopes[0], then scopes[1], etc, pointing each one at
579 // the result generates from the one before. Along the way, we
580 // store caches. If everything is cached, we'll just walk right
581 // to left reading the cached results but never created anything.
583 // To start, create the resume terminator.
584 let mut target
= if let Some(target
) = *cached_resume_block
{
587 let resumeblk
= cfg
.start_new_cleanup_block();
588 cfg
.terminate(resumeblk
,
589 scopes
[0].source_info(self.fn_span
),
590 TerminatorKind
::Resume
);
591 *cached_resume_block
= Some(resumeblk
);
595 for scope
in scopes
.iter_mut().filter(|s
| s
.needs_cleanup
) {
596 target
= build_diverge_scope(hir
.tcx(), cfg
, &unit_temp
, scope
, target
);
601 /// Utility function for *non*-scope code to build their own drops
602 pub fn build_drop(&mut self,
605 location
: Lvalue
<'tcx
>,
606 ty
: Ty
<'tcx
>) -> BlockAnd
<()> {
607 if !self.hir
.needs_drop(ty
) {
610 let source_info
= self.source_info(span
);
611 let next_target
= self.cfg
.start_new_block();
612 let diverge_target
= self.diverge_cleanup();
613 self.cfg
.terminate(block
, source_info
,
614 TerminatorKind
::Drop
{
617 unwind
: diverge_target
,
622 /// Utility function for *non*-scope code to build their own drops
623 pub fn build_drop_and_replace(&mut self,
626 location
: Lvalue
<'tcx
>,
627 value
: Operand
<'tcx
>) -> BlockAnd
<()> {
628 let source_info
= self.source_info(span
);
629 let next_target
= self.cfg
.start_new_block();
630 let diverge_target
= self.diverge_cleanup();
631 self.cfg
.terminate(block
, source_info
,
632 TerminatorKind
::DropAndReplace
{
636 unwind
: diverge_target
,
641 /// Create an Assert terminator and return the success block.
642 /// If the boolean condition operand is not the expected value,
643 /// a runtime panic will be caused with the given message.
644 pub fn assert(&mut self, block
: BasicBlock
,
647 msg
: AssertMessage
<'tcx
>,
650 let source_info
= self.source_info(span
);
652 let success_block
= self.cfg
.start_new_block();
653 let cleanup
= self.diverge_cleanup();
655 self.cfg
.terminate(block
, source_info
,
656 TerminatorKind
::Assert
{
660 target
: success_block
,
668 /// Builds drops for pop_scope and exit_scope.
669 fn build_scope_drops
<'tcx
>(cfg
: &mut CFG
<'tcx
>,
671 earlier_scopes
: &[Scope
<'tcx
>],
672 mut block
: BasicBlock
)
674 let mut iter
= scope
.drops
.iter().rev().peekable();
675 while let Some(drop_data
) = iter
.next() {
676 let source_info
= scope
.source_info(drop_data
.span
);
677 if let DropKind
::Value { .. }
= drop_data
.kind
{
678 // Try to find the next block with its cached block
679 // for us to diverge into in case the drop panics.
680 let on_diverge
= iter
.peek().iter().filter_map(|dd
| {
682 DropKind
::Value { cached_block }
=> cached_block
,
683 DropKind
::Storage
=> None
686 // If there’s no `cached_block`s within current scope,
687 // we must look for one in the enclosing scope.
688 let on_diverge
= on_diverge
.or_else(||{
689 earlier_scopes
.iter().rev().flat_map(|s
| s
.cached_block()).next()
691 let next
= cfg
.start_new_block();
692 cfg
.terminate(block
, source_info
, TerminatorKind
::Drop
{
693 location
: drop_data
.location
.clone(),
699 match drop_data
.kind
{
700 DropKind
::Value { .. }
|
701 DropKind
::Storage
=> {
702 // Only temps and vars need their storage dead.
703 match drop_data
.location
{
704 Lvalue
::Temp(_
) | Lvalue
::Var(_
) => {}
708 cfg
.push(block
, Statement
{
709 source_info
: source_info
,
710 kind
: StatementKind
::StorageDead(drop_data
.location
.clone())
718 fn build_diverge_scope
<'a
, 'gcx
, 'tcx
>(tcx
: TyCtxt
<'a
, 'gcx
, 'tcx
>,
720 unit_temp
: &Lvalue
<'tcx
>,
721 scope
: &mut Scope
<'tcx
>,
722 mut target
: BasicBlock
)
725 // Build up the drops in **reverse** order. The end result will
728 // [drops[n]] -...-> [drops[0]] -> [Free] -> [target]
730 // +------------------------------------+
733 // The code in this function reads from right to left. At each
734 // point, we check for cached blocks representing the
735 // remainder. If everything is cached, we'll just walk right to
736 // left reading the cached results but never created anything.
738 let visibility_scope
= scope
.visibility_scope
;
739 let source_info
= |span
| SourceInfo
{
741 scope
: visibility_scope
744 // Next, build up any free.
745 if let Some(ref mut free_data
) = scope
.free
{
746 target
= if let Some(cached_block
) = free_data
.cached_block
{
749 let into
= cfg
.start_new_cleanup_block();
750 cfg
.terminate(into
, source_info(free_data
.span
),
751 build_free(tcx
, unit_temp
, free_data
, target
));
752 free_data
.cached_block
= Some(into
);
757 // Next, build up the drops. Here we iterate the vector in
758 // *forward* order, so that we generate drops[0] first (right to
759 // left in diagram above).
760 for drop_data
in &mut scope
.drops
{
761 // Only full value drops are emitted in the diverging path,
763 let cached_block
= match drop_data
.kind
{
764 DropKind
::Value { ref mut cached_block }
=> cached_block
,
765 DropKind
::Storage
=> continue
767 target
= if let Some(cached_block
) = *cached_block
{
770 let block
= cfg
.start_new_cleanup_block();
771 cfg
.terminate(block
, source_info(drop_data
.span
),
772 TerminatorKind
::Drop
{
773 location
: drop_data
.location
.clone(),
777 *cached_block
= Some(block
);
785 fn build_free
<'a
, 'gcx
, 'tcx
>(tcx
: TyCtxt
<'a
, 'gcx
, 'tcx
>,
786 unit_temp
: &Lvalue
<'tcx
>,
787 data
: &FreeData
<'tcx
>,
789 -> TerminatorKind
<'tcx
> {
790 let free_func
= tcx
.lang_items
.require(lang_items
::BoxFreeFnLangItem
)
791 .unwrap_or_else(|e
| tcx
.sess
.fatal(&e
));
792 let substs
= tcx
.mk_substs(Substs
::new(
793 VecPerParamSpace
::new(vec
![], vec
![], vec
![data
.item_ty
]),
794 VecPerParamSpace
::new(vec
![], vec
![], vec
![])
796 TerminatorKind
::Call
{
797 func
: Operand
::Constant(Constant
{
799 ty
: tcx
.lookup_item_type(free_func
).ty
.subst(tcx
, substs
),
800 literal
: Literal
::Item
{
805 args
: vec
![Operand
::Consume(data
.value
.clone())],
806 destination
: Some((unit_temp
.clone(), target
)),