1 // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 /*!
12 Managing the scope stack. The scopes are tied to lexical scopes, so as
13 we descend the HAIR, we push a scope on the stack, translate its
14 contents, and then pop it off. Every scope is named by a
15 `CodeExtent`.
16
17 ### SEME Regions
18
19 When pushing a new scope, we record the current point in the graph (a
20 basic block); this marks the entry to the scope. We then generate more
21 stuff in the control-flow graph. Whenever the scope is exited, either
22 via a `break` or `return` or just by fallthrough, that marks an exit
23 from the scope. Each lexical scope thus corresponds to a single-entry,
24 multiple-exit (SEME) region in the control-flow graph.
25
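For example, in the schematic snippet below (purely illustrative; `cond`,
`other` and `f` are placeholders), every way out of the block is an exit
edge of its SEME region:

```
{
    let x = ...;            // the block's scope is entered here
    if cond { return; }     // exit 1: early return
    if other { break; }     // exit 2: `break` out of an enclosing loop
    f(x);
}                           // exit 3: normal fallthrough
```
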
26 For now, we keep a mapping from each `CodeExtent` to its
27 corresponding SEME region for later reference (see caveat in next
28 paragraph). This is because region scopes are tied to
29 them. Eventually, when we shift to non-lexical lifetimes, there should
30 be no need to remember this mapping.
31
32 There is one additional wrinkle, actually, that I wanted to hide from
33 you but duty compels me to mention. In the course of translating
34 matches, it sometimes happens that certain code (namely guards) gets
35 executed multiple times. This means that a single lexical scope may
36 in fact correspond to multiple, disjoint SEME regions. So in fact our
37 mapping is from one scope to a vector of SEME regions.
38
39 ### Drops
40
41 The primary purpose for scopes is to insert drops: while translating
42 the contents, we also accumulate lvalues that need to be dropped upon
43 exit from each scope. This is done by calling `schedule_drop`. Once a
44 drop is scheduled, whenever we branch out we will insert drops of all
45 those lvalues onto the outgoing edge. Note that we don't know the full
46 set of scheduled drops up front, and so whenever we exit from the
47 scope we only drop the values scheduled thus far. For example, consider
48 the scope S corresponding to this loop:
49
50 ```
51 loop {
52 let x = ...;
53 if cond { break; }
54 let y = ...;
55 }
56 ```
57
58 When processing the `let x`, we will add one drop to the scope for
59 `x`. The break will then insert a drop for `x`. When we process `let
60 y`, we will add another drop (in fact, to a subscope, but let's ignore
61 that for now); any later drops would also drop `y`.
62
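In terms of the builder API defined in this module, translating that loop
issues roughly the following calls (a sketch only; the real call sites live
in expression/statement translation, and the `*_x`/`*_y` names are
placeholders):

```
// after translating `let x`:
this.schedule_drop(span_x, loop_body_extent, &lvalue_x, ty_x);
// translating `break`: emits a drop of `x` on the outgoing edge
this.exit_scope(span_break, loop_extent, block, break_block);
// after translating `let y`:
this.schedule_drop(span_y, loop_body_extent, &lvalue_y, ty_y);
// normal exit of the body: `pop_scope` drops `y`, then `x`
```
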
63 ### Early exit
64
65 There are numerous "normal" ways to early exit a scope: `break`,
66 `continue`, `return` (panics are handled separately). Whenever an
67 early exit occurs, the method `exit_scope` is called. It is given the
68 current point in execution where the early exit occurs, as well as the
69 scope you want to branch to (note that all early exits are to some
70 other enclosing scope). `exit_scope` will record this exit point and
71 also add all drops.
72
73 Panics are handled in a similar fashion, except that a panic always
74 returns out to the `DIVERGE_BLOCK`. To trigger a panic, simply call
75 `panic(p)` with the current point `p`. Or else you can call
76 `diverge_cleanup`, which will produce a block that you can branch to
77 which does the appropriate cleanup and then diverges. `panic(p)`
78 simply calls `diverge_cleanup()` and adds an edge from `p` to the
79 result.
80
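Concretely, the block returned by `diverge_cleanup` is what gets threaded
into the `unwind`/`cleanup` fields of terminators; `build_drop` below is its
simplest user (a sketch mirroring the code later in this file):

```
let diverge_target = self.diverge_cleanup();        // Option<BasicBlock>
self.cfg.terminate(block, scope_id, span, TerminatorKind::Drop {
    value: value,
    target: next_target,      // normal continuation
    unwind: diverge_target,   // taken if the drop itself panics
});
```
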
81 ### Loop scopes
82
83 In addition to the normal scope stack, we track a loop scope stack
84 that contains only loops. It tracks where a `break` and `continue`
85 should go to.
86
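For example (source-level and purely illustrative; `done` and `skip` are
placeholders), the labelled `break` below must resolve, via
`find_loop_scope`, to the *outer* loop's `LoopScope` rather than the
innermost one:

```
'outer: loop {
    loop {
        if done() { break 'outer; } // exits both loop scopes
        if skip() { continue; }     // targets the inner continue_block
    }
}
```
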
87 */
88
89 use build::{BlockAnd, BlockAndExtension, Builder, CFG, ScopeAuxiliary};
90 use rustc::middle::region::{CodeExtent, CodeExtentData};
91 use rustc::middle::lang_items;
92 use rustc::ty::subst::{Substs, Subst, VecPerParamSpace};
93 use rustc::ty::{self, Ty, TyCtxt};
94 use rustc::mir::repr::*;
95 use syntax::codemap::{Span, DUMMY_SP};
96 use syntax::parse::token::intern_and_get_ident;
97 use rustc::middle::const_val::ConstVal;
98 use rustc_const_math::ConstInt;
99
100 pub struct Scope<'tcx> {
101 /// the scope-id within the scope_datas
102 id: ScopeId,
103
104 /// the extent of this scope within source code; also stored in
105 /// `ScopeAuxiliary`, but kept here for convenience
106 extent: CodeExtent,
107
108 /// set of lvalues to drop when exiting this scope. This starts
109 /// out empty but grows as variables are declared during the
110 /// building process. This is a stack, so we always drop from the
111 /// end of the vector (top of the stack) first.
112 drops: Vec<DropData<'tcx>>,
113
114 /// A scope may only have one associated free, because:
115 ///
116 /// 1. We require a `free` to only be scheduled in the scope of
117 /// `EXPR` in `box EXPR`;
118 /// 2. It only makes sense to have it translated into the diverge-path.
119 ///
120 /// This kind of drop will be run *after* all the regular drops
121 /// scheduled onto this scope, because drops may have dependencies
122 /// on the allocated memory.
123 ///
124 /// This is expected to go away once `box EXPR` becomes sugar
125 /// for the placement protocol and gets desugared in some earlier
126 /// stage.
127 free: Option<FreeData<'tcx>>,
128
129 /// The cached block for the cleanups-on-diverge path. This block
130 /// will just do a RESUME to an appropriate
131 /// place. This block does not execute any of the drops or free:
132 /// each of those has their own cached-blocks, which will branch
133 /// to this point.
134 cached_block: Option<BasicBlock>
135 }
136
137 struct DropData<'tcx> {
138 /// span where drop obligation was incurred (typically where lvalue was declared)
139 span: Span,
140
141 /// lvalue to drop
142 value: Lvalue<'tcx>,
143
144 /// The cached block for the cleanups-on-diverge path. This block
145 /// contains code to run the current drop and all the preceding
146 /// drops (i.e. those having a lower index in this scope's `drops`
147 /// vector)
148 cached_block: Option<BasicBlock>
149 }
150
151 struct FreeData<'tcx> {
152 /// span where free obligation was incurred
153 span: Span,
154
155 /// Lvalue containing the allocated box.
156 value: Lvalue<'tcx>,
157
158 /// type of item for which the box was allocated (i.e. the `T` in `Box<T>`).
159 item_ty: Ty<'tcx>,
160
161 /// The cached block containing code to run the free. The block will also execute all the drops
162 /// in the scope.
163 cached_block: Option<BasicBlock>
164 }
165
166 #[derive(Clone, Debug)]
167 pub struct LoopScope {
168 /// Extent of the loop
169 pub extent: CodeExtent,
170 /// Where the body of the loop begins
171 pub continue_block: BasicBlock,
172 /// Block to branch into when the loop terminates (either by being broken out of with `break`,
173 /// or by having its condition become false)
174 pub break_block: BasicBlock, // where to go on a `break`
175 /// Indicates the reachability of the break_block for this loop
176 pub might_break: bool
177 }
178
179 impl<'tcx> Scope<'tcx> {
180 /// Invalidate all the cached blocks in the scope.
181 ///
182 /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
183 /// larger extent of code.
184 fn invalidate_cache(&mut self) {
185 self.cached_block = None;
186 for dropdata in &mut self.drops {
187 dropdata.cached_block = None;
188 }
189 if let Some(ref mut freedata) = self.free {
190 freedata.cached_block = None;
191 }
192 }
193
194 /// Returns the cached block for this scope.
195 ///
196 /// Precondition: the caches must be fully filled (i.e. `diverge_cleanup` has been called) in order for
197 /// this method to work correctly.
198 fn cached_block(&self) -> Option<BasicBlock> {
199 if let Some(data) = self.drops.last() {
200 Some(data.cached_block.expect("drop cache is not filled"))
201 } else if let Some(ref data) = self.free {
202 Some(data.cached_block.expect("free cache is not filled"))
203 } else {
204 None
205 }
206 }
207 }
208
209 impl<'a,'tcx> Builder<'a,'tcx> {
210 // Adding and removing scopes
211 // ==========================
212 /// Start a loop scope, which tracks where `continue` and `break`
213 /// should branch to. See module comment for more details.
214 ///
215 /// Returns the might_break attribute of the LoopScope used.
216 pub fn in_loop_scope<F>(&mut self,
217 loop_block: BasicBlock,
218 break_block: BasicBlock,
219 f: F)
220 -> bool
221 where F: FnOnce(&mut Builder<'a, 'tcx>)
222 {
223 let extent = self.extent_of_innermost_scope();
224 let loop_scope = LoopScope {
225 extent: extent.clone(),
226 continue_block: loop_block,
227 break_block: break_block,
228 might_break: false
229 };
230 self.loop_scopes.push(loop_scope);
231 f(self);
232 let loop_scope = self.loop_scopes.pop().unwrap();
233 assert!(loop_scope.extent == extent);
234 loop_scope.might_break
235 }
236
237 /// Convenience wrapper that pushes a scope and then executes `f`
238 /// to build its contents, popping the scope afterwards.
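///
/// A typical call looks like the following (a sketch only; the closure body
/// and the surrounding variables are placeholders):
///
/// ```ignore
/// let result = builder.in_scope(extent, block, |builder, scope_id| {
///     // ... translate the contents of the scope, extending `block` ...
///     block.and(value)
/// });
/// ```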
239 pub fn in_scope<F, R>(&mut self, extent: CodeExtent, mut block: BasicBlock, f: F) -> BlockAnd<R>
240 where F: FnOnce(&mut Builder<'a, 'tcx>, ScopeId) -> BlockAnd<R>
241 {
242 debug!("in_scope(extent={:?}, block={:?})", extent, block);
243 let id = self.push_scope(extent, block);
244 let rv = unpack!(block = f(self, id));
245 unpack!(block = self.pop_scope(extent, block));
246 debug!("in_scope: exiting extent={:?} block={:?}", extent, block);
247 block.and(rv)
248 }
249
250 /// Push a scope onto the stack. You can then build code in this
251 /// scope and call `pop_scope` afterwards. Note that these two
252 /// calls must be paired; using `in_scope` as a convenience
253 /// wrapper may be preferable.
254 pub fn push_scope(&mut self, extent: CodeExtent, entry: BasicBlock) -> ScopeId {
255 debug!("push_scope({:?})", extent);
256 let parent_id = self.scopes.last().map(|s| s.id);
257 let id = ScopeId::new(self.scope_datas.len());
258 self.scope_datas.push(ScopeData {
259 parent_scope: parent_id,
260 });
261 self.scopes.push(Scope {
262 id: id,
263 extent: extent,
264 drops: vec![],
265 free: None,
266 cached_block: None,
267 });
268 self.scope_auxiliary.vec.push(ScopeAuxiliary {
269 extent: extent,
270 dom: self.cfg.current_location(entry),
271 postdoms: vec![]
272 });
273 id
274 }
275
276 /// Pops a scope, which should have extent `extent`, adding any
277 /// drops onto the end of `block` that are needed. This must
278 /// match 1-to-1 with `push_scope`.
279 pub fn pop_scope(&mut self,
280 extent: CodeExtent,
281 mut block: BasicBlock)
282 -> BlockAnd<()> {
283 debug!("pop_scope({:?}, {:?})", extent, block);
284 // We need to have `cached_block`s available for all the drops, so we call diverge_cleanup
285 // to make sure all the `cached_block`s are filled in.
286 self.diverge_cleanup();
287 let scope = self.scopes.pop().unwrap();
288 assert_eq!(scope.extent, extent);
289 unpack!(block = build_scope_drops(&mut self.cfg, &scope, &self.scopes, block));
290 self.scope_auxiliary[scope.id]
291 .postdoms
292 .push(self.cfg.current_location(block));
293 block.unit()
294 }
295
296
297 /// Branch out of `block` to `target`, exiting all scopes up to
298 /// and including `extent`. This will insert whatever drops are
299 /// needed, as well as tracking this exit for the SEME region. See
300 /// module comment for details.
301 pub fn exit_scope(&mut self,
302 span: Span,
303 extent: CodeExtent,
304 mut block: BasicBlock,
305 target: BasicBlock) {
306 debug!("exit_scope(extent={:?}, block={:?}, target={:?})", extent, block, target);
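// Count how many scopes we have to exit: everything from the innermost
// scope up to (and including) the scope for `extent`.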
307 let scope_count = 1 + self.scopes.iter().rev().position(|scope| scope.extent == extent)
308 .unwrap_or_else(||{
309 span_bug!(span, "extent {:?} does not enclose", extent)
310 });
311
312 let tmp = self.get_unit_temp();
313 for (idx, ref scope) in self.scopes.iter().enumerate().rev().take(scope_count) {
314 unpack!(block = build_scope_drops(&mut self.cfg,
315 scope,
316 &self.scopes[..idx],
317 block));
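// If this scope also scheduled a free of a box, emit the call to
// `box_free` on this exit path as well, continuing in a fresh block.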
318 if let Some(ref free_data) = scope.free {
319 let next = self.cfg.start_new_block();
320 let free = build_free(self.hir.tcx(), &tmp, free_data, next);
321 self.cfg.terminate(block, scope.id, span, free);
322 block = next;
323 }
324 self.scope_auxiliary[scope.id]
325 .postdoms
326 .push(self.cfg.current_location(block));
327 }
328
329 assert!(scope_count < self.scopes.len(),
330 "should never use `exit_scope` to pop *ALL* scopes");
331 let scope = self.scopes.iter().rev().skip(scope_count)
332 .next()
333 .unwrap();
334 self.cfg.terminate(block,
335 scope.id,
336 span,
337 TerminatorKind::Goto { target: target });
338 }
339
340 // Finding scopes
341 // ==============
342 /// Finds the loop scope for a given label. This is used for
343 /// resolving `break` and `continue`.
344 pub fn find_loop_scope(&mut self,
345 span: Span,
346 label: Option<CodeExtent>)
347 -> &mut LoopScope {
348 let loop_scopes = &mut self.loop_scopes;
349 match label {
350 None => {
351 // no label? return the innermost loop scope
352 loop_scopes.iter_mut().rev().next()
353 }
354 Some(label) => {
355 // otherwise, find the loop-scope with the correct id
356 loop_scopes.iter_mut()
357 .rev()
358 .filter(|loop_scope| loop_scope.extent == label)
359 .next()
360 }
361 }.unwrap_or_else(|| span_bug!(span, "no enclosing loop scope found?"))
362 }
363
364 pub fn innermost_scope_id(&self) -> ScopeId {
365 self.scopes.last().map(|scope| scope.id).unwrap()
366 }
367
368 pub fn extent_of_innermost_scope(&self) -> CodeExtent {
369 self.scopes.last().map(|scope| scope.extent).unwrap()
370 }
371
372 /// Returns the extent of the scope which should be exited by a
373 /// return.
374 pub fn extent_of_return_scope(&self) -> CodeExtent {
375 // The outermost scope (`scopes[0]`) will be the `CallSiteScope`.
376 // We want `scopes[1]`, which is the `ParameterScope`.
377 assert!(self.scopes.len() >= 2);
378 assert!(match self.hir.tcx().region_maps.code_extent_data(self.scopes[1].extent) {
379 CodeExtentData::ParameterScope { .. } => true,
380 _ => false,
381 });
382 self.scopes[1].extent
383 }
384
385 // Scheduling drops
386 // ================
387 /// Indicates that `lvalue` should be dropped on exit from
388 /// `extent`.
389 pub fn schedule_drop(&mut self,
390 span: Span,
391 extent: CodeExtent,
392 lvalue: &Lvalue<'tcx>,
393 lvalue_ty: Ty<'tcx>) {
394 if !self.hir.needs_drop(lvalue_ty) {
395 return
396 }
397 for scope in self.scopes.iter_mut().rev() {
398 if scope.extent == extent {
399 // No need to invalidate any caches here. The just-scheduled drop will branch into
400 // the drop that comes before it in the vector.
401 scope.drops.push(DropData {
402 span: span,
403 value: lvalue.clone(),
404 cached_block: None
405 });
406 return;
407 } else {
408 // We must invalidate all the cached_blocks leading up to the scope we’re
409 // looking for, because all of the blocks in the chain will become incorrect.
410 scope.invalidate_cache()
411 }
412 }
413 span_bug!(span, "extent {:?} not in scope to drop {:?}", extent, lvalue);
414 }
415
416 /// Schedule dropping of a not-yet-fully-initialised box.
417 ///
418 /// This cleanup will only be translated into an unwind branch.
419 /// The extent should be for the `EXPR` inside `box EXPR`.
420 /// There may only be one “free” scheduled in any given scope.
421 pub fn schedule_box_free(&mut self,
422 span: Span,
423 extent: CodeExtent,
424 value: &Lvalue<'tcx>,
425 item_ty: Ty<'tcx>) {
426 for scope in self.scopes.iter_mut().rev() {
427 if scope.extent == extent {
428 assert!(scope.free.is_none(), "scope already has a scheduled free!");
429 // We also must invalidate the caches in the scope for which the free is scheduled
430 // because the drops must branch into the free we schedule here.
431 scope.invalidate_cache();
432 scope.free = Some(FreeData {
433 span: span,
434 value: value.clone(),
435 item_ty: item_ty,
436 cached_block: None
437 });
438 return;
439 } else {
440 // We must invalidate all the cached_blocks leading up to the scope we’re looking
441 // for, because otherwise some/most of the blocks in the chain will become
442 // incorrect.
443 scope.invalidate_cache();
444 }
445 }
446 span_bug!(span, "extent {:?} not in scope to free {:?}", extent, value);
447 }
448
449 // Other
450 // =====
451 /// Creates a path that performs all required cleanup for unwinding.
452 ///
453 /// This path terminates in Resume. Returns the start of the path.
454 /// See module comment for more details. None indicates there’s no
455 /// cleanup to do at this point.
456 pub fn diverge_cleanup(&mut self) -> Option<BasicBlock> {
457 if self.scopes.iter().all(|scope| scope.drops.is_empty() && scope.free.is_none()) {
458 return None;
459 }
460 assert!(!self.scopes.is_empty()); // or `all` above would be true
461
462 let unit_temp = self.get_unit_temp();
463 let Builder { ref mut hir, ref mut cfg, ref mut scopes,
464 ref mut cached_resume_block, .. } = *self;
465
466 // Build up the drops in **reverse** order. The end result will
467 // look like:
468 //
469 // scopes[n] -> scopes[n-1] -> ... -> scopes[0]
470 //
471 // However, we build this in **reverse order**. That is, we
472 // process scopes[0], then scopes[1], etc, pointing each one at
473 // the result generated from the one before. Along the way, we
474 // store caches. If everything is cached, we'll just walk right
475 // to left reading the cached results but never create anything.
476
477 // To start, create the resume terminator.
478 let mut target = if let Some(target) = *cached_resume_block {
479 target
480 } else {
481 let resumeblk = cfg.start_new_cleanup_block();
482 cfg.terminate(resumeblk, scopes[0].id, self.fn_span, TerminatorKind::Resume);
483 *cached_resume_block = Some(resumeblk);
484 resumeblk
485 };
486
487 for scope in scopes {
488 target = build_diverge_scope(hir.tcx(), cfg, &unit_temp, scope, target);
489 }
490
491 Some(target)
492 }
493
494 /// Utility function for *non*-scope code to build its own drops
495 pub fn build_drop(&mut self,
496 block: BasicBlock,
497 span: Span,
498 value: Lvalue<'tcx>)
499 -> BlockAnd<()> {
500 let scope_id = self.innermost_scope_id();
501 let next_target = self.cfg.start_new_block();
502 let diverge_target = self.diverge_cleanup();
503 self.cfg.terminate(block,
504 scope_id,
505 span,
506 TerminatorKind::Drop {
507 value: value,
508 target: next_target,
509 unwind: diverge_target,
510 });
511 next_target.unit()
512 }
513
514
515 // Panicking
516 // =========
517 // FIXME: should be moved into their own module
518 pub fn panic_bounds_check(&mut self,
519 block: BasicBlock,
520 index: Operand<'tcx>,
521 len: Operand<'tcx>,
522 span: Span) {
523 // fn(&(filename: &'static str, line: u32), index: usize, length: usize) -> !
524 let region = ty::ReStatic; // FIXME(mir-borrowck): use a better region?
525 let func = self.lang_function(lang_items::PanicBoundsCheckFnLangItem);
526 let args = self.hir.tcx().replace_late_bound_regions(&func.ty.fn_args(), |_| region).0;
527
528 let ref_ty = args[0];
529 let tup_ty = if let ty::TyRef(_, tyandmut) = ref_ty.sty {
530 tyandmut.ty
531 } else {
532 span_bug!(span, "unexpected panic_bound_check type: {:?}", func.ty);
533 };
534
535 let (tuple, tuple_ref) = (self.temp(tup_ty), self.temp(ref_ty));
536 let (file, line) = self.span_to_fileline_args(span);
537 let elems = vec![Operand::Constant(file), Operand::Constant(line)];
538 let scope_id = self.innermost_scope_id();
539 // FIXME: We should have this as a constant, rather than a stack variable (to not pollute
540 // icache with cold branch code), however to achieve that we either have to rely on rvalue
541 // promotion or have some way, in MIR, to create constants.
542 self.cfg.push_assign(block, scope_id, span, &tuple, // tuple = (file_arg, line_arg);
543 Rvalue::Aggregate(AggregateKind::Tuple, elems));
544 // FIXME: is this region really correct here?
545 self.cfg.push_assign(block, scope_id, span, &tuple_ref, // tuple_ref = &tuple;
546 Rvalue::Ref(region, BorrowKind::Shared, tuple));
547 let cleanup = self.diverge_cleanup();
548 self.cfg.terminate(block, scope_id, span, TerminatorKind::Call {
549 func: Operand::Constant(func),
550 args: vec![Operand::Consume(tuple_ref), index, len],
551 destination: None,
552 cleanup: cleanup,
553 });
554 }
555
556 /// Create diverge cleanup and branch to it from `block`.
557 pub fn panic(&mut self, block: BasicBlock, msg: &'static str, span: Span) {
558 // fn(&(msg: &'static str, filename: &'static str, line: u32)) -> !
559 let region = ty::ReStatic; // FIXME(mir-borrowck): use a better region?
560 let func = self.lang_function(lang_items::PanicFnLangItem);
561 let args = self.hir.tcx().replace_late_bound_regions(&func.ty.fn_args(), |_| region).0;
562
563 let ref_ty = args[0];
564 let tup_ty = if let ty::TyRef(_, tyandmut) = ref_ty.sty {
565 tyandmut.ty
566 } else {
567 span_bug!(span, "unexpected panic type: {:?}", func.ty);
568 };
569
570 let (tuple, tuple_ref) = (self.temp(tup_ty), self.temp(ref_ty));
571 let (file, line) = self.span_to_fileline_args(span);
572 let message = Constant {
573 span: span,
574 ty: self.hir.tcx().mk_static_str(),
575 literal: self.hir.str_literal(intern_and_get_ident(msg))
576 };
577 let elems = vec![Operand::Constant(message),
578 Operand::Constant(file),
579 Operand::Constant(line)];
580 let scope_id = self.innermost_scope_id();
581 // FIXME: We should have this as a constant, rather than a stack variable (to not pollute
582 // icache with cold branch code), however to achieve that we either have to rely on rvalue
583 // promotion or have some way, in MIR, to create constants.
584 self.cfg.push_assign(block, scope_id, span, &tuple, // [1]
585 Rvalue::Aggregate(AggregateKind::Tuple, elems));
586 // [1] tuple = (message_arg, file_arg, line_arg);
587 // FIXME: is this region really correct here?
588 self.cfg.push_assign(block, scope_id, span, &tuple_ref, // tuple_ref = &tuple;
589 Rvalue::Ref(region, BorrowKind::Shared, tuple));
590 let cleanup = self.diverge_cleanup();
591 self.cfg.terminate(block, scope_id, span, TerminatorKind::Call {
592 func: Operand::Constant(func),
593 args: vec![Operand::Consume(tuple_ref)],
594 cleanup: cleanup,
595 destination: None,
596 });
597 }
598
599 fn lang_function(&mut self, lang_item: lang_items::LangItem) -> Constant<'tcx> {
600 let funcdid = match self.hir.tcx().lang_items.require(lang_item) {
601 Ok(d) => d,
602 Err(m) => {
603 self.hir.tcx().sess.fatal(&m)
604 }
605 };
606 Constant {
607 span: DUMMY_SP,
608 ty: self.hir.tcx().lookup_item_type(funcdid).ty,
609 literal: Literal::Item {
610 def_id: funcdid,
611 substs: self.hir.tcx().mk_substs(Substs::empty())
612 }
613 }
614 }
615
616 fn span_to_fileline_args(&mut self, span: Span) -> (Constant<'tcx>, Constant<'tcx>) {
617 let span_lines = self.hir.tcx().sess.codemap().lookup_char_pos(span.lo);
618 (Constant {
619 span: span,
620 ty: self.hir.tcx().mk_static_str(),
621 literal: self.hir.str_literal(intern_and_get_ident(&span_lines.file.name))
622 }, Constant {
623 span: span,
624 ty: self.hir.tcx().types.u32,
625 literal: Literal::Value {
626 value: ConstVal::Integral(ConstInt::U32(span_lines.line as u32)),
627 },
628 })
629 }
630
631 }
632
633 /// Builds drops for pop_scope and exit_scope.
634 fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
635 scope: &Scope<'tcx>,
636 earlier_scopes: &[Scope<'tcx>],
637 mut block: BasicBlock)
638 -> BlockAnd<()> {
639 let mut iter = scope.drops.iter().rev().peekable();
640 while let Some(drop_data) = iter.next() {
641 // Try to find the cached block of the next drop, for us to diverge into in case this
642 // drop panics.
643 let on_diverge = iter.peek().iter().flat_map(|dd| dd.cached_block.into_iter()).next();
644 // If there are no `cached_block`s within the current scope, we must look for one in the
645 // enclosing scope.
646 let on_diverge = on_diverge.or_else(||{
647 earlier_scopes.iter().rev().flat_map(|s| s.cached_block()).next()
648 });
649 let next = cfg.start_new_block();
650 cfg.terminate(block, scope.id, drop_data.span, TerminatorKind::Drop {
651 value: drop_data.value.clone(),
652 target: next,
653 unwind: on_diverge
654 });
655 block = next;
656 }
657 block.unit()
658 }
659
660 fn build_diverge_scope<'tcx>(tcx: &TyCtxt<'tcx>,
661 cfg: &mut CFG<'tcx>,
662 unit_temp: &Lvalue<'tcx>,
663 scope: &mut Scope<'tcx>,
664 mut target: BasicBlock)
665 -> BasicBlock
666 {
667 // Build up the drops in **reverse** order. The end result will
668 // look like:
669 //
670 //    [drops[n]] -...-> [drops[0]] -> [Free] -> [target]
671 //    |                                      |
672 //    +--------------------------------------+
673 //     code for scope
674 //
675 // The code in this function reads from right to left. At each
676 // point, we check for cached blocks representing the
677 // remainder. If everything is cached, we'll just walk right to
678 // left reading the cached results but never create anything.
679
680 // Next, build up any free.
681 if let Some(ref mut free_data) = scope.free {
682 target = if let Some(cached_block) = free_data.cached_block {
683 cached_block
684 } else {
685 let into = cfg.start_new_cleanup_block();
686 cfg.terminate(into,
687 scope.id,
688 free_data.span,
689 build_free(tcx, unit_temp, free_data, target));
690 free_data.cached_block = Some(into);
691 into
692 };
693 }
694
695 // Next, build up the drops. Here we iterate the vector in
696 // *forward* order, so that we generate drops[0] first (right to
697 // left in diagram above).
698 for drop_data in &mut scope.drops {
699 target = if let Some(cached_block) = drop_data.cached_block {
700 cached_block
701 } else {
702 let block = cfg.start_new_cleanup_block();
703 cfg.terminate(block,
704 scope.id,
705 drop_data.span,
706 TerminatorKind::Drop {
707 value: drop_data.value.clone(),
708 target: target,
709 unwind: None
710 });
711 drop_data.cached_block = Some(block);
712 block
713 };
714 }
715
716 target
717 }
718
719 fn build_free<'tcx>(tcx: &TyCtxt<'tcx>,
720 unit_temp: &Lvalue<'tcx>,
721 data: &FreeData<'tcx>,
722 target: BasicBlock)
723 -> TerminatorKind<'tcx> {
724 let free_func = tcx.lang_items.require(lang_items::BoxFreeFnLangItem)
725 .unwrap_or_else(|e| tcx.sess.fatal(&e));
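// Substitute `item_ty` for `box_free`'s type parameter, so that the call
// built below is effectively `box_free::<T>` for the boxed `T`.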
726 let substs = tcx.mk_substs(Substs::new(
727 VecPerParamSpace::new(vec![], vec![], vec![data.item_ty]),
728 VecPerParamSpace::new(vec![], vec![], vec![])
729 ));
730 TerminatorKind::Call {
731 func: Operand::Constant(Constant {
732 span: data.span,
733 ty: tcx.lookup_item_type(free_func).ty.subst(tcx, substs),
734 literal: Literal::Item {
735 def_id: free_func,
736 substs: substs
737 }
738 }),
739 args: vec![Operand::Consume(data.value.clone())],
740 destination: Some((unit_temp.clone(), target)),
741 cleanup: None
742 }
743 }