// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

/*!
Managing the scope stack. The scopes are tied to lexical scopes, so as
we descend the HAIR, we push a scope on the stack, translate its
contents, and then pop it off. Every scope is named by a
`CodeExtent`.

### SEME Regions

When pushing a new scope, we record the current point in the graph (a
basic block); this marks the entry to the scope. We then generate more
stuff in the control-flow graph. Whenever the scope is exited, either
via a `break` or `return` or just by fallthrough, that marks an exit
from the scope. Each lexical scope thus corresponds to a single-entry,
multiple-exit (SEME) region in the control-flow graph.
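
For instance, in a fragment like the following (purely illustrative),
the scope of `s` is entered once but can be exited along two different
edges, giving a region with one entry and two exits:

```rust,ignore
{
    let s = ...;
    if cond { return; } // exit 1: `s` dropped on the early-return edge
}                       // exit 2: `s` dropped on the fallthrough edge
```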

For now, we keep a mapping from each `CodeExtent` to its
corresponding SEME region for later reference (see caveat in next
paragraph). This is because region scopes are tied to
them. Eventually, when we shift to non-lexical lifetimes, there should
be no need to remember this mapping.

There is one additional wrinkle, actually, that I wanted to hide from
you but duty compels me to mention. In the course of translating
matches, it sometimes happens that certain code (namely guards) gets
executed multiple times. This means that a single lexical scope may
in fact correspond to multiple, disjoint SEME regions. So in fact our
mapping is from one scope to a vector of SEME regions.

### Drops

The primary purpose for scopes is to insert drops: while translating
the contents, we also accumulate lvalues that need to be dropped upon
exit from each scope. This is done by calling `schedule_drop`. Once a
drop is scheduled, whenever we branch out we will insert drops of all
those lvalues onto the outgoing edge. Note that we don't know the full
set of scheduled drops up front, and so whenever we exit from the
scope we only drop the values scheduled thus far. For example, consider
the scope S corresponding to this loop:

```rust,ignore
loop {
    let x = ...;
    if cond { break; }
    let y = ...;
}
```

When processing the `let x`, we will add one drop to the scope for
`x`. The break will then insert a drop for `x`. When we process `let
y`, we will add another drop (in fact, to a subscope, but let's ignore
that for now); any later drops would also drop `y`.

### Early exit

There are numerous "normal" ways to early exit a scope: `break`,
`continue`, `return` (panics are handled separately). Whenever an
early exit occurs, the method `exit_scope` is called. It is given the
current point in execution where the early exit occurs, as well as the
scope you want to branch to (note that all early exits go to some
other enclosing scope). `exit_scope` will record this exit point and
also add all drops.
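
To make this concrete, a `break` out of two nested scopes must drop
`b`, then `a`, on the outgoing edge before branching to the loop's
exit block (a sketch, not code from this crate):

```rust,ignore
loop {            // scope S1
    let a = ...;
    {             // scope S2
        let b = ...;
        break;    // exit_scope: drop(b), drop(a), then Goto break_block
    }
}
```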

Panics are handled in a similar fashion, except that a panic always
returns out to the `DIVERGE_BLOCK`. To trigger a panic, simply call
`panic(p)` with the current point `p`. Or else you can call
`diverge_cleanup`, which will produce a block that you can branch to
which does the appropriate cleanup and then diverges. `panic(p)`
simply calls `diverge_cleanup()` and adds an edge from `p` to the
result.
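
As a rough sketch of the resulting control flow (not literal MIR), a
scope with drops scheduled for `x` and then `y` gets a cleanup chain
that runs the drops in reverse order before resuming unwinding:

```rust,ignore
// p --(unwind edge)--> [drop(y)] --> [drop(x)] --> [Resume]
```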

### Loop scopes

In addition to the normal scope stack, we track a loop scope stack
that contains only loops. It tracks where a `break` and `continue`
should go to.
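
For example (illustrative), each loop pushes a `LoopScope`, and a
labeled `break` walks this stack to find the matching `break_block`:

```rust,ignore
'outer: loop {        // pushes a LoopScope for 'outer
    loop {            // pushes a LoopScope for the inner loop
        break 'outer; // resolves to the break_block of 'outer
    }
}
```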

*/

use build::{BlockAnd, BlockAndExtension, Builder, CFG, ScopeAuxiliary};
use rustc::middle::region::{CodeExtent, CodeExtentData};
use rustc::middle::lang_items;
use rustc::ty::subst::{Substs, Subst, VecPerParamSpace};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::mir::repr::*;
use syntax::codemap::{Span, DUMMY_SP};
use syntax::parse::token::intern_and_get_ident;
use rustc::middle::const_val::ConstVal;
use rustc_const_math::ConstInt;

pub struct Scope<'tcx> {
    /// the scope-id within the scope_datas
    id: ScopeId,

    /// the extent of this scope within source code; also stored in
    /// `ScopeAuxiliary`, but kept here for convenience
    extent: CodeExtent,

    /// set of lvalues to drop when exiting this scope. This starts
    /// out empty but grows as variables are declared during the
    /// building process. This is a stack, so we always drop from the
    /// end of the vector (top of the stack) first.
    drops: Vec<DropData<'tcx>>,

    /// A scope may only have one associated free, because:
    ///
    /// 1. We require a `free` to only be scheduled in the scope of
    ///    `EXPR` in `box EXPR`;
    /// 2. It only makes sense to have it translated into the diverge-path.
    ///
    /// This kind of drop will be run *after* all the regular drops
    /// scheduled onto this scope, because drops may have dependencies
    /// on the allocated memory.
    ///
    /// This is expected to go away once `box EXPR` becomes a sugar
    /// for placement protocol and gets desugared in some earlier
    /// stage.
    free: Option<FreeData<'tcx>>,

    /// The cached block for the cleanups-on-diverge path. This block
    /// will just do a RESUME to an appropriate place. It does not
    /// execute any of the drops or the free: each of those has its
    /// own cached block, which will branch to this point.
    cached_block: Option<BasicBlock>
}

struct DropData<'tcx> {
    /// span where drop obligation was incurred (typically where lvalue was declared)
    span: Span,

    /// lvalue to drop
    value: Lvalue<'tcx>,

    /// The cached block for the cleanups-on-diverge path. This block
    /// contains code to run the current drop and all the preceding
    /// drops (i.e. those having a lower index in the scope's drop
    /// array)
    cached_block: Option<BasicBlock>
}

struct FreeData<'tcx> {
    /// span where free obligation was incurred
    span: Span,

    /// Lvalue containing the allocated box.
    value: Lvalue<'tcx>,

    /// type of item for which the box was allocated (i.e. the `T` in `Box<T>`).
    item_ty: Ty<'tcx>,

    /// The cached block containing code to run the free. The block will also execute all the drops
    /// in the scope.
    cached_block: Option<BasicBlock>
}

#[derive(Clone, Debug)]
pub struct LoopScope {
    /// Extent of the loop
    pub extent: CodeExtent,
    /// Where the body of the loop begins
    pub continue_block: BasicBlock,
    /// Block to branch into when the loop terminates (either by being `break`-en out of, or by
    /// having its condition become false)
    pub break_block: BasicBlock, // where to go on a `break`
    /// Indicates the reachability of the break_block for this loop
    pub might_break: bool
}

impl<'tcx> Scope<'tcx> {
    /// Invalidate all the cached blocks in the scope.
    ///
    /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
    /// larger extent of code.
    fn invalidate_cache(&mut self) {
        self.cached_block = None;
        for dropdata in &mut self.drops {
            dropdata.cached_block = None;
        }
        if let Some(ref mut freedata) = self.free {
            freedata.cached_block = None;
        }
    }

    /// Returns the cached block for this scope.
    ///
    /// Precondition: the caches must be fully filled (i.e. `diverge_cleanup` has been called) for
    /// this method to work correctly.
    fn cached_block(&self) -> Option<BasicBlock> {
        if let Some(data) = self.drops.last() {
            Some(data.cached_block.expect("drop cache is not filled"))
        } else if let Some(ref data) = self.free {
            Some(data.cached_block.expect("free cache is not filled"))
        } else {
            None
        }
    }
}

impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
    // Adding and removing scopes
    // ==========================
    /// Start a loop scope, which tracks where `continue` and `break`
    /// should branch to. See module comment for more details.
    ///
    /// Returns the `might_break` attribute of the `LoopScope` used.
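    ///
    /// A minimal usage sketch (the surrounding identifiers are
    /// illustrative, not actual callers in this crate):
    ///
    /// ```rust,ignore
    /// let might_break = this.in_loop_scope(body_block, exit_block, |this| {
    ///     // ... build the loop body; lowering of `break`/`continue`
    ///     // will consult the LoopScope pushed by this call ...
    /// });
    /// ```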
    pub fn in_loop_scope<F>(&mut self,
                            loop_block: BasicBlock,
                            break_block: BasicBlock,
                            f: F)
                            -> bool
        where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>)
    {
        let extent = self.extent_of_innermost_scope();
        let loop_scope = LoopScope {
            extent: extent.clone(),
            continue_block: loop_block,
            break_block: break_block,
            might_break: false
        };
        self.loop_scopes.push(loop_scope);
        f(self);
        let loop_scope = self.loop_scopes.pop().unwrap();
        assert!(loop_scope.extent == extent);
        loop_scope.might_break
    }

    /// Convenience wrapper that pushes a scope and then executes `f`
    /// to build its contents, popping the scope afterwards.
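    ///
    /// A hypothetical call (names are illustrative):
    ///
    /// ```rust,ignore
    /// let rv = unpack!(block = this.in_scope(extent, block, |this, id| {
    ///     // ... build the scope's contents, producing some value `rv` ...
    ///     block.and(rv)
    /// }));
    /// ```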
    pub fn in_scope<F, R>(&mut self, extent: CodeExtent, mut block: BasicBlock, f: F) -> BlockAnd<R>
        where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>, ScopeId) -> BlockAnd<R>
    {
        debug!("in_scope(extent={:?}, block={:?})", extent, block);
        let id = self.push_scope(extent, block);
        let rv = unpack!(block = f(self, id));
        unpack!(block = self.pop_scope(extent, block));
        debug!("in_scope: exiting extent={:?} block={:?}", extent, block);
        block.and(rv)
    }

    /// Push a scope onto the stack. You can then build code in this
    /// scope and call `pop_scope` afterwards. Note that these two
    /// calls must be paired; using `in_scope` as a convenience
    /// wrapper may be preferable.
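    ///
    /// A sketch of the required pairing (assuming an `extent` and a
    /// `block` are already in hand):
    ///
    /// ```rust,ignore
    /// let id = this.push_scope(extent, block);
    /// // ... build the scope's contents, schedule drops ...
    /// unpack!(block = this.pop_scope(extent, block));
    /// ```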
    pub fn push_scope(&mut self, extent: CodeExtent, entry: BasicBlock) -> ScopeId {
        debug!("push_scope({:?})", extent);
        let parent_id = self.scopes.last().map(|s| s.id);
        let id = ScopeId::new(self.scope_datas.len());
        let tcx = self.hir.tcx();
        self.scope_datas.push(ScopeData {
            span: extent.span(&tcx.region_maps, &tcx.map).unwrap_or(DUMMY_SP),
            parent_scope: parent_id,
        });
        self.scopes.push(Scope {
            id: id,
            extent: extent,
            drops: vec![],
            free: None,
            cached_block: None,
        });
        self.scope_auxiliary.vec.push(ScopeAuxiliary {
            extent: extent,
            dom: self.cfg.current_location(entry),
            postdoms: vec![]
        });
        id
    }

    /// Pops a scope, which should have extent `extent`, adding any
    /// drops onto the end of `block` that are needed. This must
    /// match 1-to-1 with `push_scope`.
    pub fn pop_scope(&mut self,
                     extent: CodeExtent,
                     mut block: BasicBlock)
                     -> BlockAnd<()> {
        debug!("pop_scope({:?}, {:?})", extent, block);
        // We need to have `cached_block`s available for all the drops, so we call diverge_cleanup
        // to make sure all the `cached_block`s are filled in.
        self.diverge_cleanup();
        let scope = self.scopes.pop().unwrap();
        assert_eq!(scope.extent, extent);
        unpack!(block = build_scope_drops(&mut self.cfg, &scope, &self.scopes, block));
        self.scope_auxiliary[scope.id]
            .postdoms
            .push(self.cfg.current_location(block));
        block.unit()
    }

    /// Branch out of `block` to `target`, exiting all scopes up to
    /// and including `extent`. This will insert whatever drops are
    /// needed, as well as tracking this exit for the SEME region. See
    /// module comment for details.
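    ///
    /// For instance, lowering a `break` might look roughly like this
    /// (an illustrative sketch):
    ///
    /// ```rust,ignore
    /// let (extent, target) = {
    ///     let loop_scope = this.find_loop_scope(span, label);
    ///     (loop_scope.extent, loop_scope.break_block)
    /// };
    /// this.exit_scope(span, extent, block, target);
    /// ```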
    pub fn exit_scope(&mut self,
                      span: Span,
                      extent: CodeExtent,
                      mut block: BasicBlock,
                      target: BasicBlock) {
        debug!("exit_scope(extent={:?}, block={:?}, target={:?})", extent, block, target);
        let scope_count = 1 + self.scopes.iter().rev().position(|scope| scope.extent == extent)
                                                      .unwrap_or_else(|| {
            span_bug!(span, "extent {:?} does not enclose", extent)
        });

        let tmp = self.get_unit_temp();
        for (idx, ref scope) in self.scopes.iter().enumerate().rev().take(scope_count) {
            unpack!(block = build_scope_drops(&mut self.cfg,
                                              scope,
                                              &self.scopes[..idx],
                                              block));
            if let Some(ref free_data) = scope.free {
                let next = self.cfg.start_new_block();
                let free = build_free(self.hir.tcx(), &tmp, free_data, next);
                self.cfg.terminate(block, scope.id, span, free);
                block = next;
            }
            self.scope_auxiliary[scope.id]
                .postdoms
                .push(self.cfg.current_location(block));
        }

        assert!(scope_count < self.scopes.len(),
                "should never use `exit_scope` to pop *ALL* scopes");
        let scope = self.scopes.iter().rev().skip(scope_count)
                                            .next()
                                            .unwrap();
        self.cfg.terminate(block,
                           scope.id,
                           span,
                           TerminatorKind::Goto { target: target });
    }

    // Finding scopes
    // ==============
    /// Finds the loop scope for a given label. This is used for
    /// resolving `break` and `continue`.
    pub fn find_loop_scope(&mut self,
                           span: Span,
                           label: Option<CodeExtent>)
                           -> &mut LoopScope {
        let loop_scopes = &mut self.loop_scopes;
        match label {
            None => {
                // no label? return the innermost loop scope
                loop_scopes.iter_mut().rev().next()
            }
            Some(label) => {
                // otherwise, find the loop-scope with the correct id
                loop_scopes.iter_mut()
                           .rev()
                           .filter(|loop_scope| loop_scope.extent == label)
                           .next()
            }
        }.unwrap_or_else(|| span_bug!(span, "no enclosing loop scope found?"))
    }

    pub fn innermost_scope_id(&self) -> ScopeId {
        self.scopes.last().map(|scope| scope.id).unwrap()
    }

    pub fn extent_of_innermost_scope(&self) -> CodeExtent {
        self.scopes.last().map(|scope| scope.extent).unwrap()
    }

    /// Returns the extent of the scope which should be exited by a
    /// return.
    pub fn extent_of_return_scope(&self) -> CodeExtent {
        // The outermost scope (`scopes[0]`) will be the `CallSiteScope`.
        // We want `scopes[1]`, which is the `ParameterScope`.
        assert!(self.scopes.len() >= 2);
        assert!(match self.hir.tcx().region_maps.code_extent_data(self.scopes[1].extent) {
            CodeExtentData::ParameterScope { .. } => true,
            _ => false,
        });
        self.scopes[1].extent
    }

    // Scheduling drops
    // ================
    /// Indicates that `lvalue` should be dropped on exit from
    /// `extent`.
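    ///
    /// For example, after lowering a `let`, the binding's lvalue would be
    /// scheduled roughly like this (illustrative):
    ///
    /// ```rust,ignore
    /// this.schedule_drop(span, extent, &lvalue, lvalue_ty);
    /// // the actual Drop terminators are emitted later, by
    /// // `pop_scope`/`exit_scope`/`diverge_cleanup`
    /// ```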
    pub fn schedule_drop(&mut self,
                         span: Span,
                         extent: CodeExtent,
                         lvalue: &Lvalue<'tcx>,
                         lvalue_ty: Ty<'tcx>) {
        if !self.hir.needs_drop(lvalue_ty) {
            return
        }
        for scope in self.scopes.iter_mut().rev() {
            if scope.extent == extent {
                // No need to invalidate any caches here. The just-scheduled drop will branch into
                // the drop that comes before it in the vector.
                scope.drops.push(DropData {
                    span: span,
                    value: lvalue.clone(),
                    cached_block: None
                });
                return;
            } else {
                // We must invalidate all the cached_blocks leading up to the scope we’re
                // looking for, because all of the blocks in the chain will become incorrect.
                scope.invalidate_cache()
            }
        }
        span_bug!(span, "extent {:?} not in scope to drop {:?}", extent, lvalue);
    }

    /// Schedule dropping of a not-yet-fully-initialised box.
    ///
    /// This cleanup will only be translated into the unwind branch.
    /// The extent should be for the `EXPR` inside `box EXPR`.
    /// There may only be one “free” scheduled in any given scope.
    pub fn schedule_box_free(&mut self,
                             span: Span,
                             extent: CodeExtent,
                             value: &Lvalue<'tcx>,
                             item_ty: Ty<'tcx>) {
        for scope in self.scopes.iter_mut().rev() {
            if scope.extent == extent {
                assert!(scope.free.is_none(), "scope already has a scheduled free!");
                // We also must invalidate the caches in the scope for which the free is scheduled
                // because the drops must branch into the free we schedule here.
                scope.invalidate_cache();
                scope.free = Some(FreeData {
                    span: span,
                    value: value.clone(),
                    item_ty: item_ty,
                    cached_block: None
                });
                return;
            } else {
                // We must invalidate all the cached_blocks leading up to the scope we’re looking
                // for, because otherwise some/most of the blocks in the chain will become
                // incorrect.
                scope.invalidate_cache();
            }
        }
        span_bug!(span, "extent {:?} not in scope to free {:?}", extent, value);
    }

    // Other
    // =====
    /// Creates a path that performs all required cleanup for unwinding.
    ///
    /// This path terminates in Resume. Returns the start of the path.
    /// See module comment for more details. `None` indicates there’s no
    /// cleanup to do at this point.
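    ///
    /// Callers typically use the returned block as the unwind edge of a
    /// terminator, mirroring `build_drop` below (a sketch):
    ///
    /// ```rust,ignore
    /// let cleanup = this.diverge_cleanup();
    /// this.cfg.terminate(block, scope_id, span, TerminatorKind::Drop {
    ///     value: value,
    ///     target: next_block,
    ///     unwind: cleanup,
    /// });
    /// ```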
    pub fn diverge_cleanup(&mut self) -> Option<BasicBlock> {
        if self.scopes.iter().all(|scope| scope.drops.is_empty() && scope.free.is_none()) {
            return None;
        }
        assert!(!self.scopes.is_empty()); // or `all` above would be true

        let unit_temp = self.get_unit_temp();
        let Builder { ref mut hir, ref mut cfg, ref mut scopes,
                      ref mut cached_resume_block, .. } = *self;

        // Build up the drops in **reverse** order. The end result will
        // look like:
        //
        //    scopes[n] -> scopes[n-1] -> ... -> scopes[0]
        //
        // However, we build this in **reverse order**. That is, we
        // process scopes[0], then scopes[1], etc, pointing each one at
        // the result generated from the one before. Along the way, we
        // store caches. If everything is cached, we'll just walk right
        // to left reading the cached results but never create anything.

        // To start, create the resume terminator.
        let mut target = if let Some(target) = *cached_resume_block {
            target
        } else {
            let resumeblk = cfg.start_new_cleanup_block();
            cfg.terminate(resumeblk, scopes[0].id, self.fn_span, TerminatorKind::Resume);
            *cached_resume_block = Some(resumeblk);
            resumeblk
        };

        for scope in scopes {
            target = build_diverge_scope(hir.tcx(), cfg, &unit_temp, scope, target);
        }

        Some(target)
    }

    /// Utility function for *non*-scope code to build its own drops
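    ///
    /// (Sketch) dropping a value outside of the normal scope-exit
    /// machinery:
    ///
    /// ```rust,ignore
    /// unpack!(block = this.build_drop(block, span, lvalue, ty));
    /// ```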
    pub fn build_drop(&mut self,
                      block: BasicBlock,
                      span: Span,
                      value: Lvalue<'tcx>,
                      ty: Ty<'tcx>) -> BlockAnd<()> {
        if !self.hir.needs_drop(ty) {
            return block.unit();
        }
        let scope_id = self.innermost_scope_id();
        let next_target = self.cfg.start_new_block();
        let diverge_target = self.diverge_cleanup();
        self.cfg.terminate(block,
                           scope_id,
                           span,
                           TerminatorKind::Drop {
                               value: value,
                               target: next_target,
                               unwind: diverge_target,
                           });
        next_target.unit()
    }

    // Panicking
    // =========
    // FIXME: should be moved into their own module
    pub fn panic_bounds_check(&mut self,
                              block: BasicBlock,
                              index: Operand<'tcx>,
                              len: Operand<'tcx>,
                              span: Span) {
        // fn(&(filename: &'static str, line: u32), index: usize, length: usize) -> !
        let region = ty::ReStatic; // FIXME(mir-borrowck): use a better region?
        let func = self.lang_function(lang_items::PanicBoundsCheckFnLangItem);
        let args = self.hir.tcx().replace_late_bound_regions(&func.ty.fn_args(), |_| region).0;

        let ref_ty = args[0];
        let tup_ty = if let ty::TyRef(_, tyandmut) = ref_ty.sty {
            tyandmut.ty
        } else {
            span_bug!(span, "unexpected panic_bound_check type: {:?}", func.ty);
        };

        let (tuple, tuple_ref) = (self.temp(tup_ty), self.temp(ref_ty));
        let (file, line) = self.span_to_fileline_args(span);
        let elems = vec![Operand::Constant(file), Operand::Constant(line)];
        let scope_id = self.innermost_scope_id();
        // FIXME: We should have this as a constant, rather than a stack variable (to not pollute
        // icache with cold branch code), however to achieve that we either have to rely on rvalue
        // promotion or have some way, in MIR, to create constants.
        self.cfg.push_assign(block, scope_id, span, &tuple, // tuple = (file_arg, line_arg);
                             Rvalue::Aggregate(AggregateKind::Tuple, elems));
        // FIXME: is this region really correct here?
        self.cfg.push_assign(block, scope_id, span, &tuple_ref, // tuple_ref = &tuple;
                             Rvalue::Ref(region, BorrowKind::Shared, tuple));
        let cleanup = self.diverge_cleanup();
        self.cfg.terminate(block, scope_id, span, TerminatorKind::Call {
            func: Operand::Constant(func),
            args: vec![Operand::Consume(tuple_ref), index, len],
            destination: None,
            cleanup: cleanup,
        });
    }

    /// Create diverge cleanup and branch to it from `block`.
    pub fn panic(&mut self, block: BasicBlock, msg: &'static str, span: Span) {
        // fn(&(msg: &'static str, filename: &'static str, line: u32)) -> !
        let region = ty::ReStatic; // FIXME(mir-borrowck): use a better region?
        let func = self.lang_function(lang_items::PanicFnLangItem);
        let args = self.hir.tcx().replace_late_bound_regions(&func.ty.fn_args(), |_| region).0;

        let ref_ty = args[0];
        let tup_ty = if let ty::TyRef(_, tyandmut) = ref_ty.sty {
            tyandmut.ty
        } else {
            span_bug!(span, "unexpected panic type: {:?}", func.ty);
        };

        let (tuple, tuple_ref) = (self.temp(tup_ty), self.temp(ref_ty));
        let (file, line) = self.span_to_fileline_args(span);
        let message = Constant {
            span: span,
            ty: self.hir.tcx().mk_static_str(),
            literal: self.hir.str_literal(intern_and_get_ident(msg))
        };
        let elems = vec![Operand::Constant(message),
                         Operand::Constant(file),
                         Operand::Constant(line)];
        let scope_id = self.innermost_scope_id();
        // FIXME: We should have this as a constant, rather than a stack variable (to not pollute
        // icache with cold branch code), however to achieve that we either have to rely on rvalue
        // promotion or have some way, in MIR, to create constants.
        self.cfg.push_assign(block, scope_id, span, &tuple, // [1]
                             Rvalue::Aggregate(AggregateKind::Tuple, elems));
        // [1] tuple = (message_arg, file_arg, line_arg);
        // FIXME: is this region really correct here?
        self.cfg.push_assign(block, scope_id, span, &tuple_ref, // tuple_ref = &tuple;
                             Rvalue::Ref(region, BorrowKind::Shared, tuple));
        let cleanup = self.diverge_cleanup();
        self.cfg.terminate(block, scope_id, span, TerminatorKind::Call {
            func: Operand::Constant(func),
            args: vec![Operand::Consume(tuple_ref)],
            cleanup: cleanup,
            destination: None,
        });
    }

    fn lang_function(&mut self, lang_item: lang_items::LangItem) -> Constant<'tcx> {
        let funcdid = match self.hir.tcx().lang_items.require(lang_item) {
            Ok(d) => d,
            Err(m) => {
                self.hir.tcx().sess.fatal(&m)
            }
        };
        Constant {
            span: DUMMY_SP,
            ty: self.hir.tcx().lookup_item_type(funcdid).ty,
            literal: Literal::Item {
                def_id: funcdid,
                substs: self.hir.tcx().mk_substs(Substs::empty())
            }
        }
    }

    fn span_to_fileline_args(&mut self, span: Span) -> (Constant<'tcx>, Constant<'tcx>) {
        let span_lines = self.hir.tcx().sess.codemap().lookup_char_pos(span.lo);
        (Constant {
            span: span,
            ty: self.hir.tcx().mk_static_str(),
            literal: self.hir.str_literal(intern_and_get_ident(&span_lines.file.name))
        }, Constant {
            span: span,
            ty: self.hir.tcx().types.u32,
            literal: Literal::Value {
                value: ConstVal::Integral(ConstInt::U32(span_lines.line as u32)),
            },
        })
    }

}

/// Builds drops for pop_scope and exit_scope.
fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
                           scope: &Scope<'tcx>,
                           earlier_scopes: &[Scope<'tcx>],
                           mut block: BasicBlock)
                           -> BlockAnd<()> {
    let mut iter = scope.drops.iter().rev().peekable();
    while let Some(drop_data) = iter.next() {
        // Try to find the next block with its cached block for us to diverge into in case the
        // drop panics.
        let on_diverge = iter.peek().iter().flat_map(|dd| dd.cached_block.into_iter()).next();
        // If there are no `cached_block`s within the current scope, we must look for one in the
        // enclosing scope.
        let on_diverge = on_diverge.or_else(|| {
            earlier_scopes.iter().rev().flat_map(|s| s.cached_block()).next()
        });
        let next = cfg.start_new_block();
        cfg.terminate(block, scope.id, drop_data.span, TerminatorKind::Drop {
            value: drop_data.value.clone(),
            target: next,
            unwind: on_diverge
        });
        block = next;
    }
    block.unit()
}

fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
                                       cfg: &mut CFG<'tcx>,
                                       unit_temp: &Lvalue<'tcx>,
                                       scope: &mut Scope<'tcx>,
                                       mut target: BasicBlock)
                                       -> BasicBlock
{
    // Build up the drops in **reverse** order. The end result will
    // look like:
    //
    //    [drops[n]] -...-> [drops[0]] -> [Free] -> [target]
    //    |                                    |
    //    +------------------------------------+
    //     code for scope
    //
    // The code in this function reads from right to left. At each
    // point, we check for cached blocks representing the
    // remainder. If everything is cached, we'll just walk right to
    // left reading the cached results but never create anything.

    // First, build up any free.
    if let Some(ref mut free_data) = scope.free {
        target = if let Some(cached_block) = free_data.cached_block {
            cached_block
        } else {
            let into = cfg.start_new_cleanup_block();
            cfg.terminate(into,
                          scope.id,
                          free_data.span,
                          build_free(tcx, unit_temp, free_data, target));
            free_data.cached_block = Some(into);
            into
        };
    }

    // Next, build up the drops. Here we iterate the vector in
    // *forward* order, so that we generate drops[0] first (right to
    // left in diagram above).
    for drop_data in &mut scope.drops {
        target = if let Some(cached_block) = drop_data.cached_block {
            cached_block
        } else {
            let block = cfg.start_new_cleanup_block();
            cfg.terminate(block,
                          scope.id,
                          drop_data.span,
                          TerminatorKind::Drop {
                              value: drop_data.value.clone(),
                              target: target,
                              unwind: None
                          });
            drop_data.cached_block = Some(block);
            block
        };
    }

    target
}

fn build_free<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
                              unit_temp: &Lvalue<'tcx>,
                              data: &FreeData<'tcx>,
                              target: BasicBlock)
                              -> TerminatorKind<'tcx> {
    let free_func = tcx.lang_items.require(lang_items::BoxFreeFnLangItem)
                       .unwrap_or_else(|e| tcx.sess.fatal(&e));
    let substs = tcx.mk_substs(Substs::new(
        VecPerParamSpace::new(vec![], vec![], vec![data.item_ty]),
        VecPerParamSpace::new(vec![], vec![], vec![])
    ));
    TerminatorKind::Call {
        func: Operand::Constant(Constant {
            span: data.span,
            ty: tcx.lookup_item_type(free_func).ty.subst(tcx, substs),
            literal: Literal::Item {
                def_id: free_func,
                substs: substs
            }
        }),
        args: vec![Operand::Consume(data.value.clone())],
        destination: Some((unit_temp.clone(), target)),
        cleanup: None
    }
}