// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

/*!
Managing the scope stack. The scopes are tied to lexical scopes, so as
we descend the HAIR, we push a scope on the stack, translate its
contents, and then pop it off. Every scope is named by a
`CodeExtent`.

### SEME Regions

When pushing a new scope, we record the current point in the graph (a
basic block); this marks the entry to the scope. We then generate more
stuff in the control-flow graph. Whenever the scope is exited, either
via a `break` or `return` or just by fallthrough, that marks an exit
from the scope. Each lexical scope thus corresponds to a single-entry,
multiple-exit (SEME) region in the control-flow graph.

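For example (an illustrative sketch; `c1` and `c2` are hypothetical
conditions), the body of the loop below is one lexical scope with
several exit edges, one per commented line:

```
loop {
    let s = String::new();
    if c1 { return; }    // exit 1: `return` out of the scope
    if c2 { continue; }  // exit 2: `continue` past the end of the body
    // exit 3: normal fallthrough at the end of the body
}
```
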
For now, we keep a mapping from each `CodeExtent` to its
corresponding SEME region for later reference (see caveat in next
paragraph). This is because region scopes are tied to
them. Eventually, when we shift to non-lexical lifetimes, there should
be no need to remember this mapping.

There is one additional wrinkle, actually, that I wanted to hide from
you but duty compels me to mention. In the course of translating
matches, it sometimes happens that certain code (namely guards) gets
executed multiple times. This means that a single lexical scope may
in fact correspond to multiple, disjoint SEME regions. So in fact our
mapping is from one scope to a vector of SEME regions.

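As a hedged illustration (the patterns and `cond` here are
hypothetical), a guard shared by several `|` patterns may be compiled
once per candidate pattern, yielding one SEME region per copy:

```
match (a, b) {
    // `cond(x)` may be tested on several distinct paths through the
    // lowered graph, one for each pattern that can reach this arm
    (Some(x), _) | (_, Some(x)) if cond(x) => use_first(x),
    _ => fallback(),
}
```
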
### Drops

The primary purpose for scopes is to insert drops: while translating
the contents, we also accumulate lvalues that need to be dropped upon
exit from each scope. This is done by calling `schedule_drop`. Once a
drop is scheduled, whenever we branch out we will insert drops of all
those lvalues onto the outgoing edge. Note that we don't know the full
set of scheduled drops up front, and so whenever we exit from the
scope we only drop the values scheduled thus far. For example, consider
the scope S corresponding to this loop:

```
loop {
    let x = ...;
    if cond { break; }
    let y = ...;
}
```

When processing the `let x`, we will add one drop to the scope for
`x`. The break will then insert a drop for `x`. When we process `let
y`, we will add another drop (in fact, to a subscope, but let's ignore
that for now); any later drops would also drop `y`.

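In terms of this module's API, a rough sketch of what happens for the
loop above (the real call sites live elsewhere in the MIR builder;
`loop_body_extent`, `x_lvalue`, and friends are hypothetical names):

```
// on `let x = ...;`: schedule the drop in the enclosing scope
this.schedule_drop(span, loop_body_extent, &x_lvalue, x_ty);
// on `break`: branch out, dropping everything scheduled so far
this.exit_scope(span, loop_extent, block, break_block);
```
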
### Early exit

There are numerous "normal" ways to early exit a scope: `break`,
`continue`, `return` (panics are handled separately). Whenever an
early exit occurs, the method `exit_scope` is called. It is given the
current point in execution where the early exit occurs, as well as the
scope you want to branch to (note that all early exits go to some
enclosing scope). `exit_scope` will record this exit point and
also add all drops.

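For example (a source-level sketch; `cond` is hypothetical), a `break`
that crosses two scopes drops the inner scope's values before the
outer scope's on the exit edge:

```
loop {
    let a = String::new();     // scheduled in the loop-body scope
    {
        let b = String::new(); // scheduled in the inner block scope
        if cond { break; }     // exit edge runs drop(b), then drop(a)
    }
}
```
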
Panics are handled in a similar fashion, except that a panic always
returns out to the `DIVERGE_BLOCK`. To trigger a panic, simply call
`panic(p)` with the current point `p`. Or else you can call
`diverge_cleanup`, which will produce a block that you can branch to
which does the appropriate cleanup and then diverges. `panic(p)`
simply calls `diverge_cleanup()` and adds an edge from `p` to the
result.

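As a sketch of the two entry points (both are defined below; `block`
and `span` stand for the builder's current state):

```
// terminate `block` with a call to the panic lang item; the cleanup
// edge is wired up via `diverge_cleanup` internally
this.panic(block, "explicit panic", span);

// or build (or reuse) the cleanup path directly, for use as the
// unwind/cleanup edge of some other terminator
let cleanup: Option<BasicBlock> = this.diverge_cleanup();
```
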
### Loop scopes

In addition to the normal scope stack, we track a loop scope stack
that contains only loops. It tracks where a `break` or `continue`
should branch to.

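For example (source level; `done` is hypothetical), a labeled `break`
is resolved against this stack by searching for the loop scope whose
extent matches the label:

```
'outer: loop {
    loop {
        if done { break 'outer; } // uses the outer loop's break_block
        continue;                 // uses the inner loop's continue_block
    }
}
```
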
*/

use build::{BlockAnd, BlockAndExtension, Builder, CFG, ScopeAuxiliary};
use rustc::middle::region::{CodeExtent, CodeExtentData};
use rustc::middle::lang_items;
use rustc::ty::subst::{Substs, Subst, VecPerParamSpace};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::mir::repr::*;
use syntax::codemap::{Span, DUMMY_SP};
use syntax::parse::token::intern_and_get_ident;
use rustc::middle::const_val::ConstVal;
use rustc_const_math::ConstInt;

pub struct Scope<'tcx> {
    /// the scope-id within the scope_datas
    id: ScopeId,

    /// the extent of this scope within source code; also stored in
    /// `ScopeAuxiliary`, but kept here for convenience
    extent: CodeExtent,

    /// set of lvalues to drop when exiting this scope. This starts
    /// out empty but grows as variables are declared during the
    /// building process. This is a stack, so we always drop from the
    /// end of the vector (top of the stack) first.
    drops: Vec<DropData<'tcx>>,

    /// A scope may only have one associated free, because:
    ///
    /// 1. We require a `free` to only be scheduled in the scope of
    ///    `EXPR` in `box EXPR`;
    /// 2. It only makes sense to have it translated into the diverge-path.
    ///
    /// This kind of drop will be run *after* all the regular drops
    /// scheduled onto this scope, because drops may have dependencies
    /// on the allocated memory.
    ///
    /// This is expected to go away once `box EXPR` becomes sugar
    /// for the placement protocol and gets desugared in some earlier
    /// stage.
    free: Option<FreeData<'tcx>>,

    /// The cached block for the cleanups-on-diverge path. This block
    /// will just do a RESUME to an appropriate place. It does not
    /// execute any of the drops or the free: each of those has its
    /// own cached block, which will branch to this point.
    cached_block: Option<BasicBlock>
}

struct DropData<'tcx> {
    /// span where drop obligation was incurred (typically where lvalue was declared)
    span: Span,

    /// lvalue to drop
    value: Lvalue<'tcx>,

    /// The cached block for the cleanups-on-diverge path. This block
    /// contains code to run the current drop and all the preceding
    /// drops (i.e. those having lower index in Drop’s Scope drop
    /// array)
    cached_block: Option<BasicBlock>
}

struct FreeData<'tcx> {
    /// span where free obligation was incurred
    span: Span,

    /// Lvalue containing the allocated box.
    value: Lvalue<'tcx>,

    /// type of item for which the box was allocated (i.e. the T in Box<T>).
    item_ty: Ty<'tcx>,

    /// The cached block containing code to run the free. The block will also execute all the
    /// drops in the scope.
    cached_block: Option<BasicBlock>
}

#[derive(Clone, Debug)]
pub struct LoopScope {
    /// Extent of the loop
    pub extent: CodeExtent,
    /// Where the body of the loop begins
    pub continue_block: BasicBlock,
    /// Block to branch into when the loop terminates (either by being
    /// `break`-en out from, or by having its condition become false)
    pub break_block: BasicBlock,
    /// Indicates the reachability of the break_block for this loop
    pub might_break: bool
}

impl<'tcx> Scope<'tcx> {
    /// Invalidate all the cached blocks in the scope.
    ///
    /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing
    /// a larger extent of code.
    fn invalidate_cache(&mut self) {
        self.cached_block = None;
        for dropdata in &mut self.drops {
            dropdata.cached_block = None;
        }
        if let Some(ref mut freedata) = self.free {
            freedata.cached_block = None;
        }
    }

    /// Returns the cached block for this scope.
    ///
    /// Precondition: the caches must be fully filled (i.e. diverge_cleanup is called) in order
    /// for this method to work correctly.
    fn cached_block(&self) -> Option<BasicBlock> {
        if let Some(data) = self.drops.last() {
            Some(data.cached_block.expect("drop cache is not filled"))
        } else if let Some(ref data) = self.free {
            Some(data.cached_block.expect("free cache is not filled"))
        } else {
            None
        }
    }
}

impl<'a,'tcx> Builder<'a,'tcx> {
    // Adding and removing scopes
    // ==========================
    /// Start a loop scope, which tracks where `continue` and `break`
    /// should branch to. See module comment for more details.
    ///
    /// Returns the might_break attribute of the LoopScope used.
    pub fn in_loop_scope<F>(&mut self,
                            loop_block: BasicBlock,
                            break_block: BasicBlock,
                            f: F)
                            -> bool
        where F: FnOnce(&mut Builder<'a, 'tcx>)
    {
        let extent = self.extent_of_innermost_scope();
        let loop_scope = LoopScope {
            extent: extent.clone(),
            continue_block: loop_block,
            break_block: break_block,
            might_break: false
        };
        self.loop_scopes.push(loop_scope);
        f(self);
        let loop_scope = self.loop_scopes.pop().unwrap();
        assert!(loop_scope.extent == extent);
        loop_scope.might_break
    }

    /// Convenience wrapper that pushes a scope and then executes `f`
    /// to build its contents, popping the scope afterwards.
    pub fn in_scope<F, R>(&mut self, extent: CodeExtent, mut block: BasicBlock, f: F) -> BlockAnd<R>
        where F: FnOnce(&mut Builder<'a, 'tcx>, ScopeId) -> BlockAnd<R>
    {
        debug!("in_scope(extent={:?}, block={:?})", extent, block);
        let id = self.push_scope(extent, block);
        let rv = unpack!(block = f(self, id));
        unpack!(block = self.pop_scope(extent, block));
        debug!("in_scope: exiting extent={:?} block={:?}", extent, block);
        block.and(rv)
    }

    /// Push a scope onto the stack. You can then build code in this
    /// scope and call `pop_scope` afterwards. Note that these two
    /// calls must be paired; using `in_scope` as a convenience
    /// wrapper may be preferable.
    pub fn push_scope(&mut self, extent: CodeExtent, entry: BasicBlock) -> ScopeId {
        debug!("push_scope({:?})", extent);
        let parent_id = self.scopes.last().map(|s| s.id);
        let id = ScopeId::new(self.scope_datas.len());
        self.scope_datas.push(ScopeData {
            parent_scope: parent_id,
        });
        self.scopes.push(Scope {
            id: id,
            extent: extent,
            drops: vec![],
            free: None,
            cached_block: None,
        });
        self.scope_auxiliary.vec.push(ScopeAuxiliary {
            extent: extent,
            dom: self.cfg.current_location(entry),
            postdoms: vec![]
        });
        id
    }

    /// Pops a scope, which should have extent `extent`, adding any
    /// drops onto the end of `block` that are needed. This must
    /// match 1-to-1 with `push_scope`.
    pub fn pop_scope(&mut self,
                     extent: CodeExtent,
                     mut block: BasicBlock)
                     -> BlockAnd<()> {
        debug!("pop_scope({:?}, {:?})", extent, block);
        // We need to have `cached_block`s available for all the drops, so we call diverge_cleanup
        // to make sure all the `cached_block`s are filled in.
        self.diverge_cleanup();
        let scope = self.scopes.pop().unwrap();
        assert_eq!(scope.extent, extent);
        unpack!(block = build_scope_drops(&mut self.cfg, &scope, &self.scopes, block));
        self.scope_auxiliary[scope.id]
            .postdoms
            .push(self.cfg.current_location(block));
        block.unit()
    }

    /// Branch out of `block` to `target`, exiting all scopes up to
    /// and including `extent`. This will insert whatever drops are
    /// needed, as well as tracking this exit for the SEME region. See
    /// module comment for details.
    pub fn exit_scope(&mut self,
                      span: Span,
                      extent: CodeExtent,
                      mut block: BasicBlock,
                      target: BasicBlock) {
        debug!("exit_scope(extent={:?}, block={:?}, target={:?})", extent, block, target);
        let scope_count = 1 + self.scopes.iter().rev().position(|scope| scope.extent == extent)
                                                      .unwrap_or_else(|| {
            span_bug!(span, "extent {:?} does not enclose", extent)
        });

        let tmp = self.get_unit_temp();
        for (idx, ref scope) in self.scopes.iter().enumerate().rev().take(scope_count) {
            unpack!(block = build_scope_drops(&mut self.cfg,
                                              scope,
                                              &self.scopes[..idx],
                                              block));
            if let Some(ref free_data) = scope.free {
                let next = self.cfg.start_new_block();
                let free = build_free(self.hir.tcx(), &tmp, free_data, next);
                self.cfg.terminate(block, scope.id, span, free);
                block = next;
            }
            self.scope_auxiliary[scope.id]
                .postdoms
                .push(self.cfg.current_location(block));
        }

        assert!(scope_count < self.scopes.len(),
                "should never use `exit_scope` to pop *ALL* scopes");
        let scope = self.scopes.iter().rev().skip(scope_count)
                                            .next()
                                            .unwrap();
        self.cfg.terminate(block,
                           scope.id,
                           span,
                           TerminatorKind::Goto { target: target });
    }

    // Finding scopes
    // ==============
    /// Finds the loop scope for a given label. This is used for
    /// resolving `break` and `continue`.
    pub fn find_loop_scope(&mut self,
                           span: Span,
                           label: Option<CodeExtent>)
                           -> &mut LoopScope {
        let loop_scopes = &mut self.loop_scopes;
        match label {
            None => {
                // no label? return the innermost loop scope
                loop_scopes.iter_mut().rev().next()
            }
            Some(label) => {
                // otherwise, find the loop scope with the matching extent
                loop_scopes.iter_mut()
                           .rev()
                           .filter(|loop_scope| loop_scope.extent == label)
                           .next()
            }
        }.unwrap_or_else(|| span_bug!(span, "no enclosing loop scope found?"))
    }

    pub fn innermost_scope_id(&self) -> ScopeId {
        self.scopes.last().map(|scope| scope.id).unwrap()
    }

    pub fn extent_of_innermost_scope(&self) -> CodeExtent {
        self.scopes.last().map(|scope| scope.extent).unwrap()
    }

    /// Returns the extent of the scope which should be exited by a
    /// return.
    pub fn extent_of_return_scope(&self) -> CodeExtent {
        // The outermost scope (`scopes[0]`) will be the `CallSiteScope`.
        // We want `scopes[1]`, which is the `ParameterScope`.
        assert!(self.scopes.len() >= 2);
        assert!(match self.hir.tcx().region_maps.code_extent_data(self.scopes[1].extent) {
            CodeExtentData::ParameterScope { .. } => true,
            _ => false,
        });
        self.scopes[1].extent
    }

    // Scheduling drops
    // ================
    /// Indicates that `lvalue` should be dropped on exit from
    /// `extent`.
    pub fn schedule_drop(&mut self,
                         span: Span,
                         extent: CodeExtent,
                         lvalue: &Lvalue<'tcx>,
                         lvalue_ty: Ty<'tcx>) {
        if !self.hir.needs_drop(lvalue_ty) {
            return
        }
        for scope in self.scopes.iter_mut().rev() {
            if scope.extent == extent {
                // No need to invalidate any caches here. The just-scheduled drop will branch into
                // the drop that comes before it in the vector.
                scope.drops.push(DropData {
                    span: span,
                    value: lvalue.clone(),
                    cached_block: None
                });
                return;
            } else {
                // We must invalidate all the cached_blocks leading up to the scope we’re
                // looking for, because all of the blocks in the chain will become incorrect.
                scope.invalidate_cache()
            }
        }
        span_bug!(span, "extent {:?} not in scope to drop {:?}", extent, lvalue);
    }

    /// Schedule dropping of a not-yet-fully-initialised box.
    ///
    /// This cleanup will only be translated into an unwind branch.
    /// The extent should be for the `EXPR` inside `box EXPR`.
    /// There may only be one “free” scheduled in any given scope.
    pub fn schedule_box_free(&mut self,
                             span: Span,
                             extent: CodeExtent,
                             value: &Lvalue<'tcx>,
                             item_ty: Ty<'tcx>) {
        for scope in self.scopes.iter_mut().rev() {
            if scope.extent == extent {
                assert!(scope.free.is_none(), "scope already has a scheduled free!");
                // We also must invalidate the caches in the scope for which the free is scheduled
                // because the drops must branch into the free we schedule here.
                scope.invalidate_cache();
                scope.free = Some(FreeData {
                    span: span,
                    value: value.clone(),
                    item_ty: item_ty,
                    cached_block: None
                });
                return;
            } else {
                // We must invalidate all the cached_blocks leading up to the scope we’re looking
                // for, because otherwise some/most of the blocks in the chain will become
                // incorrect.
                scope.invalidate_cache();
            }
        }
        span_bug!(span, "extent {:?} not in scope to free {:?}", extent, value);
    }

    // Other
    // =====
    /// Creates a path that performs all required cleanup for unwinding.
    ///
    /// This path terminates in Resume. Returns the start of the path.
    /// See module comment for more details. None indicates there’s no
    /// cleanup to do at this point.
    pub fn diverge_cleanup(&mut self) -> Option<BasicBlock> {
        if self.scopes.iter().all(|scope| scope.drops.is_empty() && scope.free.is_none()) {
            return None;
        }
        assert!(!self.scopes.is_empty()); // or `all` above would be true

        let unit_temp = self.get_unit_temp();
        let Builder { ref mut hir, ref mut cfg, ref mut scopes,
                      ref mut cached_resume_block, .. } = *self;

        // Build up the drops in **reverse** order. The end result will
        // look like:
        //
        //    scopes[n] -> scopes[n-1] -> ... -> scopes[0]
        //
        // However, we build this in **reverse order**. That is, we
        // process scopes[0], then scopes[1], etc, pointing each one at
        // the result generated from the one before. Along the way, we
        // store caches. If everything is cached, we'll just walk right
        // to left reading the cached results but never create anything.

        // To start, create the resume terminator.
        let mut target = if let Some(target) = *cached_resume_block {
            target
        } else {
            let resumeblk = cfg.start_new_cleanup_block();
            cfg.terminate(resumeblk, scopes[0].id, self.fn_span, TerminatorKind::Resume);
            *cached_resume_block = Some(resumeblk);
            resumeblk
        };

        for scope in scopes {
            target = build_diverge_scope(hir.tcx(), cfg, &unit_temp, scope, target);
        }

        Some(target)
    }

    /// Utility function for *non*-scope code to build their own drops
    pub fn build_drop(&mut self,
                      block: BasicBlock,
                      span: Span,
                      value: Lvalue<'tcx>)
                      -> BlockAnd<()> {
        let scope_id = self.innermost_scope_id();
        let next_target = self.cfg.start_new_block();
        let diverge_target = self.diverge_cleanup();
        self.cfg.terminate(block,
                           scope_id,
                           span,
                           TerminatorKind::Drop {
                               value: value,
                               target: next_target,
                               unwind: diverge_target,
                           });
        next_target.unit()
    }

    // Panicking
    // =========
    // FIXME: should be moved into their own module
    pub fn panic_bounds_check(&mut self,
                              block: BasicBlock,
                              index: Operand<'tcx>,
                              len: Operand<'tcx>,
                              span: Span) {
        // fn(&(filename: &'static str, line: u32), index: usize, length: usize) -> !
        let region = ty::ReStatic; // FIXME(mir-borrowck): use a better region?
        let func = self.lang_function(lang_items::PanicBoundsCheckFnLangItem);
        let args = self.hir.tcx().replace_late_bound_regions(&func.ty.fn_args(), |_| region).0;

        let ref_ty = args[0];
        let tup_ty = if let ty::TyRef(_, tyandmut) = ref_ty.sty {
            tyandmut.ty
        } else {
            span_bug!(span, "unexpected panic_bounds_check type: {:?}", func.ty);
        };

        let (tuple, tuple_ref) = (self.temp(tup_ty), self.temp(ref_ty));
        let (file, line) = self.span_to_fileline_args(span);
        let elems = vec![Operand::Constant(file), Operand::Constant(line)];
        let scope_id = self.innermost_scope_id();
        // FIXME: We should have this as a constant, rather than a stack variable (to not pollute
        // icache with cold branch code), however to achieve that we either have to rely on rvalue
        // promotion or have some way, in MIR, to create constants.
        self.cfg.push_assign(block, scope_id, span, &tuple, // tuple = (file_arg, line_arg);
                             Rvalue::Aggregate(AggregateKind::Tuple, elems));
        // FIXME: is this region really correct here?
        self.cfg.push_assign(block, scope_id, span, &tuple_ref, // tuple_ref = &tuple;
                             Rvalue::Ref(region, BorrowKind::Shared, tuple));
        let cleanup = self.diverge_cleanup();
        self.cfg.terminate(block, scope_id, span, TerminatorKind::Call {
            func: Operand::Constant(func),
            args: vec![Operand::Consume(tuple_ref), index, len],
            destination: None,
            cleanup: cleanup,
        });
    }

    /// Create diverge cleanup and branch to it from `block`.
    pub fn panic(&mut self, block: BasicBlock, msg: &'static str, span: Span) {
        // fn(&(msg: &'static str, filename: &'static str, line: u32)) -> !
        let region = ty::ReStatic; // FIXME(mir-borrowck): use a better region?
        let func = self.lang_function(lang_items::PanicFnLangItem);
        let args = self.hir.tcx().replace_late_bound_regions(&func.ty.fn_args(), |_| region).0;

        let ref_ty = args[0];
        let tup_ty = if let ty::TyRef(_, tyandmut) = ref_ty.sty {
            tyandmut.ty
        } else {
            span_bug!(span, "unexpected panic type: {:?}", func.ty);
        };

        let (tuple, tuple_ref) = (self.temp(tup_ty), self.temp(ref_ty));
        let (file, line) = self.span_to_fileline_args(span);
        let message = Constant {
            span: span,
            ty: self.hir.tcx().mk_static_str(),
            literal: self.hir.str_literal(intern_and_get_ident(msg))
        };
        let elems = vec![Operand::Constant(message),
                         Operand::Constant(file),
                         Operand::Constant(line)];
        let scope_id = self.innermost_scope_id();
        // FIXME: We should have this as a constant, rather than a stack variable (to not pollute
        // icache with cold branch code), however to achieve that we either have to rely on rvalue
        // promotion or have some way, in MIR, to create constants.
        self.cfg.push_assign(block, scope_id, span, &tuple, // [1]
                             Rvalue::Aggregate(AggregateKind::Tuple, elems));
        // [1] tuple = (message_arg, file_arg, line_arg);
        // FIXME: is this region really correct here?
        self.cfg.push_assign(block, scope_id, span, &tuple_ref, // tuple_ref = &tuple;
                             Rvalue::Ref(region, BorrowKind::Shared, tuple));
        let cleanup = self.diverge_cleanup();
        self.cfg.terminate(block, scope_id, span, TerminatorKind::Call {
            func: Operand::Constant(func),
            args: vec![Operand::Consume(tuple_ref)],
            cleanup: cleanup,
            destination: None,
        });
    }

    fn lang_function(&mut self, lang_item: lang_items::LangItem) -> Constant<'tcx> {
        let funcdid = match self.hir.tcx().lang_items.require(lang_item) {
            Ok(d) => d,
            Err(m) => {
                self.hir.tcx().sess.fatal(&m)
            }
        };
        Constant {
            span: DUMMY_SP,
            ty: self.hir.tcx().lookup_item_type(funcdid).ty,
            literal: Literal::Item {
                def_id: funcdid,
                substs: self.hir.tcx().mk_substs(Substs::empty())
            }
        }
    }

    fn span_to_fileline_args(&mut self, span: Span) -> (Constant<'tcx>, Constant<'tcx>) {
        let span_lines = self.hir.tcx().sess.codemap().lookup_char_pos(span.lo);
        (Constant {
            span: span,
            ty: self.hir.tcx().mk_static_str(),
            literal: self.hir.str_literal(intern_and_get_ident(&span_lines.file.name))
        }, Constant {
            span: span,
            ty: self.hir.tcx().types.u32,
            literal: Literal::Value {
                value: ConstVal::Integral(ConstInt::U32(span_lines.line as u32)),
            },
        })
    }

}

/// Builds drops for pop_scope and exit_scope.
fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
                           scope: &Scope<'tcx>,
                           earlier_scopes: &[Scope<'tcx>],
                           mut block: BasicBlock)
                           -> BlockAnd<()> {
    let mut iter = scope.drops.iter().rev().peekable();
    while let Some(drop_data) = iter.next() {
        // Try to find the next block with its cached block for us to diverge into in case the
        // drop panics.
        let on_diverge = iter.peek().iter().flat_map(|dd| dd.cached_block.into_iter()).next();
        // If there’s no `cached_block` within the current scope, we must look for one in the
        // enclosing scope.
        let on_diverge = on_diverge.or_else(|| {
            earlier_scopes.iter().rev().flat_map(|s| s.cached_block()).next()
        });
        let next = cfg.start_new_block();
        cfg.terminate(block, scope.id, drop_data.span, TerminatorKind::Drop {
            value: drop_data.value.clone(),
            target: next,
            unwind: on_diverge
        });
        block = next;
    }
    block.unit()
}

fn build_diverge_scope<'tcx>(tcx: &TyCtxt<'tcx>,
                             cfg: &mut CFG<'tcx>,
                             unit_temp: &Lvalue<'tcx>,
                             scope: &mut Scope<'tcx>,
                             mut target: BasicBlock)
                             -> BasicBlock
{
    // Build up the drops in **reverse** order. The end result will
    // look like:
    //
    //    [drops[n]] -...-> [drops[0]] -> [Free] -> [target]
    //    |                                   |
    //    +-----------------------------------+
    //              code for scope
    //
    // The code in this function reads from right to left. At each
    // point, we check for cached blocks representing the
    // remainder. If everything is cached, we'll just walk right to
    // left reading the cached results but never create anything.

    // First, build up any free (the rightmost element above).
    if let Some(ref mut free_data) = scope.free {
        target = if let Some(cached_block) = free_data.cached_block {
            cached_block
        } else {
            let into = cfg.start_new_cleanup_block();
            cfg.terminate(into,
                          scope.id,
                          free_data.span,
                          build_free(tcx, unit_temp, free_data, target));
            free_data.cached_block = Some(into);
            into
        };
    }

    // Next, build up the drops. Here we iterate the vector in
    // *forward* order, so that we generate drops[0] first (right to
    // left in diagram above).
    for drop_data in &mut scope.drops {
        target = if let Some(cached_block) = drop_data.cached_block {
            cached_block
        } else {
            let block = cfg.start_new_cleanup_block();
            cfg.terminate(block,
                          scope.id,
                          drop_data.span,
                          TerminatorKind::Drop {
                              value: drop_data.value.clone(),
                              target: target,
                              unwind: None
                          });
            drop_data.cached_block = Some(block);
            block
        };
    }

    target
}

fn build_free<'tcx>(tcx: &TyCtxt<'tcx>,
                    unit_temp: &Lvalue<'tcx>,
                    data: &FreeData<'tcx>,
                    target: BasicBlock)
                    -> TerminatorKind<'tcx> {
    let free_func = tcx.lang_items.require(lang_items::BoxFreeFnLangItem)
                       .unwrap_or_else(|e| tcx.sess.fatal(&e));
    let substs = tcx.mk_substs(Substs::new(
        VecPerParamSpace::new(vec![], vec![], vec![data.item_ty]),
        VecPerParamSpace::new(vec![], vec![], vec![])
    ));
    TerminatorKind::Call {
        func: Operand::Constant(Constant {
            span: data.span,
            ty: tcx.lookup_item_type(free_func).ty.subst(tcx, substs),
            literal: Literal::Item {
                def_id: free_func,
                substs: substs
            }
        }),
        args: vec![Operand::Consume(data.value.clone())],
        destination: Some((unit_temp.clone(), target)),
        cleanup: None
    }
}