// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! ## The Cleanup module
//!
//! The cleanup module tracks what values need to be cleaned up as scopes
//! are exited, either via panic or just normal control flow. The basic
//! idea is that the function context maintains a stack of cleanup scopes
//! that are pushed/popped as we traverse the AST tree. There is typically
//! at least one cleanup scope per AST node; some AST nodes may introduce
//! additional temporary scopes.
//!
//! Cleanup items can be scheduled into any of the scopes on the stack.
//! Typically, when a scope is popped, we will also generate the code for
//! each of its cleanups at that time. This corresponds to a normal exit
//! from a block (for example, an expression completing evaluation
//! successfully without panic). However, it is also possible to pop a
//! block *without* executing its cleanups; this is typically used to
//! guard intermediate values that must be cleaned up on panic, but not
//! if everything goes right. See the section on custom scopes below for
//! more details.
//!
//! Cleanup scopes come in three kinds:
//!
//! - **AST scopes:** each AST node in a function body has a corresponding
//!   AST scope. We push the AST scope when we start generating code for an
//!   AST node and pop it once the AST node has been fully generated.
//! - **Loop scopes:** loops have an additional cleanup scope. Cleanups are
//!   never scheduled into loop scopes; instead, they are used to record the
//!   basic blocks that we should branch to when a `continue` or `break`
//!   statement is encountered.
//! - **Custom scopes:** custom scopes are typically used to ensure cleanup
//!   of intermediate values.
//!
//! ### When to schedule cleanup
//!
//! Although the cleanup system is intended to *feel* fairly declarative,
//! it's still important to time calls to `schedule_clean()` correctly.
//! Basically, you should not schedule cleanup for memory until it has
//! been initialized, because if an unwind should occur before the memory
//! is fully initialized, then the cleanup will run and try to free or
//! drop uninitialized memory. If the initialization itself produces
//! byproducts that need to be freed, then you should use temporary custom
//! scopes to ensure that those byproducts will get freed on unwind. For
//! example, an expression like `box foo()` will first allocate a box in
//! the heap and then call `foo()` -- if `foo()` should panic, this box
//! needs to be *shallowly* freed.
//!
//! ### Long-distance jumps
//!
//! In addition to popping a scope, which corresponds to normal control
//! flow exiting the scope, we may also *jump out* of a scope into some
//! earlier scope on the stack. This can occur in response to a `return`,
//! `break`, or `continue` statement, but also in response to panic. In
//! any of these cases, we will generate a series of cleanup blocks for
//! each of the scopes that is exited. So, if the stack contains scopes A
//! ... Z, and we break out of a loop whose corresponding cleanup scope is
//! X, we would generate cleanup blocks for the cleanups in X, Y, and Z.
//! After cleanup is done we would branch to the exit point for scope X.
//! But if panic should occur, we would generate cleanups for all the
//! scopes from A to Z and then resume the unwind process afterwards.
//!
//! To avoid generating tons of code, we cache the cleanup blocks that we
//! create for breaks, returns, unwinds, and other jumps. Whenever a new
//! cleanup is scheduled, though, we must clear these cached blocks. A
//! possible improvement would be to keep the cached blocks but simply
//! generate a new block which performs the additional cleanup and then
//! branches to the existing cached blocks.
//!
//! ### AST and loop cleanup scopes
//!
//! AST cleanup scopes are pushed when we begin and end processing an AST
//! node. They are used to house cleanups related to rvalue temporaries
//! that get referenced (e.g., due to an expression like `&Foo()`).
//! Whenever an AST scope is popped, we always trans all the cleanups,
//! adding the cleanup code after the postdominator of the AST node.
//!
//! AST nodes that represent breakable loops also push a loop scope; the
//! loop scope never has any actual cleanups, it's just used to point to
//! the basic blocks where control should flow after a "continue" or
//! "break" statement. Popping a loop scope never generates code.
//!
//! ### Custom cleanup scopes
//!
//! Custom cleanup scopes are used for a variety of purposes. The most
//! common, though, is to handle temporary byproducts, where cleanup only
//! needs to occur on panic. The general strategy is to push a custom
//! cleanup scope, schedule *shallow* cleanups into the custom scope, and
//! then pop the custom scope (without transing the cleanups) when
//! execution succeeds normally. This way the cleanups are only trans'd on
//! unwind, and only up until the point where execution succeeded, at
//! which time the complete value should be stored in an lvalue or some
//! other place where normal cleanup applies.
//!
//! To spell it out, here is an example. Imagine an expression `box expr`.
//! We would basically:
//!
//! 1. Push a custom cleanup scope C.
//! 2. Allocate the box.
//! 3. Schedule a shallow free in the scope C.
//! 4. Trans `expr` into the box.
//! 5. Pop the scope C.
//! 6. Return the box as an rvalue.
//!
//! This way, if a panic occurs while transing `expr`, the custom
//! cleanup scope C is still on the stack, and hence the box will be
//! freed. The trans code for `expr` itself is responsible for freeing
//! any other byproducts that may be in play.
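//!
//! For illustration, here is a hedged sketch of those six steps in terms
//! of the methods defined below. `alloc_box` and `trans_into` are
//! hypothetical stand-ins for the real allocation and translation calls,
//! and `fcx`, `bcx`, `expr`, and `contents_ty` are values the surrounding
//! trans code would already have:
//!
//! ```ignore
//! let custom_scope = fcx.push_custom_cleanup_scope();        // step 1
//! let llbox = alloc_box(bcx, contents_ty);                   // step 2
//! fcx.schedule_free_value(CustomScope(custom_scope),         // step 3
//!                         llbox, HeapExchange, contents_ty);
//! let bcx = trans_into(bcx, expr, llbox);                    // step 4
//! fcx.pop_custom_cleanup_scope(custom_scope);                // step 5 (no cleanup code emitted)
//! // ... return llbox as an rvalue ...                       // step 6
//! ```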

pub use self::ScopeId::*;
pub use self::CleanupScopeKind::*;
pub use self::EarlyExitLabel::*;
pub use self::Heap::*;

use llvm::{BasicBlockRef, ValueRef};
use trans::base;
use trans::build;
use trans::callee;
use trans::common;
use trans::common::{Block, FunctionContext, ExprId, NodeIdAndSpan};
use trans::debuginfo::{DebugLoc, ToDebugLoc};
use trans::declare;
use trans::glue;
use middle::region;
use trans::type_::Type;
use middle::ty::{self, Ty};
use std::fmt;
use syntax::ast;

pub struct CleanupScope<'blk, 'tcx: 'blk> {
    // The kind of this cleanup scope. A `CustomScopeKind` scope is a
    // *temporary scope* that is pushed during trans to clean up
    // miscellaneous garbage that trans may generate whose lifetime is a
    // subset of some expression. See the module doc for more details.
    kind: CleanupScopeKind<'blk, 'tcx>,

    // Cleanups to run upon scope exit.
    cleanups: Vec<CleanupObj<'tcx>>,

    // The debug location any drop calls generated for this scope will be
    // associated with.
    debug_loc: DebugLoc,

    cached_early_exits: Vec<CachedEarlyExit>,
    cached_landing_pad: Option<BasicBlockRef>,
}

#[derive(Copy, Clone, Debug)]
pub struct CustomScopeIndex {
    index: usize
}

pub const EXIT_BREAK: usize = 0;
pub const EXIT_LOOP: usize = 1;
pub const EXIT_MAX: usize = 2;

pub enum CleanupScopeKind<'blk, 'tcx: 'blk> {
    CustomScopeKind,
    AstScopeKind(ast::NodeId),
    LoopScopeKind(ast::NodeId, [Block<'blk, 'tcx>; EXIT_MAX])
}

impl<'blk, 'tcx: 'blk> fmt::Debug for CleanupScopeKind<'blk, 'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            CustomScopeKind => write!(f, "CustomScopeKind"),
            AstScopeKind(nid) => write!(f, "AstScopeKind({})", nid),
            LoopScopeKind(nid, ref blks) => {
                try!(write!(f, "LoopScopeKind({}, [", nid));
                for blk in blks {
                    try!(write!(f, "{:p}, ", blk));
                }
                write!(f, "])")
            }
        }
    }
}

#[derive(Copy, Clone, PartialEq, Debug)]
pub enum EarlyExitLabel {
    UnwindExit,
    ReturnExit,
    LoopExit(ast::NodeId, usize)
}

#[derive(Copy, Clone)]
pub struct CachedEarlyExit {
    label: EarlyExitLabel,
    cleanup_block: BasicBlockRef,
}

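/// A cleanup action that has been scheduled into some scope. The `trans`
/// method emits the cleanup code into `bcx` and returns the block in which
/// translation should continue.
///
/// For illustration, a hedged sketch of a hypothetical no-op implementor
/// (`NoopCleanup` is not part of this module; the real implementors, such
/// as `DropValue`, appear later in this file):
///
/// ```ignore
/// struct NoopCleanup;
///
/// impl<'tcx> Cleanup<'tcx> for NoopCleanup {
///     fn must_unwind(&self) -> bool { false }      // never forces a landing pad
///     fn clean_on_unwind(&self) -> bool { false }  // skipped on unwind paths
///     fn is_lifetime_end(&self) -> bool { false }  // not a lifetime marker
///     fn trans<'blk>(&self,
///                    bcx: Block<'blk, 'tcx>,
///                    _debug_loc: DebugLoc)
///                    -> Block<'blk, 'tcx> {
///         bcx // emits no code
///     }
/// }
/// ```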
pub trait Cleanup<'tcx> {
    fn must_unwind(&self) -> bool;
    fn clean_on_unwind(&self) -> bool;
    fn is_lifetime_end(&self) -> bool;
    fn trans<'blk>(&self,
                   bcx: Block<'blk, 'tcx>,
                   debug_loc: DebugLoc)
                   -> Block<'blk, 'tcx>;
}

pub type CleanupObj<'tcx> = Box<Cleanup<'tcx>+'tcx>;

#[derive(Copy, Clone, Debug)]
pub enum ScopeId {
    AstScope(ast::NodeId),
    CustomScope(CustomScopeIndex)
}

impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
    /// Invoked when we start to trans the code contained within a new cleanup scope.
    fn push_ast_cleanup_scope(&self, debug_loc: NodeIdAndSpan) {
        debug!("push_ast_cleanup_scope({})",
               self.ccx.tcx().map.node_to_string(debug_loc.id));

        // FIXME(#2202) -- currently closure bodies have a parent
        // region, which messes up the assertion below, since there
        // are no cleanup scopes on the stack at the start of
        // trans'ing a closure body. I think though that this should
        // eventually be fixed by closure bodies not having a parent
        // region, though that's a touch unclear, and it might also be
        // better just to narrow this assertion more (i.e., by
        // excluding id's that correspond to closure bodies only). For
        // now we just say that if there is already an AST scope on the stack,
        // this new AST scope had better be its immediate child.
        let top_scope = self.top_ast_scope();
        if top_scope.is_some() {
            assert!((self.ccx
                     .tcx()
                     .region_maps
                     .opt_encl_scope(region::CodeExtent::from_node_id(debug_loc.id))
                     .map(|s|s.node_id()) == top_scope)
                    ||
                    (self.ccx
                     .tcx()
                     .region_maps
                     .opt_encl_scope(region::CodeExtent::DestructionScope(debug_loc.id))
                     .map(|s|s.node_id()) == top_scope));
        }

        self.push_scope(CleanupScope::new(AstScopeKind(debug_loc.id),
                                          debug_loc.debug_loc()));
    }

    fn push_loop_cleanup_scope(&self,
                               id: ast::NodeId,
                               exits: [Block<'blk, 'tcx>; EXIT_MAX]) {
        debug!("push_loop_cleanup_scope({})",
               self.ccx.tcx().map.node_to_string(id));
        assert_eq!(Some(id), self.top_ast_scope());

        // Just copy the debuginfo source location from the enclosing scope
        let debug_loc = self.scopes
                            .borrow()
                            .last()
                            .unwrap()
                            .debug_loc;

        self.push_scope(CleanupScope::new(LoopScopeKind(id, exits), debug_loc));
    }

    fn push_custom_cleanup_scope(&self) -> CustomScopeIndex {
        let index = self.scopes_len();
        debug!("push_custom_cleanup_scope(): {}", index);

        // Just copy the debuginfo source location from the enclosing scope
        let debug_loc = self.scopes
                            .borrow()
                            .last()
                            .map(|opt_scope| opt_scope.debug_loc)
                            .unwrap_or(DebugLoc::None);

        self.push_scope(CleanupScope::new(CustomScopeKind, debug_loc));
        CustomScopeIndex { index: index }
    }

    fn push_custom_cleanup_scope_with_debug_loc(&self,
                                                debug_loc: NodeIdAndSpan)
                                                -> CustomScopeIndex {
        let index = self.scopes_len();
        debug!("push_custom_cleanup_scope_with_debug_loc(): {}", index);

        self.push_scope(CleanupScope::new(CustomScopeKind,
                                          debug_loc.debug_loc()));
        CustomScopeIndex { index: index }
    }

    /// Removes the cleanup scope for id `cleanup_scope`, which must be at the top of the
    /// cleanup stack, and generates the code to do its cleanups for normal exit.
    fn pop_and_trans_ast_cleanup_scope(&self,
                                       bcx: Block<'blk, 'tcx>,
                                       cleanup_scope: ast::NodeId)
                                       -> Block<'blk, 'tcx> {
        debug!("pop_and_trans_ast_cleanup_scope({})",
               self.ccx.tcx().map.node_to_string(cleanup_scope));

        assert!(self.top_scope(|s| s.kind.is_ast_with_id(cleanup_scope)));

        let scope = self.pop_scope();
        self.trans_scope_cleanups(bcx, &scope)
    }

    /// Removes the loop cleanup scope for id `cleanup_scope`, which must be at the top of the
    /// cleanup stack. Does not generate any cleanup code, since loop scopes should exit by
    /// branching to a block generated by `normal_exit_block`.
    fn pop_loop_cleanup_scope(&self,
                              cleanup_scope: ast::NodeId) {
        debug!("pop_loop_cleanup_scope({})",
               self.ccx.tcx().map.node_to_string(cleanup_scope));

        assert!(self.top_scope(|s| s.kind.is_loop_with_id(cleanup_scope)));

        let _ = self.pop_scope();
    }

    /// Removes the top cleanup scope from the stack without executing its cleanups. The top
    /// cleanup scope must be the temporary scope `custom_scope`.
    fn pop_custom_cleanup_scope(&self,
                                custom_scope: CustomScopeIndex) {
        debug!("pop_custom_cleanup_scope({})", custom_scope.index);
        assert!(self.is_valid_to_pop_custom_scope(custom_scope));
        let _ = self.pop_scope();
    }

    /// Removes the top cleanup scope from the stack, which must be a temporary scope, and
    /// generates the code to do its cleanups for normal exit.
    fn pop_and_trans_custom_cleanup_scope(&self,
                                          bcx: Block<'blk, 'tcx>,
                                          custom_scope: CustomScopeIndex)
                                          -> Block<'blk, 'tcx> {
        debug!("pop_and_trans_custom_cleanup_scope({:?})", custom_scope);
        assert!(self.is_valid_to_pop_custom_scope(custom_scope));

        let scope = self.pop_scope();
        self.trans_scope_cleanups(bcx, &scope)
    }

    /// Returns the id of the top-most loop scope
    fn top_loop_scope(&self) -> ast::NodeId {
        for scope in self.scopes.borrow().iter().rev() {
            if let LoopScopeKind(id, _) = scope.kind {
                return id;
            }
        }
        self.ccx.sess().bug("no loop scope found");
    }

    /// Returns a block to branch to which will perform all pending cleanups and then
    /// break/continue (depending on `exit`) out of the loop with id `cleanup_scope`
    fn normal_exit_block(&'blk self,
                         cleanup_scope: ast::NodeId,
                         exit: usize) -> BasicBlockRef {
        self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit))
    }

    /// Returns a block to branch to which will perform all pending cleanups and then return
    /// from this function
    fn return_exit_block(&'blk self) -> BasicBlockRef {
        self.trans_cleanups_to_exit_scope(ReturnExit)
    }
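
    // For illustration, a hedged sketch of how a `break` out of the loop
    // with id `loop_id` might be lowered in terms of the methods above
    // (`loop_id` and `bcx` stand in for values the caller already has):
    //
    //     let llbb = bcx.fcx.normal_exit_block(loop_id, EXIT_BREAK);
    //     build::Br(bcx, llbb, DebugLoc::None);
    //
    // All pending cleanups between the current scope and the loop scope
    // run before control reaches the loop's break target.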
369 | ||
370 | fn schedule_lifetime_end(&self, | |
371 | cleanup_scope: ScopeId, | |
372 | val: ValueRef) { | |
373 | let drop = box LifetimeEnd { | |
374 | ptr: val, | |
375 | }; | |
376 | ||
377 | debug!("schedule_lifetime_end({:?}, val={})", | |
378 | cleanup_scope, | |
379 | self.ccx.tn().val_to_string(val)); | |
380 | ||
381 | self.schedule_clean(cleanup_scope, drop as CleanupObj); | |
382 | } | |
383 | ||
    /// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty`
    fn schedule_drop_mem(&self,
                         cleanup_scope: ScopeId,
                         val: ValueRef,
                         ty: Ty<'tcx>) {
        if !self.type_needs_drop(ty) { return; }
        let drop = box DropValue {
            is_immediate: false,
            must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
            val: val,
            ty: ty,
            fill_on_drop: false,
            skip_dtor: false,
        };

        debug!("schedule_drop_mem({:?}, val={}, ty={:?}) fill_on_drop={} skip_dtor={}",
               cleanup_scope,
               self.ccx.tn().val_to_string(val),
               ty,
               drop.fill_on_drop,
               drop.skip_dtor);

        self.schedule_clean(cleanup_scope, drop as CleanupObj);
    }

    /// Schedules a (deep) drop and filling of `val`, which is a pointer to an instance of `ty`
    fn schedule_drop_and_fill_mem(&self,
                                  cleanup_scope: ScopeId,
                                  val: ValueRef,
                                  ty: Ty<'tcx>) {
        if !self.type_needs_drop(ty) { return; }

        let drop = box DropValue {
            is_immediate: false,
            must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
            val: val,
            ty: ty,
            fill_on_drop: true,
            skip_dtor: false,
        };

        debug!("schedule_drop_and_fill_mem({:?}, val={}, ty={:?}, fill_on_drop={}, skip_dtor={})",
               cleanup_scope,
               self.ccx.tn().val_to_string(val),
               ty,
               drop.fill_on_drop,
               drop.skip_dtor);

        self.schedule_clean(cleanup_scope, drop as CleanupObj);
    }

    /// Issue #23611: Schedules a (deep) drop of the contents of
    /// `val`, which is a pointer to an instance of struct/enum type
    /// `ty`. The scheduled code handles extracting the discriminant
    /// and dropping the contents associated with that variant
    /// *without* executing any associated drop implementation.
    fn schedule_drop_adt_contents(&self,
                                  cleanup_scope: ScopeId,
                                  val: ValueRef,
                                  ty: Ty<'tcx>) {
        // `if` below could be "!contents_needs_drop"; skipping drop
        // is just an optimization, so sound to be conservative.
        if !self.type_needs_drop(ty) { return; }

        let drop = box DropValue {
            is_immediate: false,
            must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
            val: val,
            ty: ty,
            fill_on_drop: false,
            skip_dtor: true,
        };

        debug!("schedule_drop_adt_contents({:?}, val={}, ty={:?}) fill_on_drop={} skip_dtor={}",
               cleanup_scope,
               self.ccx.tn().val_to_string(val),
               ty,
               drop.fill_on_drop,
               drop.skip_dtor);

        self.schedule_clean(cleanup_scope, drop as CleanupObj);
    }

    /// Schedules a (deep) drop of `val`, which is an instance of `ty`
    fn schedule_drop_immediate(&self,
                               cleanup_scope: ScopeId,
                               val: ValueRef,
                               ty: Ty<'tcx>) {
        if !self.type_needs_drop(ty) { return; }
        let drop = box DropValue {
            is_immediate: true,
            must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
            val: val,
            ty: ty,
            fill_on_drop: false,
            skip_dtor: false,
        };

        debug!("schedule_drop_immediate({:?}, val={}, ty={:?}) fill_on_drop={} skip_dtor={}",
               cleanup_scope,
               self.ccx.tn().val_to_string(val),
               ty,
               drop.fill_on_drop,
               drop.skip_dtor);

        self.schedule_clean(cleanup_scope, drop as CleanupObj);
    }

    /// Schedules a call to `free(val)`. Note that this is a shallow operation.
    fn schedule_free_value(&self,
                           cleanup_scope: ScopeId,
                           val: ValueRef,
                           heap: Heap,
                           content_ty: Ty<'tcx>) {
        let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty };

        debug!("schedule_free_value({:?}, val={}, heap={:?})",
               cleanup_scope,
               self.ccx.tn().val_to_string(val),
               heap);

        self.schedule_clean(cleanup_scope, drop as CleanupObj);
    }

    fn schedule_clean(&self,
                      cleanup_scope: ScopeId,
                      cleanup: CleanupObj<'tcx>) {
        match cleanup_scope {
            AstScope(id) => self.schedule_clean_in_ast_scope(id, cleanup),
            CustomScope(id) => self.schedule_clean_in_custom_scope(id, cleanup),
        }
    }

    /// Schedules a cleanup to occur upon exit from the AST scope with id `cleanup_scope`. Any
    /// cached exits through this scope or an enclosing scope are invalidated, since the new
    /// cleanup must run along those paths.
    fn schedule_clean_in_ast_scope(&self,
                                   cleanup_scope: ast::NodeId,
                                   cleanup: CleanupObj<'tcx>) {
        debug!("schedule_clean_in_ast_scope(cleanup_scope={})",
               cleanup_scope);

        for scope in self.scopes.borrow_mut().iter_mut().rev() {
            if scope.kind.is_ast_with_id(cleanup_scope) {
                scope.cleanups.push(cleanup);
                scope.clear_cached_exits();
                return;
            } else {
                // will be adding a cleanup to some enclosing scope
                scope.clear_cached_exits();
            }
        }

        self.ccx.sess().bug(
            &format!("no cleanup scope {} found",
                     self.ccx.tcx().map.node_to_string(cleanup_scope)));
    }

    /// Schedules a cleanup to occur upon exit from the custom scope `custom_scope`, which must
    /// be a valid temporary scope.
    fn schedule_clean_in_custom_scope(&self,
                                      custom_scope: CustomScopeIndex,
                                      cleanup: CleanupObj<'tcx>) {
        debug!("schedule_clean_in_custom_scope(custom_scope={})",
               custom_scope.index);

        assert!(self.is_valid_custom_scope(custom_scope));

        let mut scopes = self.scopes.borrow_mut();
        let scope = &mut (*scopes)[custom_scope.index];
        scope.cleanups.push(cleanup);
        scope.clear_cached_exits();
    }

    /// Returns true if there are pending cleanups that should execute on panic.
    fn needs_invoke(&self) -> bool {
        self.scopes.borrow().iter().rev().any(|s| s.needs_invoke())
    }

    /// Returns a basic block to branch to in the event of a panic. This block will run the
    /// panic cleanups and eventually invoke the LLVM `Resume` instruction.
    fn get_landing_pad(&'blk self) -> BasicBlockRef {
        let _icx = base::push_ctxt("get_landing_pad");

        debug!("get_landing_pad");

        let orig_scopes_len = self.scopes_len();
        assert!(orig_scopes_len > 0);

        // Remove any scopes that do not have cleanups on panic:
        let mut popped_scopes = vec!();
        while !self.top_scope(|s| s.needs_invoke()) {
            debug!("top scope does not need invoke");
            popped_scopes.push(self.pop_scope());
        }

        // Check for an existing landing pad in the new topmost scope:
        let llbb = self.get_or_create_landing_pad();

        // Push the scopes we removed back on:
        loop {
            match popped_scopes.pop() {
                Some(scope) => self.push_scope(scope),
                None => break
            }
        }

        assert_eq!(self.scopes_len(), orig_scopes_len);

        return llbb;
    }
}
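
// For illustration, a hedged sketch of how call translation might consult
// the two methods above when choosing between `call` and `invoke`. The
// instruction-emission steps are elided, since their exact signatures live
// in `trans::build`:
//
//     if bcx.fcx.needs_invoke() {
//         let landing_pad = bcx.fcx.get_landing_pad();
//         // ... emit an `invoke`, unwinding to `landing_pad` ...
//     } else {
//         // ... emit a plain `call` ...
//     }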
596 | ||
597 | impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { | |
598 | /// Returns the id of the current top-most AST scope, if any. | |
599 | fn top_ast_scope(&self) -> Option<ast::NodeId> { | |
600 | for scope in self.scopes.borrow().iter().rev() { | |
601 | match scope.kind { | |
602 | CustomScopeKind | LoopScopeKind(..) => {} | |
603 | AstScopeKind(i) => { | |
604 | return Some(i); | |
605 | } | |
606 | } | |
607 | } | |
608 | None | |
609 | } | |
610 | ||
    fn top_nonempty_cleanup_scope(&self) -> Option<usize> {
        self.scopes.borrow().iter().rev().position(|s| !s.cleanups.is_empty())
    }

    fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
        self.is_valid_custom_scope(custom_scope) &&
            custom_scope.index == self.scopes.borrow().len() - 1
    }

    fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
        let scopes = self.scopes.borrow();
        custom_scope.index < scopes.len() &&
            (*scopes)[custom_scope.index].kind.is_temp()
    }

    /// Generates the cleanups for `scope` into `bcx`
    fn trans_scope_cleanups(&self, // cannot borrow self, will recurse
                            bcx: Block<'blk, 'tcx>,
                            scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx> {
        let mut bcx = bcx;
        if !bcx.unreachable.get() {
            for cleanup in scope.cleanups.iter().rev() {
                bcx = cleanup.trans(bcx, scope.debug_loc);
            }
        }
        bcx
    }

    fn scopes_len(&self) -> usize {
        self.scopes.borrow().len()
    }

    fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>) {
        self.scopes.borrow_mut().push(scope)
    }

    fn pop_scope(&self) -> CleanupScope<'blk, 'tcx> {
        debug!("popping cleanup scope {}, {} scopes remaining",
               self.top_scope(|s| s.block_name("")),
               self.scopes_len() - 1);

        self.scopes.borrow_mut().pop().unwrap()
    }

    fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R {
        f(self.scopes.borrow().last().unwrap())
    }

    /// Used when the caller wishes to jump to an early exit, such as a return, break,
    /// continue, or unwind. This function will generate all cleanups between the top of the
    /// stack and the exit `label` and return a basic block that the caller can branch to.
    ///
    /// For example, if the current stack of cleanups were as follows:
    ///
    ///      AST 22
    ///      Custom 1
    ///      AST 23
    ///      Loop 23
    ///      Custom 2
    ///      AST 24
    ///
    /// and the `label` specifies a break from `Loop 23`, then this function would generate a
    /// series of basic blocks as follows:
    ///
    ///      Cleanup(AST 24) -> Cleanup(Custom 2) -> break_blk
    ///
    /// where `break_blk` is the block specified in `Loop 23` as the target for breaks. The
    /// return value would be the first basic block in that sequence (`Cleanup(AST 24)`). The
    /// caller could then branch to `Cleanup(AST 24)` and it will perform all cleanups and
    /// finally branch to the `break_blk`.
    fn trans_cleanups_to_exit_scope(&'blk self,
                                    label: EarlyExitLabel)
                                    -> BasicBlockRef {
        debug!("trans_cleanups_to_exit_scope label={:?} scopes={}",
               label, self.scopes_len());

        let orig_scopes_len = self.scopes_len();
        let mut prev_llbb;
        let mut popped_scopes = vec!();

        // First we pop off all the cleanup stacks that are
        // traversed until the exit is reached, pushing them
        // onto the side vector `popped_scopes`. No code is
        // generated at this time.
        //
        // So, continuing the example from above, we would wind up
        // with a `popped_scopes` vector of `[AST 24, Custom 2]`.
        // (Presuming that there are no cached exits)
        loop {
            if self.scopes_len() == 0 {
                match label {
                    UnwindExit => {
                        // Generate a block that will `Resume`.
                        let prev_bcx = self.new_block(true, "resume", None);
                        let personality = self.personality.get().expect(
                            "create_landing_pad() should have set this");
                        build::Resume(prev_bcx,
                                      build::Load(prev_bcx, personality));
                        prev_llbb = prev_bcx.llbb;
                        break;
                    }

                    ReturnExit => {
                        prev_llbb = self.get_llreturn();
                        break;
                    }

                    LoopExit(id, _) => {
                        self.ccx.sess().bug(&format!(
                            "cannot exit from scope {}, not in scope", id));
                    }
                }
            }

            // Check if we have already cached the unwinding of this
            // scope for this label. If so, we can stop popping scopes
            // and branch to the cached label, since it contains the
            // cleanups for any subsequent scopes.
            match self.top_scope(|s| s.cached_early_exit(label)) {
                Some(cleanup_block) => {
                    prev_llbb = cleanup_block;
                    break;
                }
                None => { }
            }

            // Pop off the scope, since we will be generating
            // unwinding code for it. If we are searching for a loop exit,
            // and this scope is that loop, then stop popping and set
            // `prev_llbb` to the appropriate exit block from the loop.
            popped_scopes.push(self.pop_scope());
            let scope = popped_scopes.last().unwrap();
            match label {
                UnwindExit | ReturnExit => { }
                LoopExit(id, exit) => {
                    match scope.kind.early_exit_block(id, exit) {
                        Some(exitllbb) => {
                            prev_llbb = exitllbb;
                            break;
                        }

                        None => { }
                    }
                }
            }
        }

        debug!("trans_cleanups_to_exit_scope: popped {} scopes",
               popped_scopes.len());

        // Now push the popped scopes back on. As we go,
        // we track in `prev_llbb` the exit to which this scope
        // should branch when it's done.
        //
        // So, continuing with our example, we will start out with
        // `prev_llbb` being set to `break_blk` (or possibly a cached
        // early exit). We will then pop the scopes from `popped_scopes`
        // and generate a basic block for each one, prepending it in the
        // series and updating `prev_llbb`. So we begin by popping `Custom 2`
        // and generating `Cleanup(Custom 2)`. We make `Cleanup(Custom 2)`
        // branch to `prev_llbb == break_blk`, giving us a sequence like:
        //
        //     Cleanup(Custom 2) -> prev_llbb
        //
        // We then pop `AST 24` and repeat the process, giving us the sequence:
        //
        //     Cleanup(AST 24) -> Cleanup(Custom 2) -> prev_llbb
        //
        // At this point, `popped_scopes` is empty, and so the final block
        // that we return to the user is `Cleanup(AST 24)`.
        while !popped_scopes.is_empty() {
            let mut scope = popped_scopes.pop().unwrap();

            if scope.cleanups.iter().any(|c| cleanup_is_suitable_for(&**c, label))
            {
                let name = scope.block_name("clean");
                debug!("generating cleanups for {}", name);
                let bcx_in = self.new_block(label.is_unwind(),
                                            &name[..],
                                            None);
                let mut bcx_out = bcx_in;
                for cleanup in scope.cleanups.iter().rev() {
                    if cleanup_is_suitable_for(&**cleanup, label) {
                        bcx_out = cleanup.trans(bcx_out,
                                                scope.debug_loc);
                    }
                }
                build::Br(bcx_out, prev_llbb, DebugLoc::None);
                prev_llbb = bcx_in.llbb;
            } else {
                debug!("no suitable cleanups in {}",
                       scope.block_name("clean"));
            }

            scope.add_cached_early_exit(label, prev_llbb);
            self.push_scope(scope);
        }

        debug!("trans_cleanups_to_exit_scope: prev_llbb={:?}", prev_llbb);

        assert_eq!(self.scopes_len(), orig_scopes_len);
        prev_llbb
    }

    /// Creates a landing pad for the top scope, if one does not exist. The landing pad will
    /// perform all cleanups necessary for an unwind and then `resume` to continue error
    /// propagation:
    ///
    ///     landing_pad -> ... cleanups ... -> [resume]
    ///
    /// (The cleanups and resume instruction are created by `trans_cleanups_to_exit_scope()`,
    /// not in this function itself.)
    fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
        let pad_bcx;

        debug!("get_or_create_landing_pad");

        // Check if a landing pad block exists; if not, create one.
        {
            let mut scopes = self.scopes.borrow_mut();
            let last_scope = scopes.last_mut().unwrap();
            match last_scope.cached_landing_pad {
                Some(llbb) => { return llbb; }
                None => {
                    let name = last_scope.block_name("unwind");
                    pad_bcx = self.new_block(true, &name[..], None);
                    last_scope.cached_landing_pad = Some(pad_bcx.llbb);
                }
            }
        }

        // The landing pad return type (the type being propagated): a pair
        // of the exception object pointer and the type-selector value, as
        // dictated by the personality function and LLVM's exception
        // handling scheme.
        let llretty = Type::struct_(self.ccx,
                                    &[Type::i8p(self.ccx), Type::i32(self.ccx)],
                                    false);

        // The exception handling personality function.
        //
        // If our compilation unit has the `eh_personality` lang item somewhere
        // within it, then we just need to translate that. Otherwise, we're
        // building an rlib which will depend on some upstream implementation of
        // this function, so we just codegen a generic reference to it. We don't
        // specify any of the types for the function, we just make it a symbol
        // that LLVM can later use.
        let llpersonality = match pad_bcx.tcx().lang_items.eh_personality() {
            Some(def_id) => {
                callee::trans_fn_ref(pad_bcx.ccx(), def_id, ExprId(0),
                                     pad_bcx.fcx.param_substs).val
            }
            None => {
                let mut personality = self.ccx.eh_personality().borrow_mut();
                match *personality {
                    Some(llpersonality) => llpersonality,
                    None => {
                        let fty = Type::variadic_func(&[], &Type::i32(self.ccx));
                        let f = declare::declare_cfn(self.ccx, "rust_eh_personality", fty,
                                                     self.ccx.tcx().types.i32);
                        *personality = Some(f);
                        f
                    }
                }
            }
        };

        // The only landing pad clause will be 'cleanup'
        let llretval = build::LandingPad(pad_bcx, llretty, llpersonality, 1);

        // The landing pad block is a cleanup
        build::SetCleanup(pad_bcx, llretval);

        // We store the retval in a function-central alloca, so that calls to
        // Resume can find it.
        match self.personality.get() {
            Some(addr) => {
                build::Store(pad_bcx, llretval, addr);
            }
            None => {
                let addr = base::alloca(pad_bcx, common::val_ty(llretval), "");
                self.personality.set(Some(addr));
                build::Store(pad_bcx, llretval, addr);
            }
        }

        // Generate the cleanup block and branch to it.
        let cleanup_llbb = self.trans_cleanups_to_exit_scope(UnwindExit);
        build::Br(pad_bcx, cleanup_llbb, DebugLoc::None);

        return pad_bcx.llbb;
    }
}

impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> {
    fn new(kind: CleanupScopeKind<'blk, 'tcx>,
           debug_loc: DebugLoc)
           -> CleanupScope<'blk, 'tcx> {
        CleanupScope {
            kind: kind,
            debug_loc: debug_loc,
            cleanups: vec!(),
            cached_early_exits: vec!(),
            cached_landing_pad: None,
        }
    }

    fn clear_cached_exits(&mut self) {
        self.cached_early_exits = vec!();
        self.cached_landing_pad = None;
    }

    fn cached_early_exit(&self,
                         label: EarlyExitLabel)
                         -> Option<BasicBlockRef> {
        self.cached_early_exits.iter().
            find(|e| e.label == label).
            map(|e| e.cleanup_block)
    }

    fn add_cached_early_exit(&mut self,
                             label: EarlyExitLabel,
                             blk: BasicBlockRef) {
        self.cached_early_exits.push(
            CachedEarlyExit { label: label,
                              cleanup_block: blk });
    }

    /// True if this scope has cleanups that need unwinding
    fn needs_invoke(&self) -> bool {
        self.cached_landing_pad.is_some() ||
            self.cleanups.iter().any(|c| c.must_unwind())
    }

    /// Returns a suitable name to use for the basic block that handles this cleanup scope
    fn block_name(&self, prefix: &str) -> String {
        match self.kind {
            CustomScopeKind => format!("{}_custom_", prefix),
            AstScopeKind(id) => format!("{}_ast_{}_", prefix, id),
            LoopScopeKind(id, _) => format!("{}_loop_{}_", prefix, id),
        }
    }

    /// Manipulates the cleanup scope for call arguments. Conceptually, each
    /// argument to a call is an lvalue, and performing the call moves each
    /// of the arguments into a new rvalue (which gets cleaned up by the
    /// callee). As an optimization, instead of actually performing all of
    /// those moves, trans just manipulates the cleanup scope to obtain the
    /// same effect.
    pub fn drop_non_lifetime_clean(&mut self) {
        self.cleanups.retain(|c| c.is_lifetime_end());
        self.clear_cached_exits();
    }
}

impl<'blk, 'tcx> CleanupScopeKind<'blk, 'tcx> {
    fn is_temp(&self) -> bool {
        match *self {
            CustomScopeKind => true,
            LoopScopeKind(..) | AstScopeKind(..) => false,
        }
    }

    fn is_ast_with_id(&self, id: ast::NodeId) -> bool {
        match *self {
            CustomScopeKind | LoopScopeKind(..) => false,
            AstScopeKind(i) => i == id
        }
    }

    fn is_loop_with_id(&self, id: ast::NodeId) -> bool {
        match *self {
            CustomScopeKind | AstScopeKind(..) => false,
            LoopScopeKind(i, _) => i == id
        }
    }

    /// If this is a loop scope with id `id`, return the early exit block `exit`, else `None`
    fn early_exit_block(&self,
                        id: ast::NodeId,
                        exit: usize) -> Option<BasicBlockRef> {
        match *self {
            LoopScopeKind(i, ref exits) if id == i => Some(exits[exit].llbb),
            _ => None,
        }
    }
}

impl EarlyExitLabel {
    fn is_unwind(&self) -> bool {
        match *self {
            UnwindExit => true,
            _ => false
        }
    }
}

///////////////////////////////////////////////////////////////////////////
// Cleanup types

#[derive(Copy, Clone)]
pub struct DropValue<'tcx> {
    is_immediate: bool,
    must_unwind: bool,
    val: ValueRef,
    ty: Ty<'tcx>,
    fill_on_drop: bool,
    skip_dtor: bool,
}

impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> {
    fn must_unwind(&self) -> bool {
        self.must_unwind
    }

    fn clean_on_unwind(&self) -> bool {
        self.must_unwind
    }

    fn is_lifetime_end(&self) -> bool {
        false
    }

    fn trans<'blk>(&self,
                   bcx: Block<'blk, 'tcx>,
                   debug_loc: DebugLoc)
                   -> Block<'blk, 'tcx> {
        let skip_dtor = self.skip_dtor;
        let _icx = if skip_dtor {
            base::push_ctxt("<DropValue as Cleanup>::trans skip_dtor=true")
        } else {
            base::push_ctxt("<DropValue as Cleanup>::trans skip_dtor=false")
        };
        let bcx = if self.is_immediate {
            glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc, self.skip_dtor)
        } else {
            glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor)
        };
        if self.fill_on_drop {
            base::drop_done_fill_mem(bcx, self.val, self.ty);
        }
        bcx
    }
}

#[derive(Copy, Clone, Debug)]
pub enum Heap {
    HeapExchange
}

#[derive(Copy, Clone)]
pub struct FreeValue<'tcx> {
    ptr: ValueRef,
    heap: Heap,
    content_ty: Ty<'tcx>
}

impl<'tcx> Cleanup<'tcx> for FreeValue<'tcx> {
    fn must_unwind(&self) -> bool {
        true
    }

    fn clean_on_unwind(&self) -> bool {
        true
    }

    fn is_lifetime_end(&self) -> bool {
        false
    }

    fn trans<'blk>(&self,
                   bcx: Block<'blk, 'tcx>,
                   debug_loc: DebugLoc)
                   -> Block<'blk, 'tcx> {
        match self.heap {
            HeapExchange => {
                glue::trans_exchange_free_ty(bcx,
                                             self.ptr,
                                             self.content_ty,
                                             debug_loc)
            }
        }
    }
}

#[derive(Copy, Clone)]
pub struct LifetimeEnd {
    ptr: ValueRef,
}

impl<'tcx> Cleanup<'tcx> for LifetimeEnd {
    fn must_unwind(&self) -> bool {
        false
    }

    fn clean_on_unwind(&self) -> bool {
        true
    }

    fn is_lifetime_end(&self) -> bool {
        true
    }

    fn trans<'blk>(&self,
                   bcx: Block<'blk, 'tcx>,
                   debug_loc: DebugLoc)
                   -> Block<'blk, 'tcx> {
        debug_loc.apply(bcx.fcx);
        base::call_lifetime_end(bcx, self.ptr);
        bcx
    }
}

pub fn temporary_scope(tcx: &ty::ctxt,
                       id: ast::NodeId)
                       -> ScopeId {
    match tcx.region_maps.temporary_scope(id) {
        Some(scope) => {
            let r = AstScope(scope.node_id());
            debug!("temporary_scope({}) = {:?}", id, r);
            r
        }
        None => {
            tcx.sess.bug(&format!("no temporary scope available for expr {}",
                                  id))
        }
    }
}

pub fn var_scope(tcx: &ty::ctxt,
                 id: ast::NodeId)
                 -> ScopeId {
    let r = AstScope(tcx.region_maps.var_scope(id).node_id());
    debug!("var_scope({}) = {:?}", id, r);
    r
}
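
// For illustration, a hedged sketch tying the helpers above to the
// scheduling methods: arranging a drop for the temporary produced by
// expression `id` (`fcx`, `tcx`, `val`, and `ty` stand in for values the
// caller already has):
//
//     let scope = temporary_scope(tcx, id);
//     fcx.schedule_drop_mem(scope, val, ty);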
1149 | ||
1150 | fn cleanup_is_suitable_for(c: &Cleanup, | |
1151 | label: EarlyExitLabel) -> bool { | |
1152 | !label.is_unwind() || c.clean_on_unwind() | |
1153 | } | |
1154 | ||
///////////////////////////////////////////////////////////////////////////
// These traits just exist to put the methods into this file.

pub trait CleanupMethods<'blk, 'tcx> {
    fn push_ast_cleanup_scope(&self, id: NodeIdAndSpan);
    fn push_loop_cleanup_scope(&self,
                               id: ast::NodeId,
                               exits: [Block<'blk, 'tcx>; EXIT_MAX]);
    fn push_custom_cleanup_scope(&self) -> CustomScopeIndex;
    fn push_custom_cleanup_scope_with_debug_loc(&self,
                                                debug_loc: NodeIdAndSpan)
                                                -> CustomScopeIndex;
    fn pop_and_trans_ast_cleanup_scope(&self,
                                       bcx: Block<'blk, 'tcx>,
                                       cleanup_scope: ast::NodeId)
                                       -> Block<'blk, 'tcx>;
    fn pop_loop_cleanup_scope(&self,
                              cleanup_scope: ast::NodeId);
    fn pop_custom_cleanup_scope(&self,
                                custom_scope: CustomScopeIndex);
    fn pop_and_trans_custom_cleanup_scope(&self,
                                          bcx: Block<'blk, 'tcx>,
                                          custom_scope: CustomScopeIndex)
                                          -> Block<'blk, 'tcx>;
    fn top_loop_scope(&self) -> ast::NodeId;
    fn normal_exit_block(&'blk self,
                         cleanup_scope: ast::NodeId,
                         exit: usize) -> BasicBlockRef;
    fn return_exit_block(&'blk self) -> BasicBlockRef;
    fn schedule_lifetime_end(&self,
                             cleanup_scope: ScopeId,
                             val: ValueRef);
    fn schedule_drop_mem(&self,
                         cleanup_scope: ScopeId,
                         val: ValueRef,
                         ty: Ty<'tcx>);
    fn schedule_drop_and_fill_mem(&self,
                                  cleanup_scope: ScopeId,
                                  val: ValueRef,
                                  ty: Ty<'tcx>);
    fn schedule_drop_adt_contents(&self,
                                  cleanup_scope: ScopeId,
                                  val: ValueRef,
                                  ty: Ty<'tcx>);
    fn schedule_drop_immediate(&self,
                               cleanup_scope: ScopeId,
                               val: ValueRef,
                               ty: Ty<'tcx>);
    fn schedule_free_value(&self,
                           cleanup_scope: ScopeId,
                           val: ValueRef,
                           heap: Heap,
                           content_ty: Ty<'tcx>);
    fn schedule_clean(&self,
                      cleanup_scope: ScopeId,
                      cleanup: CleanupObj<'tcx>);
    fn schedule_clean_in_ast_scope(&self,
                                   cleanup_scope: ast::NodeId,
                                   cleanup: CleanupObj<'tcx>);
    fn schedule_clean_in_custom_scope(&self,
                                      custom_scope: CustomScopeIndex,
                                      cleanup: CleanupObj<'tcx>);
    fn needs_invoke(&self) -> bool;
    fn get_landing_pad(&'blk self) -> BasicBlockRef;
}

trait CleanupHelperMethods<'blk, 'tcx> {
    fn top_ast_scope(&self) -> Option<ast::NodeId>;
    fn top_nonempty_cleanup_scope(&self) -> Option<usize>;
    fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
    fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
    fn trans_scope_cleanups(&self,
                            bcx: Block<'blk, 'tcx>,
                            scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx>;
    fn trans_cleanups_to_exit_scope(&'blk self,
                                    label: EarlyExitLabel)
                                    -> BasicBlockRef;
    fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef;
    fn scopes_len(&self) -> usize;
    fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>);
    fn pop_scope(&self) -> CleanupScope<'blk, 'tcx>;
    fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R;
}