1 /*!
2 Managing the scope stack. The scopes are tied to lexical scopes, so as
3 we descend the THIR, we push a scope on the stack, build its
4 contents, and then pop it off. Every scope is named by a
5 `region::Scope`.
6
7 ### SEME Regions
8
9 When pushing a new [Scope], we record the current point in the graph (a
10 basic block); this marks the entry to the scope. We then generate more
11 stuff in the control-flow graph. Whenever the scope is exited, either
12 via a `break` or `return` or just by fallthrough, that marks an exit
13 from the scope. Each lexical scope thus corresponds to a single-entry,
14 multiple-exit (SEME) region in the control-flow graph.
15
16 For now, we record the `region::Scope` for each SEME region for later reference
17 (see caveat in next paragraph). This is because destruction scopes are tied to
18 them. This may change in the future so that MIR lowering determines its own
19 destruction scopes.
20
21 ### Not so SEME Regions
22
23 In the course of building matches, it sometimes happens that certain code
24 (namely guards) gets executed multiple times. This means that the scope lexical
25 scope may in fact correspond to multiple, disjoint SEME regions. So in fact our
26 mapping is from one scope to a vector of SEME regions. Since the SEME regions
27 are disjoint, the mapping is still one-to-one for the set of SEME regions that
28 we're currently in.
29
30 Also in matches, the scopes assigned to arms are not always even SEME regions!
31 Each arm has a single region with one entry for each pattern. We manually
32 manipulate the scheduled drops in this scope to avoid dropping things multiple
33 times.
34
35 ### Drops
36
37 The primary purpose for scopes is to insert drops: while building
38 the contents, we also accumulate places that need to be dropped upon
39 exit from each scope. This is done by calling `schedule_drop`. Once a
40 drop is scheduled, whenever we branch out we will insert drops of all
41 those places onto the outgoing edge. Note that we don't know the full
42 set of scheduled drops up front, and so whenever we exit from the
43 scope we only drop the values scheduled thus far. For example, consider
44 the scope S corresponding to this loop:
45
46 ```
47 # let cond = true;
48 loop {
49 let x = ..;
50 if cond { break; }
51 let y = ..;
52 }
53 ```
54
55 When processing the `let x`, we will add one drop to the scope for
56 `x`. The break will then insert a drop for `x`. When we process `let
57 y`, we will add another drop (in fact, to a subscope, but let's ignore
58 that for now); any later drops would also drop `y`.
59
60 ### Early exit
61
62 There are numerous "normal" ways to early exit a scope: `break`,
63 `continue`, `return` (panics are handled separately). Whenever an
64 early exit occurs, the method `break_scope` is called. It is given the
65 current point in execution where the early exit occurs, as well as the
66 scope you want to branch to (note that all early exits branch to some
67 other enclosing scope). `break_scope` will record the set of drops currently
68 scheduled in a [DropTree]. Later, before `in_breakable_scope` exits, the drops
69 will be added to the CFG.
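
As a minimal sketch (purely illustrative, not taken from any particular piece of
MIR), consider an early exit that leaves two scopes at once:

```
# let cond = true;
'outer: loop {
    let a = String::new();
    loop {
        let b = String::new();
        if cond { break 'outer; }
    }
}
```

Here the `break 'outer` calls `break_scope` with the outer loop's scope as its
target, so the drops scheduled so far in all the scopes inside that target
(`b`, then `a`) are recorded in the breakable scope's [DropTree] and only
lowered to the CFG later.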
70
71 Panics are handled in a similar fashion, except that the drops are added to the
72 MIR once the rest of the function has finished being lowered. If a terminator
73 can panic, call `diverge_from(block)`, where `block` is the block containing that
74 terminator.
75
76 ### Breakable scopes
77
78 In addition to the normal scope stack, we track a loop scope stack
79 that contains only loops and breakable blocks. It tracks where a `break`,
80 `continue` or `return` should go to.
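
As a small sketch (again purely illustrative), in

```
# let cond = true;
let result = loop {
    if cond { break 42; }
};
```

the `break 42` writes `42` to the `break_destination` of the loop's breakable
scope (the place that holds the value of the whole `loop` expression) before
branching out via `break_scope`.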
81
82 */
83
84 use crate::build::{BlockAnd, BlockAndExtension, BlockFrame, Builder, CFG};
85 use rustc_data_structures::fx::FxHashMap;
86 use rustc_index::vec::IndexVec;
87 use rustc_middle::middle::region;
88 use rustc_middle::mir::*;
89 use rustc_middle::thir::{Expr, LintLevel};
90
91 use rustc_span::{Span, DUMMY_SP};
92
93 #[derive(Debug)]
94 pub struct Scopes<'tcx> {
95 scopes: Vec<Scope>,
96 /// The current set of breakable scopes. See module comment for more details.
97 breakable_scopes: Vec<BreakableScope<'tcx>>,
98
99 /// Drops that need to be done on unwind paths. See the comment on
100 /// [DropTree] for more details.
101 unwind_drops: DropTree,
102
103 /// Drops that need to be done on paths to the `GeneratorDrop` terminator.
104 generator_drops: DropTree,
105 }
106
107 #[derive(Debug)]
108 struct Scope {
109 /// The source scope this scope was created in.
110 source_scope: SourceScope,
111
112 /// The region scope that this scope corresponds to.
113 region_scope: region::Scope,
114
115 /// The span of that `region_scope` within the source code.
116 region_scope_span: Span,
117
118 /// set of places to drop when exiting this scope. This starts
119 /// out empty but grows as variables are declared during the
120 /// building process. This is a stack, so we always drop from the
121 /// end of the vector (top of the stack) first.
122 drops: Vec<DropData>,
123
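/// Locals that have been moved out of this scope (see `record_operands_moved`).
/// Their scheduled `DropKind::Value` drops are skipped on the non-unwind exit paths.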
124 moved_locals: Vec<Local>,
125
126 /// The drop index that will drop everything in and below this scope on an
127 /// unwind path.
128 cached_unwind_block: Option<DropIdx>,
129
130 /// The drop index that will drop everything in and below this scope on a
131 /// generator drop path.
132 cached_generator_drop_block: Option<DropIdx>,
133 }
134
135 #[derive(Clone, Copy, Debug)]
136 struct DropData {
137 /// The `Span` where the drop obligation was incurred (typically where the place was
138 /// declared).
139 source_info: SourceInfo,
140
141 /// local to drop
142 local: Local,
143
144 /// Whether this is a value Drop or a StorageDead.
145 kind: DropKind,
146 }
147
148 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
149 pub(crate) enum DropKind {
150 Value,
151 Storage,
152 }
153
154 #[derive(Debug)]
155 struct BreakableScope<'tcx> {
156 /// Region scope of the loop
157 region_scope: region::Scope,
158 /// The destination of the loop/block expression itself (i.e., where to put
159 /// the result of a `break` or `return` expression)
160 break_destination: Place<'tcx>,
161 /// Drops that happen on the `break`/`return` path.
162 break_drops: DropTree,
163 /// Drops that happen on the `continue` path.
164 continue_drops: Option<DropTree>,
165 }
166
167 /// The target of an expression that breaks out of a scope
168 #[derive(Clone, Copy, Debug)]
169 crate enum BreakableTarget {
170 Continue(region::Scope),
171 Break(region::Scope),
172 Return,
173 }
174
175 rustc_index::newtype_index! {
176 struct DropIdx { .. }
177 }
178
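/// The root node of every [DropTree]. It does not correspond to an actual drop; it is the
/// point jumped to once all of the required drops have been performed.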
179 const ROOT_NODE: DropIdx = DropIdx::from_u32(0);
180
181 /// A tree of drops whose lowering has been deferred. It's used for:
182 ///
183 /// * Drops on unwind paths
184 /// * Drops on generator drop paths (when a suspended generator is dropped)
185 /// * Drops on return and loop exit paths
186 ///
187 /// Once no more nodes can be added to the tree, we lower it to MIR in one go
188 /// in `build_mir`.
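///
/// As a rough sketch (not tied to any particular MIR): if a scope schedules drops for
/// locals `_1` and then `_2`, an early exit from it adds the chain
/// `drop(_2) -> drop(_1) -> ROOT` to the tree, where each node points at the drop that
/// runs after it. A second exit that only needs to drop `_1` reuses the existing
/// `drop(_1)` node and just records another entry edge into the tree.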
189 #[derive(Debug)]
190 struct DropTree {
191 /// Drops in the tree.
192 drops: IndexVec<DropIdx, (DropData, DropIdx)>,
193 /// Map for finding the inverse of the `next_drop` relation:
194 ///
195 /// `previous_drops[(drops[i].1, drops[i].0.local, drops[i].0.kind)] == i`
196 previous_drops: FxHashMap<(DropIdx, Local, DropKind), DropIdx>,
197 /// Edges into the `DropTree` that need to be added once it's lowered.
198 entry_points: Vec<(DropIdx, BasicBlock)>,
199 }
200
201 impl Scope {
202 /// Whether there's anything to do for the cleanup path, that is,
203 /// when unwinding through this scope. This includes destructors,
204 /// but not StorageDead statements, which don't get emitted at all
205 /// for unwinding, for several reasons:
206 /// * clang doesn't emit llvm.lifetime.end for C++ unwinding
207 /// * LLVM's memory dependency analysis can't handle it atm
208 /// * polluting the cleanup MIR with StorageDead creates
209 /// landing pads even though there are no actual destructors
210 /// * freeing up stack space has no effect during unwinding
211 /// Note that for generators we do emit StorageDeads, for use
212 /// by optimizations in the MIR generator transform.
213 fn needs_cleanup(&self) -> bool {
214 self.drops.iter().any(|drop| match drop.kind {
215 DropKind::Value => true,
216 DropKind::Storage => false,
217 })
218 }
219
220 fn invalidate_cache(&mut self) {
221 self.cached_unwind_block = None;
222 self.cached_generator_drop_block = None;
223 }
224 }
225
226 /// A trait that determines how [DropTree] creates its blocks and
227 /// links to any entry nodes.
228 trait DropTreeBuilder<'tcx> {
229 /// Create a new block for the tree. This should call either
230 /// `cfg.start_new_block()` or `cfg.start_new_cleanup_block()`.
231 fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock;
232
233 /// Links a block outside the drop tree, `from`, to the block `to` inside
234 /// the drop tree.
235 fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock);
236 }
237
238 impl DropTree {
239 fn new() -> Self {
240 // The root node of the tree doesn't represent a drop, but instead
241 // represents the block in the tree that should be jumped to once all
242 // of the required drops have been performed.
243 let fake_source_info = SourceInfo::outermost(DUMMY_SP);
244 let fake_data =
245 DropData { source_info: fake_source_info, local: Local::MAX, kind: DropKind::Storage };
246 let drop_idx = DropIdx::MAX;
247 let drops = IndexVec::from_elem_n((fake_data, drop_idx), 1);
248 Self { drops, entry_points: Vec::new(), previous_drops: FxHashMap::default() }
249 }
250
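/// Adds a drop node whose successor (the drop that runs after it) is `next`, returning
/// its index. If an equivalent node (same `next`, local, and kind) already exists, it is
/// reused so that chains of drops can be shared between entry points.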
251 fn add_drop(&mut self, drop: DropData, next: DropIdx) -> DropIdx {
252 let drops = &mut self.drops;
253 *self
254 .previous_drops
255 .entry((next, drop.local, drop.kind))
256 .or_insert_with(|| drops.push((drop, next)))
257 }
258
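/// Registers `from` as a block outside the tree that will branch to the drop node `to`
/// once the tree is lowered to MIR.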
259 fn add_entry(&mut self, from: BasicBlock, to: DropIdx) {
260 debug_assert!(to < self.drops.next_index());
261 self.entry_points.push((to, from));
262 }
263
264 /// Builds the MIR for a given drop tree.
265 ///
266 /// `blocks` should have the same length as `self.drops`, and may have its
267 /// first value set to some already existing block.
268 fn build_mir<'tcx, T: DropTreeBuilder<'tcx>>(
269 &mut self,
270 cfg: &mut CFG<'tcx>,
271 blocks: &mut IndexVec<DropIdx, Option<BasicBlock>>,
272 ) {
273 debug!("DropTree::build_mir(drops = {:#?})", self);
274 assert_eq!(blocks.len(), self.drops.len());
275
276 self.assign_blocks::<T>(cfg, blocks);
277 self.link_blocks(cfg, blocks)
278 }
279
280 /// Assign blocks for all of the drops in the drop tree that need them.
281 fn assign_blocks<'tcx, T: DropTreeBuilder<'tcx>>(
282 &mut self,
283 cfg: &mut CFG<'tcx>,
284 blocks: &mut IndexVec<DropIdx, Option<BasicBlock>>,
285 ) {
286 // StorageDead statements can share blocks with each other and also with
287 // a Drop terminator. We iterate through the drops to find which drops
288 // need their own block.
289 #[derive(Clone, Copy)]
290 enum Block {
291 // This drop is unreachable
292 None,
293 // This drop is only reachable through the `StorageDead` with the
294 // specified index.
295 Shares(DropIdx),
296 // This drop has more than one way of being reached, or it is
297 // branched to from outside the tree, or its predecessor is a
298 // `Value` drop.
299 Own,
300 }
301
302 let mut needs_block = IndexVec::from_elem(Block::None, &self.drops);
303 if blocks[ROOT_NODE].is_some() {
304 // In some cases (such as drops for `continue`) the root node
305 // already has a block. In this case, make sure that we don't
306 // override it.
307 needs_block[ROOT_NODE] = Block::Own;
308 }
309
310 // Sort so that we only need to check the last value.
311 let entry_points = &mut self.entry_points;
312 entry_points.sort();
313
314 for (drop_idx, drop_data) in self.drops.iter_enumerated().rev() {
315 if entry_points.last().map_or(false, |entry_point| entry_point.0 == drop_idx) {
316 let block = *blocks[drop_idx].get_or_insert_with(|| T::make_block(cfg));
317 needs_block[drop_idx] = Block::Own;
318 while entry_points.last().map_or(false, |entry_point| entry_point.0 == drop_idx) {
319 let entry_block = entry_points.pop().unwrap().1;
320 T::add_entry(cfg, entry_block, block);
321 }
322 }
323 match needs_block[drop_idx] {
324 Block::None => continue,
325 Block::Own => {
326 blocks[drop_idx].get_or_insert_with(|| T::make_block(cfg));
327 }
328 Block::Shares(pred) => {
329 blocks[drop_idx] = blocks[pred];
330 }
331 }
332 if let DropKind::Value = drop_data.0.kind {
333 needs_block[drop_data.1] = Block::Own;
334 } else if drop_idx != ROOT_NODE {
335 match &mut needs_block[drop_data.1] {
336 pred @ Block::None => *pred = Block::Shares(drop_idx),
337 pred @ Block::Shares(_) => *pred = Block::Own,
338 Block::Own => (),
339 }
340 }
341 }
342
343 debug!("assign_blocks: blocks = {:#?}", blocks);
344 assert!(entry_points.is_empty());
345 }
346
347 fn link_blocks<'tcx>(
348 &self,
349 cfg: &mut CFG<'tcx>,
350 blocks: &IndexVec<DropIdx, Option<BasicBlock>>,
351 ) {
352 for (drop_idx, drop_data) in self.drops.iter_enumerated().rev() {
353 let block = if let Some(block) = blocks[drop_idx] {
354 block
355 } else {
356 continue;
357 };
358 match drop_data.0.kind {
359 DropKind::Value => {
360 let terminator = TerminatorKind::Drop {
361 target: blocks[drop_data.1].unwrap(),
362 // The caller will handle this if needed.
363 unwind: None,
364 place: drop_data.0.local.into(),
365 };
366 cfg.terminate(block, drop_data.0.source_info, terminator);
367 }
368 // Root nodes don't correspond to a drop.
369 DropKind::Storage if drop_idx == ROOT_NODE => {}
370 DropKind::Storage => {
371 let stmt = Statement {
372 source_info: drop_data.0.source_info,
373 kind: StatementKind::StorageDead(drop_data.0.local),
374 };
375 cfg.push(block, stmt);
376 let target = blocks[drop_data.1].unwrap();
377 if target != block {
378 // Diagnostics don't use this `Span` but debuginfo
379 // might. Since we don't want breakpoints to be placed
380 // here, especially when this is on an unwind path, we
381 // use `DUMMY_SP`.
382 let source_info = SourceInfo { span: DUMMY_SP, ..drop_data.0.source_info };
383 let terminator = TerminatorKind::Goto { target };
384 cfg.terminate(block, source_info, terminator);
385 }
386 }
387 }
388 }
389 }
390 }
391
392 impl<'tcx> Scopes<'tcx> {
393 pub(crate) fn new() -> Self {
394 Self {
395 scopes: Vec::new(),
396 breakable_scopes: Vec::new(),
397 unwind_drops: DropTree::new(),
398 generator_drops: DropTree::new(),
399 }
400 }
401
402 fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo), vis_scope: SourceScope) {
403 debug!("push_scope({:?})", region_scope);
404 self.scopes.push(Scope {
405 source_scope: vis_scope,
406 region_scope: region_scope.0,
407 region_scope_span: region_scope.1.span,
408 drops: vec![],
409 moved_locals: vec![],
410 cached_unwind_block: None,
411 cached_generator_drop_block: None,
412 });
413 }
414
415 fn pop_scope(&mut self, region_scope: (region::Scope, SourceInfo)) -> Scope {
416 let scope = self.scopes.pop().unwrap();
417 assert_eq!(scope.region_scope, region_scope.0);
418 scope
419 }
420
421 fn scope_index(&self, region_scope: region::Scope, span: Span) -> usize {
422 self.scopes
423 .iter()
424 .rposition(|scope| scope.region_scope == region_scope)
425 .unwrap_or_else(|| span_bug!(span, "region_scope {:?} does not enclose", region_scope))
426 }
427
428 /// Returns the topmost active scope, which is known to be alive until
429 /// the next scope expression.
430 fn topmost(&self) -> region::Scope {
431 self.scopes.last().expect("topmost_scope: no scopes present").region_scope
432 }
433 }
434
435 impl<'a, 'tcx> Builder<'a, 'tcx> {
436 // Adding and removing scopes
437 // ==========================
438 // Start a breakable scope, which tracks where `continue`, `break` and
439 // `return` should branch to.
440 crate fn in_breakable_scope<F>(
441 &mut self,
442 loop_block: Option<BasicBlock>,
443 break_destination: Place<'tcx>,
444 span: Span,
445 f: F,
446 ) -> BlockAnd<()>
447 where
448 F: FnOnce(&mut Builder<'a, 'tcx>) -> Option<BlockAnd<()>>,
449 {
450 let region_scope = self.scopes.topmost();
451 let scope = BreakableScope {
452 region_scope,
453 break_destination,
454 break_drops: DropTree::new(),
455 continue_drops: loop_block.map(|_| DropTree::new()),
456 };
457 self.scopes.breakable_scopes.push(scope);
458 let normal_exit_block = f(self);
459 let breakable_scope = self.scopes.breakable_scopes.pop().unwrap();
460 assert!(breakable_scope.region_scope == region_scope);
461 let break_block = self.build_exit_tree(breakable_scope.break_drops, None);
462 if let Some(drops) = breakable_scope.continue_drops {
463 self.build_exit_tree(drops, loop_block);
464 }
465 match (normal_exit_block, break_block) {
466 (Some(block), None) | (None, Some(block)) => block,
467 (None, None) => self.cfg.start_new_block().unit(),
468 (Some(normal_block), Some(exit_block)) => {
469 let target = self.cfg.start_new_block();
470 let source_info = self.source_info(span);
471 self.cfg.terminate(
472 unpack!(normal_block),
473 source_info,
474 TerminatorKind::Goto { target },
475 );
476 self.cfg.terminate(
477 unpack!(exit_block),
478 source_info,
479 TerminatorKind::Goto { target },
480 );
481 target.unit()
482 }
483 }
484 }
485
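/// Like `in_scope`, but the scope is pushed (and later popped) only if `opt_scope` is
/// `Some`.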
486 crate fn in_opt_scope<F, R>(
487 &mut self,
488 opt_scope: Option<(region::Scope, SourceInfo)>,
489 f: F,
490 ) -> BlockAnd<R>
491 where
492 F: FnOnce(&mut Builder<'a, 'tcx>) -> BlockAnd<R>,
493 {
494 debug!("in_opt_scope(opt_scope={:?})", opt_scope);
495 if let Some(region_scope) = opt_scope {
496 self.push_scope(region_scope);
497 }
498 let mut block;
499 let rv = unpack!(block = f(self));
500 if let Some(region_scope) = opt_scope {
501 unpack!(block = self.pop_scope(region_scope, block));
502 }
503 debug!("in_scope: exiting opt_scope={:?} block={:?}", opt_scope, block);
504 block.and(rv)
505 }
506
507 /// Convenience wrapper that pushes a scope and then executes `f`
508 /// to build its contents, popping the scope afterwards.
509 crate fn in_scope<F, R>(
510 &mut self,
511 region_scope: (region::Scope, SourceInfo),
512 lint_level: LintLevel,
513 f: F,
514 ) -> BlockAnd<R>
515 where
516 F: FnOnce(&mut Builder<'a, 'tcx>) -> BlockAnd<R>,
517 {
518 debug!("in_scope(region_scope={:?})", region_scope);
519 let source_scope = self.source_scope;
520 let tcx = self.tcx;
521 if let LintLevel::Explicit(current_hir_id) = lint_level {
522 // Use `maybe_lint_level_root_bounded` with `root_lint_level` as a bound
523 // to avoid adding HIR dependencies on our parents.
524 // We estimate the true lint roots here to avoid creating a lot of source scopes.
525
526 let parent_root = tcx.maybe_lint_level_root_bounded(
527 self.source_scopes[source_scope].local_data.as_ref().assert_crate_local().lint_root,
528 self.hir_id,
529 );
530 let current_root = tcx.maybe_lint_level_root_bounded(current_hir_id, self.hir_id);
531
532 if parent_root != current_root {
533 self.source_scope = self.new_source_scope(
534 region_scope.1.span,
535 LintLevel::Explicit(current_root),
536 None,
537 );
538 }
539 }
540 self.push_scope(region_scope);
541 let mut block;
542 let rv = unpack!(block = f(self));
543 unpack!(block = self.pop_scope(region_scope, block));
544 self.source_scope = source_scope;
545 debug!("in_scope: exiting region_scope={:?} block={:?}", region_scope, block);
546 block.and(rv)
547 }
548
549 /// Push a scope onto the stack. You can then build code in this
550 /// scope and call `pop_scope` afterwards. Note that these two
551 /// calls must be paired; using `in_scope` as a convenience
552 /// wrapper may be preferable.
553 crate fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo)) {
554 self.scopes.push_scope(region_scope, self.source_scope);
555 }
556
557 /// Pops a scope, which should have region scope `region_scope`,
558 /// adding any drops onto the end of `block` that are needed.
559 /// This must match 1-to-1 with `push_scope`.
560 crate fn pop_scope(
561 &mut self,
562 region_scope: (region::Scope, SourceInfo),
563 mut block: BasicBlock,
564 ) -> BlockAnd<()> {
565 debug!("pop_scope({:?}, {:?})", region_scope, block);
566
567 block = self.leave_top_scope(block);
568
569 self.scopes.pop_scope(region_scope);
570
571 block.unit()
572 }
573
574 /// Sets up the drops for breaking from `block` to `target`.
575 crate fn break_scope(
576 &mut self,
577 mut block: BasicBlock,
578 value: Option<&Expr<'tcx>>,
579 target: BreakableTarget,
580 source_info: SourceInfo,
581 ) -> BlockAnd<()> {
582 let span = source_info.span;
583
584 let get_scope_index = |scope: region::Scope| {
585 // find the loop-scope by its `region::Scope`.
586 self.scopes
587 .breakable_scopes
588 .iter()
589 .rposition(|breakable_scope| breakable_scope.region_scope == scope)
590 .unwrap_or_else(|| span_bug!(span, "no enclosing breakable scope found"))
591 };
592 let (break_index, destination) = match target {
593 BreakableTarget::Return => {
594 let scope = &self.scopes.breakable_scopes[0];
595 if scope.break_destination != Place::return_place() {
596 span_bug!(span, "`return` in item with no return scope");
597 }
598 (0, Some(scope.break_destination))
599 }
600 BreakableTarget::Break(scope) => {
601 let break_index = get_scope_index(scope);
602 let scope = &self.scopes.breakable_scopes[break_index];
603 (break_index, Some(scope.break_destination))
604 }
605 BreakableTarget::Continue(scope) => {
606 let break_index = get_scope_index(scope);
607 (break_index, None)
608 }
609 };
610
611 if let Some(destination) = destination {
612 if let Some(value) = value {
613 debug!("stmt_expr Break val block_context.push(SubExpr)");
614 self.block_context.push(BlockFrame::SubExpr);
615 unpack!(block = self.expr_into_dest(destination, block, value));
616 self.block_context.pop();
617 } else {
618 self.cfg.push_assign_unit(block, source_info, destination, self.tcx)
619 }
620 } else {
621 assert!(value.is_none(), "`return` and `break` should have a destination");
622 if self.tcx.sess.instrument_coverage() {
623 // Unlike `break` and `return`, which push an `Assign` statement to MIR, from which
624 // a Coverage code region can be generated, `continue` needs no `Assign`; but
625 // without one, the `InstrumentCoverage` MIR pass cannot generate a code region for
626 // `continue`. Coverage will be missing unless we add a dummy `Assign` to MIR.
627 self.add_dummy_assignment(&span, block, source_info);
628 }
629 }
630
631 let region_scope = self.scopes.breakable_scopes[break_index].region_scope;
632 let scope_index = self.scopes.scope_index(region_scope, span);
633 let drops = if destination.is_some() {
634 &mut self.scopes.breakable_scopes[break_index].break_drops
635 } else {
636 self.scopes.breakable_scopes[break_index].continue_drops.as_mut().unwrap()
637 };
638 let mut drop_idx = ROOT_NODE;
639 for scope in &self.scopes.scopes[scope_index + 1..] {
640 for drop in &scope.drops {
641 drop_idx = drops.add_drop(*drop, drop_idx);
642 }
643 }
644 drops.add_entry(block, drop_idx);
645
646 // `build_drop_tree` doesn't have access to our source_info, so we
647 // create a dummy terminator now. `TerminatorKind::Resume` is used
648 // because MIR type checking will panic if it hasn't been overwritten.
649 self.cfg.terminate(block, source_info, TerminatorKind::Resume);
650
651 self.cfg.start_new_block().unit()
652 }
653
654 // Add a dummy `Assign` statement to the CFG, with the span for the source code's `continue`
655 // statement.
656 fn add_dummy_assignment(&mut self, span: &Span, block: BasicBlock, source_info: SourceInfo) {
657 let local_decl = LocalDecl::new(self.tcx.mk_unit(), *span).internal();
658 let temp_place = Place::from(self.local_decls.push(local_decl));
659 self.cfg.push_assign_unit(block, source_info, temp_place, self.tcx);
660 }
661
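/// Exits the topmost scope by emitting its drops at the end of `block` and then
/// branching to `target`.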
662 crate fn exit_top_scope(
663 &mut self,
664 mut block: BasicBlock,
665 target: BasicBlock,
666 source_info: SourceInfo,
667 ) {
668 block = self.leave_top_scope(block);
669 self.cfg.terminate(block, source_info, TerminatorKind::Goto { target });
670 }
671
672 fn leave_top_scope(&mut self, block: BasicBlock) -> BasicBlock {
673 // If we are emitting a `drop` statement, we need to have the cached
674 // diverge cleanup pads ready in case that drop panics.
675 let needs_cleanup = self.scopes.scopes.last().map_or(false, |scope| scope.needs_cleanup());
676 let is_generator = self.generator_kind.is_some();
677 let unwind_to = if needs_cleanup { self.diverge_cleanup() } else { DropIdx::MAX };
678
679 let scope = self.scopes.scopes.last().expect("leave_top_scope called with no scopes");
680 unpack!(build_scope_drops(
681 &mut self.cfg,
682 &mut self.scopes.unwind_drops,
683 scope,
684 block,
685 unwind_to,
686 is_generator && needs_cleanup,
687 self.arg_count,
688 ))
689 }
690
691 /// Creates a new source scope, nested in the current one.
692 crate fn new_source_scope(
693 &mut self,
694 span: Span,
695 lint_level: LintLevel,
696 safety: Option<Safety>,
697 ) -> SourceScope {
698 let parent = self.source_scope;
699 debug!(
700 "new_source_scope({:?}, {:?}, {:?}) - parent({:?})={:?}",
701 span,
702 lint_level,
703 safety,
704 parent,
705 self.source_scopes.get(parent)
706 );
707 let scope_local_data = SourceScopeLocalData {
708 lint_root: if let LintLevel::Explicit(lint_root) = lint_level {
709 lint_root
710 } else {
711 self.source_scopes[parent].local_data.as_ref().assert_crate_local().lint_root
712 },
713 safety: safety.unwrap_or_else(|| {
714 self.source_scopes[parent].local_data.as_ref().assert_crate_local().safety
715 }),
716 };
717 self.source_scopes.push(SourceScopeData {
718 span,
719 parent_scope: Some(parent),
720 inlined: None,
721 inlined_parent_scope: None,
722 local_data: ClearCrossCrate::Set(scope_local_data),
723 })
724 }
725
726 /// Given a span and the current source scope, make a SourceInfo.
727 crate fn source_info(&self, span: Span) -> SourceInfo {
728 SourceInfo { span, scope: self.source_scope }
729 }
730
731 // Finding scopes
732 // ==============
733 /// Returns the scope that we should use as the lifetime of an
734 /// operand. Basically, an operand must live until it is consumed.
735 /// This is similar to, but not quite the same as, the temporary
736 /// scope (which can be larger or smaller).
737 ///
738 /// Consider:
739 ///
740 /// let x = foo(bar(X, Y));
741 ///
742 /// We wish to pop the storage for X and Y after `bar()` is
743 /// called, not after the whole `let` is completed.
744 ///
745 /// As another example, if the second argument diverges:
746 ///
747 /// foo(Box::new(2), panic!())
748 ///
749 /// We would allocate the box but then free it on the unwinding
750 /// path; we would also emit a free on the 'success' path from
751 /// panic, but that will turn out to be removed as dead-code.
752 crate fn local_scope(&self) -> region::Scope {
753 self.scopes.topmost()
754 }
755
756 // Scheduling drops
757 // ================
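/// Schedules both a `DropKind::Storage` and a `DropKind::Value` drop for `local` on exit
/// from `region_scope`.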
758 crate fn schedule_drop_storage_and_value(
759 &mut self,
760 span: Span,
761 region_scope: region::Scope,
762 local: Local,
763 ) {
764 self.schedule_drop(span, region_scope, local, DropKind::Storage);
765 self.schedule_drop(span, region_scope, local, DropKind::Value);
766 }
767
768 /// Indicates that `local` should be dropped on exit from `region_scope`.
769 ///
770 /// When called with `DropKind::Storage`, `local` shouldn't be the return
771 /// place or a function parameter.
772 crate fn schedule_drop(
773 &mut self,
774 span: Span,
775 region_scope: region::Scope,
776 local: Local,
777 drop_kind: DropKind,
778 ) {
779 let needs_drop = match drop_kind {
780 DropKind::Value => {
781 if !self.local_decls[local].ty.needs_drop(self.tcx, self.param_env) {
782 return;
783 }
784 true
785 }
786 DropKind::Storage => {
787 if local.index() <= self.arg_count {
788 span_bug!(
789 span,
790 "`schedule_drop` called with local {:?} and arg_count {}",
791 local,
792 self.arg_count,
793 )
794 }
795 false
796 }
797 };
798
799 // When building drops, we try to cache chains of drops to reduce the
800 // number of `DropTree::add_drop` calls. This, however, means that
801 // whenever we add a drop into a scope which already had some entries
802 // in the drop tree built (and thus, cached) for it, we must invalidate
803 // all caches which might branch into the scope which had a drop just
804 // added to it. This is necessary, because otherwise some other code
805 // might use the cache to branch into an already built chain of drops,
806 // essentially ignoring the newly added drop.
807 //
808 // For example, consider two scopes with a drop in each. These
809 // are built and thus the caches are filled:
810 //
811 // +--------------------------------------------------------+
812 // | +---------------------------------+ |
813 // | | +--------+ +-------------+ | +---------------+ |
814 // | | | return | <-+ | drop(outer) | <-+ | drop(middle) | |
815 // | | +--------+ +-------------+ | +---------------+ |
816 // | +------------|outer_scope cache|--+ |
817 // +------------------------------|middle_scope cache|------+
818 //
819 // Now, a new, inner-most scope is added along with a new drop into
820 // both inner-most and outer-most scopes:
821 //
822 // +------------------------------------------------------------+
823 // | +----------------------------------+ |
824 // | | +--------+ +-------------+ | +---------------+ | +-------------+
825 // | | | return | <+ | drop(new) | <-+ | drop(middle) | <--+| drop(inner) |
826 // | | +--------+ | | drop(outer) | | +---------------+ | +-------------+
827 // | | +-+ +-------------+ | |
828 // | +---|invalid outer_scope cache|----+ |
829 // +---------------------|invalid middle_scope cache|-----------+
830 //
831 // If, when adding `drop(new)` we do not invalidate the cached blocks for both
832 // outer_scope and middle_scope, then, when building drops for the inner (right-most)
833 // scope, the old, cached blocks, without `drop(new)` will get used, producing the
834 // wrong results.
835 //
836 // Note that this code iterates scopes from the inner-most to the outer-most,
837 // invalidating caches of each scope visited. This way only the bare minimum of
838 // caches gets invalidated; i.e., if a new drop is added into the middle scope, the
839 // cache of outer scope stays intact.
840 //
841 // Since we only cache drops for the unwind path and the generator drop
842 // path, we only need to invalidate the cache for drops that happen on
843 // the unwind or generator drop paths. This means that for
844 // non-generators we don't need to invalidate caches for `DropKind::Storage`.
845 let invalidate_caches = needs_drop || self.generator_kind.is_some();
846 for scope in self.scopes.scopes.iter_mut().rev() {
847 if invalidate_caches {
848 scope.invalidate_cache();
849 }
850
851 if scope.region_scope == region_scope {
852 let region_scope_span = region_scope.span(self.tcx, &self.region_scope_tree);
853 // Attribute scope exit drops to scope's closing brace.
854 let scope_end = self.tcx.sess.source_map().end_point(region_scope_span);
855
856 scope.drops.push(DropData {
857 source_info: SourceInfo { span: scope_end, scope: scope.source_scope },
858 local,
859 kind: drop_kind,
860 });
861
862 return;
863 }
864 }
865
866 span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, local);
867 }
868
869 /// Indicates that the "local operand" stored in `local` is
870 /// *moved* at some point during execution (see `local_scope` for
871 /// more information about what a "local operand" is -- in short,
872 /// it's an intermediate operand created as part of preparing some
873 /// MIR instruction). We use this information to suppress
874 /// redundant drops on the non-unwind paths. This results in less
875 /// MIR, but also avoids spurious borrow check errors
876 /// (c.f. #64391).
877 ///
878 /// Example: when compiling the call to `foo` here:
879 ///
880 /// ```rust
881 /// foo(bar(), ...)
882 /// ```
883 ///
884 /// we would evaluate `bar()` to an operand `_X`. We would also
885 /// schedule `_X` to be dropped when the expression scope for
886 /// `foo(bar())` is exited. This is relevant, for example, if the
887 /// later arguments should unwind (it would ensure that `_X` gets
888 /// dropped). However, if no unwind occurs, then `_X` will be
889 /// unconditionally consumed by the `call`:
890 ///
891 /// ```
892 /// bb {
893 /// ...
894 /// _R = CALL(foo, _X, ...)
895 /// }
896 /// ```
897 ///
898 /// However, `_X` is still registered to be dropped, and so if we
899 /// do nothing else, we would generate a `DROP(_X)` that occurs
900 /// after the call. This will later be optimized out by the
901 /// drop-elaboration code, but in the meantime it can lead to
902 /// spurious borrow-check errors -- the problem, ironically, is
903 /// not the `DROP(_X)` itself, but the (spurious) unwind pathways
904 /// that it creates. See #64391 for an example.
905 crate fn record_operands_moved(&mut self, operands: &[Operand<'tcx>]) {
906 let local_scope = self.local_scope();
907 let scope = self.scopes.scopes.last_mut().unwrap();
908
909 assert_eq!(scope.region_scope, local_scope, "local scope is not the topmost scope!",);
910
911 // look for moves of a local variable, like `MOVE(_X)`
912 let locals_moved = operands.iter().flat_map(|operand| match operand {
913 Operand::Copy(_) | Operand::Constant(_) => None,
914 Operand::Move(place) => place.as_local(),
915 });
916
917 for local in locals_moved {
918 // check if we have a Drop for this operand and -- if so
919 // -- add it to the list of moved operands. Note that this
920 // local might not have been an operand created for this
921 // call, it could come from other places too.
922 if scope.drops.iter().any(|drop| drop.local == local && drop.kind == DropKind::Value) {
923 scope.moved_locals.push(local);
924 }
925 }
926 }
927
928 // Other
929 // =====
930 /// Branch based on a boolean condition.
931 ///
932 /// This is a special case because the temporary for the condition needs to
933 /// be dropped on both the true and the false arm.
934 crate fn test_bool(
935 &mut self,
936 mut block: BasicBlock,
937 condition: &Expr<'tcx>,
938 source_info: SourceInfo,
939 ) -> (BasicBlock, BasicBlock) {
940 let cond = unpack!(block = self.as_local_operand(block, condition));
941 let true_block = self.cfg.start_new_block();
942 let false_block = self.cfg.start_new_block();
943 let term = TerminatorKind::if_(self.tcx, cond.clone(), true_block, false_block);
944 self.cfg.terminate(block, source_info, term);
945
946 match cond {
947 // Don't try to drop a constant
948 Operand::Constant(_) => (),
949 Operand::Copy(place) | Operand::Move(place) => {
950 if let Some(cond_temp) = place.as_local() {
951 // Manually drop the condition on both branches.
952 let top_scope = self.scopes.scopes.last_mut().unwrap();
953 let top_drop_data = top_scope.drops.pop().unwrap();
954 if self.generator_kind.is_some() {
955 top_scope.invalidate_cache();
956 }
957
958 match top_drop_data.kind {
959 DropKind::Value { .. } => {
960 bug!("Drop scheduled on top of condition variable")
961 }
962 DropKind::Storage => {
963 let source_info = top_drop_data.source_info;
964 let local = top_drop_data.local;
965 assert_eq!(local, cond_temp, "Drop scheduled on top of condition");
966 self.cfg.push(
967 true_block,
968 Statement { source_info, kind: StatementKind::StorageDead(local) },
969 );
970 self.cfg.push(
971 false_block,
972 Statement { source_info, kind: StatementKind::StorageDead(local) },
973 );
974 }
975 }
976 } else {
977 bug!("Expected as_local_operand to produce a temporary");
978 }
979 }
980 }
981
982 (true_block, false_block)
983 }
984
985 /// Returns the [DropIdx] for the innermost drop if the function unwound at
986 /// this point. The `DropIdx` will be created if it doesn't already exist.
987 fn diverge_cleanup(&mut self) -> DropIdx {
988 let is_generator = self.generator_kind.is_some();
989 let (uncached_scope, mut cached_drop) = self
990 .scopes
991 .scopes
992 .iter()
993 .enumerate()
994 .rev()
995 .find_map(|(scope_idx, scope)| {
996 scope.cached_unwind_block.map(|cached_block| (scope_idx + 1, cached_block))
997 })
998 .unwrap_or((0, ROOT_NODE));
999
1000 for scope in &mut self.scopes.scopes[uncached_scope..] {
1001 for drop in &scope.drops {
1002 if is_generator || drop.kind == DropKind::Value {
1003 cached_drop = self.scopes.unwind_drops.add_drop(*drop, cached_drop);
1004 }
1005 }
1006 scope.cached_unwind_block = Some(cached_drop);
1007 }
1008
1009 cached_drop
1010 }
1011
1012 /// Prepares to create a path that performs all required cleanup for a
1013 /// terminator that can unwind at the given basic block.
1014 ///
1015 /// This path terminates in Resume. The path isn't created until after all
1016 /// of the non-unwind paths in this item have been lowered.
1017 crate fn diverge_from(&mut self, start: BasicBlock) {
1018 debug_assert!(
1019 matches!(
1020 self.cfg.block_data(start).terminator().kind,
1021 TerminatorKind::Assert { .. }
1022 | TerminatorKind::Call { .. }
1023 | TerminatorKind::DropAndReplace { .. }
1024 | TerminatorKind::FalseUnwind { .. }
1025 ),
1026 "diverge_from called on block with terminator that cannot unwind."
1027 );
1028
1029 let next_drop = self.diverge_cleanup();
1030 self.scopes.unwind_drops.add_entry(start, next_drop);
1031 }
1032
1033 /// Sets up a path that performs all required cleanup for dropping a
1034 /// generator, starting from the given block that ends in
1035 /// [TerminatorKind::Yield].
1036 ///
1037 /// This path terminates in GeneratorDrop.
1038 crate fn generator_drop_cleanup(&mut self, yield_block: BasicBlock) {
1039 debug_assert!(
1040 matches!(
1041 self.cfg.block_data(yield_block).terminator().kind,
1042 TerminatorKind::Yield { .. }
1043 ),
1044 "generator_drop_cleanup called on block with non-yield terminator."
1045 );
1046 let (uncached_scope, mut cached_drop) = self
1047 .scopes
1048 .scopes
1049 .iter()
1050 .enumerate()
1051 .rev()
1052 .find_map(|(scope_idx, scope)| {
1053 scope.cached_generator_drop_block.map(|cached_block| (scope_idx + 1, cached_block))
1054 })
1055 .unwrap_or((0, ROOT_NODE));
1056
1057 for scope in &mut self.scopes.scopes[uncached_scope..] {
1058 for drop in &scope.drops {
1059 cached_drop = self.scopes.generator_drops.add_drop(*drop, cached_drop);
1060 }
1061 scope.cached_generator_drop_block = Some(cached_drop);
1062 }
1063
1064 self.scopes.generator_drops.add_entry(yield_block, cached_drop);
1065 }
1066
1067 /// Utility function for *non*-scope code to build its own drops.
1068 crate fn build_drop_and_replace(
1069 &mut self,
1070 block: BasicBlock,
1071 span: Span,
1072 place: Place<'tcx>,
1073 value: Operand<'tcx>,
1074 ) -> BlockAnd<()> {
1075 let source_info = self.source_info(span);
1076 let next_target = self.cfg.start_new_block();
1077
1078 self.cfg.terminate(
1079 block,
1080 source_info,
1081 TerminatorKind::DropAndReplace { place, value, target: next_target, unwind: None },
1082 );
1083 self.diverge_from(block);
1084
1085 next_target.unit()
1086 }
1087
1088 /// Creates an `Assert` terminator and returns the success block.
1089 /// If the boolean condition operand is not the expected value,
1090 /// a runtime panic will be caused with the given message.
1091 crate fn assert(
1092 &mut self,
1093 block: BasicBlock,
1094 cond: Operand<'tcx>,
1095 expected: bool,
1096 msg: AssertMessage<'tcx>,
1097 span: Span,
1098 ) -> BasicBlock {
1099 let source_info = self.source_info(span);
1100 let success_block = self.cfg.start_new_block();
1101
1102 self.cfg.terminate(
1103 block,
1104 source_info,
1105 TerminatorKind::Assert { cond, expected, msg, target: success_block, cleanup: None },
1106 );
1107 self.diverge_from(block);
1108
1109 success_block
1110 }
1111
1112 /// Unschedules any drops in the top scope.
1113 ///
1114 /// This is only needed for `match` arm scopes, because they have one
1115 /// entrance per pattern, but only one exit.
1116 crate fn clear_top_scope(&mut self, region_scope: region::Scope) {
1117 let top_scope = self.scopes.scopes.last_mut().unwrap();
1118
1119 assert_eq!(top_scope.region_scope, region_scope);
1120
1121 top_scope.drops.clear();
1122 top_scope.invalidate_cache();
1123 }
1124 }
1125
1126 /// Builds drops for `pop_scope` and `leave_top_scope`.
1127 fn build_scope_drops<'tcx>(
1128 cfg: &mut CFG<'tcx>,
1129 unwind_drops: &mut DropTree,
1130 scope: &Scope,
1131 mut block: BasicBlock,
1132 mut unwind_to: DropIdx,
1133 storage_dead_on_unwind: bool,
1134 arg_count: usize,
1135 ) -> BlockAnd<()> {
1136 debug!("build_scope_drops({:?} -> {:?})", block, scope);
1137
1138 // Build up the drops in evaluation order. The end result will
1139 // look like:
1140 //
1141 // [SDs, drops[n]] --..> [SDs, drop[1]] -> [SDs, drop[0]] -> [[SDs]]
1142 // | | |
1143 // : | |
1144 // V V
1145 // [drop[n]] -...-> [drop[1]] ------> [drop[0]] ------> [last_unwind_to]
1146 //
1147 // The horizontal arrows represent the execution path when the drops return
1148 // successfully. The downwards arrows represent the execution path when the
1149 // drops panic (panicking while unwinding will abort, so there's no need for
1150 // another set of arrows).
1151 //
1152 // For generators, we unwind from a drop on a local to its StorageDead
1153 // statement. For other functions we don't worry about StorageDead. The
1154 // drops for the unwind path should have already been generated by
1155 // `diverge_cleanup`.
1156
1157 for drop_data in scope.drops.iter().rev() {
1158 let source_info = drop_data.source_info;
1159 let local = drop_data.local;
1160
1161 match drop_data.kind {
1162 DropKind::Value => {
1163 // `unwind_to` should drop the value that we're about to
1164 // schedule. If dropping this value panics, then we continue
1165 // with the *next* value on the unwind path.
1166 debug_assert_eq!(unwind_drops.drops[unwind_to].0.local, drop_data.local);
1167 debug_assert_eq!(unwind_drops.drops[unwind_to].0.kind, drop_data.kind);
1168 unwind_to = unwind_drops.drops[unwind_to].1;
1169
1170 // If the operand has been moved, and we are not on an unwind
1171 // path, then don't generate the drop. (We only take this into
1172 // account for non-unwind paths so as not to disturb the
1173 // caching mechanism.)
1174 if scope.moved_locals.iter().any(|&o| o == local) {
1175 continue;
1176 }
1177
1178 unwind_drops.add_entry(block, unwind_to);
1179
1180 let next = cfg.start_new_block();
1181 cfg.terminate(
1182 block,
1183 source_info,
1184 TerminatorKind::Drop { place: local.into(), target: next, unwind: None },
1185 );
1186 block = next;
1187 }
1188 DropKind::Storage => {
1189 if storage_dead_on_unwind {
1190 debug_assert_eq!(unwind_drops.drops[unwind_to].0.local, drop_data.local);
1191 debug_assert_eq!(unwind_drops.drops[unwind_to].0.kind, drop_data.kind);
1192 unwind_to = unwind_drops.drops[unwind_to].1;
1193 }
1194 // Only temps and vars need their storage dead.
1195 assert!(local.index() > arg_count);
1196 cfg.push(block, Statement { source_info, kind: StatementKind::StorageDead(local) });
1197 }
1198 }
1199 }
1200 block.unit()
1201 }
1202
1203 impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
1204 /// Build a drop tree for a breakable scope.
1205 ///
1206 /// If `continue_block` is `Some`, then the tree is for `continue` inside a
1207 /// loop. Otherwise this is for `break` or `return`.
1208 fn build_exit_tree(
1209 &mut self,
1210 mut drops: DropTree,
1211 continue_block: Option<BasicBlock>,
1212 ) -> Option<BlockAnd<()>> {
1213 let mut blocks = IndexVec::from_elem(None, &drops.drops);
1214 blocks[ROOT_NODE] = continue_block;
1215
1216 drops.build_mir::<ExitScopes>(&mut self.cfg, &mut blocks);
1217
1218 // Link the exit drop tree to unwind drop tree.
1219 if drops.drops.iter().any(|(drop, _)| drop.kind == DropKind::Value) {
1220 let unwind_target = self.diverge_cleanup();
1221 let mut unwind_indices = IndexVec::from_elem_n(unwind_target, 1);
1222 for (drop_idx, drop_data) in drops.drops.iter_enumerated().skip(1) {
1223 match drop_data.0.kind {
1224 DropKind::Storage => {
1225 if self.generator_kind.is_some() {
1226 let unwind_drop = self
1227 .scopes
1228 .unwind_drops
1229 .add_drop(drop_data.0, unwind_indices[drop_data.1]);
1230 unwind_indices.push(unwind_drop);
1231 } else {
1232 unwind_indices.push(unwind_indices[drop_data.1]);
1233 }
1234 }
1235 DropKind::Value => {
1236 let unwind_drop = self
1237 .scopes
1238 .unwind_drops
1239 .add_drop(drop_data.0, unwind_indices[drop_data.1]);
1240 self.scopes
1241 .unwind_drops
1242 .add_entry(blocks[drop_idx].unwrap(), unwind_indices[drop_data.1]);
1243 unwind_indices.push(unwind_drop);
1244 }
1245 }
1246 }
1247 }
1248 blocks[ROOT_NODE].map(BasicBlock::unit)
1249 }
1250
1251 /// Build the unwind and generator drop trees.
1252 crate fn build_drop_trees(&mut self, should_abort: bool) {
1253 if self.generator_kind.is_some() {
1254 self.build_generator_drop_trees(should_abort);
1255 } else {
1256 Self::build_unwind_tree(
1257 &mut self.cfg,
1258 &mut self.scopes.unwind_drops,
1259 self.fn_span,
1260 should_abort,
1261 &mut None,
1262 );
1263 }
1264 }
1265
1266 fn build_generator_drop_trees(&mut self, should_abort: bool) {
1267 // Build the drop tree for dropping the generator while it's suspended.
1268 let drops = &mut self.scopes.generator_drops;
1269 let cfg = &mut self.cfg;
1270 let fn_span = self.fn_span;
1271 let mut blocks = IndexVec::from_elem(None, &drops.drops);
1272 drops.build_mir::<GeneratorDrop>(cfg, &mut blocks);
1273 if let Some(root_block) = blocks[ROOT_NODE] {
1274 cfg.terminate(
1275 root_block,
1276 SourceInfo::outermost(fn_span),
1277 TerminatorKind::GeneratorDrop,
1278 );
1279 }
1280
1281 // Build the drop tree for unwinding in the normal control flow paths.
1282 let resume_block = &mut None;
1283 let unwind_drops = &mut self.scopes.unwind_drops;
1284 Self::build_unwind_tree(cfg, unwind_drops, fn_span, should_abort, resume_block);
1285
1286 // Build the drop tree for unwinding when dropping a suspended
1287 // generator.
1288 //
1289 // This is a different tree from the standard unwind paths here, to
1290 // prevent drop elaboration from creating drop flags that would have
1291 // to be captured by the generator. I'm not sure how important this
1292 // optimization is, but it is here.
1293 for (drop_idx, drop_data) in drops.drops.iter_enumerated() {
1294 if let DropKind::Value = drop_data.0.kind {
1295 debug_assert!(drop_data.1 < drops.drops.next_index());
1296 drops.entry_points.push((drop_data.1, blocks[drop_idx].unwrap()));
1297 }
1298 }
1299 Self::build_unwind_tree(cfg, drops, fn_span, should_abort, resume_block);
1300 }
1301
1302 fn build_unwind_tree(
1303 cfg: &mut CFG<'tcx>,
1304 drops: &mut DropTree,
1305 fn_span: Span,
1306 should_abort: bool,
1307 resume_block: &mut Option<BasicBlock>,
1308 ) {
1309 let mut blocks = IndexVec::from_elem(None, &drops.drops);
1310 blocks[ROOT_NODE] = *resume_block;
1311 drops.build_mir::<Unwind>(cfg, &mut blocks);
1312 if let (None, Some(resume)) = (*resume_block, blocks[ROOT_NODE]) {
1313 // `TerminatorKind::Abort` is used for `#[unwind(aborts)]`
1314 // functions.
1315 let terminator =
1316 if should_abort { TerminatorKind::Abort } else { TerminatorKind::Resume };
1317
1318 cfg.terminate(resume, SourceInfo::outermost(fn_span), terminator);
1319
1320 *resume_block = blocks[ROOT_NODE];
1321 }
1322 }
1323 }
1324
1325 // DropTreeBuilder implementations.
1326
1327 struct ExitScopes;
1328
1329 impl<'tcx> DropTreeBuilder<'tcx> for ExitScopes {
1330 fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock {
1331 cfg.start_new_block()
1332 }
1333 fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) {
1334 cfg.block_data_mut(from).terminator_mut().kind = TerminatorKind::Goto { target: to };
1335 }
1336 }
1337
1338 struct GeneratorDrop;
1339
1340 impl<'tcx> DropTreeBuilder<'tcx> for GeneratorDrop {
1341 fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock {
1342 cfg.start_new_block()
1343 }
1344 fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) {
1345 let term = cfg.block_data_mut(from).terminator_mut();
1346 if let TerminatorKind::Yield { ref mut drop, .. } = term.kind {
1347 *drop = Some(to);
1348 } else {
1349 span_bug!(
1350 term.source_info.span,
1351 "cannot enter generator drop tree from {:?}",
1352 term.kind
1353 )
1354 }
1355 }
1356 }
1357
1358 struct Unwind;
1359
1360 impl<'tcx> DropTreeBuilder<'tcx> for Unwind {
1361 fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock {
1362 cfg.start_new_cleanup_block()
1363 }
1364 fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) {
1365 let term = &mut cfg.block_data_mut(from).terminator_mut();
1366 match &mut term.kind {
1367 TerminatorKind::Drop { unwind, .. }
1368 | TerminatorKind::DropAndReplace { unwind, .. }
1369 | TerminatorKind::FalseUnwind { unwind, .. }
1370 | TerminatorKind::Call { cleanup: unwind, .. }
1371 | TerminatorKind::Assert { cleanup: unwind, .. } => {
1372 *unwind = Some(to);
1373 }
1374 TerminatorKind::Goto { .. }
1375 | TerminatorKind::SwitchInt { .. }
1376 | TerminatorKind::Resume
1377 | TerminatorKind::Abort
1378 | TerminatorKind::Return
1379 | TerminatorKind::Unreachable
1380 | TerminatorKind::Yield { .. }
1381 | TerminatorKind::GeneratorDrop
1382 | TerminatorKind::FalseEdge { .. }
1383 | TerminatorKind::InlineAsm { .. } => {
1384 span_bug!(term.source_info.span, "cannot unwind from {:?}", term.kind)
1385 }
1386 }
1387 }
1388 }