1 /*!
2 Managing the scope stack. The scopes are tied to lexical scopes, so as
3 we descend the THIR, we push a scope on the stack, build its
4 contents, and then pop it off. Every scope is named by a
5 `region::Scope`.
6
7 ### SEME Regions
8
9 When pushing a new [Scope], we record the current point in the graph (a
10 basic block); this marks the entry to the scope. We then generate more
11 stuff in the control-flow graph. Whenever the scope is exited, either
12 via a `break` or `return` or just by fallthrough, that marks an exit
13 from the scope. Each lexical scope thus corresponds to a single-entry,
14 multiple-exit (SEME) region in the control-flow graph.
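
For instance, in this small illustrative snippet (the function `f`, `cond`, and
`_s` are made up for the example), the inner block is entered exactly once but
exited either through the `return` or by falling through to `1`, so its lexical
scope corresponds to a region with one entry and two exits:

```
# fn f(cond: bool) -> i32 {
{
    let _s = String::new();
    if cond { return 0; }
    1
}
# }
```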
15
16 For now, we record the `region::Scope` associated with each SEME region for
17 later reference (see the caveat in the next paragraph), because destruction
18 scopes are tied to them. This may change in the future so that MIR lowering
19 determines its own destruction scopes.
20
21 ### Not so SEME Regions
22
23 In the course of building matches, it sometimes happens that certain code
24 (namely guards) gets executed multiple times. This means that the lexical
25 scope may in fact correspond to multiple, disjoint SEME regions, so our
26 mapping is really from one scope to a vector of SEME regions. Since the SEME
27 regions are disjoint, the mapping is still one-to-one for the set of SEME
28 regions that we're currently in.
29
30 Also in matches, the scopes assigned to arms are not even always SEME regions!
31 Each arm has a single region with one entry for each pattern. We manually
32 manipulate the scheduled drops in this scope to avoid dropping things multiple
33 times.
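
As an illustrative sketch (`v` and `cond` are invented, and the real guard
lowering is more involved than this), the arm below has a single arm scope with
two pattern entries, and its guard may be evaluated once per entry:

```
# let v = 2;
# let cond = true;
match v {
    1 | 2 if cond => {} // one arm scope, two pattern entries, one guard
    _ => {}
}
```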
34
35 ### Drops
36
37 The primary purpose for scopes is to insert drops: while building
38 the contents, we also accumulate places that need to be dropped upon
39 exit from each scope. This is done by calling `schedule_drop`. Once a
40 drop is scheduled, whenever we branch out we will insert drops of all
41 those places onto the outgoing edge. Note that we don't know the full
42 set of scheduled drops up front, and so whenever we exit from the
43 scope we only drop the values scheduled thus far. For example, consider
44 the scope S corresponding to this loop:
45
46 ```
47 # let cond = true;
48 loop {
49     let x = ..;
50     if cond { break; }
51     let y = ..;
52 }
53 ```
54
55 When processing the `let x`, we will add one drop to the scope for
56 `x`. The break will then insert a drop for `x`. When we process `let
57 y`, we will add another drop (in fact, to a subscope, but let's ignore
58 that for now); any later drops would also drop `y`.
59
60 ### Early exit
61
62 There are numerous "normal" ways to exit a scope early: `break`,
63 `continue`, `return` (panics are handled separately). Whenever an
64 early exit occurs, the method `break_scope` is called. It is given the
65 current point in execution where the early exit occurs, as well as the
66 scope you want to branch to (note that all early exits go from a scope to
67 some enclosing scope). `break_scope` will record the set of drops currently
68 scheduled in a [DropTree]. Later, before `in_breakable_scope` exits, the drops
69 will be added to the CFG.
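
Concretely, in this illustrative snippet (`_s` and `cond` are invented), when
the `break` is lowered, `break_scope` records the pending drop of `_s` in the
breakable scope's drop tree, and that drop is later emitted on the `break`
edge:

```
# let cond = true;
loop {
    let _s = String::from("dropped on the break edge");
    if cond { break; }
}
// By the time control reaches this point, `_s` has already been dropped.
```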
70
71 Panics are handled in a similar fashion, except that the drops are added to the
72 MIR once the rest of the function has finished being lowered. If a terminator
73 can panic, call `diverge_from(block)`, where `block` is the block containing
74 that terminator.
75
76 ### Breakable scopes
77
78 In addition to the normal scope stack, we track a stack of breakable scopes
79 that contains only loops and breakable blocks. It records where a `break`,
80 `continue`, or `return` should branch to.
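
For instance (the loop labels and bounds here are invented for illustration),
each of the two loops below pushes one entry onto that stack, and the labeled
`continue` and `break` resolve to the outer entry rather than the innermost
one:

```
'outer: for i in 0..3 {
    for j in 0..3 {
        if j > i { continue 'outer; } // targets the outer loop's entry
        if i == 2 { break 'outer; }   // likewise
    }
}
```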
81
82 */
83
84 use std::mem;
85
86 use crate::build::{BlockAnd, BlockAndExtension, BlockFrame, Builder, CFG};
87 use rustc_data_structures::fx::FxHashMap;
88 use rustc_index::vec::IndexVec;
89 use rustc_middle::middle::region;
90 use rustc_middle::mir::*;
91 use rustc_middle::thir::{Expr, LintLevel};
92
93 use rustc_span::{Span, DUMMY_SP};
94
95 #[derive(Debug)]
96 pub struct Scopes<'tcx> {
97 scopes: Vec<Scope>,
98
99 /// The current set of breakable scopes. See module comment for more details.
100 breakable_scopes: Vec<BreakableScope<'tcx>>,
101
102 /// The scope of the innermost if-then currently being lowered.
103 if_then_scope: Option<IfThenScope>,
104
105 /// Drops that need to be done on unwind paths. See the comment on
106 /// [DropTree] for more details.
107 unwind_drops: DropTree,
108
109 /// Drops that need to be done on paths to the `GeneratorDrop` terminator.
110 generator_drops: DropTree,
111 }
112
113 #[derive(Debug)]
114 struct Scope {
115 /// The source scope this scope was created in.
116 source_scope: SourceScope,
117
118 /// The region scope this scope corresponds to within the source code.
119 region_scope: region::Scope,
120
121 /// Set of places to drop when exiting this scope. This starts
122 /// out empty but grows as variables are declared during the
123 /// building process. This is a stack, so we always drop from the
124 /// end of the vector (top of the stack) first.
125 drops: Vec<DropData>,
126
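/// Locals that have been moved out of this scope (see
/// `record_operands_moved`): their value drops are suppressed on the
/// non-unwind exit paths.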
127 moved_locals: Vec<Local>,
128
129 /// The drop index that will drop everything in and below this scope on an
130 /// unwind path.
131 cached_unwind_block: Option<DropIdx>,
132
133 /// The drop index that will drop everything in and below this scope on a
134 /// generator drop path.
135 cached_generator_drop_block: Option<DropIdx>,
136 }
137
138 #[derive(Clone, Copy, Debug)]
139 struct DropData {
140 /// The `Span` where the drop obligation was incurred (typically where the
141 /// place was declared).
142 source_info: SourceInfo,
143
144 /// The local to drop.
145 local: Local,
146
147 /// Whether this is a value `Drop` or a `StorageDead`.
148 kind: DropKind,
149 }
150
151 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
152 pub(crate) enum DropKind {
153 Value,
154 Storage,
155 }
156
157 #[derive(Debug)]
158 struct BreakableScope<'tcx> {
159 /// Region scope of the loop
160 region_scope: region::Scope,
161 /// The destination of the loop/block expression itself (i.e., where to put
162 /// the result of a `break` or `return` expression)
163 break_destination: Place<'tcx>,
164 /// Drops that happen on the `break`/`return` path.
165 break_drops: DropTree,
166 /// Drops that happen on the `continue` path.
167 continue_drops: Option<DropTree>,
168 }
169
170 #[derive(Debug)]
171 struct IfThenScope {
172 /// The if-then scope or arm scope
173 region_scope: region::Scope,
174 /// Drops that happen on the `else` path.
175 else_drops: DropTree,
176 }
177
178 /// The target of an expression that breaks out of a scope
179 #[derive(Clone, Copy, Debug)]
180 pub(crate) enum BreakableTarget {
181 Continue(region::Scope),
182 Break(region::Scope),
183 Return,
184 }
185
186 rustc_index::newtype_index! {
187 struct DropIdx { .. }
188 }
189
190 const ROOT_NODE: DropIdx = DropIdx::from_u32(0);
191
192 /// A tree of drops that we have deferred lowering. It's used for:
193 ///
194 /// * Drops on unwind paths
195 /// * Drops on generator drop paths (when a suspended generator is dropped)
196 /// * Drops on return and loop exit paths
197 /// * Drops on the else path in an `if let` chain
198 ///
199 /// Once no more nodes can be added to the tree, we lower it to MIR in one go
200 /// in `build_mir`.
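///
/// A rough usage sketch (the `drop_a`, `drop_b` and `entry_block` names are
/// hypothetical; the real callers are `break_scope`, `diverge_cleanup` and
/// friends below):
///
/// ```ignore (illustrative)
/// let mut tree = DropTree::new();
/// // Drops are chained towards the root: the second argument is the drop
/// // that runs *after* the one being added.
/// let a = tree.add_drop(drop_a, ROOT_NODE);
/// let b = tree.add_drop(drop_b, a);
/// // Record a block outside the tree that must run `drop_b`, then `drop_a`.
/// tree.add_entry(entry_block, b);
/// ```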
201 #[derive(Debug)]
202 struct DropTree {
203 /// Drops in the tree.
204 drops: IndexVec<DropIdx, (DropData, DropIdx)>,
205 /// Map for finding the inverse of the `next_drop` relation:
206 ///
207 /// `previous_drops[(drops[i].1, drops[i].0.local, drops[i].0.kind)] == i`
208 previous_drops: FxHashMap<(DropIdx, Local, DropKind), DropIdx>,
209 /// Edges into the `DropTree` that need to be added once it's lowered.
210 entry_points: Vec<(DropIdx, BasicBlock)>,
211 }
212
213 impl Scope {
214 /// Whether there's anything to do for the cleanup path, that is,
215 /// when unwinding through this scope. This includes destructors,
216 /// but not StorageDead statements, which don't get emitted at all
217 /// for unwinding, for several reasons:
218 /// * clang doesn't emit llvm.lifetime.end for C++ unwinding
219 /// * LLVM's memory dependency analysis can't handle it atm
220 /// * polluting the cleanup MIR with StorageDead creates
221 /// landing pads even though there are no actual destructors
222 /// * freeing up stack space has no effect during unwinding
223 /// Note that for generators we do emit StorageDeads, for use by
224 /// optimizations in the MIR generator transform.
225 fn needs_cleanup(&self) -> bool {
226 self.drops.iter().any(|drop| match drop.kind {
227 DropKind::Value => true,
228 DropKind::Storage => false,
229 })
230 }
231
232 fn invalidate_cache(&mut self) {
233 self.cached_unwind_block = None;
234 self.cached_generator_drop_block = None;
235 }
236 }
237
238 /// A trait that determines how a [DropTree] creates its blocks and
239 /// links to any entry nodes.
240 trait DropTreeBuilder<'tcx> {
241 /// Create a new block for the tree. This should call either
242 /// `cfg.start_new_block()` or `cfg.start_new_cleanup_block()`.
243 fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock;
244
245 /// Links a block outside the drop tree, `from`, to the block `to` inside
246 /// the drop tree.
247 fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock);
248 }
249
250 impl DropTree {
251 fn new() -> Self {
252 // The root node of the tree doesn't represent a drop, but instead
253 // represents the block in the tree that should be jumped to once all
254 // of the required drops have been performed.
255 let fake_source_info = SourceInfo::outermost(DUMMY_SP);
256 let fake_data =
257 DropData { source_info: fake_source_info, local: Local::MAX, kind: DropKind::Storage };
258 let drop_idx = DropIdx::MAX;
259 let drops = IndexVec::from_elem_n((fake_data, drop_idx), 1);
260 Self { drops, entry_points: Vec::new(), previous_drops: FxHashMap::default() }
261 }
262
263 fn add_drop(&mut self, drop: DropData, next: DropIdx) -> DropIdx {
264 let drops = &mut self.drops;
265 *self
266 .previous_drops
267 .entry((next, drop.local, drop.kind))
268 .or_insert_with(|| drops.push((drop, next)))
269 }
270
271 fn add_entry(&mut self, from: BasicBlock, to: DropIdx) {
272 debug_assert!(to < self.drops.next_index());
273 self.entry_points.push((to, from));
274 }
275
276 /// Builds the MIR for a given drop tree.
277 ///
278 /// `blocks` should have the same length as `self.drops`, and may have its
279 /// first value set to some already existing block.
280 fn build_mir<'tcx, T: DropTreeBuilder<'tcx>>(
281 &mut self,
282 cfg: &mut CFG<'tcx>,
283 blocks: &mut IndexVec<DropIdx, Option<BasicBlock>>,
284 ) {
285 debug!("DropTree::build_mir(drops = {:#?})", self);
286 assert_eq!(blocks.len(), self.drops.len());
287
288 self.assign_blocks::<T>(cfg, blocks);
289 self.link_blocks(cfg, blocks)
290 }
291
292 /// Assign blocks for all of the drops in the drop tree that need them.
293 fn assign_blocks<'tcx, T: DropTreeBuilder<'tcx>>(
294 &mut self,
295 cfg: &mut CFG<'tcx>,
296 blocks: &mut IndexVec<DropIdx, Option<BasicBlock>>,
297 ) {
298 // StorageDead statements can share blocks with each other and also with
299 // a Drop terminator. We iterate through the drops to find which drops
300 // need their own block.
301 #[derive(Clone, Copy)]
302 enum Block {
303 // This drop is unreachable
304 None,
305 // This drop is only reachable through the `StorageDead` with the
306 // specified index.
307 Shares(DropIdx),
308 // This drop has more than one way of being reached, or it is
309 // branched to from outside the tree, or its predecessor is a
310 // `Value` drop.
311 Own,
312 }
313
314 let mut needs_block = IndexVec::from_elem(Block::None, &self.drops);
315 if blocks[ROOT_NODE].is_some() {
316 // In some cases (such as drops for `continue`) the root node
317 // already has a block. In this case, make sure that we don't
318 // override it.
319 needs_block[ROOT_NODE] = Block::Own;
320 }
321
322 // Sort so that we only need to check the last value.
323 let entry_points = &mut self.entry_points;
324 entry_points.sort();
325
326 for (drop_idx, drop_data) in self.drops.iter_enumerated().rev() {
327 if entry_points.last().map_or(false, |entry_point| entry_point.0 == drop_idx) {
328 let block = *blocks[drop_idx].get_or_insert_with(|| T::make_block(cfg));
329 needs_block[drop_idx] = Block::Own;
330 while entry_points.last().map_or(false, |entry_point| entry_point.0 == drop_idx) {
331 let entry_block = entry_points.pop().unwrap().1;
332 T::add_entry(cfg, entry_block, block);
333 }
334 }
335 match needs_block[drop_idx] {
336 Block::None => continue,
337 Block::Own => {
338 blocks[drop_idx].get_or_insert_with(|| T::make_block(cfg));
339 }
340 Block::Shares(pred) => {
341 blocks[drop_idx] = blocks[pred];
342 }
343 }
344 if let DropKind::Value = drop_data.0.kind {
345 needs_block[drop_data.1] = Block::Own;
346 } else if drop_idx != ROOT_NODE {
347 match &mut needs_block[drop_data.1] {
348 pred @ Block::None => *pred = Block::Shares(drop_idx),
349 pred @ Block::Shares(_) => *pred = Block::Own,
350 Block::Own => (),
351 }
352 }
353 }
354
355 debug!("assign_blocks: blocks = {:#?}", blocks);
356 assert!(entry_points.is_empty());
357 }
358
359 fn link_blocks<'tcx>(
360 &self,
361 cfg: &mut CFG<'tcx>,
362 blocks: &IndexVec<DropIdx, Option<BasicBlock>>,
363 ) {
364 for (drop_idx, drop_data) in self.drops.iter_enumerated().rev() {
365 let Some(block) = blocks[drop_idx] else { continue };
366 match drop_data.0.kind {
367 DropKind::Value => {
368 let terminator = TerminatorKind::Drop {
369 target: blocks[drop_data.1].unwrap(),
370 // The caller will handle this if needed.
371 unwind: None,
372 place: drop_data.0.local.into(),
373 };
374 cfg.terminate(block, drop_data.0.source_info, terminator);
375 }
376 // Root nodes don't correspond to a drop.
377 DropKind::Storage if drop_idx == ROOT_NODE => {}
378 DropKind::Storage => {
379 let stmt = Statement {
380 source_info: drop_data.0.source_info,
381 kind: StatementKind::StorageDead(drop_data.0.local),
382 };
383 cfg.push(block, stmt);
384 let target = blocks[drop_data.1].unwrap();
385 if target != block {
386 // Diagnostics don't use this `Span` but debuginfo
387 // might. Since we don't want breakpoints to be placed
388 // here, especially when this is on an unwind path, we
389 // use `DUMMY_SP`.
390 let source_info = SourceInfo { span: DUMMY_SP, ..drop_data.0.source_info };
391 let terminator = TerminatorKind::Goto { target };
392 cfg.terminate(block, source_info, terminator);
393 }
394 }
395 }
396 }
397 }
398 }
399
400 impl<'tcx> Scopes<'tcx> {
401 pub(crate) fn new() -> Self {
402 Self {
403 scopes: Vec::new(),
404 breakable_scopes: Vec::new(),
405 if_then_scope: None,
406 unwind_drops: DropTree::new(),
407 generator_drops: DropTree::new(),
408 }
409 }
410
411 fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo), vis_scope: SourceScope) {
412 debug!("push_scope({:?})", region_scope);
413 self.scopes.push(Scope {
414 source_scope: vis_scope,
415 region_scope: region_scope.0,
416 drops: vec![],
417 moved_locals: vec![],
418 cached_unwind_block: None,
419 cached_generator_drop_block: None,
420 });
421 }
422
423 fn pop_scope(&mut self, region_scope: (region::Scope, SourceInfo)) -> Scope {
424 let scope = self.scopes.pop().unwrap();
425 assert_eq!(scope.region_scope, region_scope.0);
426 scope
427 }
428
429 fn scope_index(&self, region_scope: region::Scope, span: Span) -> usize {
430 self.scopes
431 .iter()
432 .rposition(|scope| scope.region_scope == region_scope)
433 .unwrap_or_else(|| span_bug!(span, "region_scope {:?} does not enclose", region_scope))
434 }
435
436 /// Returns the topmost active scope, which is known to be alive until
437 /// the next scope expression.
438 fn topmost(&self) -> region::Scope {
439 self.scopes.last().expect("topmost_scope: no scopes present").region_scope
440 }
441 }
442
443 impl<'a, 'tcx> Builder<'a, 'tcx> {
444 // Adding and removing scopes
445 // ==========================
446 // Start a breakable scope, which tracks where `continue`, `break` and
447 // `return` should branch to.
448 pub(crate) fn in_breakable_scope<F>(
449 &mut self,
450 loop_block: Option<BasicBlock>,
451 break_destination: Place<'tcx>,
452 span: Span,
453 f: F,
454 ) -> BlockAnd<()>
455 where
456 F: FnOnce(&mut Builder<'a, 'tcx>) -> Option<BlockAnd<()>>,
457 {
458 let region_scope = self.scopes.topmost();
459 let scope = BreakableScope {
460 region_scope,
461 break_destination,
462 break_drops: DropTree::new(),
463 continue_drops: loop_block.map(|_| DropTree::new()),
464 };
465 self.scopes.breakable_scopes.push(scope);
466 let normal_exit_block = f(self);
467 let breakable_scope = self.scopes.breakable_scopes.pop().unwrap();
468 assert!(breakable_scope.region_scope == region_scope);
469 let break_block =
470 self.build_exit_tree(breakable_scope.break_drops, region_scope, span, None);
471 if let Some(drops) = breakable_scope.continue_drops {
472 self.build_exit_tree(drops, region_scope, span, loop_block);
473 }
474 match (normal_exit_block, break_block) {
475 (Some(block), None) | (None, Some(block)) => block,
476 (None, None) => self.cfg.start_new_block().unit(),
477 (Some(normal_block), Some(exit_block)) => {
478 let target = self.cfg.start_new_block();
479 let source_info = self.source_info(span);
480 self.cfg.terminate(
481 unpack!(normal_block),
482 source_info,
483 TerminatorKind::Goto { target },
484 );
485 self.cfg.terminate(
486 unpack!(exit_block),
487 source_info,
488 TerminatorKind::Goto { target },
489 );
490 target.unit()
491 }
492 }
493 }
494
495 /// Start an if-then scope which tracks drops for `if` expressions and `if`
496 /// guards.
497 ///
498 /// For an if-let chain:
499 ///
500 /// if let Some(x) = a && let Some(y) = b && let Some(z) = c { ... }
501 ///
502 /// There are three possible ways the condition can be false and we may have
503 /// to drop `x`, `x` and `y`, or neither depending on which binding fails.
504 /// To handle this correctly we use a `DropTree` in a similar way to a
505 /// `loop` expression and 'break' out on all of the 'else' paths.
506 ///
507 /// Notes:
508 /// - We don't need to keep a stack of scopes in the `Builder` because the
509 /// 'else' paths will only leave the innermost scope.
510 /// - This is also used for match guards.
511 pub(crate) fn in_if_then_scope<F>(
512 &mut self,
513 region_scope: region::Scope,
514 span: Span,
515 f: F,
516 ) -> (BasicBlock, BasicBlock)
517 where
518 F: FnOnce(&mut Builder<'a, 'tcx>) -> BlockAnd<()>,
519 {
520 let scope = IfThenScope { region_scope, else_drops: DropTree::new() };
521 let previous_scope = mem::replace(&mut self.scopes.if_then_scope, Some(scope));
522
523 let then_block = unpack!(f(self));
524
525 let if_then_scope = mem::replace(&mut self.scopes.if_then_scope, previous_scope).unwrap();
526 assert!(if_then_scope.region_scope == region_scope);
527
528 let else_block = self
529 .build_exit_tree(if_then_scope.else_drops, region_scope, span, None)
530 .map_or_else(|| self.cfg.start_new_block(), |else_block_and| unpack!(else_block_and));
531
532 (then_block, else_block)
533 }
534
535 pub(crate) fn in_opt_scope<F, R>(
536 &mut self,
537 opt_scope: Option<(region::Scope, SourceInfo)>,
538 f: F,
539 ) -> BlockAnd<R>
540 where
541 F: FnOnce(&mut Builder<'a, 'tcx>) -> BlockAnd<R>,
542 {
543 debug!("in_opt_scope(opt_scope={:?})", opt_scope);
544 if let Some(region_scope) = opt_scope {
545 self.push_scope(region_scope);
546 }
547 let mut block;
548 let rv = unpack!(block = f(self));
549 if let Some(region_scope) = opt_scope {
550 unpack!(block = self.pop_scope(region_scope, block));
551 }
552 debug!("in_scope: exiting opt_scope={:?} block={:?}", opt_scope, block);
553 block.and(rv)
554 }
555
556 /// Convenience wrapper that pushes a scope and then executes `f`
557 /// to build its contents, popping the scope afterwards.
558 pub(crate) fn in_scope<F, R>(
559 &mut self,
560 region_scope: (region::Scope, SourceInfo),
561 lint_level: LintLevel,
562 f: F,
563 ) -> BlockAnd<R>
564 where
565 F: FnOnce(&mut Builder<'a, 'tcx>) -> BlockAnd<R>,
566 {
567 debug!("in_scope(region_scope={:?})", region_scope);
568 let source_scope = self.source_scope;
569 let tcx = self.tcx;
570 if let LintLevel::Explicit(current_hir_id) = lint_level {
571 // Use `maybe_lint_level_root_bounded` with `root_lint_level` as a bound
572 // to avoid adding Hir dependencies on our parents.
573 // We estimate the true lint roots here to avoid creating a lot of source scopes.
574
575 let parent_root = tcx.maybe_lint_level_root_bounded(
576 self.source_scopes[source_scope].local_data.as_ref().assert_crate_local().lint_root,
577 self.hir_id,
578 );
579 let current_root = tcx.maybe_lint_level_root_bounded(current_hir_id, self.hir_id);
580
581 if parent_root != current_root {
582 self.source_scope = self.new_source_scope(
583 region_scope.1.span,
584 LintLevel::Explicit(current_root),
585 None,
586 );
587 }
588 }
589 self.push_scope(region_scope);
590 let mut block;
591 let rv = unpack!(block = f(self));
592 unpack!(block = self.pop_scope(region_scope, block));
593 self.source_scope = source_scope;
594 debug!("in_scope: exiting region_scope={:?} block={:?}", region_scope, block);
595 block.and(rv)
596 }
597
598 /// Push a scope onto the stack. You can then build code in this
599 /// scope and call `pop_scope` afterwards. Note that these two
600 /// calls must be paired; using `in_scope` as a convenience
601 /// wrapper may be preferable.
602 pub(crate) fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo)) {
603 self.scopes.push_scope(region_scope, self.source_scope);
604 }
605
606 /// Pops a scope, which should have region scope `region_scope`,
607 /// adding any drops onto the end of `block` that are needed.
608 /// This must match 1-to-1 with `push_scope`.
609 pub(crate) fn pop_scope(
610 &mut self,
611 region_scope: (region::Scope, SourceInfo),
612 mut block: BasicBlock,
613 ) -> BlockAnd<()> {
614 debug!("pop_scope({:?}, {:?})", region_scope, block);
615
616 block = self.leave_top_scope(block);
617
618 self.scopes.pop_scope(region_scope);
619
620 block.unit()
621 }
622
623 /// Sets up the drops for breaking from `block` to `target`.
624 pub(crate) fn break_scope(
625 &mut self,
626 mut block: BasicBlock,
627 value: Option<&Expr<'tcx>>,
628 target: BreakableTarget,
629 source_info: SourceInfo,
630 ) -> BlockAnd<()> {
631 let span = source_info.span;
632
633 let get_scope_index = |scope: region::Scope| {
634 // find the loop-scope by its `region::Scope`.
635 self.scopes
636 .breakable_scopes
637 .iter()
638 .rposition(|breakable_scope| breakable_scope.region_scope == scope)
639 .unwrap_or_else(|| span_bug!(span, "no enclosing breakable scope found"))
640 };
641 let (break_index, destination) = match target {
642 BreakableTarget::Return => {
643 let scope = &self.scopes.breakable_scopes[0];
644 if scope.break_destination != Place::return_place() {
645 span_bug!(span, "`return` in item with no return scope");
646 }
647 (0, Some(scope.break_destination))
648 }
649 BreakableTarget::Break(scope) => {
650 let break_index = get_scope_index(scope);
651 let scope = &self.scopes.breakable_scopes[break_index];
652 (break_index, Some(scope.break_destination))
653 }
654 BreakableTarget::Continue(scope) => {
655 let break_index = get_scope_index(scope);
656 (break_index, None)
657 }
658 };
659
660 if let Some(destination) = destination {
661 if let Some(value) = value {
662 debug!("stmt_expr Break val block_context.push(SubExpr)");
663 self.block_context.push(BlockFrame::SubExpr);
664 unpack!(block = self.expr_into_dest(destination, block, value));
665 self.block_context.pop();
666 } else {
667 self.cfg.push_assign_unit(block, source_info, destination, self.tcx)
668 }
669 } else {
670 assert!(value.is_none(), "`return` and `break` should have a destination");
671 if self.tcx.sess.instrument_coverage() {
672 // Unlike `break` and `return`, which push an `Assign` statement to MIR, from which
673 // a Coverage code region can be generated, `continue` needs no `Assign`; but
674 // without one, the `InstrumentCoverage` MIR pass cannot generate a code region for
675 // `continue`. Coverage will be missing unless we add a dummy `Assign` to MIR.
676 self.add_dummy_assignment(span, block, source_info);
677 }
678 }
679
680 let region_scope = self.scopes.breakable_scopes[break_index].region_scope;
681 let scope_index = self.scopes.scope_index(region_scope, span);
682 let drops = if destination.is_some() {
683 &mut self.scopes.breakable_scopes[break_index].break_drops
684 } else {
685 self.scopes.breakable_scopes[break_index].continue_drops.as_mut().unwrap()
686 };
687 let mut drop_idx = ROOT_NODE;
688 for scope in &self.scopes.scopes[scope_index + 1..] {
689 for drop in &scope.drops {
690 drop_idx = drops.add_drop(*drop, drop_idx);
691 }
692 }
693 drops.add_entry(block, drop_idx);
694
695 // `build_drop_trees` doesn't have access to our source_info, so we
696 // create a dummy terminator now. `TerminatorKind::Resume` is used
697 // because MIR type checking will panic if it hasn't been overwritten.
698 self.cfg.terminate(block, source_info, TerminatorKind::Resume);
699
700 self.cfg.start_new_block().unit()
701 }
702
703 pub(crate) fn break_for_else(
704 &mut self,
705 block: BasicBlock,
706 target: region::Scope,
707 source_info: SourceInfo,
708 ) {
709 let scope_index = self.scopes.scope_index(target, source_info.span);
710 let if_then_scope = self
711 .scopes
712 .if_then_scope
713 .as_mut()
714 .unwrap_or_else(|| span_bug!(source_info.span, "no if-then scope found"));
715
716 assert_eq!(if_then_scope.region_scope, target, "breaking to incorrect scope");
717
718 let mut drop_idx = ROOT_NODE;
719 let drops = &mut if_then_scope.else_drops;
720 for scope in &self.scopes.scopes[scope_index + 1..] {
721 for drop in &scope.drops {
722 drop_idx = drops.add_drop(*drop, drop_idx);
723 }
724 }
725 drops.add_entry(block, drop_idx);
726
727 // `build_drop_trees` doesn't have access to our source_info, so we
728 // create a dummy terminator now. `TerminatorKind::Resume` is used
729 // because MIR type checking will panic if it hasn't been overwritten.
730 self.cfg.terminate(block, source_info, TerminatorKind::Resume);
731 }
732
733 // Add a dummy `Assign` statement to the CFG, with the span for the source code's `continue`
734 // statement.
735 fn add_dummy_assignment(&mut self, span: Span, block: BasicBlock, source_info: SourceInfo) {
736 let local_decl = LocalDecl::new(self.tcx.mk_unit(), span).internal();
737 let temp_place = Place::from(self.local_decls.push(local_decl));
738 self.cfg.push_assign_unit(block, source_info, temp_place, self.tcx);
739 }
740
741 fn leave_top_scope(&mut self, block: BasicBlock) -> BasicBlock {
742 // If we are emitting a `drop` statement, we need to have the cached
743 // diverge cleanup pads ready in case that drop panics.
744 let needs_cleanup = self.scopes.scopes.last().map_or(false, |scope| scope.needs_cleanup());
745 let is_generator = self.generator_kind.is_some();
746 let unwind_to = if needs_cleanup { self.diverge_cleanup() } else { DropIdx::MAX };
747
748 let scope = self.scopes.scopes.last().expect("leave_top_scope called with no scopes");
749 unpack!(build_scope_drops(
750 &mut self.cfg,
751 &mut self.scopes.unwind_drops,
752 scope,
753 block,
754 unwind_to,
755 is_generator && needs_cleanup,
756 self.arg_count,
757 ))
758 }
759
760 /// Creates a new source scope, nested in the current one.
761 pub(crate) fn new_source_scope(
762 &mut self,
763 span: Span,
764 lint_level: LintLevel,
765 safety: Option<Safety>,
766 ) -> SourceScope {
767 let parent = self.source_scope;
768 debug!(
769 "new_source_scope({:?}, {:?}, {:?}) - parent({:?})={:?}",
770 span,
771 lint_level,
772 safety,
773 parent,
774 self.source_scopes.get(parent)
775 );
776 let scope_local_data = SourceScopeLocalData {
777 lint_root: if let LintLevel::Explicit(lint_root) = lint_level {
778 lint_root
779 } else {
780 self.source_scopes[parent].local_data.as_ref().assert_crate_local().lint_root
781 },
782 safety: safety.unwrap_or_else(|| {
783 self.source_scopes[parent].local_data.as_ref().assert_crate_local().safety
784 }),
785 };
786 self.source_scopes.push(SourceScopeData {
787 span,
788 parent_scope: Some(parent),
789 inlined: None,
790 inlined_parent_scope: None,
791 local_data: ClearCrossCrate::Set(scope_local_data),
792 })
793 }
794
795 /// Given a span and the current source scope, make a SourceInfo.
796 pub(crate) fn source_info(&self, span: Span) -> SourceInfo {
797 SourceInfo { span, scope: self.source_scope }
798 }
799
800 // Finding scopes
801 // ==============
802 /// Returns the scope that we should use as the lifetime of an
803 /// operand. Basically, an operand must live until it is consumed.
804 /// This is similar to, but not quite the same as, the temporary
805 /// scope (which can be larger or smaller).
806 ///
807 /// Consider:
808 /// ```ignore (illustrative)
809 /// let x = foo(bar(X, Y));
810 /// ```
811 /// We wish to pop the storage for X and Y after `bar()` is
812 /// called, not after the whole `let` is completed.
813 ///
814 /// As another example, if the second argument diverges:
815 /// ```ignore (illustrative)
816 /// foo(Box::new(2), panic!())
817 /// ```
818 /// We would allocate the box but then free it on the unwinding
819 /// path; we would also emit a free on the 'success' path from
820 /// panic, but that will turn out to be removed as dead-code.
821 pub(crate) fn local_scope(&self) -> region::Scope {
822 self.scopes.topmost()
823 }
824
825 // Scheduling drops
826 // ================
827 pub(crate) fn schedule_drop_storage_and_value(
828 &mut self,
829 span: Span,
830 region_scope: region::Scope,
831 local: Local,
832 ) {
833 self.schedule_drop(span, region_scope, local, DropKind::Storage);
834 self.schedule_drop(span, region_scope, local, DropKind::Value);
835 }
836
837 /// Indicates that `local` should be dropped on exit from `region_scope`.
838 ///
839 /// When called with `DropKind::Storage`, `local` shouldn't be the return
840 /// place or a function parameter.
841 pub(crate) fn schedule_drop(
842 &mut self,
843 span: Span,
844 region_scope: region::Scope,
845 local: Local,
846 drop_kind: DropKind,
847 ) {
848 let needs_drop = match drop_kind {
849 DropKind::Value => {
850 if !self.local_decls[local].ty.needs_drop(self.tcx, self.param_env) {
851 return;
852 }
853 true
854 }
855 DropKind::Storage => {
856 if local.index() <= self.arg_count {
857 span_bug!(
858 span,
859 "`schedule_drop` called with local {:?} and arg_count {}",
860 local,
861 self.arg_count,
862 )
863 }
864 false
865 }
866 };
867
868 // When building drops, we try to cache chains of drops to reduce the
869 // number of `DropTree::add_drop` calls. This, however, means that
870 // whenever we add a drop into a scope which already had some entries
871 // in the drop tree built (and thus, cached) for it, we must invalidate
872 // all caches which might branch into the scope which had a drop just
873 // added to it. This is necessary because otherwise some other code
874 // might use the cache to branch into an already built chain of drops,
875 // essentially ignoring the newly added drop.
876 //
877 // For example, consider two scopes with a drop in each. These
878 // are built and thus the caches are filled:
879 //
880 // +--------------------------------------------------------+
881 // | +---------------------------------+ |
882 // | | +--------+ +-------------+ | +---------------+ |
883 // | | | return | <-+ | drop(outer) | <-+ | drop(middle) | |
884 // | | +--------+ +-------------+ | +---------------+ |
885 // | +------------|outer_scope cache|--+ |
886 // +------------------------------|middle_scope cache|------+
887 //
888 // Now, a new, inner-most scope is added along with a new drop into
889 // both inner-most and outer-most scopes:
890 //
891 // +------------------------------------------------------------+
892 // | +----------------------------------+ |
893 // | | +--------+ +-------------+ | +---------------+ | +-------------+
894 // | | | return | <+ | drop(new) | <-+ | drop(middle) | <--+| drop(inner) |
895 // | | +--------+ | | drop(outer) | | +---------------+ | +-------------+
896 // | | +-+ +-------------+ | |
897 // | +---|invalid outer_scope cache|----+ |
898 // +----=----------------|invalid middle_scope cache|-----------+
899 //
900 // If, when adding `drop(new)`, we do not invalidate the cached blocks for both
901 // outer_scope and middle_scope, then, when building drops for the inner (right-most)
902 // scope, the old cached blocks, without `drop(new)`, will get used, producing the
903 // wrong results.
904 //
905 // Note that this code iterates scopes from the inner-most to the outer-most,
906 // invalidating the caches of each scope visited. This way the bare minimum of
907 // caches gets invalidated, i.e., if a new drop is added into the middle scope,
908 // the cache of the outer scope stays intact.
909 //
910 // Since we only cache drops for the unwind path and the generator drop
911 // path, we only need to invalidate the cache for drops that happen on
912 // the unwind or generator drop paths. This means that for
913 // non-generators we don't need to invalidate caches for `DropKind::Storage`.
914 let invalidate_caches = needs_drop || self.generator_kind.is_some();
915 for scope in self.scopes.scopes.iter_mut().rev() {
916 if invalidate_caches {
917 scope.invalidate_cache();
918 }
919
920 if scope.region_scope == region_scope {
921 let region_scope_span = region_scope.span(self.tcx, &self.region_scope_tree);
922 // Attribute scope exit drops to scope's closing brace.
923 let scope_end = self.tcx.sess.source_map().end_point(region_scope_span);
924
925 scope.drops.push(DropData {
926 source_info: SourceInfo { span: scope_end, scope: scope.source_scope },
927 local,
928 kind: drop_kind,
929 });
930
931 return;
932 }
933 }
934
935 span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, local);
936 }
937
938 /// Indicates that the "local operand" stored in `local` is
939 /// *moved* at some point during execution (see `local_scope` for
940 /// more information about what a "local operand" is -- in short,
941 /// it's an intermediate operand created as part of preparing some
942 /// MIR instruction). We use this information to suppress
943 /// redundant drops on the non-unwind paths. This results in less
944 /// MIR, but also avoids spurious borrow check errors
945 /// (c.f. #64391).
946 ///
947 /// Example: when compiling the call to `foo` here:
948 ///
949 /// ```ignore (illustrative)
950 /// foo(bar(), ...)
951 /// ```
952 ///
953 /// we would evaluate `bar()` to an operand `_X`. We would also
954 /// schedule `_X` to be dropped when the expression scope for
955 /// `foo(bar())` is exited. This is relevant, for example, if a
956 /// later argument unwinds (it would ensure that `_X` gets
957 /// dropped). However, if no unwind occurs, then `_X` will be
958 /// unconditionally consumed by the `call`:
959 ///
960 /// ```ignore (illustrative)
961 /// bb {
962 /// ...
963 /// _R = CALL(foo, _X, ...)
964 /// }
965 /// ```
966 ///
967 /// However, `_X` is still registered to be dropped, and so if we
968 /// do nothing else, we would generate a `DROP(_X)` that occurs
969 /// after the call. This will later be optimized out by the
970 /// drop-elaboration code, but in the meantime it can lead to
971 /// spurious borrow-check errors -- the problem, ironically, is
972 /// not the `DROP(_X)` itself, but the (spurious) unwind pathways
973 /// that it creates. See #64391 for an example.
974 pub(crate) fn record_operands_moved(&mut self, operands: &[Operand<'tcx>]) {
975 let local_scope = self.local_scope();
976 let scope = self.scopes.scopes.last_mut().unwrap();
977
978 assert_eq!(scope.region_scope, local_scope, "local scope is not the topmost scope!",);
979
980 // look for moves of a local variable, like `MOVE(_X)`
981 let locals_moved = operands.iter().flat_map(|operand| match operand {
982 Operand::Copy(_) | Operand::Constant(_) => None,
983 Operand::Move(place) => place.as_local(),
984 });
985
986 for local in locals_moved {
987 // check if we have a Drop for this operand and -- if so
988 // -- add it to the list of moved operands. Note that this
989 // local might not have been an operand created for this
990 // call, it could come from other places too.
991 if scope.drops.iter().any(|drop| drop.local == local && drop.kind == DropKind::Value) {
992 scope.moved_locals.push(local);
993 }
994 }
995 }
996
997 // Other
998 // =====
999 /// Returns the [DropIdx] for the innermost drop if the function unwound at
1000 /// this point. The `DropIdx` will be created if it doesn't already exist.
1001 fn diverge_cleanup(&mut self) -> DropIdx {
1002 // It is okay to use a dummy span because getting the scope index of the topmost
1003 // scope must always succeed.
1004 self.diverge_cleanup_target(self.scopes.topmost(), DUMMY_SP)
1005 }
1006
1007 /// This is similar to [diverge_cleanup](Self::diverge_cleanup) except its target is set to
1008 /// some ancestor scope instead of the current scope.
1009 /// It is possible to unwind to some ancestor scope if some drop panics as
1010 /// the program breaks out of an if-then scope.
1011 fn diverge_cleanup_target(&mut self, target_scope: region::Scope, span: Span) -> DropIdx {
1012 let target = self.scopes.scope_index(target_scope, span);
1013 let (uncached_scope, mut cached_drop) = self.scopes.scopes[..=target]
1014 .iter()
1015 .enumerate()
1016 .rev()
1017 .find_map(|(scope_idx, scope)| {
1018 scope.cached_unwind_block.map(|cached_block| (scope_idx + 1, cached_block))
1019 })
1020 .unwrap_or((0, ROOT_NODE));
1021
1022 if uncached_scope > target {
1023 return cached_drop;
1024 }
1025
1026 let is_generator = self.generator_kind.is_some();
1027 for scope in &mut self.scopes.scopes[uncached_scope..=target] {
1028 for drop in &scope.drops {
1029 if is_generator || drop.kind == DropKind::Value {
1030 cached_drop = self.scopes.unwind_drops.add_drop(*drop, cached_drop);
1031 }
1032 }
1033 scope.cached_unwind_block = Some(cached_drop);
1034 }
1035
1036 cached_drop
1037 }
1038
1039 /// Prepares to create a path that performs all required cleanup for a
1040 /// terminator that can unwind at the given basic block.
1041 ///
1042 /// This path terminates in Resume. The path isn't created until after all
1043 /// of the non-unwind paths in this item have been lowered.
1044 pub(crate) fn diverge_from(&mut self, start: BasicBlock) {
1045 debug_assert!(
1046 matches!(
1047 self.cfg.block_data(start).terminator().kind,
1048 TerminatorKind::Assert { .. }
1049 | TerminatorKind::Call { .. }
1050 | TerminatorKind::Drop { .. }
1051 | TerminatorKind::DropAndReplace { .. }
1052 | TerminatorKind::FalseUnwind { .. }
1053 | TerminatorKind::InlineAsm { .. }
1054 ),
1055 "diverge_from called on block with terminator that cannot unwind."
1056 );
1057
1058 let next_drop = self.diverge_cleanup();
1059 self.scopes.unwind_drops.add_entry(start, next_drop);
1060 }
1061
1062 /// Sets up a path that performs all required cleanup for dropping a
1063 /// generator, starting from the given block that ends in
1064 /// [TerminatorKind::Yield].
1065 ///
1066 /// This path terminates in GeneratorDrop.
1067 pub(crate) fn generator_drop_cleanup(&mut self, yield_block: BasicBlock) {
1068 debug_assert!(
1069 matches!(
1070 self.cfg.block_data(yield_block).terminator().kind,
1071 TerminatorKind::Yield { .. }
1072 ),
1073 "generator_drop_cleanup called on block with non-yield terminator."
1074 );
1075 let (uncached_scope, mut cached_drop) = self
1076 .scopes
1077 .scopes
1078 .iter()
1079 .enumerate()
1080 .rev()
1081 .find_map(|(scope_idx, scope)| {
1082 scope.cached_generator_drop_block.map(|cached_block| (scope_idx + 1, cached_block))
1083 })
1084 .unwrap_or((0, ROOT_NODE));
1085
1086 for scope in &mut self.scopes.scopes[uncached_scope..] {
1087 for drop in &scope.drops {
1088 cached_drop = self.scopes.generator_drops.add_drop(*drop, cached_drop);
1089 }
1090 scope.cached_generator_drop_block = Some(cached_drop);
1091 }
1092
1093 self.scopes.generator_drops.add_entry(yield_block, cached_drop);
1094 }
1095
1096 /// Utility function for *non*-scope code to build its own drops.
1097 pub(crate) fn build_drop_and_replace(
1098 &mut self,
1099 block: BasicBlock,
1100 span: Span,
1101 place: Place<'tcx>,
1102 value: Operand<'tcx>,
1103 ) -> BlockAnd<()> {
1104 let source_info = self.source_info(span);
1105 let next_target = self.cfg.start_new_block();
1106
1107 self.cfg.terminate(
1108 block,
1109 source_info,
1110 TerminatorKind::DropAndReplace { place, value, target: next_target, unwind: None },
1111 );
1112 self.diverge_from(block);
1113
1114 next_target.unit()
1115 }
1116
1117 /// Creates an `Assert` terminator and returns the success block.
1118 /// If the boolean condition operand is not the expected value,
1119 /// a runtime panic will be caused with the given message.
1120 pub(crate) fn assert(
1121 &mut self,
1122 block: BasicBlock,
1123 cond: Operand<'tcx>,
1124 expected: bool,
1125 msg: AssertMessage<'tcx>,
1126 span: Span,
1127 ) -> BasicBlock {
1128 let source_info = self.source_info(span);
1129 let success_block = self.cfg.start_new_block();
1130
1131 self.cfg.terminate(
1132 block,
1133 source_info,
1134 TerminatorKind::Assert { cond, expected, msg, target: success_block, cleanup: None },
1135 );
1136 self.diverge_from(block);
1137
1138 success_block
1139 }
1140
1141 /// Unschedules any drops in the top scope.
1142 ///
1143 /// This is only needed for `match` arm scopes, because they have one
1144 /// entrance per pattern, but only one exit.
1145 pub(crate) fn clear_top_scope(&mut self, region_scope: region::Scope) {
1146 let top_scope = self.scopes.scopes.last_mut().unwrap();
1147
1148 assert_eq!(top_scope.region_scope, region_scope);
1149
1150 top_scope.drops.clear();
1151 top_scope.invalidate_cache();
1152 }
1153 }
1154
1155 /// Builds drops for `pop_scope` and `leave_top_scope`.
1156 fn build_scope_drops<'tcx>(
1157 cfg: &mut CFG<'tcx>,
1158 unwind_drops: &mut DropTree,
1159 scope: &Scope,
1160 mut block: BasicBlock,
1161 mut unwind_to: DropIdx,
1162 storage_dead_on_unwind: bool,
1163 arg_count: usize,
1164 ) -> BlockAnd<()> {
1165 debug!("build_scope_drops({:?} -> {:?})", block, scope);
1166
1167 // Build up the drops in evaluation order. The end result will
1168 // look like:
1169 //
1170 // [SDs, drops[n]] --..> [SDs, drop[1]] -> [SDs, drop[0]] -> [[SDs]]
1171 // | | |
1172 // : | |
1173 // V V
1174 // [drop[n]] -...-> [drop[1]] ------> [drop[0]] ------> [last_unwind_to]
1175 //
1176 // The horizontal arrows represent the execution path when the drops return
1177 // successfully. The downwards arrows represent the execution path when the
1178 // drops panic (panicking while unwinding will abort, so there's no need for
1179 // another set of arrows).
1180 //
1181 // For generators, we unwind from a drop on a local to its StorageDead
1182 // statement. For other functions we don't worry about StorageDead. The
1183 // drops for the unwind path should have already been generated by
1184 // `diverge_cleanup`.
1185
1186 for drop_data in scope.drops.iter().rev() {
1187 let source_info = drop_data.source_info;
1188 let local = drop_data.local;
1189
1190 match drop_data.kind {
1191 DropKind::Value => {
1192 // `unwind_to` should drop the value that we're about to
1193 // schedule. If dropping this value panics, then we continue
1194 // with the *next* value on the unwind path.
1195 debug_assert_eq!(unwind_drops.drops[unwind_to].0.local, drop_data.local);
1196 debug_assert_eq!(unwind_drops.drops[unwind_to].0.kind, drop_data.kind);
1197 unwind_to = unwind_drops.drops[unwind_to].1;
1198
1199 // If the operand has been moved, and we are not on an unwind
1200 // path, then don't generate the drop. (We only take this into
1201 // account for non-unwind paths so as not to disturb the
1202 // caching mechanism.)
1203 if scope.moved_locals.iter().any(|&o| o == local) {
1204 continue;
1205 }
1206
1207 unwind_drops.add_entry(block, unwind_to);
1208
1209 let next = cfg.start_new_block();
1210 cfg.terminate(
1211 block,
1212 source_info,
1213 TerminatorKind::Drop { place: local.into(), target: next, unwind: None },
1214 );
1215 block = next;
1216 }
1217 DropKind::Storage => {
1218 if storage_dead_on_unwind {
1219 debug_assert_eq!(unwind_drops.drops[unwind_to].0.local, drop_data.local);
1220 debug_assert_eq!(unwind_drops.drops[unwind_to].0.kind, drop_data.kind);
1221 unwind_to = unwind_drops.drops[unwind_to].1;
1222 }
1223 // Only temps and vars need their storage dead.
1224 assert!(local.index() > arg_count);
1225 cfg.push(block, Statement { source_info, kind: StatementKind::StorageDead(local) });
1226 }
1227 }
1228 }
1229 block.unit()
1230 }
1231
1232 impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
1233 /// Build a drop tree for a breakable scope.
1234 ///
1235 /// If `continue_block` is `Some`, then the tree is for `continue` inside a
1236 /// loop. Otherwise this is for `break` or `return`.
1237 fn build_exit_tree(
1238 &mut self,
1239 mut drops: DropTree,
1240 else_scope: region::Scope,
1241 span: Span,
1242 continue_block: Option<BasicBlock>,
1243 ) -> Option<BlockAnd<()>> {
1244 let mut blocks = IndexVec::from_elem(None, &drops.drops);
1245 blocks[ROOT_NODE] = continue_block;
1246
1247 drops.build_mir::<ExitScopes>(&mut self.cfg, &mut blocks);
1248 let is_generator = self.generator_kind.is_some();
1249
1250 // Link the exit drop tree to the unwind drop tree.
1251 if drops.drops.iter().any(|(drop, _)| drop.kind == DropKind::Value) {
1252 let unwind_target = self.diverge_cleanup_target(else_scope, span);
1253 let mut unwind_indices = IndexVec::from_elem_n(unwind_target, 1);
1254 for (drop_idx, drop_data) in drops.drops.iter_enumerated().skip(1) {
1255 match drop_data.0.kind {
1256 DropKind::Storage => {
1257 if is_generator {
1258 let unwind_drop = self
1259 .scopes
1260 .unwind_drops
1261 .add_drop(drop_data.0, unwind_indices[drop_data.1]);
1262 unwind_indices.push(unwind_drop);
1263 } else {
1264 unwind_indices.push(unwind_indices[drop_data.1]);
1265 }
1266 }
1267 DropKind::Value => {
1268 let unwind_drop = self
1269 .scopes
1270 .unwind_drops
1271 .add_drop(drop_data.0, unwind_indices[drop_data.1]);
1272 self.scopes
1273 .unwind_drops
1274 .add_entry(blocks[drop_idx].unwrap(), unwind_indices[drop_data.1]);
1275 unwind_indices.push(unwind_drop);
1276 }
1277 }
1278 }
1279 }
1280 blocks[ROOT_NODE].map(BasicBlock::unit)
1281 }
1282
1283 /// Build the unwind and generator drop trees.
1284 pub(crate) fn build_drop_trees(&mut self) {
1285 if self.generator_kind.is_some() {
1286 self.build_generator_drop_trees();
1287 } else {
1288 Self::build_unwind_tree(
1289 &mut self.cfg,
1290 &mut self.scopes.unwind_drops,
1291 self.fn_span,
1292 &mut None,
1293 );
1294 }
1295 }
1296
1297 fn build_generator_drop_trees(&mut self) {
1298 // Build the drop tree for dropping the generator while it's suspended.
1299 let drops = &mut self.scopes.generator_drops;
1300 let cfg = &mut self.cfg;
1301 let fn_span = self.fn_span;
1302 let mut blocks = IndexVec::from_elem(None, &drops.drops);
1303 drops.build_mir::<GeneratorDrop>(cfg, &mut blocks);
1304 if let Some(root_block) = blocks[ROOT_NODE] {
1305 cfg.terminate(
1306 root_block,
1307 SourceInfo::outermost(fn_span),
1308 TerminatorKind::GeneratorDrop,
1309 );
1310 }
1311
1312 // Build the drop tree for unwinding in the normal control flow paths.
1313 let resume_block = &mut None;
1314 let unwind_drops = &mut self.scopes.unwind_drops;
1315 Self::build_unwind_tree(cfg, unwind_drops, fn_span, resume_block);
1316
1317 // Build the drop tree for unwinding when dropping a suspended
1318 // generator.
1319 //
1320 // This is a different tree to the standard unwind paths here to
1321 // prevent drop elaboration from creating drop flags that would have
1322 // to be captured by the generator. I'm not sure how important this
1323 // optimization is, but it is here.
1324 for (drop_idx, drop_data) in drops.drops.iter_enumerated() {
1325 if let DropKind::Value = drop_data.0.kind {
1326 debug_assert!(drop_data.1 < drops.drops.next_index());
1327 drops.entry_points.push((drop_data.1, blocks[drop_idx].unwrap()));
1328 }
1329 }
1330 Self::build_unwind_tree(cfg, drops, fn_span, resume_block);
1331 }
1332
1333 fn build_unwind_tree(
1334 cfg: &mut CFG<'tcx>,
1335 drops: &mut DropTree,
1336 fn_span: Span,
1337 resume_block: &mut Option<BasicBlock>,
1338 ) {
1339 let mut blocks = IndexVec::from_elem(None, &drops.drops);
1340 blocks[ROOT_NODE] = *resume_block;
1341 drops.build_mir::<Unwind>(cfg, &mut blocks);
1342 if let (None, Some(resume)) = (*resume_block, blocks[ROOT_NODE]) {
1343 cfg.terminate(resume, SourceInfo::outermost(fn_span), TerminatorKind::Resume);
1344
1345 *resume_block = blocks[ROOT_NODE];
1346 }
1347 }
1348 }
1349
1350 // DropTreeBuilder implementations.
1351
1352 struct ExitScopes;
1353
1354 impl<'tcx> DropTreeBuilder<'tcx> for ExitScopes {
1355 fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock {
1356 cfg.start_new_block()
1357 }
1358 fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) {
1359 cfg.block_data_mut(from).terminator_mut().kind = TerminatorKind::Goto { target: to };
1360 }
1361 }
1362
1363 struct GeneratorDrop;
1364
1365 impl<'tcx> DropTreeBuilder<'tcx> for GeneratorDrop {
1366 fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock {
1367 cfg.start_new_block()
1368 }
1369 fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) {
1370 let term = cfg.block_data_mut(from).terminator_mut();
1371 if let TerminatorKind::Yield { ref mut drop, .. } = term.kind {
1372 *drop = Some(to);
1373 } else {
1374 span_bug!(
1375 term.source_info.span,
1376 "cannot enter generator drop tree from {:?}",
1377 term.kind
1378 )
1379 }
1380 }
1381 }
1382
1383 struct Unwind;
1384
1385 impl<'tcx> DropTreeBuilder<'tcx> for Unwind {
1386 fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock {
1387 cfg.start_new_cleanup_block()
1388 }
1389 fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) {
1390 let term = &mut cfg.block_data_mut(from).terminator_mut();
1391 match &mut term.kind {
1392 TerminatorKind::Drop { unwind, .. }
1393 | TerminatorKind::DropAndReplace { unwind, .. }
1394 | TerminatorKind::FalseUnwind { unwind, .. }
1395 | TerminatorKind::Call { cleanup: unwind, .. }
1396 | TerminatorKind::Assert { cleanup: unwind, .. }
1397 | TerminatorKind::InlineAsm { cleanup: unwind, .. } => {
1398 *unwind = Some(to);
1399 }
1400 TerminatorKind::Goto { .. }
1401 | TerminatorKind::SwitchInt { .. }
1402 | TerminatorKind::Resume
1403 | TerminatorKind::Abort
1404 | TerminatorKind::Return
1405 | TerminatorKind::Unreachable
1406 | TerminatorKind::Yield { .. }
1407 | TerminatorKind::GeneratorDrop
1408 | TerminatorKind::FalseEdge { .. } => {
1409 span_bug!(term.source_info.span, "cannot unwind from {:?}", term.kind)
1410 }
1411 }
1412 }
1413 }