must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
val: val,
ty: ty,
- zero: false
+ fill_on_drop: false,
+ skip_dtor: false,
};
- debug!("schedule_drop_mem({:?}, val={}, ty={})",
+ debug!("schedule_drop_mem({:?}, val={}, ty={}) fill_on_drop={} skip_dtor={}",
cleanup_scope,
self.ccx.tn().val_to_string(val),
- ty.repr(self.ccx.tcx()));
+ ty.repr(self.ccx.tcx()),
+ drop.fill_on_drop,
+ drop.skip_dtor);
+
+ self.schedule_clean(cleanup_scope, drop as CleanupObj);
+ }
+
+ /// Schedules a (deep) drop and filling of `val`, which is a pointer to an instance of `ty`
+ fn schedule_drop_and_fill_mem(&self,
+ cleanup_scope: ScopeId,
+ val: ValueRef,
+ ty: Ty<'tcx>) {
+ if !self.type_needs_drop(ty) { return; }
+
+ let drop = box DropValue {
+ is_immediate: false,
+ must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
+ val: val,
+ ty: ty,
+ fill_on_drop: true,
+ skip_dtor: false,
+ };
+
+ debug!("schedule_drop_and_fill_mem({:?}, val={}, ty={}, fill_on_drop={}, skip_dtor={})",
+ cleanup_scope,
+ self.ccx.tn().val_to_string(val),
+ ty.repr(self.ccx.tcx()),
+ drop.fill_on_drop,
+ drop.skip_dtor);
self.schedule_clean(cleanup_scope, drop as CleanupObj);
}
- /// Schedules a (deep) drop and zero-ing of `val`, which is a pointer to an instance of `ty`
- fn schedule_drop_and_zero_mem(&self,
+ /// Issue #23611: Schedules a (deep) drop of the contents of
+ /// `val`, which is a pointer to an instance of struct/enum type
+ /// `ty`. The scheduled code handles extracting the discriminant
+ /// and dropping the contents associated with that variant
+ /// *without* executing any associated drop implementation.
+ fn schedule_drop_adt_contents(&self,
cleanup_scope: ScopeId,
val: ValueRef,
ty: Ty<'tcx>) {
+ // The `if` below could use the finer-grained test "!contents_needs_drop";
+ // skipping the drop is just an optimization, so it is sound to be conservative.
if !self.type_needs_drop(ty) { return; }
let drop = box DropValue {
must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
val: val,
ty: ty,
- zero: true
+ fill_on_drop: false,
+ skip_dtor: true,
};
- debug!("schedule_drop_and_zero_mem({:?}, val={}, ty={}, zero={})",
+ debug!("schedule_drop_adt_contents({:?}, val={}, ty={}) fill_on_drop={} skip_dtor={}",
cleanup_scope,
self.ccx.tn().val_to_string(val),
ty.repr(self.ccx.tcx()),
- true);
+ drop.fill_on_drop,
+ drop.skip_dtor);
self.schedule_clean(cleanup_scope, drop as CleanupObj);
}
must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
val: val,
ty: ty,
- zero: false
+ fill_on_drop: false,
+ skip_dtor: false,
};
- debug!("schedule_drop_immediate({:?}, val={}, ty={:?})",
+ debug!("schedule_drop_immediate({:?}, val={}, ty={:?}) fill_on_drop={} skip_dtor={}",
cleanup_scope,
self.ccx.tn().val_to_string(val),
- ty.repr(self.ccx.tcx()));
+ ty.repr(self.ccx.tcx()),
+ drop.fill_on_drop,
+ drop.skip_dtor);
self.schedule_clean(cleanup_scope, drop as CleanupObj);
}
self.schedule_clean(cleanup_scope, drop as CleanupObj);
}
- /// Schedules a call to `free(val)`. Note that this is a shallow operation.
- fn schedule_free_slice(&self,
- cleanup_scope: ScopeId,
- val: ValueRef,
- size: ValueRef,
- align: ValueRef,
- heap: Heap) {
- let drop = box FreeSlice { ptr: val, size: size, align: align, heap: heap };
-
- debug!("schedule_free_slice({:?}, val={}, heap={:?})",
- cleanup_scope,
- self.ccx.tn().val_to_string(val),
- heap);
-
- self.schedule_clean(cleanup_scope, drop as CleanupObj);
- }
-
fn schedule_clean(&self,
cleanup_scope: ScopeId,
cleanup: CleanupObj<'tcx>) {
}
}
+ /// Manipulate cleanup scope for call arguments. Conceptually, each
+ /// argument to a call is an lvalue, and performing the call moves each
+ /// of the arguments into a new rvalue (which gets cleaned up by the
+ /// callee). As an optimization, instead of actually performing all of
+ /// those moves, trans just manipulates the cleanup scope to obtain the
+ /// same effect.
pub fn drop_non_lifetime_clean(&mut self) {
self.cleanups.retain(|c| c.is_lifetime_end());
+ self.clear_cached_exits();
}
}
must_unwind: bool,
val: ValueRef,
ty: Ty<'tcx>,
- zero: bool
+ fill_on_drop: bool,
+ skip_dtor: bool,
}
impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> {
bcx: Block<'blk, 'tcx>,
debug_loc: DebugLoc)
-> Block<'blk, 'tcx> {
- let _icx = base::push_ctxt("<DropValue as Cleanup>::trans");
+ let skip_dtor = self.skip_dtor;
+ let _icx = if skip_dtor {
+ base::push_ctxt("<DropValue as Cleanup>::trans skip_dtor=true")
+ } else {
+ base::push_ctxt("<DropValue as Cleanup>::trans skip_dtor=false")
+ };
let bcx = if self.is_immediate {
- glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc)
+ glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc, self.skip_dtor)
} else {
- glue::drop_ty(bcx, self.val, self.ty, debug_loc)
+ glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor)
};
- if self.zero {
+ if self.fill_on_drop {
base::drop_done_fill_mem(bcx, self.val, self.ty);
}
bcx
}
}
-#[derive(Copy, Clone)]
-pub struct FreeSlice {
- ptr: ValueRef,
- size: ValueRef,
- align: ValueRef,
- heap: Heap,
-}
-
-impl<'tcx> Cleanup<'tcx> for FreeSlice {
- fn must_unwind(&self) -> bool {
- true
- }
-
- fn clean_on_unwind(&self) -> bool {
- true
- }
-
- fn is_lifetime_end(&self) -> bool {
- false
- }
-
- fn trans<'blk>(&self,
- bcx: Block<'blk, 'tcx>,
- debug_loc: DebugLoc)
- -> Block<'blk, 'tcx> {
- match self.heap {
- HeapExchange => {
- glue::trans_exchange_free_dyn(bcx,
- self.ptr,
- self.size,
- self.align,
- debug_loc)
- }
- }
- }
-}
-
#[derive(Copy, Clone)]
pub struct LifetimeEnd {
ptr: ValueRef,
cleanup_scope: ScopeId,
val: ValueRef,
ty: Ty<'tcx>);
- fn schedule_drop_and_zero_mem(&self,
+ fn schedule_drop_and_fill_mem(&self,
+ cleanup_scope: ScopeId,
+ val: ValueRef,
+ ty: Ty<'tcx>);
+ fn schedule_drop_adt_contents(&self,
cleanup_scope: ScopeId,
val: ValueRef,
ty: Ty<'tcx>);
val: ValueRef,
heap: Heap,
content_ty: Ty<'tcx>);
- fn schedule_free_slice(&self,
- cleanup_scope: ScopeId,
- val: ValueRef,
- size: ValueRef,
- align: ValueRef,
- heap: Heap);
fn schedule_clean(&self,
cleanup_scope: ScopeId,
cleanup: CleanupObj<'tcx>);