git.proxmox.com Git - rustc.git/blobdiff - src/librustc_trans/trans/glue.rs
Imported Upstream version 1.6.0+dfsg1
[rustc.git] / src / librustc_trans / trans / glue.rs
index 2219cd59263cc2648ecde6821500127d67e25735..3fc9bc0d66d3d48d7834cf6ea4807f03adfd1be9 100644 (file)
 
 //!
 //
-// Code relating to taking, dropping, etc as well as type descriptors.
+// Code relating to drop glue.
 
+use std;
 
-use back::abi;
 use back::link::*;
-use llvm::{ValueRef, True, get_param};
 use llvm;
+use llvm::{ValueRef, get_param};
 use middle::lang_items::ExchangeFreeFnLangItem;
-use middle::subst;
-use middle::subst::{Subst, Substs};
+use middle::subst::{Substs};
+use middle::traits;
+use middle::ty::{self, Ty};
 use trans::adt;
+use trans::adt::GetDtorType; // for tcx.dtor_type()
 use trans::base::*;
 use trans::build::*;
 use trans::callee;
 use trans::cleanup;
 use trans::cleanup::CleanupMethods;
-use trans::consts;
 use trans::common::*;
-use trans::datum;
-use trans::debuginfo;
+use trans::debuginfo::DebugLoc;
+use trans::declare;
 use trans::expr;
 use trans::machine::*;
-use trans::tvec;
-use trans::type_::Type;
+use trans::monomorphize;
 use trans::type_of::{type_of, sizing_type_of, align_of};
-use middle::ty::{self, Ty};
-use util::ppaux::{ty_to_short_str, Repr};
-use util::ppaux;
+use trans::type_::Type;
 
 use arena::TypedArena;
 use libc::c_uint;
-use std::ffi::CString;
 use syntax::ast;
-use syntax::parse::token;
+use syntax::codemap::DUMMY_SP;
 
-pub fn trans_exchange_free_dyn<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef,
-                                           size: ValueRef, align: ValueRef)
+pub fn trans_exchange_free_dyn<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+                                           v: ValueRef,
+                                           size: ValueRef,
+                                           align: ValueRef,
+                                           debug_loc: DebugLoc)
                                            -> Block<'blk, 'tcx> {
     let _icx = push_ctxt("trans_exchange_free");
     let ccx = cx.ccx();
     callee::trans_lang_call(cx,
         langcall(cx, None, "", ExchangeFreeFnLangItem),
         &[PointerCast(cx, v, Type::i8p(ccx)), size, align],
-        Some(expr::Ignore)).bcx
+        Some(expr::Ignore),
+        debug_loc).bcx
 }
 
-pub fn trans_exchange_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef,
-                                       size: u64, align: u32) -> Block<'blk, 'tcx> {
-    trans_exchange_free_dyn(cx, v, C_uint(cx.ccx(), size),
-                                   C_uint(cx.ccx(), align))
+pub fn trans_exchange_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+                                       v: ValueRef,
+                                       size: u64,
+                                       align: u32,
+                                       debug_loc: DebugLoc)
+                                       -> Block<'blk, 'tcx> {
+    trans_exchange_free_dyn(cx,
+                            v,
+                            C_uint(cx.ccx(), size),
+                            C_uint(cx.ccx(), align),
+                            debug_loc)
 }
 
-pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ptr: ValueRef,
-                                          content_ty: Ty<'tcx>) -> Block<'blk, 'tcx> {
+pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                          ptr: ValueRef,
+                                          content_ty: Ty<'tcx>,
+                                          debug_loc: DebugLoc)
+                                          -> Block<'blk, 'tcx> {
     assert!(type_is_sized(bcx.ccx().tcx(), content_ty));
     let sizing_type = sizing_type_of(bcx.ccx(), content_ty);
     let content_size = llsize_of_alloc(bcx.ccx(), sizing_type);
@@ -71,7 +82,7 @@ pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ptr: ValueRef,
     // `Box<ZeroSizeType>` does not allocate.
     if content_size != 0 {
         let content_align = align_of(bcx.ccx(), content_ty);
-        trans_exchange_free(bcx, ptr, content_size, content_align)
+        trans_exchange_free(bcx, ptr, content_size, content_align, debug_loc)
     } else {
         bcx
     }
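
The `Box<ZeroSizeType>` shortcut above (skip the exchange-free call when the content size is zero) can be seen at the language level. A minimal, hedged illustration; the type name is borrowed from the comment and the example is not part of this change:

use std::mem::size_of;

struct ZeroSizeType; // illustration only

fn main() {
    assert_eq!(size_of::<ZeroSizeType>(), 0);
    let b = Box::new(ZeroSizeType); // no heap allocation is requested
    drop(b);                        // ...and no deallocation call is needed
}
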
@@ -85,11 +96,21 @@ pub fn get_drop_glue_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
     if !type_is_sized(tcx, t) {
         return t
     }
+
+    // FIXME (#22815): note that type_needs_drop conservatively
+    // approximates in some cases and may say a type expression
+    // requires drop glue when it actually does not.
+    //
+    // (In this case it is not clear whether any harm is done, i.e.
+    // erroneously returning `t` in some cases where we could have
+    // returned `tcx.types.i8` does not appear unsound. The impact on
+    // code quality is unknown at this time.)
+
     if !type_needs_drop(tcx, t) {
         return tcx.types.i8;
     }
     match t.sty {
-        ty::ty_uniq(typ) if !type_needs_drop(tcx, typ)
+        ty::TyBox(typ) if !type_needs_drop(tcx, typ)
                          && type_is_sized(tcx, typ) => {
             let llty = sizing_type_of(ccx, typ);
             // `Box<ZeroSizeType>` does not allocate.
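
As background for the `type_needs_drop` shortcut above (and the conservatism noted in the FIXME), the stable library predicate `std::mem::needs_drop` gives the same kind of conservative answer; a small hedged sketch, independent of the compiler internals in this diff:

use std::mem::needs_drop;

fn main() {
    // No interesting destructor: the kinds of types that get mapped to the
    // shared i8 placeholder glue above.
    assert!(!needs_drop::<i32>());
    assert!(!needs_drop::<(u8, u16)>());
    // Owning types keep real drop glue of their own.
    assert!(needs_drop::<String>());
    assert!(needs_drop::<Box<i32>>());
}
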
@@ -106,14 +127,29 @@ pub fn get_drop_glue_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
 pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                            v: ValueRef,
                            t: Ty<'tcx>,
-                           source_location: Option<NodeInfo>)
-                           -> Block<'blk, 'tcx> {
+                           debug_loc: DebugLoc) -> Block<'blk, 'tcx> {
+    drop_ty_core(bcx, v, t, debug_loc, false, None)
+}
+
+pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                v: ValueRef,
+                                t: Ty<'tcx>,
+                                debug_loc: DebugLoc,
+                                skip_dtor: bool,
+                                drop_hint: Option<cleanup::DropHintValue>)
+                                -> Block<'blk, 'tcx> {
     // NB: v is an *alias* of type t here, not a direct value.
-    debug!("drop_ty(t={})", t.repr(bcx.tcx()));
+    debug!("drop_ty_core(t={:?}, skip_dtor={} drop_hint={:?})", t, skip_dtor, drop_hint);
     let _icx = push_ctxt("drop_ty");
-    if type_needs_drop(bcx.tcx(), t) {
+    let mut bcx = bcx;
+    if bcx.fcx.type_needs_drop(t) {
         let ccx = bcx.ccx();
-        let glue = get_drop_glue(ccx, t);
+        let g = if skip_dtor {
+            DropGlueKind::TyContents(t)
+        } else {
+            DropGlueKind::Ty(t)
+        };
+        let glue = get_drop_glue_core(ccx, g);
         let glue_type = get_drop_glue_type(ccx, t);
         let ptr = if glue_type != t {
             PointerCast(bcx, v, type_of(ccx, glue_type).ptr_to())
@@ -121,12 +157,23 @@ pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
             v
         };
 
-        match source_location {
-            Some(sl) => debuginfo::set_source_location(bcx.fcx, sl.id, sl.span),
-            None => debuginfo::clear_source_location(bcx.fcx)
-        };
-
-        Call(bcx, glue, &[ptr], None);
+        match drop_hint {
+            Some(drop_hint) => {
+                let hint_val = load_ty(bcx, drop_hint.value(), bcx.tcx().types.u8);
+                let moved_val =
+                    C_integral(Type::i8(bcx.ccx()), adt::DTOR_MOVED_HINT as u64, false);
+                let may_need_drop =
+                    ICmp(bcx, llvm::IntNE, hint_val, moved_val, DebugLoc::None);
+                bcx = with_cond(bcx, may_need_drop, |cx| {
+                    Call(cx, glue, &[ptr], None, debug_loc);
+                    cx
+                })
+            }
+            None => {
+                // No drop-hint ==> call standard drop glue
+                Call(bcx, glue, &[ptr], None, debug_loc);
+            }
+        }
     }
     bcx
 }
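
A hedged sketch of the drop-hint guard emitted above: the hint byte is compared against the "moved out" marker and the drop glue is only invoked when the value may still need dropping. The constant value below is invented; the real one is `adt::DTOR_MOVED_HINT`:

const DTOR_MOVED_HINT: u8 = 0x7f; // invented value for illustration

fn drop_if_not_moved(hint: u8, call_glue: impl FnOnce()) {
    // Corresponds to the ICmp + with_cond above: only call the glue when the
    // hint byte does not carry the "moved out" marker.
    if hint != DTOR_MOVED_HINT {
        call_glue();
    }
}

fn main() {
    let mut ran = false;
    drop_if_not_moved(0x00, || ran = true);
    assert!(ran);
    drop_if_not_moved(0x7f, || panic!("glue must not run for a moved-out value"));
}
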
@@ -134,455 +181,420 @@ pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
 pub fn drop_ty_immediate<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                      v: ValueRef,
                                      t: Ty<'tcx>,
-                                     source_location: Option<NodeInfo>)
+                                     debug_loc: DebugLoc,
+                                     skip_dtor: bool)
                                      -> Block<'blk, 'tcx> {
     let _icx = push_ctxt("drop_ty_immediate");
-    let vp = alloca(bcx, type_of(bcx.ccx(), t), "");
-    Store(bcx, v, vp);
-    drop_ty(bcx, vp, t, source_location)
+    let vp = alloc_ty(bcx, t, "");
+    call_lifetime_start(bcx, vp);
+    store_ty(bcx, v, vp, t);
+    let bcx = drop_ty_core(bcx, vp, t, debug_loc, skip_dtor, None);
+    call_lifetime_end(bcx, vp);
+    bcx
 }
 
 pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ValueRef {
-    debug!("make drop glue for {}", ppaux::ty_to_string(ccx.tcx(), t));
-    let t = get_drop_glue_type(ccx, t);
-    debug!("drop glue type {}", ppaux::ty_to_string(ccx.tcx(), t));
-    match ccx.drop_glues().borrow().get(&t) {
+    get_drop_glue_core(ccx, DropGlueKind::Ty(t))
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+pub enum DropGlueKind<'tcx> {
+    /// The normal path; runs the dtor, and then recurs on the contents
+    Ty(Ty<'tcx>),
+    /// Skips the dtor, if any, for ty; drops the contents directly.
+    /// Note that the dtor is only skipped at the most *shallow*
+    /// level, namely, an `impl Drop for Ty` itself. So, for example,
+    /// if Ty is Newtype(S) then only the Drop impl for Newtype itself
+    /// will be skipped, while the Drop impl for S, if any, will be
+    /// invoked.
+    TyContents(Ty<'tcx>),
+}
+
+impl<'tcx> DropGlueKind<'tcx> {
+    fn ty(&self) -> Ty<'tcx> {
+        match *self { DropGlueKind::Ty(t) | DropGlueKind::TyContents(t) => t }
+    }
+
+    fn map_ty<F>(&self, mut f: F) -> DropGlueKind<'tcx> where F: FnMut(Ty<'tcx>) -> Ty<'tcx>
+    {
+        match *self {
+            DropGlueKind::Ty(t) => DropGlueKind::Ty(f(t)),
+            DropGlueKind::TyContents(t) => DropGlueKind::TyContents(f(t)),
+        }
+    }
+}
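
The Ty/TyContents split documented on the enum corresponds to the drop order observable at the language level: the shallow `Drop` impl runs first, then the contents are dropped without re-running it. A hedged, user-level sketch with invented names:

struct S;
impl Drop for S {
    fn drop(&mut self) {
        println!("dropping contents: S");
    }
}

struct Newtype(S);
impl Drop for Newtype {
    fn drop(&mut self) {
        println!("dropping Newtype (the shallow dtor that TyContents skips)");
    }
}

fn main() {
    let _v = Newtype(S);
    // Printed order on drop:
    //   dropping Newtype (the shallow dtor that TyContents skips)
    //   dropping contents: S
}
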
+
+fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                g: DropGlueKind<'tcx>) -> ValueRef {
+    debug!("make drop glue for {:?}", g);
+    let g = g.map_ty(|t| get_drop_glue_type(ccx, t));
+    debug!("drop glue type {:?}", g);
+    match ccx.drop_glues().borrow().get(&g) {
         Some(&glue) => return glue,
         _ => { }
     }
+    let t = g.ty();
 
     let llty = if type_is_sized(ccx.tcx(), t) {
         type_of(ccx, t).ptr_to()
     } else {
-        type_of(ccx, ty::mk_uniq(ccx.tcx(), t)).ptr_to()
+        type_of(ccx, ccx.tcx().mk_box(t)).ptr_to()
     };
 
     let llfnty = Type::glue_fn(ccx, llty);
 
-    let (glue, new_sym) = match ccx.available_drop_glues().borrow().get(&t) {
-        Some(old_sym) => {
-            let glue = decl_cdecl_fn(ccx, &old_sym[], llfnty, ty::mk_nil(ccx.tcx()));
-            (glue, None)
-        },
-        None => {
-            let (sym, glue) = declare_generic_glue(ccx, t, llfnty, "drop");
-            (glue, Some(sym))
-        },
+    // To avoid infinite recursion, don't `make_drop_glue` until after we've
+    // added the entry to the `drop_glues` cache.
+    if let Some(old_sym) = ccx.available_drop_glues().borrow().get(&g) {
+        let llfn = declare::declare_cfn(ccx, &old_sym, llfnty, ccx.tcx().mk_nil());
+        ccx.drop_glues().borrow_mut().insert(g, llfn);
+        return llfn;
     };
 
-    ccx.drop_glues().borrow_mut().insert(t, glue);
+    let fn_nm = mangle_internal_name_by_type_and_seq(ccx, t, "drop");
+    let llfn = declare::define_cfn(ccx, &fn_nm, llfnty, ccx.tcx().mk_nil()).unwrap_or_else(||{
+       ccx.sess().bug(&format!("symbol `{}` already defined", fn_nm));
+    });
+    ccx.available_drop_glues().borrow_mut().insert(g, fn_nm);
 
-    // To avoid infinite recursion, don't `make_drop_glue` until after we've
-    // added the entry to the `drop_glues` cache.
-    match new_sym {
-        Some(sym) => {
-            ccx.available_drop_glues().borrow_mut().insert(t, sym);
-            // We're creating a new drop glue, so also generate a body.
-            make_generic_glue(ccx, t, glue, make_drop_glue, "drop");
-        },
-        None => {},
-    }
+    let _s = StatRecorder::new(ccx, format!("drop {:?}", t));
+
+    let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
+    let (arena, fcx): (TypedArena<_>, FunctionContext);
+    arena = TypedArena::new();
+    fcx = new_fn_ctxt(ccx, llfn, ast::DUMMY_NODE_ID, false,
+                      ty::FnConverging(ccx.tcx().mk_nil()),
+                      empty_substs, None, &arena);
+
+    let bcx = init_function(&fcx, false, ty::FnConverging(ccx.tcx().mk_nil()));
+
+    update_linkage(ccx, llfn, None, OriginalTranslation);
+
+    ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1);
+    // All glue functions take values passed *by alias*; this is a
+    // requirement since in many contexts glue is invoked indirectly and
+    // the caller has no idea if it's dealing with something that can be
+    // passed by value.
+    //
+    // llfn is expected to be declared to take a parameter of the appropriate
+    // type, so we don't need to explicitly cast the function parameter.
+
+    let llrawptr0 = get_param(llfn, fcx.arg_offset() as c_uint);
+    let bcx = make_drop_glue(bcx, llrawptr0, g);
+    finish_fn(&fcx, bcx, ty::FnConverging(ccx.tcx().mk_nil()), DebugLoc::None);
 
-    glue
+    llfn
 }
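
The "register in the cache before generating the body" comment above describes a standard guard against infinite recursion. A minimal sketch of that pattern, with invented types standing in for the compiler's `drop_glues` / `available_drop_glues` tables:

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum Kind {
    Ty(&'static str),
    TyContents(&'static str),
}

struct GlueCache {
    declared: HashMap<Kind, String>, // kind -> mangled symbol name
}

impl GlueCache {
    fn get_or_create(&mut self, g: Kind) -> String {
        if let Some(sym) = self.declared.get(&g) {
            return sym.clone();
        }
        let sym = format!("drop_glue_{:?}", g);
        // Register the declaration *before* generating the body, so that a
        // recursive type (e.g. a list node containing Box<Self>) requesting
        // its own glue hits this cache entry instead of looping forever.
        self.declared.insert(g, sym.clone());
        // ...body generation would happen here and may call get_or_create again.
        sym
    }
}

fn main() {
    let mut cache = GlueCache { declared: HashMap::new() };
    let a = cache.get_or_create(Kind::Ty("List"));
    let b = cache.get_or_create(Kind::Ty("List"));
    assert_eq!(a, b); // the second request reuses the cached symbol
}
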
 
 fn trans_struct_drop_flag<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                                       t: Ty<'tcx>,
-                                      v0: ValueRef,
-                                      dtor_did: ast::DefId,
-                                      class_did: ast::DefId,
-                                      substs: &subst::Substs<'tcx>)
+                                      struct_data: ValueRef)
                                       -> Block<'blk, 'tcx> {
+    assert!(type_is_sized(bcx.tcx(), t), "Precondition: caller must ensure t is sized");
+
     let repr = adt::represent_type(bcx.ccx(), t);
-    let struct_data = if type_is_sized(bcx.tcx(), t) {
-        v0
+    let drop_flag = unpack_datum!(bcx, adt::trans_drop_flag_ptr(bcx, &*repr, struct_data));
+    let loaded = load_ty(bcx, drop_flag.val, bcx.tcx().dtor_type());
+    let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
+    let init_val = C_integral(drop_flag_llty, adt::DTOR_NEEDED as u64, false);
+
+    let bcx = if !bcx.ccx().check_drop_flag_for_sanity() {
+        bcx
     } else {
-        let llval = GEPi(bcx, v0, &[0, abi::FAT_PTR_ADDR]);
-        Load(bcx, llval)
+        let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
+        let done_val = C_integral(drop_flag_llty, adt::DTOR_DONE as u64, false);
+        let not_init = ICmp(bcx, llvm::IntNE, loaded, init_val, DebugLoc::None);
+        let not_done = ICmp(bcx, llvm::IntNE, loaded, done_val, DebugLoc::None);
+        let drop_flag_neither_initialized_nor_cleared =
+            And(bcx, not_init, not_done, DebugLoc::None);
+        with_cond(bcx, drop_flag_neither_initialized_nor_cleared, |cx| {
+            let llfn = cx.ccx().get_intrinsic(&("llvm.debugtrap"));
+            Call(cx, llfn, &[], None, DebugLoc::None);
+            cx
+        })
     };
-    let drop_flag = unpack_datum!(bcx, adt::trans_drop_flag_ptr(bcx, &*repr, struct_data));
-    with_cond(bcx, load_ty(bcx, drop_flag.val, bcx.tcx().types.bool), |cx| {
-        trans_struct_drop(cx, t, v0, dtor_did, class_did, substs)
+
+    let drop_flag_dtor_needed = ICmp(bcx, llvm::IntEQ, loaded, init_val, DebugLoc::None);
+    with_cond(bcx, drop_flag_dtor_needed, |cx| {
+        trans_struct_drop(cx, t, struct_data)
     })
 }
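
A hedged sketch of the flag protocol enforced above: the embedded drop flag must read either "dtor needed" or "dtor done"; any other value trips the sanity trap, and the destructor only runs in the "needed" state. The constants below are invented stand-ins for `adt::DTOR_NEEDED` / `adt::DTOR_DONE`:

const DTOR_NEEDED: u8 = 1; // invented stand-in
const DTOR_DONE: u8 = 0;   // invented stand-in

fn drop_with_flag(flag: u8, run_dtor: impl FnOnce()) {
    // Anything other than "needed" or "done" means the flag is corrupt;
    // this stands in for the llvm.debugtrap sanity check above.
    if flag != DTOR_NEEDED && flag != DTOR_DONE {
        panic!("drop flag neither initialized nor cleared");
    }
    // The destructor itself only runs when the flag still says "needed".
    if flag == DTOR_NEEDED {
        run_dtor();
    }
}

fn main() {
    drop_with_flag(DTOR_NEEDED, || println!("running user dtor"));
    drop_with_flag(DTOR_DONE, || unreachable!("value was already dropped"));
}
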
-
 fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                  t: Ty<'tcx>,
-                                 v0: ValueRef,
-                                 dtor_did: ast::DefId,
-                                 class_did: ast::DefId,
-                                 substs: &subst::Substs<'tcx>)
+                                 v0: ValueRef)
                                  -> Block<'blk, 'tcx>
 {
-    let repr = adt::represent_type(bcx.ccx(), t);
+    debug!("trans_struct_drop t: {}", t);
+    let tcx = bcx.tcx();
+    let mut bcx = bcx;
 
-    // Find and call the actual destructor
-    let dtor_addr = get_res_dtor(bcx.ccx(), dtor_did, t,
-                                 class_did, substs);
-
-    // The first argument is the "self" argument for drop
-    let params = unsafe {
-        let ty = Type::from_ref(llvm::LLVMTypeOf(dtor_addr));
-        ty.element_type().func_params()
-    };
-
-    let fty = ty::lookup_item_type(bcx.tcx(), dtor_did).ty.subst(bcx.tcx(), substs);
-    let self_ty = match fty.sty {
-        ty::ty_bare_fn(_, ref f) => {
-            let sig = ty::erase_late_bound_regions(bcx.tcx(), &f.sig);
-            assert!(sig.inputs.len() == 1);
-            sig.inputs[0]
-        }
-        _ => bcx.sess().bug(&format!("Expected function type, found {}",
-                                    bcx.ty_to_string(fty))[])
-    };
+    let def = t.ty_adt_def().unwrap();
 
-    let (struct_data, info) = if type_is_sized(bcx.tcx(), t) {
-        (v0, None)
+    // Be sure to put the contents into a scope so we can use an invoke
+    // instruction to call the user destructor but still call the field
+    // destructors if the user destructor panics.
+    //
+    // FIXME (#14875) panic-in-drop semantics might be unsupported; we
+    // might well consider changing below to more direct code.
+    let contents_scope = bcx.fcx.push_custom_cleanup_scope();
+
+    // Issue #23611: schedule cleanup of contents, re-inspecting the
+    // discriminant (if any) in case of variant swap in drop code.
+    bcx.fcx.schedule_drop_adt_contents(cleanup::CustomScope(contents_scope), v0, t);
+
+    let (sized_args, unsized_args);
+    let args: &[ValueRef] = if type_is_sized(tcx, t) {
+        sized_args = [v0];
+        &sized_args
     } else {
-        let data = GEPi(bcx, v0, &[0, abi::FAT_PTR_ADDR]);
-        let info = GEPi(bcx, v0, &[0, abi::FAT_PTR_EXTRA]);
-        (Load(bcx, data), Some(Load(bcx, info)))
+        unsized_args = [Load(bcx, expr::get_dataptr(bcx, v0)), Load(bcx, expr::get_meta(bcx, v0))];
+        &unsized_args
     };
 
-    adt::fold_variants(bcx, &*repr, struct_data, |variant_cx, st, value| {
-        // Be sure to put all of the fields into a scope so we can use an invoke
-        // instruction to call the user destructor but still call the field
-        // destructors if the user destructor panics.
-        let field_scope = variant_cx.fcx.push_custom_cleanup_scope();
-
-        // Class dtors have no explicit args, so the params should
-        // just consist of the environment (self).
-        assert_eq!(params.len(), 1);
-        let self_arg = if type_is_fat_ptr(bcx.tcx(), self_ty) {
-            // The dtor expects a fat pointer, so make one, even if we have to fake it.
-            let boxed_ty = ty::mk_open(bcx.tcx(), t);
-            let scratch = datum::rvalue_scratch_datum(bcx, boxed_ty, "__fat_ptr_drop_self");
-            Store(bcx, value, GEPi(bcx, scratch.val, &[0, abi::FAT_PTR_ADDR]));
-            Store(bcx,
-                  // If we just had a thin pointer, make a fat pointer by sticking
-                  // null where we put the unsizing info. This works because t
-                  // is a sized type, so we will only unpack the fat pointer, never
-                  // use the fake info.
-                  info.unwrap_or(C_null(Type::i8p(bcx.ccx()))),
-                  GEPi(bcx, scratch.val, &[0, abi::FAT_PTR_EXTRA]));
-            PointerCast(variant_cx, scratch.val, params[0])
-        } else {
-            PointerCast(variant_cx, value, params[0])
+    bcx = callee::trans_call_inner(bcx, DebugLoc::None, |bcx, _| {
+        let trait_ref = ty::Binder(ty::TraitRef {
+            def_id: tcx.lang_items.drop_trait().unwrap(),
+            substs: tcx.mk_substs(Substs::trans_empty().with_self_ty(t))
+        });
+        let vtbl = match fulfill_obligation(bcx.ccx(), DUMMY_SP, trait_ref) {
+            traits::VtableImpl(data) => data,
+            _ => tcx.sess.bug(&format!("dtor for {:?} is not an impl???", t))
         };
-        let args = vec!(self_arg);
-
-        // Add all the fields as a value which needs to be cleaned at the end of
-        // this scope. Iterate in reverse order so a Drop impl doesn't reverse
-        // the order in which fields get dropped.
-        for (i, ty) in st.fields.iter().enumerate().rev() {
-            let llfld_a = adt::struct_field_ptr(variant_cx, &*st, value, i, false);
-
-            let val = if type_is_sized(bcx.tcx(), *ty) {
-                llfld_a
-            } else {
-                let boxed_ty = ty::mk_open(bcx.tcx(), *ty);
-                let scratch = datum::rvalue_scratch_datum(bcx, boxed_ty, "__fat_ptr_drop_field");
-                Store(bcx, llfld_a, GEPi(bcx, scratch.val, &[0, abi::FAT_PTR_ADDR]));
-                Store(bcx, info.unwrap(), GEPi(bcx, scratch.val, &[0, abi::FAT_PTR_EXTRA]));
-                scratch.val
-            };
-            variant_cx.fcx.schedule_drop_mem(cleanup::CustomScope(field_scope),
-                                             val, *ty);
+        let dtor_did = def.destructor().unwrap();
+        let datum = callee::trans_fn_ref_with_substs(bcx.ccx(),
+                                                     dtor_did,
+                                                     ExprId(0),
+                                                     bcx.fcx.param_substs,
+                                                     vtbl.substs);
+        callee::Callee {
+            bcx: bcx,
+            data: callee::Fn(datum.val),
+            ty: datum.ty
         }
+    }, callee::ArgVals(args), Some(expr::Ignore)).bcx;
 
-        let dtor_ty = ty::mk_ctor_fn(bcx.tcx(),
-                                     class_did,
-                                     &[get_drop_glue_type(bcx.ccx(), t)],
-                                     ty::mk_nil(bcx.tcx()));
-        let (_, variant_cx) = invoke(variant_cx, dtor_addr, &args[], dtor_ty, None);
-
-        variant_cx.fcx.pop_and_trans_custom_cleanup_scope(variant_cx, field_scope);
-        variant_cx
-    })
+    bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope)
 }
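
The cleanup scope pushed above is what arranges, via an invoke instruction, the behaviour visible in this hedged user-level sketch: even if the user's `Drop` impl panics, the fields' destructors still run (type names invented, not part of this diff):

use std::panic;

struct Field;
impl Drop for Field {
    fn drop(&mut self) {
        println!("Field dropped");
    }
}

struct Outer(Field);
impl Drop for Outer {
    fn drop(&mut self) {
        println!("Outer::drop is about to panic");
        panic!("boom");
    }
}

fn main() {
    let result = panic::catch_unwind(|| {
        let _x = Outer(Field);
    });
    // "Field dropped" is printed before the panic escapes the closure:
    // the field destructor still ran even though Outer's dtor panicked.
    assert!(result.is_err());
}
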
 
-fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, info: ValueRef)
-                                     -> (ValueRef, ValueRef) {
+pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, info: ValueRef)
+                                         -> (ValueRef, ValueRef) {
     debug!("calculate size of DST: {}; with lost info: {}",
-           bcx.ty_to_string(t), bcx.val_to_string(info));
+           t, bcx.val_to_string(info));
     if type_is_sized(bcx.tcx(), t) {
         let sizing_type = sizing_type_of(bcx.ccx(), t);
-        let size = C_uint(bcx.ccx(), llsize_of_alloc(bcx.ccx(), sizing_type));
-        let align = C_uint(bcx.ccx(), align_of(bcx.ccx(), t));
+        let size = llsize_of_alloc(bcx.ccx(), sizing_type);
+        let align = align_of(bcx.ccx(), t);
+        debug!("size_and_align_of_dst t={} info={} size: {} align: {}",
+               t, bcx.val_to_string(info), size, align);
+        let size = C_uint(bcx.ccx(), size);
+        let align = C_uint(bcx.ccx(), align);
         return (size, align);
     }
     match t.sty {
-        ty::ty_struct(id, substs) => {
+        ty::TyStruct(def, substs) => {
             let ccx = bcx.ccx();
             // First get the size of all statically known fields.
             // Don't use type_of::sizing_type_of because that expects t to be sized.
-            assert!(!ty::type_is_simd(bcx.tcx(), t));
+            assert!(!t.is_simd());
             let repr = adt::represent_type(ccx, t);
-            let sizing_type = adt::sizing_type_of(ccx, &*repr, true);
-            let sized_size = C_uint(ccx, llsize_of_alloc(ccx, sizing_type));
-            let sized_align = C_uint(ccx, llalign_of_min(ccx, sizing_type));
+            let sizing_type = adt::sizing_type_context_of(ccx, &*repr, true);
+            debug!("DST {} sizing_type: {}", t, sizing_type.to_string());
+            let sized_size = llsize_of_alloc(ccx, sizing_type.prefix());
+            let sized_align = llalign_of_min(ccx, sizing_type.prefix());
+            debug!("DST {} statically sized prefix size: {} align: {}",
+                   t, sized_size, sized_align);
+            let sized_size = C_uint(ccx, sized_size);
+            let sized_align = C_uint(ccx, sized_align);
 
             // Recurse to get the size of the dynamically sized field (must be
             // the last field).
-            let fields = ty::struct_fields(bcx.tcx(), id, substs);
-            let last_field = fields[fields.len()-1];
-            let field_ty = last_field.mt.ty;
+            let last_field = def.struct_variant().fields.last().unwrap();
+            let field_ty = monomorphize::field_ty(bcx.tcx(), substs, last_field);
             let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info);
 
+            let dbloc = DebugLoc::None;
+
+            // FIXME (#26403, #27023): We should be adding padding
+            // to `sized_size` (to accommodate the `unsized_align`
+            // required of the unsized field that follows) before
+            // summing it with `sized_size`. (Note that since #26403
+            // is unfixed, we do not yet add the necessary padding
+            // here. But this is where the add would go.)
+
             // Return the sum of sizes and max of aligns.
-            let size = Add(bcx, sized_size, unsized_size);
-            let align = Select(bcx,
-                               ICmp(bcx, llvm::IntULT, sized_align, unsized_align),
-                               sized_align,
-                               unsized_align);
+            let mut size = Add(bcx, sized_size, unsized_size, dbloc);
+
+            // Issue #27023: If there is a drop flag, *now* we add 1
+            // to the size.  (We can do this without adding any
+            // padding because drop flags do not have any alignment
+            // constraints.)
+            if sizing_type.needs_drop_flag() {
+                size = Add(bcx, size, C_uint(bcx.ccx(), 1_u64), dbloc);
+            }
+
+            // Choose max of two known alignments (combined value must
+            // be aligned according to more restrictive of the two).
+            let align = match (const_to_opt_uint(sized_align), const_to_opt_uint(unsized_align)) {
+                (Some(sized_align), Some(unsized_align)) => {
+                    // If both alignments are constant, (the sized_align should always be), then
+                    // pick the correct alignment statically.
+                    C_uint(ccx, std::cmp::max(sized_align, unsized_align))
+                }
+                _ => Select(bcx,
+                            ICmp(bcx,
+                                 llvm::IntUGT,
+                                 sized_align,
+                                 unsized_align,
+                                 dbloc),
+                            sized_align,
+                            unsized_align)
+            };
+
+            // Issue #27023: must add any necessary padding to `size`
+            // (to make it a multiple of `align`) before returning it.
+            //
+            // Namely, the returned size should be, in C notation:
+            //
+            //   `size + ((size & (align-1)) ? align : 0)`
+            //
+            // emulated via the semi-standard fast bit trick:
+            //
+            //   `(size + (align-1)) & -align`
+
+            let addend = Sub(bcx, align, C_uint(bcx.ccx(), 1_u64), dbloc);
+            let size = And(
+                bcx, Add(bcx, size, addend, dbloc), Neg(bcx, align, dbloc), dbloc);
+
             (size, align)
         }
-        ty::ty_trait(..) => {
+        ty::TyTrait(..) => {
             // info points to the vtable and the second entry in the vtable is the
             // dynamic size of the object.
             let info = PointerCast(bcx, info, Type::int(bcx.ccx()).ptr_to());
-            let size_ptr = GEPi(bcx, info, &[1u]);
-            let align_ptr = GEPi(bcx, info, &[2u]);
+            let size_ptr = GEPi(bcx, info, &[1]);
+            let align_ptr = GEPi(bcx, info, &[2]);
             (Load(bcx, size_ptr), Load(bcx, align_ptr))
         }
-        ty::ty_vec(unit_ty, None) => {
-            // The info in this case is the length of the vec, so the size is that
+        ty::TySlice(_) | ty::TyStr => {
+            let unit_ty = t.sequence_element_type(bcx.tcx());
+            // The info in this case is the length of the slice or str, so the size is that
             // times the unit size.
             let llunit_ty = sizing_type_of(bcx.ccx(), unit_ty);
+            let unit_align = llalign_of_min(bcx.ccx(), llunit_ty);
             let unit_size = llsize_of_alloc(bcx.ccx(), llunit_ty);
-            (Mul(bcx, info, C_uint(bcx.ccx(), unit_size)), C_uint(bcx.ccx(), 8u))
+            (Mul(bcx, info, C_uint(bcx.ccx(), unit_size), DebugLoc::None),
+             C_uint(bcx.ccx(), unit_align))
         }
-        _ => bcx.sess().bug(&format!("Unexpected unsized type, found {}",
-                                    bcx.ty_to_string(t))[])
+        _ => bcx.sess().bug(&format!("Unexpected unsized type, found {}", t))
     }
 }
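
The arithmetic above (sum the statically sized prefix with the tail, take the max alignment, then round the sum up to that alignment with the `(size + (align-1)) & -align` trick) condenses into a small hedged sketch. Like the code above, and as the FIXME notes, it inserts no padding between the prefix and the tail:

fn round_up_to_align(size: u64, align: u64) -> u64 {
    debug_assert!(align.is_power_of_two());
    // Same trick as in the comment above, written with !(align - 1),
    // which is the same mask as -align in two's complement.
    (size + align - 1) & !(align - 1)
}

fn dst_size_and_align(prefix_size: u64, prefix_align: u64,
                      tail_size: u64, tail_align: u64) -> (u64, u64) {
    // Max of the two alignments; no padding between prefix and tail,
    // mirroring the simplification the FIXME above points out.
    let align = prefix_align.max(tail_align);
    let size = round_up_to_align(prefix_size + tail_size, align);
    (size, align)
}

fn main() {
    // A 9-byte sized prefix followed by a 16-byte, 8-aligned unsized tail:
    // 9 + 16 = 25, rounded up to alignment 8 gives 32.
    assert_eq!(dst_size_and_align(9, 1, 16, 8), (32, 8));
    assert_eq!(round_up_to_align(25, 8), 32);
}
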
 
-fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, t: Ty<'tcx>)
+fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueKind<'tcx>)
                               -> Block<'blk, 'tcx> {
+    let t = g.ty();
+    let skip_dtor = match g { DropGlueKind::Ty(_) => false, DropGlueKind::TyContents(_) => true };
     // NB: v0 is an *alias* of type t here, not a direct value.
     let _icx = push_ctxt("make_drop_glue");
+
+    // Only drop the value when it ... well, we used to check for
+    // non-null, (and maybe we need to continue doing so), but we now
+    // must definitely check for special bit-patterns corresponding to
+    // the special dtor markings.
+
+    let inttype = Type::int(bcx.ccx());
+    let dropped_pattern = C_integral(inttype, adt::dtor_done_usize(bcx.fcx.ccx) as u64, false);
+
     match t.sty {
-        ty::ty_uniq(content_ty) => {
-            match content_ty.sty {
-                ty::ty_vec(ty, None) => {
-                    tvec::make_drop_glue_unboxed(bcx, v0, ty, true)
-                }
-                ty::ty_str => {
-                    let unit_ty = ty::sequence_element_type(bcx.tcx(), content_ty);
-                    tvec::make_drop_glue_unboxed(bcx, v0, unit_ty, true)
-                }
-                ty::ty_trait(..) => {
-                    let lluniquevalue = GEPi(bcx, v0, &[0, abi::FAT_PTR_ADDR]);
-                    // Only drop the value when it is non-null
-                    let concrete_ptr = Load(bcx, lluniquevalue);
-                    with_cond(bcx, IsNotNull(bcx, concrete_ptr), |bcx| {
-                        let dtor_ptr = Load(bcx, GEPi(bcx, v0, &[0, abi::FAT_PTR_EXTRA]));
-                        let dtor = Load(bcx, dtor_ptr);
-                        Call(bcx,
-                             dtor,
-                             &[PointerCast(bcx, lluniquevalue, Type::i8p(bcx.ccx()))],
-                             None);
-                        bcx
-                    })
-                }
-                ty::ty_struct(..) if !type_is_sized(bcx.tcx(), content_ty) => {
-                    let llval = GEPi(bcx, v0, &[0, abi::FAT_PTR_ADDR]);
-                    let llbox = Load(bcx, llval);
-                    let not_null = IsNotNull(bcx, llbox);
-                    with_cond(bcx, not_null, |bcx| {
-                        let bcx = drop_ty(bcx, v0, content_ty, None);
-                        let info = GEPi(bcx, v0, &[0, abi::FAT_PTR_EXTRA]);
-                        let info = Load(bcx, info);
-                        let (llsize, llalign) = size_and_align_of_dst(bcx, content_ty, info);
-                        trans_exchange_free_dyn(bcx, llbox, llsize, llalign)
+        ty::TyBox(content_ty) => {
+            // Support for TyBox is built-in and its drop glue is
+            // special. It may move to library and have Drop impl. As
+            // a safe-guard, assert TyBox not used with TyContents.
+            assert!(!skip_dtor);
+            if !type_is_sized(bcx.tcx(), content_ty) {
+                let llval = expr::get_dataptr(bcx, v0);
+                let llbox = Load(bcx, llval);
+                let llbox_as_usize = PtrToInt(bcx, llbox, Type::int(bcx.ccx()));
+                let drop_flag_not_dropped_already =
+                    ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None);
+                with_cond(bcx, drop_flag_not_dropped_already, |bcx| {
+                    let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None);
+                    let info = expr::get_meta(bcx, v0);
+                    let info = Load(bcx, info);
+                    let (llsize, llalign) = size_and_align_of_dst(bcx, content_ty, info);
+
+                    // `Box<ZeroSizeType>` does not allocate.
+                    let needs_free = ICmp(bcx,
+                                          llvm::IntNE,
+                                          llsize,
+                                          C_uint(bcx.ccx(), 0u64),
+                                          DebugLoc::None);
+                    with_cond(bcx, needs_free, |bcx| {
+                        trans_exchange_free_dyn(bcx, llbox, llsize, llalign, DebugLoc::None)
                     })
-                }
-                _ => {
-                    assert!(type_is_sized(bcx.tcx(), content_ty));
-                    let llval = v0;
-                    let llbox = Load(bcx, llval);
-                    let not_null = IsNotNull(bcx, llbox);
-                    with_cond(bcx, not_null, |bcx| {
-                        let bcx = drop_ty(bcx, llbox, content_ty, None);
-                        trans_exchange_free_ty(bcx, llbox, content_ty)
-                    })
-                }
+                })
+            } else {
+                let llval = v0;
+                let llbox = Load(bcx, llval);
+                let llbox_as_usize = PtrToInt(bcx, llbox, inttype);
+                let drop_flag_not_dropped_already =
+                    ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None);
+                with_cond(bcx, drop_flag_not_dropped_already, |bcx| {
+                    let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None);
+                    trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None)
+                })
             }
         }
-        ty::ty_struct(did, substs) | ty::ty_enum(did, substs) => {
-            let tcx = bcx.tcx();
-            match ty::ty_dtor(tcx, did) {
-                ty::TraitDtor(dtor, true) => {
+        ty::TyStruct(def, _) | ty::TyEnum(def, _) => {
+            match (def.dtor_kind(), skip_dtor) {
+                (ty::TraitDtor(true), false) => {
                     // FIXME(16758) Since the struct is unsized, it is hard to
                     // find the drop flag (which is at the end of the struct).
                     // Let's just ignore the flag and pretend everything will be
                     // OK.
                     if type_is_sized(bcx.tcx(), t) {
-                        trans_struct_drop_flag(bcx, t, v0, dtor, did, substs)
+                        trans_struct_drop_flag(bcx, t, v0)
                     } else {
                         // Give the user a heads up that we are doing something
                         // stupid and dangerous.
                         bcx.sess().warn(&format!("Ignoring drop flag in destructor for {} \
                                                  because the struct is unsized. See issue \
-                                                 #16758",
-                                                bcx.ty_to_string(t))[]);
-                        trans_struct_drop(bcx, t, v0, dtor, did, substs)
+                                                 #16758", t));
+                        trans_struct_drop(bcx, t, v0)
                     }
                 }
-                ty::TraitDtor(dtor, false) => {
-                    trans_struct_drop(bcx, t, v0, dtor, did, substs)
+                (ty::TraitDtor(false), false) => {
+                    trans_struct_drop(bcx, t, v0)
                 }
-                ty::NoDtor => {
+                (ty::NoDtor, _) | (_, true) => {
                     // No dtor? Just the default case
-                    iter_structural_ty(bcx, v0, t, |bb, vv, tt| drop_ty(bb, vv, tt, None))
+                    iter_structural_ty(bcx, v0, t, |bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None))
                 }
             }
         }
-        ty::ty_unboxed_closure(..) => iter_structural_ty(bcx,
-                                                         v0,
-                                                         t,
-                                                         |bb, vv, tt| drop_ty(bb, vv, tt, None)),
-        ty::ty_trait(..) => {
-            // No need to do a null check here (as opposed to the Box<trait case
-            // above), because this happens for a trait field in an unsized
-            // struct. If anything is null, it is the whole struct and we won't
-            // get here.
-            let lluniquevalue = GEPi(bcx, v0, &[0, abi::FAT_PTR_ADDR]);
-            let dtor_ptr = Load(bcx, GEPi(bcx, v0, &[0, abi::FAT_PTR_EXTRA]));
-            let dtor = Load(bcx, dtor_ptr);
+        ty::TyTrait(..) => {
+            // No support in vtable for distinguishing destroying with
+            // versus without calling Drop::drop. Assert caller is
+            // okay with always calling the Drop impl, if any.
+            assert!(!skip_dtor);
+            let data_ptr = expr::get_dataptr(bcx, v0);
+            let vtable_ptr = Load(bcx, expr::get_meta(bcx, v0));
+            let dtor = Load(bcx, vtable_ptr);
             Call(bcx,
                  dtor,
-                 &[PointerCast(bcx, Load(bcx, lluniquevalue), Type::i8p(bcx.ccx()))],
-                 None);
+                 &[PointerCast(bcx, Load(bcx, data_ptr), Type::i8p(bcx.ccx()))],
+                 None,
+                 DebugLoc::None);
             bcx
         }
-        ty::ty_vec(ty, None) => tvec::make_drop_glue_unboxed(bcx, v0, ty, false),
         _ => {
-            assert!(type_is_sized(bcx.tcx(), t));
-            if type_needs_drop(bcx.tcx(), t) &&
-                ty::type_is_structural(t) {
-                iter_structural_ty(bcx, v0, t, |bb, vv, tt| drop_ty(bb, vv, tt, None))
+            if bcx.fcx.type_needs_drop(t) {
+                iter_structural_ty(bcx,
+                                   v0,
+                                   t,
+                                   |bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None))
             } else {
                 bcx
             }
         }
     }
 }
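
The TyTrait arm above loads the destructor from slot 0 of the vtable, while size_and_align_of_dst earlier in this file reads the size from slot 1 and the alignment from slot 2. A hedged sketch of that layout assumption, with an invented struct and values chosen purely for illustration:

struct Vtable {
    drop_glue: fn(*mut ()),
    size: usize,
    align: usize,
    // ...method pointers would follow
}

fn drop_trait_object(data: *mut (), vtable: &Vtable) {
    // Mirrors the TyTrait arm: the destructor sits in slot 0 of the vtable.
    (vtable.drop_glue)(data);
}

fn size_and_align(vtable: &Vtable) -> (usize, usize) {
    // Mirrors size_and_align_of_dst: size in slot 1, alignment in slot 2.
    (vtable.size, vtable.align)
}

fn main() {
    fn noop_glue(_: *mut ()) {
        println!("drop glue called");
    }
    let vt = Vtable { drop_glue: noop_glue, size: 24, align: 8 };
    drop_trait_object(std::ptr::null_mut(), &vt);
    assert_eq!(size_and_align(&vt), (24, 8));
}
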
-
-// Generates the declaration for (but doesn't emit) a type descriptor.
-pub fn declare_tydesc<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>)
-                                -> tydesc_info<'tcx> {
-    // If emit_tydescs already ran, then we shouldn't be creating any new
-    // tydescs.
-    assert!(!ccx.finished_tydescs().get());
-
-    let llty = type_of(ccx, t);
-
-    if ccx.sess().count_type_sizes() {
-        println!("{}\t{}", llsize_of_real(ccx, llty),
-                 ppaux::ty_to_string(ccx.tcx(), t));
-    }
-
-    let llsize = llsize_of(ccx, llty);
-    let llalign = llalign_of(ccx, llty);
-    let name = mangle_internal_name_by_type_and_seq(ccx, t, "tydesc");
-    debug!("+++ declare_tydesc {} {}", ppaux::ty_to_string(ccx.tcx(), t), name);
-    let buf = CString::from_slice(name.as_bytes());
-    let gvar = unsafe {
-        llvm::LLVMAddGlobal(ccx.llmod(), ccx.tydesc_type().to_ref(),
-                            buf.as_ptr())
-    };
-    note_unique_llvm_symbol(ccx, name);
-
-    let ty_name = token::intern_and_get_ident(
-        &ppaux::ty_to_string(ccx.tcx(), t)[]);
-    let ty_name = C_str_slice(ccx, ty_name);
-
-    debug!("--- declare_tydesc {}", ppaux::ty_to_string(ccx.tcx(), t));
-    tydesc_info {
-        ty: t,
-        tydesc: gvar,
-        size: llsize,
-        align: llalign,
-        name: ty_name,
-    }
-}
-
-fn declare_generic_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>,
-                                  llfnty: Type, name: &str) -> (String, ValueRef) {
-    let _icx = push_ctxt("declare_generic_glue");
-    let fn_nm = mangle_internal_name_by_type_and_seq(
-        ccx,
-        t,
-        &format!("glue_{}", name)[]);
-    let llfn = decl_cdecl_fn(ccx, &fn_nm[], llfnty, ty::mk_nil(ccx.tcx()));
-    note_unique_llvm_symbol(ccx, fn_nm.clone());
-    return (fn_nm, llfn);
-}
-
-fn make_generic_glue<'a, 'tcx, F>(ccx: &CrateContext<'a, 'tcx>,
-                                  t: Ty<'tcx>,
-                                  llfn: ValueRef,
-                                  helper: F,
-                                  name: &str)
-                                  -> ValueRef where
-    F: for<'blk> FnOnce(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
-{
-    let _icx = push_ctxt("make_generic_glue");
-    let glue_name = format!("glue {} {}", name, ty_to_short_str(ccx.tcx(), t));
-    let _s = StatRecorder::new(ccx, glue_name);
-
-    let arena = TypedArena::new();
-    let empty_param_substs = Substs::trans_empty();
-    let fcx = new_fn_ctxt(ccx, llfn, ast::DUMMY_NODE_ID, false,
-                          ty::FnConverging(ty::mk_nil(ccx.tcx())),
-                          &empty_param_substs, None, &arena);
-
-    let bcx = init_function(&fcx, false, ty::FnConverging(ty::mk_nil(ccx.tcx())));
-
-    update_linkage(ccx, llfn, None, OriginalTranslation);
-
-    ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1u);
-    // All glue functions take values passed *by alias*; this is a
-    // requirement since in many contexts glue is invoked indirectly and
-    // the caller has no idea if it's dealing with something that can be
-    // passed by value.
-    //
-    // llfn is expected be declared to take a parameter of the appropriate
-    // type, so we don't need to explicitly cast the function parameter.
-
-    let llrawptr0 = get_param(llfn, fcx.arg_pos(0) as c_uint);
-    let bcx = helper(bcx, llrawptr0, t);
-    finish_fn(&fcx, bcx, ty::FnConverging(ty::mk_nil(ccx.tcx())));
-
-    llfn
-}
-
-pub fn emit_tydescs(ccx: &CrateContext) {
-    let _icx = push_ctxt("emit_tydescs");
-    // As of this point, allow no more tydescs to be created.
-    ccx.finished_tydescs().set(true);
-    let glue_fn_ty = Type::generic_glue_fn(ccx).ptr_to();
-    for (_, ti) in ccx.tydescs().borrow().iter() {
-        // Each of the glue functions needs to be cast to a generic type
-        // before being put into the tydesc because we only have a singleton
-        // tydesc type. Then we'll recast each function to its real type when
-        // calling it.
-        let drop_glue = consts::ptrcast(get_drop_glue(ccx, ti.ty), glue_fn_ty);
-        ccx.stats().n_real_glues.set(ccx.stats().n_real_glues.get() + 1);
-
-        let tydesc = C_named_struct(ccx.tydesc_type(),
-                                    &[ti.size, // size
-                                      ti.align, // align
-                                      drop_glue, // drop_glue
-                                      ti.name]); // name
-
-        unsafe {
-            let gvar = ti.tydesc;
-            llvm::LLVMSetInitializer(gvar, tydesc);
-            llvm::LLVMSetGlobalConstant(gvar, True);
-            llvm::SetLinkage(gvar, llvm::InternalLinkage);
-        }
-    };
-}