// src/librustc_trans/glue.rs
// Imported upstream version 1.9.0+dfsg1 (via git.proxmox.com rustc.git mirror).
1 // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
//! Code relating to drop glue.
15 use std;
16
17 use back::symbol_names;
18 use llvm;
19 use llvm::{ValueRef, get_param};
20 use middle::lang_items::ExchangeFreeFnLangItem;
21 use rustc::ty::subst::{Substs};
22 use rustc::traits;
23 use rustc::ty::{self, Ty, TyCtxt};
24 use abi::{Abi, FnType};
25 use adt;
26 use adt::GetDtorType; // for tcx.dtor_type()
27 use base::*;
28 use build::*;
29 use callee::{Callee, ArgVals};
30 use cleanup;
31 use cleanup::CleanupMethods;
32 use collector::{self, TransItem};
33 use common::*;
34 use debuginfo::DebugLoc;
35 use declare;
36 use expr;
37 use machine::*;
38 use monomorphize;
39 use type_of::{type_of, sizing_type_of, align_of};
40 use type_::Type;
41 use value::Value;
42
43 use arena::TypedArena;
44 use syntax::codemap::DUMMY_SP;
45
46 pub fn trans_exchange_free_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
47 v: ValueRef,
48 size: ValueRef,
49 align: ValueRef,
50 debug_loc: DebugLoc)
51 -> Block<'blk, 'tcx> {
52 let _icx = push_ctxt("trans_exchange_free");
53
54 let def_id = langcall(bcx, None, "", ExchangeFreeFnLangItem);
55 let args = [PointerCast(bcx, v, Type::i8p(bcx.ccx())), size, align];
56 Callee::def(bcx.ccx(), def_id, bcx.tcx().mk_substs(Substs::empty()))
57 .call(bcx, debug_loc, ArgVals(&args), None).bcx
58 }
59
60 pub fn trans_exchange_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
61 v: ValueRef,
62 size: u64,
63 align: u32,
64 debug_loc: DebugLoc)
65 -> Block<'blk, 'tcx> {
66 trans_exchange_free_dyn(cx,
67 v,
68 C_uint(cx.ccx(), size),
69 C_uint(cx.ccx(), align),
70 debug_loc)
71 }
72
73 pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
74 ptr: ValueRef,
75 content_ty: Ty<'tcx>,
76 debug_loc: DebugLoc)
77 -> Block<'blk, 'tcx> {
78 assert!(type_is_sized(bcx.ccx().tcx(), content_ty));
79 let sizing_type = sizing_type_of(bcx.ccx(), content_ty);
80 let content_size = llsize_of_alloc(bcx.ccx(), sizing_type);
81
82 // `Box<ZeroSizeType>` does not allocate.
83 if content_size != 0 {
84 let content_align = align_of(bcx.ccx(), content_ty);
85 trans_exchange_free(bcx, ptr, content_size, content_align, debug_loc)
86 } else {
87 bcx
88 }
89 }
90
91 pub fn type_needs_drop<'tcx>(tcx: &TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool {
92 tcx.type_needs_drop_given_env(ty, &tcx.empty_parameter_environment())
93 }
94
95 pub fn get_drop_glue_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
96 t: Ty<'tcx>) -> Ty<'tcx> {
97 let tcx = ccx.tcx();
98 // Even if there is no dtor for t, there might be one deeper down and we
99 // might need to pass in the vtable ptr.
100 if !type_is_sized(tcx, t) {
101 return t
102 }
103
104 // FIXME (#22815): note that type_needs_drop conservatively
105 // approximates in some cases and may say a type expression
106 // requires drop glue when it actually does not.
107 //
108 // (In this case it is not clear whether any harm is done, i.e.
109 // erroneously returning `t` in some cases where we could have
110 // returned `tcx.types.i8` does not appear unsound. The impact on
111 // code quality is unknown at this time.)
112
113 if !type_needs_drop(&tcx, t) {
114 return tcx.types.i8;
115 }
116 match t.sty {
117 ty::TyBox(typ) if !type_needs_drop(&tcx, typ)
118 && type_is_sized(tcx, typ) => {
119 let llty = sizing_type_of(ccx, typ);
120 // `Box<ZeroSizeType>` does not allocate.
121 if llsize_of_alloc(ccx, llty) == 0 {
122 tcx.types.i8
123 } else {
124 t
125 }
126 }
127 _ => t
128 }
129 }
130
131 pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
132 v: ValueRef,
133 t: Ty<'tcx>,
134 debug_loc: DebugLoc) -> Block<'blk, 'tcx> {
135 drop_ty_core(bcx, v, t, debug_loc, false, None)
136 }
137
/// Core entry point for emitting a drop of the value behind `v`.
///
/// * `skip_dtor` — when true, use glue that drops only the contents,
///   bypassing any `impl Drop` on `t` itself (see `DropGlueKind::TyContents`).
/// * `drop_hint` — an optional runtime flag; when present, the glue call is
///   guarded so moved-away values are not dropped again.
pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                v: ValueRef,
                                t: Ty<'tcx>,
                                debug_loc: DebugLoc,
                                skip_dtor: bool,
                                drop_hint: Option<cleanup::DropHintValue>)
                                -> Block<'blk, 'tcx> {
    // NB: v is an *alias* of type t here, not a direct value.
    debug!("drop_ty_core(t={:?}, skip_dtor={} drop_hint={:?})", t, skip_dtor, drop_hint);
    let _icx = push_ctxt("drop_ty");
    let mut bcx = bcx;
    if bcx.fcx.type_needs_drop(t) {
        let ccx = bcx.ccx();
        let g = if skip_dtor {
            DropGlueKind::TyContents(t)
        } else {
            DropGlueKind::Ty(t)
        };
        let glue = get_drop_glue_core(ccx, g);
        // The glue is keyed on a normalized type (see `get_drop_glue_type`);
        // cast the pointer to match when the two differ.
        let glue_type = get_drop_glue_type(ccx, t);
        let ptr = if glue_type != t {
            PointerCast(bcx, v, type_of(ccx, glue_type).ptr_to())
        } else {
            v
        };

        match drop_hint {
            Some(drop_hint) => {
                // Only call the glue when the hint byte says the value was
                // not already moved away (!= DTOR_MOVED_HINT).
                let hint_val = load_ty(bcx, drop_hint.value(), bcx.tcx().types.u8);
                let moved_val =
                    C_integral(Type::i8(bcx.ccx()), adt::DTOR_MOVED_HINT as u64, false);
                let may_need_drop =
                    ICmp(bcx, llvm::IntNE, hint_val, moved_val, DebugLoc::None);
                bcx = with_cond(bcx, may_need_drop, |cx| {
                    Call(cx, glue, &[ptr], debug_loc);
                    cx
                })
            }
            None => {
                // No drop-hint ==> call standard drop glue
                Call(bcx, glue, &[ptr], debug_loc);
            }
        }
    }
    bcx
}
184
185 pub fn drop_ty_immediate<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
186 v: ValueRef,
187 t: Ty<'tcx>,
188 debug_loc: DebugLoc,
189 skip_dtor: bool)
190 -> Block<'blk, 'tcx> {
191 let _icx = push_ctxt("drop_ty_immediate");
192 let vp = alloc_ty(bcx, t, "");
193 call_lifetime_start(bcx, vp);
194 store_ty(bcx, v, vp, t);
195 let bcx = drop_ty_core(bcx, vp, t, debug_loc, skip_dtor, None);
196 call_lifetime_end(bcx, vp);
197 bcx
198 }
199
200 pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ValueRef {
201 get_drop_glue_core(ccx, DropGlueKind::Ty(t))
202 }
203
/// Selects which flavor of drop glue to build for a type.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub enum DropGlueKind<'tcx> {
    /// The normal path; runs the dtor, and then recurs on the contents
    Ty(Ty<'tcx>),
    /// Skips the dtor, if any, for ty; drops the contents directly.
    /// Note that the dtor is only skipped at the most *shallow*
    /// level, namely, an `impl Drop for Ty` itself. So, for example,
    /// if Ty is Newtype(S) then only the Drop impl for Newtype itself
    /// will be skipped, while the Drop impl for S, if any, will be
    /// invoked.
    TyContents(Ty<'tcx>),
}
216
217 impl<'tcx> DropGlueKind<'tcx> {
218 fn ty(&self) -> Ty<'tcx> {
219 match *self { DropGlueKind::Ty(t) | DropGlueKind::TyContents(t) => t }
220 }
221
222 fn map_ty<F>(&self, mut f: F) -> DropGlueKind<'tcx> where F: FnMut(Ty<'tcx>) -> Ty<'tcx>
223 {
224 match *self {
225 DropGlueKind::Ty(t) => DropGlueKind::Ty(f(t)),
226 DropGlueKind::TyContents(t) => DropGlueKind::TyContents(f(t)),
227 }
228 }
229 }
230
/// Returns (declaring and translating if necessary) the LLVM function that
/// implements drop glue for `g`. The glue is declared as `fn(*mut i8)` with
/// the argument type patched to a pointer to the normalized dropped type.
fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                g: DropGlueKind<'tcx>) -> ValueRef {
    debug!("make drop glue for {:?}", g);
    // Normalize the type so that types with equivalent glue share one
    // function (see `get_drop_glue_type`).
    let g = g.map_ty(|t| get_drop_glue_type(ccx, t));
    debug!("drop glue type {:?}", g);
    // Fast path: glue already translated (or declared) in this codegen unit.
    match ccx.drop_glues().borrow().get(&g) {
        Some(&glue) => return glue,
        _ => { }
    }
    let t = g.ty();

    let tcx = ccx.tcx();
    let sig = ty::FnSig {
        inputs: vec![tcx.mk_mut_ptr(tcx.types.i8)],
        output: ty::FnOutput::FnConverging(tcx.mk_nil()),
        variadic: false,
    };
    // Create a FnType for fn(*mut i8) and substitute the real type in
    // later - that prevents FnType from splitting fat pointers up.
    let mut fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]);
    fn_ty.args[0].original_ty = type_of(ccx, t).ptr_to();
    let llfnty = fn_ty.llvm_type(ccx);

    // To avoid infinite recursion, don't `make_drop_glue` until after we've
    // added the entry to the `drop_glues` cache.
    if let Some(old_sym) = ccx.available_drop_glues().borrow().get(&g) {
        let llfn = declare::declare_cfn(ccx, &old_sym, llfnty);
        ccx.drop_glues().borrow_mut().insert(g, llfn);
        return llfn;
    };

    let suffix = match g {
        DropGlueKind::Ty(_) => "drop",
        DropGlueKind::TyContents(_) => "drop_contents",
    };

    let fn_nm = symbol_names::internal_name_from_type_and_suffix(ccx, t, suffix);
    assert!(declare::get_defined_value(ccx, &fn_nm).is_none());
    let llfn = declare::declare_cfn(ccx, &fn_nm, llfnty);
    // Register both caches *before* translating the body so that recursive
    // types find the declaration instead of recursing forever.
    ccx.available_drop_glues().borrow_mut().insert(g, fn_nm);
    ccx.drop_glues().borrow_mut().insert(g, llfn);

    let _s = StatRecorder::new(ccx, format!("drop {:?}", t));

    let empty_substs = ccx.tcx().mk_substs(Substs::empty());
    // The arena must outlive the FunctionContext that borrows from it.
    let (arena, fcx): (TypedArena<_>, FunctionContext);
    arena = TypedArena::new();
    fcx = FunctionContext::new(ccx, llfn, fn_ty, None, empty_substs, &arena);

    let bcx = fcx.init(false, None);

    update_linkage(ccx, llfn, None, OriginalTranslation);

    ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1);
    // All glue functions take values passed *by alias*; this is a
    // requirement since in many contexts glue is invoked indirectly and
    // the caller has no idea if it's dealing with something that can be
    // passed by value.
    //
    // llfn is expected be declared to take a parameter of the appropriate
    // type, so we don't need to explicitly cast the function parameter.

    let bcx = make_drop_glue(bcx, get_param(llfn, 0), g);
    fcx.finish(bcx, DebugLoc::None);

    llfn
}
298
/// Emits a drop-flag check around `trans_struct_drop`: the destructor for
/// `t` is invoked only when the stored flag still equals `DTOR_NEEDED`.
/// Optionally also emits a sanity trap for corrupted flag values.
fn trans_struct_drop_flag<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                                      t: Ty<'tcx>,
                                      struct_data: ValueRef)
                                      -> Block<'blk, 'tcx> {
    assert!(type_is_sized(bcx.tcx(), t), "Precondition: caller must ensure t is sized");

    let repr = adt::represent_type(bcx.ccx(), t);
    let drop_flag = unpack_datum!(bcx, adt::trans_drop_flag_ptr(bcx, &repr, struct_data));
    let loaded = load_ty(bcx, drop_flag.val, bcx.tcx().dtor_type());
    let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
    let init_val = C_integral(drop_flag_llty, adt::DTOR_NEEDED as u64, false);

    // Optional sanity check: if the flag is neither "needed" nor "done",
    // the value is corrupted — trap via llvm.debugtrap.
    let bcx = if !bcx.ccx().check_drop_flag_for_sanity() {
        bcx
    } else {
        let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
        let done_val = C_integral(drop_flag_llty, adt::DTOR_DONE as u64, false);
        let not_init = ICmp(bcx, llvm::IntNE, loaded, init_val, DebugLoc::None);
        let not_done = ICmp(bcx, llvm::IntNE, loaded, done_val, DebugLoc::None);
        let drop_flag_neither_initialized_nor_cleared =
            And(bcx, not_init, not_done, DebugLoc::None);
        with_cond(bcx, drop_flag_neither_initialized_nor_cleared, |cx| {
            let llfn = cx.ccx().get_intrinsic(&("llvm.debugtrap"));
            Call(cx, llfn, &[], DebugLoc::None);
            cx
        })
    };

    // Run the destructor only if the flag says it is still needed.
    let drop_flag_dtor_needed = ICmp(bcx, llvm::IntEQ, loaded, init_val, DebugLoc::None);
    with_cond(bcx, drop_flag_dtor_needed, |cx| {
        trans_struct_drop(cx, t, struct_data)
    })
}
/// Translates a call to the user-defined `Drop::drop` impl for the ADT `t`,
/// with the drop of its fields scheduled as a cleanup so that the fields
/// are still dropped if the user destructor panics.
fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                 t: Ty<'tcx>,
                                 v0: ValueRef)
                                 -> Block<'blk, 'tcx>
{
    debug!("trans_struct_drop t: {}", t);
    let tcx = bcx.tcx();
    let mut bcx = bcx;

    let def = t.ty_adt_def().unwrap();

    // Be sure to put the contents into a scope so we can use an invoke
    // instruction to call the user destructor but still call the field
    // destructors if the user destructor panics.
    //
    // FIXME (#14875) panic-in-drop semantics might be unsupported; we
    // might well consider changing below to more direct code.
    let contents_scope = bcx.fcx.push_custom_cleanup_scope();

    // Issue #23611: schedule cleanup of contents, re-inspecting the
    // discriminant (if any) in case of variant swap in drop code.
    bcx.fcx.schedule_drop_adt_contents(cleanup::CustomScope(contents_scope), v0, t);

    // Sized values are passed as a single pointer; unsized values are
    // passed as the (data, metadata) pair of the fat pointer.
    let (sized_args, unsized_args);
    let args: &[ValueRef] = if type_is_sized(tcx, t) {
        sized_args = [v0];
        &sized_args
    } else {
        unsized_args = [Load(bcx, expr::get_dataptr(bcx, v0)), Load(bcx, expr::get_meta(bcx, v0))];
        &unsized_args
    };

    // Resolve the `Drop` impl for `t` to find the destructor to call.
    let trait_ref = ty::Binder(ty::TraitRef {
        def_id: tcx.lang_items.drop_trait().unwrap(),
        substs: tcx.mk_substs(Substs::empty().with_self_ty(t))
    });
    let vtbl = match fulfill_obligation(bcx.ccx(), DUMMY_SP, trait_ref) {
        traits::VtableImpl(data) => data,
        _ => bug!("dtor for {:?} is not an impl???", t)
    };
    let dtor_did = def.destructor().unwrap();
    bcx = Callee::def(bcx.ccx(), dtor_did, vtbl.substs)
        .call(bcx, DebugLoc::None, ArgVals(args), None).bcx;

    // Pop the scope, emitting the scheduled field drops.
    bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope)
}
378
/// Computes the size and alignment, as LLVM values, of a value of the
/// (possibly unsized) type `t`, using `info` — the fat-pointer metadata
/// (slice length or trait-object vtable pointer) — for the dynamic part.
pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                                         t: Ty<'tcx>, info: ValueRef)
                                         -> (ValueRef, ValueRef) {
    debug!("calculate size of DST: {}; with lost info: {:?}",
           t, Value(info));
    // Sized types need no metadata: both results are constants.
    if type_is_sized(bcx.tcx(), t) {
        let sizing_type = sizing_type_of(bcx.ccx(), t);
        let size = llsize_of_alloc(bcx.ccx(), sizing_type);
        let align = align_of(bcx.ccx(), t);
        debug!("size_and_align_of_dst t={} info={:?} size: {} align: {}",
               t, Value(info), size, align);
        let size = C_uint(bcx.ccx(), size);
        let align = C_uint(bcx.ccx(), align);
        return (size, align);
    }
    // In unreachable code the values do not matter; emit undef.
    if bcx.is_unreachable() {
        let llty = Type::int(bcx.ccx());
        return (C_undef(llty), C_undef(llty));
    }
    match t.sty {
        ty::TyStruct(def, substs) => {
            let ccx = bcx.ccx();
            // First get the size of all statically known fields.
            // Don't use type_of::sizing_type_of because that expects t to be sized.
            assert!(!t.is_simd());
            let repr = adt::represent_type(ccx, t);
            let sizing_type = adt::sizing_type_context_of(ccx, &repr, true);
            debug!("DST {} sizing_type: {:?}", t, sizing_type);
            let sized_size = llsize_of_alloc(ccx, sizing_type.prefix());
            let sized_align = llalign_of_min(ccx, sizing_type.prefix());
            debug!("DST {} statically sized prefix size: {} align: {}",
                   t, sized_size, sized_align);
            let sized_size = C_uint(ccx, sized_size);
            let sized_align = C_uint(ccx, sized_align);

            // Recurse to get the size of the dynamically sized field (must be
            // the last field).
            let last_field = def.struct_variant().fields.last().unwrap();
            let field_ty = monomorphize::field_ty(bcx.tcx(), substs, last_field);
            let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info);

            // FIXME (#26403, #27023): We should be adding padding
            // to `sized_size` (to accommodate the `unsized_align`
            // required of the unsized field that follows) before
            // summing it with `sized_size`. (Note that since #26403
            // is unfixed, we do not yet add the necessary padding
            // here. But this is where the add would go.)

            // Return the sum of sizes and max of aligns.
            let mut size = bcx.add(sized_size, unsized_size);

            // Issue #27023: If there is a drop flag, *now* we add 1
            // to the size. (We can do this without adding any
            // padding because drop flags do not have any alignment
            // constraints.)
            if sizing_type.needs_drop_flag() {
                size = bcx.add(size, C_uint(bcx.ccx(), 1_u64));
            }

            // Choose max of two known alignments (combined value must
            // be aligned according to more restrictive of the two).
            let align = match (const_to_opt_uint(sized_align), const_to_opt_uint(unsized_align)) {
                (Some(sized_align), Some(unsized_align)) => {
                    // If both alignments are constant, (the sized_align should always be), then
                    // pick the correct alignment statically.
                    C_uint(ccx, std::cmp::max(sized_align, unsized_align))
                }
                _ => bcx.select(bcx.icmp(llvm::IntUGT, sized_align, unsized_align),
                                sized_align,
                                unsized_align)
            };

            // Issue #27023: must add any necessary padding to `size`
            // (to make it a multiple of `align`) before returning it.
            //
            // Namely, the returned size should be, in C notation:
            //
            //   `size + ((size & (align-1)) ? align : 0)`
            //
            // emulated via the semi-standard fast bit trick:
            //
            //   `(size + (align-1)) & -align`

            let addend = bcx.sub(align, C_uint(bcx.ccx(), 1_u64));
            let size = bcx.and(bcx.add(size, addend), bcx.neg(align));

            (size, align)
        }
        ty::TyTrait(..) => {
            // info points to the vtable and the second entry in the vtable is the
            // dynamic size of the object.
            let info = bcx.pointercast(info, Type::int(bcx.ccx()).ptr_to());
            let size_ptr = bcx.gepi(info, &[1]);
            let align_ptr = bcx.gepi(info, &[2]);
            (bcx.load(size_ptr), bcx.load(align_ptr))
        }
        ty::TySlice(_) | ty::TyStr => {
            let unit_ty = t.sequence_element_type(bcx.tcx());
            // The info in this case is the length of the str, so the size is that
            // times the unit size.
            let llunit_ty = sizing_type_of(bcx.ccx(), unit_ty);
            let unit_align = llalign_of_min(bcx.ccx(), llunit_ty);
            let unit_size = llsize_of_alloc(bcx.ccx(), llunit_ty);
            (bcx.mul(info, C_uint(bcx.ccx(), unit_size)),
             C_uint(bcx.ccx(), unit_align))
        }
        _ => bug!("Unexpected unsized type, found {}", t)
    }
}
488
/// Translates the body of the drop-glue function for `g` into block `bcx`,
/// dispatching on the type: boxes free their allocation, ADTs run their
/// dtor and/or drop their fields, trait objects call through the vtable,
/// and everything else recurses structurally.
fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueKind<'tcx>)
                              -> Block<'blk, 'tcx> {
    let t = g.ty();

    if collector::collecting_debug_information(bcx.ccx()) {
        bcx.ccx()
           .record_translation_item_as_generated(TransItem::DropGlue(bcx.tcx()
                                                                        .erase_regions(&t)));
    }

    let skip_dtor = match g { DropGlueKind::Ty(_) => false, DropGlueKind::TyContents(_) => true };
    // NB: v0 is an *alias* of type t here, not a direct value.
    let _icx = push_ctxt("make_drop_glue");

    // Only drop the value when it ... well, we used to check for
    // non-null, (and maybe we need to continue doing so), but we now
    // must definitely check for special bit-patterns corresponding to
    // the special dtor markings.

    let inttype = Type::int(bcx.ccx());
    let dropped_pattern = C_integral(inttype, adt::DTOR_DONE_U64, false);

    match t.sty {
        ty::TyBox(content_ty) => {
            // Support for TyBox is built-in and its drop glue is
            // special. It may move to library and have Drop impl. As
            // a safe-guard, assert TyBox not used with TyContents.
            assert!(!skip_dtor);
            if !type_is_sized(bcx.tcx(), content_ty) {
                // Unsized contents: v0 is a fat pointer; drop the contents,
                // then compute the dynamic size/align to free the box.
                let llval = expr::get_dataptr(bcx, v0);
                let llbox = Load(bcx, llval);
                let llbox_as_usize = PtrToInt(bcx, llbox, Type::int(bcx.ccx()));
                let drop_flag_not_dropped_already =
                    ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None);
                with_cond(bcx, drop_flag_not_dropped_already, |bcx| {
                    let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None);
                    let info = expr::get_meta(bcx, v0);
                    let info = Load(bcx, info);
                    let (llsize, llalign) =
                        size_and_align_of_dst(&bcx.build(), content_ty, info);

                    // `Box<ZeroSizeType>` does not allocate.
                    let needs_free = ICmp(bcx,
                                          llvm::IntNE,
                                          llsize,
                                          C_uint(bcx.ccx(), 0u64),
                                          DebugLoc::None);
                    with_cond(bcx, needs_free, |bcx| {
                        trans_exchange_free_dyn(bcx, llbox, llsize, llalign, DebugLoc::None)
                    })
                })
            } else {
                // Sized contents: guard against the "already dropped"
                // bit-pattern, then drop the contents and free the box.
                let llval = v0;
                let llbox = Load(bcx, llval);
                let llbox_as_usize = PtrToInt(bcx, llbox, inttype);
                let drop_flag_not_dropped_already =
                    ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None);
                with_cond(bcx, drop_flag_not_dropped_already, |bcx| {
                    let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None);
                    trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None)
                })
            }
        }
        ty::TyStruct(def, _) | ty::TyEnum(def, _) => {
            match (def.dtor_kind(), skip_dtor) {
                (ty::TraitDtor(true), false) => {
                    // Dtor with a drop flag.
                    // FIXME(16758) Since the struct is unsized, it is hard to
                    // find the drop flag (which is at the end of the struct).
                    // Lets just ignore the flag and pretend everything will be
                    // OK.
                    if type_is_sized(bcx.tcx(), t) {
                        trans_struct_drop_flag(bcx, t, v0)
                    } else {
                        // Give the user a heads up that we are doing something
                        // stupid and dangerous.
                        bcx.sess().warn(&format!("Ignoring drop flag in destructor for {} \
                                                  because the struct is unsized. See issue \
                                                  #16758", t));
                        trans_struct_drop(bcx, t, v0)
                    }
                }
                (ty::TraitDtor(false), false) => {
                    // Dtor without a drop flag: call it unconditionally.
                    trans_struct_drop(bcx, t, v0)
                }
                (ty::NoDtor, _) | (_, true) => {
                    // No dtor? Just the default case
                    iter_structural_ty(bcx, v0, t, |bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None))
                }
            }
        }
        ty::TyTrait(..) => {
            // No support in vtable for distinguishing destroying with
            // versus without calling Drop::drop. Assert caller is
            // okay with always calling the Drop impl, if any.
            assert!(!skip_dtor);
            // The destructor is the first entry of the vtable.
            let data_ptr = expr::get_dataptr(bcx, v0);
            let vtable_ptr = Load(bcx, expr::get_meta(bcx, v0));
            let dtor = Load(bcx, vtable_ptr);
            Call(bcx,
                 dtor,
                 &[PointerCast(bcx, Load(bcx, data_ptr), Type::i8p(bcx.ccx()))],
                 DebugLoc::None);
            bcx
        }
        _ => {
            // Everything else: recurse structurally over the components.
            if bcx.fcx.type_needs_drop(t) {
                iter_structural_ty(bcx,
                                   v0,
                                   t,
                                   |bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None))
            } else {
                bcx
            }
        }
    }
}