// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Code relating to drop glue.

use std;
use std::iter;

use llvm;
use llvm::{ValueRef, get_param};
use middle::lang_items::BoxFreeFnLangItem;
use rustc::ty::subst::{Kind, Substs};
use rustc::traits;
use rustc::ty::{self, layout, AdtDef, AdtKind, Ty, TypeFoldable};
use rustc::mir::tcx::LvalueTy;
use mir::lvalue::{Alignment, LvalueRef};
use adt;
use base::*;
use callee::Callee;
use cleanup::CleanupScope;
use common::*;
use machine::*;
use monomorphize;
use trans_item::TransItem;
use tvec;
use type_of::{type_of, sizing_type_of, align_of};
use type_::Type;
use value::Value;
use Disr;
use builder::Builder;

use syntax_pos::DUMMY_SP;

pub fn trans_exchange_free_ty<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, ptr: LvalueRef<'tcx>) {
    let content_ty = ptr.ty.to_ty(bcx.tcx());
    let def_id = langcall(bcx.tcx(), None, "", BoxFreeFnLangItem);
    let substs = bcx.tcx().mk_substs(iter::once(Kind::from(content_ty)));
    let callee = Callee::def(bcx.ccx, def_id, substs);

    let fn_ty = callee.direct_fn_type(bcx.ccx, &[]);

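    // Pass one argument for a thin pointer and two (data pointer plus the
    // extra/metadata word) for a fat pointer; the slice below selects the
    // right prefix of the pair.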
    let llret = bcx.call(callee.reify(bcx.ccx),
                         &[ptr.llval, ptr.llextra][..1 + ptr.has_extra() as usize], None);
    fn_ty.apply_attrs_callsite(llret);
}

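/// Returns the type used as the key for `t`'s drop glue. Types that need no
/// drop are all collapsed to `i8` so they can share one trivial glue; for
/// example (illustrative), `u32` and `Box<()>` both map to `i8`, while
/// `Vec<u8>` or an unsized type keeps its own type as the key.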
pub fn get_drop_glue_type<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Ty<'tcx> {
    assert!(t.is_normalized_for_trans());

    let t = scx.tcx().erase_regions(&t);

    // Even if there is no dtor for t, there might be one deeper down and we
    // might need to pass in the vtable ptr.
    if !scx.type_is_sized(t) {
        return t;
    }

    // FIXME (#22815): note that type_needs_drop conservatively
    // approximates in some cases and may say a type expression
    // requires drop glue when it actually does not.
    //
    // (In this case it is not clear whether any harm is done, i.e.
    // erroneously returning `t` in some cases where we could have
    // returned `tcx.types.i8` does not appear unsound. The impact on
    // code quality is unknown at this time.)

    if !scx.type_needs_drop(t) {
        return scx.tcx().types.i8;
    }
    match t.sty {
        ty::TyAdt(def, _) if def.is_box() => {
            let typ = t.boxed_ty();
            if !scx.type_needs_drop(typ) && scx.type_is_sized(typ) {
                scx.tcx().infer_ctxt((), traits::Reveal::All).enter(|infcx| {
                    let layout = t.layout(&infcx).unwrap();
                    if layout.size(&scx.tcx().data_layout).bytes() == 0 {
                        // `Box<ZeroSizeType>` does not allocate.
                        scx.tcx().types.i8
                    } else {
                        t
                    }
                })
            } else {
                t
            }
        }
        _ => t
    }
}

fn drop_ty<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, args: LvalueRef<'tcx>) {
    call_drop_glue(bcx, args, false, None)
}

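/// Emits a call to the drop glue for `args`, doing nothing if the type does
/// not need dropping. When `skip_dtor` is set, the outermost `Drop` impl is
/// skipped and only the contents are dropped (see `DropGlueKind::TyContents`).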
pub fn call_drop_glue<'a, 'tcx>(
    bcx: &Builder<'a, 'tcx>,
    mut args: LvalueRef<'tcx>,
    skip_dtor: bool,
    funclet: Option<&'a Funclet>,
) {
    let t = args.ty.to_ty(bcx.tcx());
    // NB: `args` is an *alias* of type t here, not a direct value.
    debug!("call_drop_glue(t={:?}, skip_dtor={})", t, skip_dtor);
    if bcx.ccx.shared().type_needs_drop(t) {
        let ccx = bcx.ccx;
        let g = if skip_dtor {
            DropGlueKind::TyContents(t)
        } else {
            DropGlueKind::Ty(t)
        };
        let glue = get_drop_glue_core(ccx, g);
        let glue_type = get_drop_glue_type(ccx.shared(), t);
        if glue_type != t {
            // The glue may have been instantiated for a different (but
            // compatible) key type; cast the pointer accordingly.
            args.llval = bcx.pointercast(args.llval, type_of(ccx, glue_type).ptr_to());
        }

        // Call the standard drop glue, passing the extra/metadata word only
        // for fat pointers.
        bcx.call(glue, &[args.llval, args.llextra][..1 + args.has_extra() as usize],
                 funclet.map(|b| b.bundle()));
    }
}

pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ValueRef {
    get_drop_glue_core(ccx, DropGlueKind::Ty(t))
}

#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub enum DropGlueKind<'tcx> {
    /// The normal path; runs the dtor, and then recurses on the contents.
    Ty(Ty<'tcx>),
    /// Skips the dtor, if any, for ty; drops the contents directly.
    /// Note that the dtor is only skipped at the most *shallow*
    /// level, namely, an `impl Drop for Ty` itself. So, for example,
    /// if Ty is Newtype(S) then only the Drop impl for Newtype itself
    /// will be skipped, while the Drop impl for S, if any, will be
    /// invoked.
    TyContents(Ty<'tcx>),
}

impl<'tcx> DropGlueKind<'tcx> {
    pub fn ty(&self) -> Ty<'tcx> {
        match *self { DropGlueKind::Ty(t) | DropGlueKind::TyContents(t) => t }
    }

    pub fn map_ty<F>(&self, mut f: F) -> DropGlueKind<'tcx> where F: FnMut(Ty<'tcx>) -> Ty<'tcx>
    {
        match *self {
            DropGlueKind::Ty(t) => DropGlueKind::Ty(f(t)),
            DropGlueKind::TyContents(t) => DropGlueKind::TyContents(f(t)),
        }
    }
}

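/// Looks up the (pre-declared) drop glue for `g`. Glue declarations are
/// created ahead of time, so failing to find one here is a compiler bug.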
fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKind<'tcx>) -> ValueRef {
    let g = g.map_ty(|t| get_drop_glue_type(ccx.shared(), t));
    match ccx.drop_glues().borrow().get(&g) {
        Some(&(glue, _)) => glue,
        None => {
            bug!("Could not find drop glue for {:?} -- {} -- {}.",
                 g,
                 TransItem::DropGlue(g).to_raw_string(),
                 ccx.codegen_unit().name());
        }
    }
}

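/// Fills in the body of the previously declared drop-glue function for `g`:
/// it runs the destructor (unless `TyContents` asks us to skip it) and then
/// drops or frees the value's contents as the type requires.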
pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKind<'tcx>) {
    assert_eq!(g.ty(), get_drop_glue_type(ccx.shared(), g.ty()));
    let (llfn, _) = ccx.drop_glues().borrow().get(&g).unwrap().clone();

    let mut bcx = Builder::new_block(ccx, llfn, "entry-block");

    ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1);
    // All glue functions take values passed *by alias*; this is a
    // requirement since in many contexts glue is invoked indirectly and
    // the caller has no idea if it's dealing with something that can be
    // passed by value.
    //
    // llfn is expected to be declared to take a parameter of the appropriate
    // type, so we don't need to explicitly cast the function parameter.

    // NB: the parameter is an *alias* of type t here, not a direct value.
    // We used to check for non-null before dropping (and maybe we need to
    // continue doing so), but we now must definitely check for special
    // bit-patterns corresponding to the special dtor markings.
    let t = g.ty();

    let value = get_param(llfn, 0);
    let ptr = if ccx.shared().type_is_sized(t) {
        LvalueRef::new_sized_ty(value, t, Alignment::AbiAligned)
    } else {
        LvalueRef::new_unsized_ty(value, get_param(llfn, 1), t, Alignment::AbiAligned)
    };

    let skip_dtor = match g {
        DropGlueKind::Ty(_) => false,
        DropGlueKind::TyContents(_) => true
    };

    let bcx = match t.sty {
        ty::TyAdt(def, _) if def.is_box() => {
            // Support for Box is still built in, and its drop glue is
            // special despite having a dummy Drop impl in the library.
            assert!(!skip_dtor);
            let content_ty = t.boxed_ty();
            let ptr = if !bcx.ccx.shared().type_is_sized(content_ty) {
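                // The box itself is a fat pointer stored at `ptr.llval`; load
                // its data pointer and metadata halves separately.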
                let llbox = bcx.load(get_dataptr(&bcx, ptr.llval), None);
                let info = bcx.load(get_meta(&bcx, ptr.llval), None);
                LvalueRef::new_unsized_ty(llbox, info, content_ty, Alignment::AbiAligned)
            } else {
                LvalueRef::new_sized_ty(
                    bcx.load(ptr.llval, None),
                    content_ty, Alignment::AbiAligned)
            };
            drop_ty(&bcx, ptr);
            trans_exchange_free_ty(&bcx, ptr);
            bcx
        }
        ty::TyDynamic(..) => {
            // The vtable cannot distinguish destroying with versus without
            // calling Drop::drop, so assert that the caller is okay with
            // always calling the Drop impl, if any.
            assert!(!skip_dtor);
            let dtor = bcx.load(ptr.llextra, None);
            bcx.call(dtor, &[ptr.llval], None);
            bcx
        }
        ty::TyAdt(def, ..) if def.has_dtor() && !skip_dtor => {
            let shallow_drop = def.is_union();
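            // Unions never drop their fields automatically, so only the
            // user-defined destructor runs for them.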
            let tcx = bcx.tcx();

            let def = t.ty_adt_def().unwrap();

            // Be sure to put the contents into a scope so we can use an invoke
            // instruction to call the user destructor but still call the field
            // destructors if the user destructor panics.
            //
            // FIXME (#14875) panic-in-drop semantics might be unsupported; we
            // might well consider changing below to more direct code.
            // Issue #23611: schedule cleanup of contents, re-inspecting the
            // discriminant (if any) in case of variant swap in drop code.
            let contents_scope = if !shallow_drop {
                CleanupScope::schedule_drop_adt_contents(&bcx, ptr)
            } else {
                CleanupScope::noop()
            };

            let trait_ref = ty::Binder(ty::TraitRef {
                def_id: tcx.lang_items.drop_trait().unwrap(),
                substs: tcx.mk_substs_trait(t, &[])
            });
            let vtbl = match fulfill_obligation(bcx.ccx.shared(), DUMMY_SP, trait_ref) {
                traits::VtableImpl(data) => data,
                _ => bug!("dtor for {:?} is not an impl???", t)
            };
            let dtor_did = def.destructor().unwrap();
            let callee = Callee::def(bcx.ccx, dtor_did, vtbl.substs);
            let fn_ty = callee.direct_fn_type(bcx.ccx, &[]);
            let llret;
            let args = &[ptr.llval, ptr.llextra][..1 + ptr.has_extra() as usize];
            if let Some(landing_pad) = contents_scope.landing_pad {
                let normal_bcx = bcx.build_sibling_block("normal-return");
                llret = bcx.invoke(callee.reify(ccx), args, normal_bcx.llbb(), landing_pad, None);
                bcx = normal_bcx;
            } else {
                llret = bcx.call(callee.reify(bcx.ccx), args, None);
            }
            fn_ty.apply_attrs_callsite(llret);
            contents_scope.trans(&bcx);
            bcx
        }
        ty::TyAdt(def, ..) if def.is_union() => {
            bcx
        }
        _ => {
            if bcx.ccx.shared().type_needs_drop(t) {
                drop_structural_ty(bcx, ptr)
            } else {
                bcx
            }
        }
    };
    bcx.ret_void();
}

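/// Computes the size and alignment of a value of type `t`, which may be
/// dynamically sized. `info` carries the pointer metadata (slice length or
/// vtable pointer) and is only consulted when `t` is in fact unsized.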
pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, info: ValueRef)
                                       -> (ValueRef, ValueRef) {
    debug!("calculate size of DST: {}; with info: {:?}",
           t, Value(info));
    if bcx.ccx.shared().type_is_sized(t) {
        let sizing_type = sizing_type_of(bcx.ccx, t);
        let size = llsize_of_alloc(bcx.ccx, sizing_type);
        let align = align_of(bcx.ccx, t);
        debug!("size_and_align_of_dst t={} info={:?} size: {} align: {}",
               t, Value(info), size, align);
        let size = C_uint(bcx.ccx, size);
        let align = C_uint(bcx.ccx, align);
        return (size, align);
    }
    match t.sty {
        ty::TyAdt(def, substs) => {
            let ccx = bcx.ccx;
            // First get the size of all statically known fields.
            // Don't use type_of::sizing_type_of because that expects t to be
            // sized, and it also rounds up to alignment, which we want to
            // avoid, as the unsized field's alignment could be smaller.
            assert!(!t.is_simd());
            let layout = ccx.layout_of(t);
            debug!("DST {} layout: {:?}", t, layout);

            let (sized_size, sized_align) = match *layout {
                ty::layout::Layout::Univariant { ref variant, .. } => {
                    (variant.offsets.last().map_or(0, |o| o.bytes()), variant.align.abi())
                }
                _ => {
                    bug!("size_and_align_of_dst: expected Univariant for `{}`, found {:#?}",
                         t, layout);
                }
            };
            debug!("DST {} statically sized prefix size: {} align: {}",
                   t, sized_size, sized_align);
            let sized_size = C_uint(ccx, sized_size);
            let sized_align = C_uint(ccx, sized_align);

            // Recurse to get the size of the dynamically sized field (must be
            // the last field).
            let last_field = def.struct_variant().fields.last().unwrap();
            let field_ty = monomorphize::field_ty(bcx.tcx(), substs, last_field);
            let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info);

            // FIXME (#26403, #27023): We should be adding padding
            // to `sized_size` (to accommodate the `unsized_align`
            // required of the unsized field that follows) before
            // summing it with `unsized_size`. (Note that since #26403
            // is unfixed, we do not yet add the necessary padding
            // here. But this is where the add would go.)

            // Return the sum of sizes and max of aligns.
            let size = bcx.add(sized_size, unsized_size);

            // Choose the max of the two known alignments (the combined value
            // must be aligned according to the more restrictive of the two).
            let align = match (const_to_opt_u128(sized_align, false),
                               const_to_opt_u128(unsized_align, false)) {
                (Some(sized_align), Some(unsized_align)) => {
                    // If both alignments are constant (sized_align should
                    // always be), then pick the larger alignment statically.
                    C_uint(ccx, std::cmp::max(sized_align, unsized_align) as u64)
                }
                _ => bcx.select(bcx.icmp(llvm::IntUGT, sized_align, unsized_align),
                                sized_align,
                                unsized_align)
            };

            // Issue #27023: must add any necessary padding to `size`
            // (to make it a multiple of `align`) before returning it.
            //
            // Namely, the returned size should be, in C notation:
            //
            //   `size + ((size % align) ? align - (size % align) : 0)`
            //
            // computed via the semi-standard fast bit trick:
            //
            //   `(size + (align-1)) & -align`
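            //
            // For example (illustrative): size = 5, align = 4 gives
            // (5 + 3) & -4 = 8, i.e. 5 rounded up to the next multiple of 4.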

            let addend = bcx.sub(align, C_uint(bcx.ccx, 1_u64));
            let size = bcx.and(bcx.add(size, addend), bcx.neg(align));

            (size, align)
        }
        ty::TyDynamic(..) => {
            // info points to the vtable, whose second and third entries hold
            // the dynamic size and alignment of the object.
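            // (The vtable here is laid out as destructor, then size, then
            // alignment, hence the indices 1 and 2 below.)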
            let info = bcx.pointercast(info, Type::int(bcx.ccx).ptr_to());
            let size_ptr = bcx.gepi(info, &[1]);
            let align_ptr = bcx.gepi(info, &[2]);
            (bcx.load(size_ptr, None), bcx.load(align_ptr, None))
        }
        ty::TySlice(_) | ty::TyStr => {
            let unit_ty = t.sequence_element_type(bcx.tcx());
            // The info in this case is the length of the slice or str, so the
            // size is that times the unit size.
            let llunit_ty = sizing_type_of(bcx.ccx, unit_ty);
            let unit_align = llalign_of_min(bcx.ccx, llunit_ty);
            let unit_size = llsize_of_alloc(bcx.ccx, llunit_ty);
            (bcx.mul(info, C_uint(bcx.ccx, unit_size)),
             C_uint(bcx.ccx, unit_align))
        }
        _ => bug!("Unexpected unsized type, found {}", t)
    }
}

/// Iterates through the elements of a structural type, dropping them.
fn drop_structural_ty<'a, 'tcx>(
    cx: Builder<'a, 'tcx>,
    mut ptr: LvalueRef<'tcx>
) -> Builder<'a, 'tcx> {
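    // Helper: drop each field of the given variant of `adt_def` in
    // declaration order.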
    fn iter_variant_fields<'a, 'tcx>(
        cx: &'a Builder<'a, 'tcx>,
        av: LvalueRef<'tcx>,
        adt_def: &'tcx AdtDef,
        variant_index: usize,
        substs: &'tcx Substs<'tcx>
    ) {
        let variant = &adt_def.variants[variant_index];
        let tcx = cx.tcx();
        for (i, field) in variant.fields.iter().enumerate() {
            let arg = monomorphize::field_ty(tcx, substs, field);
            let (field_ptr, align) = av.trans_field_ptr(&cx, i);
            drop_ty(&cx, LvalueRef::new_sized_ty(field_ptr, arg, align));
        }
    }

    let mut cx = cx;
    let t = ptr.ty.to_ty(cx.tcx());
    match t.sty {
        ty::TyClosure(def_id, substs) => {
            for (i, upvar_ty) in substs.upvar_tys(def_id, cx.tcx()).enumerate() {
                let (llupvar, align) = ptr.trans_field_ptr(&cx, i);
                drop_ty(&cx, LvalueRef::new_sized_ty(llupvar, upvar_ty, align));
            }
        }
        ty::TyArray(_, n) => {
            let base = get_dataptr(&cx, ptr.llval);
            let len = C_uint(cx.ccx, n);
            let unit_ty = t.sequence_element_type(cx.tcx());
            cx = tvec::slice_for_each(&cx, base, unit_ty, len,
                |bb, vv| drop_ty(bb, LvalueRef::new_sized_ty(vv, unit_ty, ptr.alignment)));
        }
        ty::TySlice(_) | ty::TyStr => {
            let unit_ty = t.sequence_element_type(cx.tcx());
            cx = tvec::slice_for_each(&cx, ptr.llval, unit_ty, ptr.llextra,
                |bb, vv| drop_ty(bb, LvalueRef::new_sized_ty(vv, unit_ty, ptr.alignment)));
        }
        ty::TyTuple(ref args) => {
            for (i, arg) in args.iter().enumerate() {
                let (llfld_a, align) = ptr.trans_field_ptr(&cx, i);
                drop_ty(&cx, LvalueRef::new_sized_ty(llfld_a, *arg, align));
            }
        }
        ty::TyAdt(adt, substs) => match adt.adt_kind() {
            AdtKind::Struct => {
                for (i, field) in adt.variants[0].fields.iter().enumerate() {
                    let field_ty = monomorphize::field_ty(cx.tcx(), substs, field);
                    let (llval, align) = ptr.trans_field_ptr(&cx, i);
                    let field_ptr = if cx.ccx.shared().type_is_sized(field_ty) {
                        LvalueRef::new_sized_ty(llval, field_ty, align)
                    } else {
                        LvalueRef::new_unsized_ty(llval, ptr.llextra, field_ty, align)
                    };
                    drop_ty(&cx, field_ptr);
                }
            }
            AdtKind::Union => {
                bug!("Union in `glue::drop_structural_ty`");
            }
            AdtKind::Enum => {
                let n_variants = adt.variants.len();

                // NB: we must hit the discriminant first so that structural
                // comparison knows not to proceed when the discriminants differ.

                // Obtain a representation of the discriminant sufficient to
                // translate destructuring; this may or may not involve the
                // actual discriminant.
                let l = cx.ccx.layout_of(t);
                match *l {
                    layout::Univariant { .. } |
                    layout::UntaggedUnion { .. } => {
                        if n_variants != 0 {
                            assert!(n_variants == 1);
                            ptr.ty = LvalueTy::Downcast {
                                adt_def: adt,
                                substs: substs,
                                variant_index: 0,
                            };
                            iter_variant_fields(&cx, ptr, &adt, 0, substs);
                        }
                    }
                    layout::CEnum { .. } |
                    layout::General { .. } |
                    layout::RawNullablePointer { .. } |
                    layout::StructWrappedNullablePointer { .. } => {
                        let lldiscrim_a = adt::trans_get_discr(
                            &cx, t, ptr.llval, ptr.alignment, None, false);

                        // Create a fall-through basic block for the "else" case of
                        // the switch instruction we're about to generate. Note that
                        // we do **not** use an Unreachable instruction here, even
                        // though most of the time this basic block will never be hit.
                        //
                        // When an enum is dropped, its contents are currently
                        // overwritten to DTOR_DONE, which means the discriminant
                        // could have changed value to something not within the actual
                        // range of the discriminant. Currently this function is only
                        // used for drop glue, so in this case we just return quickly
                        // from the outer function, and any other use case will only
                        // call this for an already-valid enum, in which case the
                        // `ret void` will never be hit.
                        let ret_void_cx = cx.build_sibling_block("enum-iter-ret-void");
                        ret_void_cx.ret_void();
                        let llswitch = cx.switch(lldiscrim_a, ret_void_cx.llbb(), n_variants);
                        let next_cx = cx.build_sibling_block("enum-iter-next");

                        for (i, variant) in adt.variants.iter().enumerate() {
                            let variant_cx_name = format!("enum-iter-variant-{}",
                                                          &variant.disr_val.to_string());
                            let variant_cx = cx.build_sibling_block(&variant_cx_name);
                            let case_val = adt::trans_case(&cx, t, Disr::from(variant.disr_val));
                            variant_cx.add_case(llswitch, case_val, variant_cx.llbb());
                            ptr.ty = LvalueTy::Downcast {
                                adt_def: adt,
                                substs: substs,
                                variant_index: i,
                            };
                            iter_variant_fields(&variant_cx, ptr, &adt, i, substs);
                            variant_cx.br(next_cx.llbb());
                        }
                        cx = next_cx;
                    }
                    _ => bug!("{} is not an enum.", t),
                }
            }
        },

        _ => {
            cx.sess().unimpl(&format!("type in drop_structural_ty: {}", t))
        }
    }
    return cx;
}