1 // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! # Representation of Algebraic Data Types
13 //! This module determines how to represent enums, structs, and tuples
14 //! based on their monomorphized types; it is responsible both for
15 //! choosing a representation and translating basic operations on
16 //! values of those types. (Note: exporting the representations for
17 //! debuggers is handled in debuginfo.rs, not here.)
19 //! Note that the interface treats everything as a general case of an
20 //! enum, so structs/tuples/etc. have one pseudo-variant with
21 //! discriminant 0; i.e., as if they were a univariant enum.
23 //! Having everything in one place will enable improvements to data
24 //! structure representation; possibilities include:
26 //! - User-specified alignment (e.g., cacheline-aligning parts of
27 //! concurrently accessed data structures); LLVM can't represent this
28 //! directly, so we'd have to insert padding fields in any structure
29 //! that might contain one and adjust GEP indices accordingly. See
32 //! - Store nested enums' discriminants in the same word. Rather, if
33 //! some variants start with enums, and those enums representations
34 //! have unused alignment padding between discriminant and body, the
35 //! outer enum's discriminant can be stored there and those variants
36 //! can start at offset 0. Kind of fancy, and might need work to
37 //! make copies of the inner enum type cooperate, but it could help
38 //! with `Option` or `Result` wrapped around another enum.
40 //! - Tagged pointers would be neat, but given that any type can be
41 //! used unboxed and any field can have pointers (including mutable)
42 //! taken to it, implementing them for Rust seems difficult.
44 pub use self::Repr
::*;
50 use llvm
::{ValueRef, True, IntEQ, IntNE}
;
51 use back
::abi
::FAT_PTR_ADDR
;
53 use middle
::ty
::{self, Ty}
;
56 use syntax
::attr
::IntType
;
58 use trans
::base
::InitAlloca
;
61 use trans
::cleanup
::CleanupMethods
;
64 use trans
::debuginfo
::DebugLoc
;
67 use trans
::monomorphize
;
68 use trans
::type_
::Type
;
71 type Hint
= attr
::ReprAttr
;
73 // Representation of the context surrounding an unsized type. I want
74 // to be able to track the drop flags that are injected by trans.
75 #[derive(Clone, Copy, PartialEq, Debug)]
76 pub struct TypeContext
{
78 needs_drop_flag
: bool
,
82 pub fn prefix(&self) -> Type { self.prefix }
83 pub fn needs_drop_flag(&self) -> bool { self.needs_drop_flag }
85 fn direct(t
: Type
) -> TypeContext
{
86 TypeContext { prefix: t, needs_drop_flag: false }
88 fn may_need_drop_flag(t
: Type
, needs_drop_flag
: bool
) -> TypeContext
{
89 TypeContext { prefix: t, needs_drop_flag: needs_drop_flag }
91 pub fn to_string(self) -> String
{
92 let TypeContext { prefix, needs_drop_flag }
= self;
93 format
!("TypeContext {{ prefix: {}, needs_drop_flag: {} }}",
94 prefix
.to_string(), needs_drop_flag
)
99 #[derive(Eq, PartialEq, Debug)]
100 pub enum Repr
<'tcx
> {
101 /// C-like enums; basically an int.
102 CEnum(IntType
, Disr
, Disr
), // discriminant range (signedness based on the IntType)
103 /// Single-case variants, and structs/tuples/records.
105 /// Structs with destructors need a dynamic destroyedness flag to
106 /// avoid running the destructor too many times; this is included
107 /// in the `Struct` if present.
108 /// (The flag if nonzero, represents the initialization value to use;
109 /// if zero, then use no flag at all.)
110 Univariant(Struct
<'tcx
>, u8),
111 /// General-case enums: for each case there is a struct, and they
112 /// all start with a field for the discriminant.
114 /// Types with destructors need a dynamic destroyedness flag to
115 /// avoid running the destructor too many times; the last argument
116 /// indicates whether such a flag is present.
117 /// (The flag, if nonzero, represents the initialization value to use;
118 /// if zero, then use no flag at all.)
119 General(IntType
, Vec
<Struct
<'tcx
>>, u8),
120 /// Two cases distinguished by a nullable pointer: the case with discriminant
121 /// `nndiscr` must have single field which is known to be nonnull due to its type.
122 /// The other case is known to be zero sized. Hence we represent the enum
123 /// as simply a nullable pointer: if not null it indicates the `nndiscr` variant,
124 /// otherwise it indicates the other case.
128 nullfields
: Vec
<Ty
<'tcx
>>
130 /// Two cases distinguished by a nullable pointer: the case with discriminant
131 /// `nndiscr` is represented by the struct `nonnull`, where the `discrfield`th
132 /// field is known to be nonnull due to its type; if that field is null, then
133 /// it represents the other case, which is inhabited by at most one value
134 /// (and all other fields are undefined/unused).
136 /// For example, `std::option::Option` instantiated at a safe pointer type
137 /// is represented such that `None` is a null pointer and `Some` is the
138 /// identity function.
139 StructWrappedNullablePointer
{
140 nonnull
: Struct
<'tcx
>,
142 discrfield
: DiscrField
,
143 nullfields
: Vec
<Ty
<'tcx
>>,
147 /// For structs, and struct-like parts of anything fancier.
148 #[derive(Eq, PartialEq, Debug)]
149 pub struct Struct
<'tcx
> {
150 // If the struct is DST, then the size and alignment do not take into
151 // account the unsized fields of the struct.
156 pub fields
: Vec
<Ty
<'tcx
>>,
159 #[derive(Copy, Clone)]
160 pub struct MaybeSizedValue
{
165 impl MaybeSizedValue
{
166 pub fn sized(value
: ValueRef
) -> MaybeSizedValue
{
169 meta
: std
::ptr
::null_mut()
173 pub fn unsized_(value
: ValueRef
, meta
: ValueRef
) -> MaybeSizedValue
{
180 pub fn has_meta(&self) -> bool
{
185 /// Convenience for `represent_type`. There should probably be more or
186 /// these, for places in trans where the `Ty` isn't directly
188 pub fn represent_node
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
189 node
: ast
::NodeId
) -> Rc
<Repr
<'tcx
>> {
190 represent_type(bcx
.ccx(), node_id_type(bcx
, node
))
193 /// Decides how to represent a given type.
194 pub fn represent_type
<'a
, 'tcx
>(cx
: &CrateContext
<'a
, 'tcx
>,
197 debug
!("Representing: {}", t
);
198 match cx
.adt_reprs().borrow().get(&t
) {
199 Some(repr
) => return repr
.clone(),
203 let repr
= Rc
::new(represent_type_uncached(cx
, t
));
204 debug
!("Represented as: {:?}", repr
);
205 cx
.adt_reprs().borrow_mut().insert(t
, repr
.clone());
/// Broadcasts `val` into every byte of a `u32` (e.g. `0xab` -> `0xabababab`).
/// Used to widen the one-byte `DTOR_*` fill patterns to word size.
const fn repeat_u8_as_u32(val: u8) -> u32 {
    // Equivalent to OR-ing the byte shifted into all four lanes:
    // 0xff * 0x01010101 == 0xffffffff, so the product always fits.
    (val as u32) * 0x0101_0101
}
/// Broadcasts `val` into every byte of a `u64` (e.g. `0x1d` -> `0x1d1d1d1d1d1d1d1d`).
const fn repeat_u8_as_u64(val: u8) -> u64 {
    // Multiplying by 0x0101...01 copies the byte into all eight lanes;
    // 0xff * 0x0101010101010101 == u64::MAX, so the product always fits.
    (val as u64) * 0x0101_0101_0101_0101
}
217 /// `DTOR_NEEDED_HINT` is a stack-local hint that just means
218 /// "we do not know whether the destructor has run or not; check the
219 /// drop-flag embedded in the value itself."
220 pub const DTOR_NEEDED_HINT
: u8 = 0x3d;
222 /// `DTOR_MOVED_HINT` is a stack-local hint that means "this value has
223 /// definitely been moved; you do not need to run its destructor."
225 /// (However, for now, such values may still end up being explicitly
226 /// zeroed by the generated code; this is the distinction between
227 /// `datum::DropFlagInfo::ZeroAndMaintain` versus
228 /// `datum::DropFlagInfo::DontZeroJustUse`.)
229 pub const DTOR_MOVED_HINT
: u8 = 0x2d;
231 pub const DTOR_NEEDED
: u8 = 0xd4;
233 pub const DTOR_NEEDED_U64
: u64 = repeat_u8_as_u64(DTOR_NEEDED
);
235 pub const DTOR_DONE
: u8 = 0x1d;
237 pub const DTOR_DONE_U64
: u64 = repeat_u8_as_u64(DTOR_DONE
);
239 fn dtor_to_init_u8(dtor
: bool
) -> u8 {
240 if dtor { DTOR_NEEDED }
else { 0 }
243 pub trait GetDtorType
<'tcx
> { fn dtor_type(&self) -> Ty<'tcx>; }
244 impl<'tcx
> GetDtorType
<'tcx
> for ty
::ctxt
<'tcx
> {
245 fn dtor_type(&self) -> Ty
<'tcx
> { self.types.u8 }
/// Whether a drop-flag initialization value (from `dtor_to_init_u8`)
/// denotes a present flag: zero means "no flag", any nonzero value
/// (i.e. `DTOR_NEEDED`) means the flag exists.
fn dtor_active(flag: u8) -> bool {
    flag != 0
}
252 fn represent_type_uncached
<'a
, 'tcx
>(cx
: &CrateContext
<'a
, 'tcx
>,
253 t
: Ty
<'tcx
>) -> Repr
<'tcx
> {
255 ty
::TyTuple(ref elems
) => {
256 Univariant(mk_struct(cx
, &elems
[..], false, t
), 0)
258 ty
::TyStruct(def
, substs
) => {
259 let mut ftys
= def
.struct_variant().fields
.iter().map(|field
| {
260 monomorphize
::field_ty(cx
.tcx(), substs
, field
)
261 }).collect
::<Vec
<_
>>();
262 let packed
= cx
.tcx().lookup_packed(def
.did
);
263 // FIXME(16758) don't add a drop flag to unsized structs, as it
264 // won't actually be in the location we say it is because it'll be after
265 // the unsized field. Several other pieces of code assume that the unsized
266 // field is definitely the last one.
267 let dtor
= def
.dtor_kind().has_drop_flag() && type_is_sized(cx
.tcx(), t
);
269 ftys
.push(cx
.tcx().dtor_type());
272 Univariant(mk_struct(cx
, &ftys
[..], packed
, t
), dtor_to_init_u8(dtor
))
274 ty
::TyClosure(_
, ref substs
) => {
275 Univariant(mk_struct(cx
, &substs
.upvar_tys
, false, t
), 0)
277 ty
::TyEnum(def
, substs
) => {
278 let cases
= get_cases(cx
.tcx(), def
, substs
);
279 let hint
= *cx
.tcx().lookup_repr_hints(def
.did
).get(0)
280 .unwrap_or(&attr
::ReprAny
);
282 let dtor
= def
.dtor_kind().has_drop_flag();
284 if cases
.is_empty() {
285 // Uninhabitable; represent as unit
286 // (Typechecking will reject discriminant-sizing attrs.)
287 assert_eq
!(hint
, attr
::ReprAny
);
288 let ftys
= if dtor { vec!(cx.tcx().dtor_type()) }
else { vec!() }
;
289 return Univariant(mk_struct(cx
, &ftys
[..], false, t
),
290 dtor_to_init_u8(dtor
));
293 if !dtor
&& cases
.iter().all(|c
| c
.tys
.is_empty()) {
294 // All bodies empty -> intlike
295 let discrs
: Vec
<_
> = cases
.iter().map(|c
| Disr
::from(c
.discr
)).collect();
296 let bounds
= IntBounds
{
297 ulo
: discrs
.iter().min().unwrap().0,
298 uhi
: discrs
.iter().max().unwrap().0,
299 slo
: discrs
.iter().map(|n
| n
.0 as i64).min().unwrap(),
300 shi
: discrs
.iter().map(|n
| n
.0 as i64).max().unwrap()
302 return mk_cenum(cx
, hint
, &bounds
);
305 // Since there's at least one
306 // non-empty body, explicit discriminants should have
307 // been rejected by a checker before this point.
308 if !cases
.iter().enumerate().all(|(i
,c
)| c
.discr
== Disr
::from(i
)) {
309 cx
.sess().bug(&format
!("non-C-like enum {} with specified \
311 cx
.tcx().item_path_str(def
.did
)));
314 if cases
.len() == 1 {
315 // Equivalent to a struct/tuple/newtype.
316 // (Typechecking will reject discriminant-sizing attrs.)
317 assert_eq
!(hint
, attr
::ReprAny
);
318 let mut ftys
= cases
[0].tys
.clone();
319 if dtor { ftys.push(cx.tcx().dtor_type()); }
320 return Univariant(mk_struct(cx
, &ftys
[..], false, t
),
321 dtor_to_init_u8(dtor
));
324 if !dtor
&& cases
.len() == 2 && hint
== attr
::ReprAny
{
325 // Nullable pointer optimization
328 if cases
[1 - discr
].is_zerolen(cx
, t
) {
329 let st
= mk_struct(cx
, &cases
[discr
].tys
,
331 match cases
[discr
].find_ptr(cx
) {
332 Some(ref df
) if df
.len() == 1 && st
.fields
.len() == 1 => {
333 return RawNullablePointer
{
334 nndiscr
: Disr
::from(discr
),
336 nullfields
: cases
[1 - discr
].tys
.clone()
339 Some(mut discrfield
) => {
341 discrfield
.reverse();
342 return StructWrappedNullablePointer
{
343 nndiscr
: Disr
::from(discr
),
345 discrfield
: discrfield
,
346 nullfields
: cases
[1 - discr
].tys
.clone()
357 assert
!((cases
.len() - 1) as i64 >= 0);
358 let bounds
= IntBounds
{ ulo
: 0, uhi
: (cases
.len() - 1) as u64,
359 slo
: 0, shi
: (cases
.len() - 1) as i64 };
360 let min_ity
= range_to_inttype(cx
, hint
, &bounds
);
362 // Create the set of structs that represent each variant
363 // Use the minimum integer type we figured out above
364 let fields
: Vec
<_
> = cases
.iter().map(|c
| {
365 let mut ftys
= vec
!(ty_of_inttype(cx
.tcx(), min_ity
));
366 ftys
.extend_from_slice(&c
.tys
);
367 if dtor { ftys.push(cx.tcx().dtor_type()); }
368 mk_struct(cx
, &ftys
, false, t
)
372 // Check to see if we should use a different type for the
373 // discriminant. If the overall alignment of the type is
374 // the same as the first field in each variant, we can safely use
375 // an alignment-sized type.
376 // We increase the size of the discriminant to avoid LLVM copying
377 // padding when it doesn't need to. This normally causes unaligned
378 // load/stores and excessive memcpy/memset operations. By using a
379 // bigger integer size, LLVM can be sure about it's contents and
380 // won't be so conservative.
381 // This check is needed to avoid increasing the size of types when
382 // the alignment of the first field is smaller than the overall
383 // alignment of the type.
384 let (_
, align
) = union_size_and_align(&fields
);
385 let mut use_align
= true;
387 // Get the first non-zero-sized field
388 let field
= st
.fields
.iter().skip(1).filter(|ty
| {
389 let t
= type_of
::sizing_type_of(cx
, **ty
);
390 machine
::llsize_of_real(cx
, t
) != 0 ||
391 // This case is only relevant for zero-sized types with large alignment
392 machine
::llalign_of_min(cx
, t
) != 1
395 if let Some(field
) = field
{
396 let field_align
= type_of
::align_of(cx
, *field
);
397 if field_align
!= align
{
403 let ity
= if use_align
{
404 // Use the overall alignment
406 1 => attr
::UnsignedInt(ast
::UintTy
::U8
),
407 2 => attr
::UnsignedInt(ast
::UintTy
::U16
),
408 4 => attr
::UnsignedInt(ast
::UintTy
::U32
),
409 8 if machine
::llalign_of_min(cx
, Type
::i64(cx
)) == 8 =>
410 attr
::UnsignedInt(ast
::UintTy
::U64
),
411 _
=> min_ity
// use min_ity as a fallback
417 let fields
: Vec
<_
> = cases
.iter().map(|c
| {
418 let mut ftys
= vec
!(ty_of_inttype(cx
.tcx(), ity
));
419 ftys
.extend_from_slice(&c
.tys
);
420 if dtor { ftys.push(cx.tcx().dtor_type()); }
421 mk_struct(cx
, &ftys
[..], false, t
)
424 ensure_enum_fits_in_address_space(cx
, &fields
[..], t
);
426 General(ity
, fields
, dtor_to_init_u8(dtor
))
428 _
=> cx
.sess().bug(&format
!("adt::represent_type called on non-ADT type: {}", t
))
432 // this should probably all be in ty
/// This represents the (GEP) indices to follow to get to the discriminant field
pub type DiscrField = Vec<usize>;
441 fn find_discr_field_candidate
<'tcx
>(tcx
: &ty
::ctxt
<'tcx
>,
443 mut path
: DiscrField
) -> Option
<DiscrField
> {
445 // Fat &T/&mut T/Box<T> i.e. T is [T], str, or Trait
446 ty
::TyRef(_
, ty
::TypeAndMut { ty, .. }
) | ty
::TyBox(ty
) if !type_is_sized(tcx
, ty
) => {
447 path
.push(FAT_PTR_ADDR
);
451 // Regular thin pointer: &T/&mut T/Box<T>
452 ty
::TyRef(..) | ty
::TyBox(..) => Some(path
),
454 // Functions are just pointers
455 ty
::TyBareFn(..) => Some(path
),
457 // Is this the NonZero lang item wrapping a pointer or integer type?
458 ty
::TyStruct(def
, substs
) if Some(def
.did
) == tcx
.lang_items
.non_zero() => {
459 let nonzero_fields
= &def
.struct_variant().fields
;
460 assert_eq
!(nonzero_fields
.len(), 1);
461 let field_ty
= monomorphize
::field_ty(tcx
, substs
, &nonzero_fields
[0]);
463 ty
::TyRawPtr(ty
::TypeAndMut { ty, .. }
) if !type_is_sized(tcx
, ty
) => {
464 path
.extend_from_slice(&[0, FAT_PTR_ADDR
]);
467 ty
::TyRawPtr(..) | ty
::TyInt(..) | ty
::TyUint(..) => {
475 // Perhaps one of the fields of this struct is non-zero
476 // let's recurse and find out
477 ty
::TyStruct(def
, substs
) => {
478 for (j
, field
) in def
.struct_variant().fields
.iter().enumerate() {
479 let field_ty
= monomorphize
::field_ty(tcx
, substs
, field
);
480 if let Some(mut fpath
) = find_discr_field_candidate(tcx
, field_ty
, path
.clone()) {
488 // Perhaps one of the upvars of this struct is non-zero
489 // Let's recurse and find out!
490 ty
::TyClosure(_
, ref substs
) => {
491 for (j
, &ty
) in substs
.upvar_tys
.iter().enumerate() {
492 if let Some(mut fpath
) = find_discr_field_candidate(tcx
, ty
, path
.clone()) {
500 // Can we use one of the fields in this tuple?
501 ty
::TyTuple(ref tys
) => {
502 for (j
, &ty
) in tys
.iter().enumerate() {
503 if let Some(mut fpath
) = find_discr_field_candidate(tcx
, ty
, path
.clone()) {
511 // Is this a fixed-size array of something non-zero
512 // with at least one element?
513 ty
::TyArray(ety
, d
) if d
> 0 => {
514 if let Some(mut vpath
) = find_discr_field_candidate(tcx
, ety
, path
) {
522 // Anything else is not a pointer
527 impl<'tcx
> Case
<'tcx
> {
528 fn is_zerolen
<'a
>(&self, cx
: &CrateContext
<'a
, 'tcx
>, scapegoat
: Ty
<'tcx
>) -> bool
{
529 mk_struct(cx
, &self.tys
, false, scapegoat
).size
== 0
532 fn find_ptr
<'a
>(&self, cx
: &CrateContext
<'a
, 'tcx
>) -> Option
<DiscrField
> {
533 for (i
, &ty
) in self.tys
.iter().enumerate() {
534 if let Some(mut path
) = find_discr_field_candidate(cx
.tcx(), ty
, vec
![]) {
543 fn get_cases
<'tcx
>(tcx
: &ty
::ctxt
<'tcx
>,
544 adt
: ty
::AdtDef
<'tcx
>,
545 substs
: &subst
::Substs
<'tcx
>)
547 adt
.variants
.iter().map(|vi
| {
548 let field_tys
= vi
.fields
.iter().map(|field
| {
549 monomorphize
::field_ty(tcx
, substs
, field
)
551 Case { discr: Disr::from(vi.disr_val), tys: field_tys }
555 fn mk_struct
<'a
, 'tcx
>(cx
: &CrateContext
<'a
, 'tcx
>,
556 tys
: &[Ty
<'tcx
>], packed
: bool
,
559 let sized
= tys
.iter().all(|&ty
| type_is_sized(cx
.tcx(), ty
));
560 let lltys
: Vec
<Type
> = if sized
{
561 tys
.iter().map(|&ty
| type_of
::sizing_type_of(cx
, ty
)).collect()
563 tys
.iter().filter(|&ty
| type_is_sized(cx
.tcx(), *ty
))
564 .map(|&ty
| type_of
::sizing_type_of(cx
, ty
)).collect()
567 ensure_struct_fits_in_address_space(cx
, &lltys
[..], packed
, scapegoat
);
569 let llty_rec
= Type
::struct_(cx
, &lltys
[..], packed
);
571 size
: machine
::llsize_of_alloc(cx
, llty_rec
),
572 align
: machine
::llalign_of_min(cx
, llty_rec
),
575 fields
: tys
.to_vec(),
587 fn mk_cenum
<'a
, 'tcx
>(cx
: &CrateContext
<'a
, 'tcx
>,
588 hint
: Hint
, bounds
: &IntBounds
)
590 let it
= range_to_inttype(cx
, hint
, bounds
);
592 attr
::SignedInt(_
) => CEnum(it
, Disr(bounds
.slo
as u64), Disr(bounds
.shi
as u64)),
593 attr
::UnsignedInt(_
) => CEnum(it
, Disr(bounds
.ulo
), Disr(bounds
.uhi
))
597 fn range_to_inttype(cx
: &CrateContext
, hint
: Hint
, bounds
: &IntBounds
) -> IntType
{
598 debug
!("range_to_inttype: {:?} {:?}", hint
, bounds
);
599 // Lists of sizes to try. u64 is always allowed as a fallback.
600 #[allow(non_upper_case_globals)]
601 const choose_shortest
: &'
static [IntType
] = &[
602 attr
::UnsignedInt(ast
::UintTy
::U8
), attr
::SignedInt(ast
::IntTy
::I8
),
603 attr
::UnsignedInt(ast
::UintTy
::U16
), attr
::SignedInt(ast
::IntTy
::I16
),
604 attr
::UnsignedInt(ast
::UintTy
::U32
), attr
::SignedInt(ast
::IntTy
::I32
)];
605 #[allow(non_upper_case_globals)]
606 const at_least_32
: &'
static [IntType
] = &[
607 attr
::UnsignedInt(ast
::UintTy
::U32
), attr
::SignedInt(ast
::IntTy
::I32
)];
611 attr
::ReprInt(span
, ity
) => {
612 if !bounds_usable(cx
, ity
, bounds
) {
613 cx
.sess().span_bug(span
, "representation hint insufficient for discriminant range")
617 attr
::ReprExtern
=> {
618 attempts
= match &cx
.sess().target
.target
.arch
[..] {
619 // WARNING: the ARM EABI has two variants; the one corresponding to `at_least_32`
620 // appears to be used on Linux and NetBSD, but some systems may use the variant
621 // corresponding to `choose_shortest`. However, we don't run on those yet...?
622 "arm" => at_least_32
,
627 attempts
= choose_shortest
;
629 attr
::ReprPacked
=> {
630 cx
.tcx().sess
.bug("range_to_inttype: found ReprPacked on an enum");
633 cx
.tcx().sess
.bug("range_to_inttype: found ReprSimd on an enum");
636 for &ity
in attempts
{
637 if bounds_usable(cx
, ity
, bounds
) {
641 return attr
::UnsignedInt(ast
::UintTy
::U64
);
644 pub fn ll_inttype(cx
: &CrateContext
, ity
: IntType
) -> Type
{
646 attr
::SignedInt(t
) => Type
::int_from_ty(cx
, t
),
647 attr
::UnsignedInt(t
) => Type
::uint_from_ty(cx
, t
)
651 fn bounds_usable(cx
: &CrateContext
, ity
: IntType
, bounds
: &IntBounds
) -> bool
{
652 debug
!("bounds_usable: {:?} {:?}", ity
, bounds
);
654 attr
::SignedInt(_
) => {
655 let lllo
= C_integral(ll_inttype(cx
, ity
), bounds
.slo
as u64, true);
656 let llhi
= C_integral(ll_inttype(cx
, ity
), bounds
.shi
as u64, true);
657 bounds
.slo
== const_to_int(lllo
) as i64 && bounds
.shi
== const_to_int(llhi
) as i64
659 attr
::UnsignedInt(_
) => {
660 let lllo
= C_integral(ll_inttype(cx
, ity
), bounds
.ulo
, false);
661 let llhi
= C_integral(ll_inttype(cx
, ity
), bounds
.uhi
, false);
662 bounds
.ulo
== const_to_uint(lllo
) as u64 && bounds
.uhi
== const_to_uint(llhi
) as u64
667 pub fn ty_of_inttype
<'tcx
>(tcx
: &ty
::ctxt
<'tcx
>, ity
: IntType
) -> Ty
<'tcx
> {
669 attr
::SignedInt(t
) => tcx
.mk_mach_int(t
),
670 attr
::UnsignedInt(t
) => tcx
.mk_mach_uint(t
)
674 // LLVM doesn't like types that don't fit in the address space
675 fn ensure_struct_fits_in_address_space
<'a
, 'tcx
>(ccx
: &CrateContext
<'a
, 'tcx
>,
678 scapegoat
: Ty
<'tcx
>) {
680 for &llty
in fields
{
681 // Invariant: offset < ccx.obj_size_bound() <= 1<<61
683 let type_align
= machine
::llalign_of_min(ccx
, llty
);
684 offset
= roundup(offset
, type_align
);
686 // type_align is a power-of-2, so still offset < ccx.obj_size_bound()
687 // llsize_of_alloc(ccx, llty) is also less than ccx.obj_size_bound()
688 // so the sum is less than 1<<62 (and therefore can't overflow).
689 offset
+= machine
::llsize_of_alloc(ccx
, llty
);
691 if offset
>= ccx
.obj_size_bound() {
692 ccx
.report_overbig_object(scapegoat
);
697 fn union_size_and_align(sts
: &[Struct
]) -> (machine
::llsize
, machine
::llalign
) {
698 let size
= sts
.iter().map(|st
| st
.size
).max().unwrap();
699 let align
= sts
.iter().map(|st
| st
.align
).max().unwrap();
700 (roundup(size
, align
), align
)
703 fn ensure_enum_fits_in_address_space
<'a
, 'tcx
>(ccx
: &CrateContext
<'a
, 'tcx
>,
705 scapegoat
: Ty
<'tcx
>) {
706 let (total_size
, _
) = union_size_and_align(fields
);
708 if total_size
>= ccx
.obj_size_bound() {
709 ccx
.report_overbig_object(scapegoat
);
714 /// LLVM-level types are a little complicated.
716 /// C-like enums need to be actual ints, not wrapped in a struct,
717 /// because that changes the ABI on some platforms (see issue #10308).
719 /// For nominal types, in some cases, we need to use LLVM named structs
720 /// and fill in the actual contents in a second pass to prevent
721 /// unbounded recursion; see also the comments in `trans::type_of`.
722 pub fn type_of
<'a
, 'tcx
>(cx
: &CrateContext
<'a
, 'tcx
>, r
: &Repr
<'tcx
>) -> Type
{
723 let c
= generic_type_of(cx
, r
, None
, false, false, false);
724 assert
!(!c
.needs_drop_flag
);
729 // Pass dst=true if the type you are passing is a DST. Yes, we could figure
730 // this out, but if you call this on an unsized type without realising it, you
731 // are going to get the wrong type (it will not include the unsized parts of it).
732 pub fn sizing_type_of
<'a
, 'tcx
>(cx
: &CrateContext
<'a
, 'tcx
>,
733 r
: &Repr
<'tcx
>, dst
: bool
) -> Type
{
734 let c
= generic_type_of(cx
, r
, None
, true, dst
, false);
735 assert
!(!c
.needs_drop_flag
);
738 pub fn sizing_type_context_of
<'a
, 'tcx
>(cx
: &CrateContext
<'a
, 'tcx
>,
739 r
: &Repr
<'tcx
>, dst
: bool
) -> TypeContext
{
740 generic_type_of(cx
, r
, None
, true, dst
, true)
742 pub fn incomplete_type_of
<'a
, 'tcx
>(cx
: &CrateContext
<'a
, 'tcx
>,
743 r
: &Repr
<'tcx
>, name
: &str) -> Type
{
744 let c
= generic_type_of(cx
, r
, Some(name
), false, false, false);
745 assert
!(!c
.needs_drop_flag
);
748 pub fn finish_type_of
<'a
, 'tcx
>(cx
: &CrateContext
<'a
, 'tcx
>,
749 r
: &Repr
<'tcx
>, llty
: &mut Type
) {
751 CEnum(..) | General(..) | RawNullablePointer { .. }
=> { }
752 Univariant(ref st
, _
) | StructWrappedNullablePointer { nonnull: ref st, .. }
=>
753 llty
.set_struct_body(&struct_llfields(cx
, st
, false, false),
758 fn generic_type_of
<'a
, 'tcx
>(cx
: &CrateContext
<'a
, 'tcx
>,
763 delay_drop_flag
: bool
) -> TypeContext
{
764 debug
!("adt::generic_type_of r: {:?} name: {:?} sizing: {} dst: {} delay_drop_flag: {}",
765 r
, name
, sizing
, dst
, delay_drop_flag
);
767 CEnum(ity
, _
, _
) => TypeContext
::direct(ll_inttype(cx
, ity
)),
768 RawNullablePointer { nnty, .. }
=>
769 TypeContext
::direct(type_of
::sizing_type_of(cx
, nnty
)),
770 StructWrappedNullablePointer { nonnull: ref st, .. }
=> {
774 Type
::struct_(cx
, &struct_llfields(cx
, st
, sizing
, dst
),
778 assert_eq
!(sizing
, false);
779 TypeContext
::direct(Type
::named_struct(cx
, name
))
783 Univariant(ref st
, dtor_needed
) => {
784 let dtor_needed
= dtor_needed
!= 0;
787 let mut fields
= struct_llfields(cx
, st
, sizing
, dst
);
788 if delay_drop_flag
&& dtor_needed
{
791 TypeContext
::may_need_drop_flag(
792 Type
::struct_(cx
, &fields
,
794 delay_drop_flag
&& dtor_needed
)
797 // Hypothesis: named_struct's can never need a
798 // drop flag. (... needs validation.)
799 assert_eq
!(sizing
, false);
800 TypeContext
::direct(Type
::named_struct(cx
, name
))
804 General(ity
, ref sts
, dtor_needed
) => {
805 let dtor_needed
= dtor_needed
!= 0;
806 // We need a representation that has:
807 // * The alignment of the most-aligned field
808 // * The size of the largest variant (rounded up to that alignment)
809 // * No alignment padding anywhere any variant has actual data
810 // (currently matters only for enums small enough to be immediate)
811 // * The discriminant in an obvious place.
813 // So we start with the discriminant, pad it up to the alignment with
814 // more of its own type, then use alignment-sized ints to get the rest
817 // FIXME #10604: this breaks when vector types are present.
818 let (size
, align
) = union_size_and_align(&sts
[..]);
819 let align_s
= align
as u64;
820 assert_eq
!(size
% align_s
, 0);
821 let align_units
= size
/ align_s
- 1;
823 let discr_ty
= ll_inttype(cx
, ity
);
824 let discr_size
= machine
::llsize_of_alloc(cx
, discr_ty
);
825 let fill_ty
= match align_s
{
826 1 => Type
::array(&Type
::i8(cx
), align_units
),
827 2 => Type
::array(&Type
::i16(cx
), align_units
),
828 4 => Type
::array(&Type
::i32(cx
), align_units
),
829 8 if machine
::llalign_of_min(cx
, Type
::i64(cx
)) == 8 =>
830 Type
::array(&Type
::i64(cx
), align_units
),
831 a
if a
.count_ones() == 1 => Type
::array(&Type
::vector(&Type
::i32(cx
), a
/ 4),
833 _
=> panic
!("unsupported enum alignment: {}", align
)
835 assert_eq
!(machine
::llalign_of_min(cx
, fill_ty
), align
);
836 assert_eq
!(align_s
% discr_size
, 0);
837 let mut fields
: Vec
<Type
> =
839 Type
::array(&discr_ty
, align_s
/ discr_size
- 1),
840 fill_ty
].iter().cloned().collect();
841 if delay_drop_flag
&& dtor_needed
{
846 TypeContext
::may_need_drop_flag(
847 Type
::struct_(cx
, &fields
[..], false),
848 delay_drop_flag
&& dtor_needed
)
851 let mut llty
= Type
::named_struct(cx
, name
);
852 llty
.set_struct_body(&fields
[..], false);
853 TypeContext
::may_need_drop_flag(
855 delay_drop_flag
&& dtor_needed
)
862 fn struct_llfields
<'a
, 'tcx
>(cx
: &CrateContext
<'a
, 'tcx
>, st
: &Struct
<'tcx
>,
863 sizing
: bool
, dst
: bool
) -> Vec
<Type
> {
865 st
.fields
.iter().filter(|&ty
| !dst
|| type_is_sized(cx
.tcx(), *ty
))
866 .map(|&ty
| type_of
::sizing_type_of(cx
, ty
)).collect()
868 st
.fields
.iter().map(|&ty
| type_of
::in_memory_type_of(cx
, ty
)).collect()
872 /// Obtain a representation of the discriminant sufficient to translate
873 /// destructuring; this may or may not involve the actual discriminant.
875 /// This should ideally be less tightly tied to `_match`.
876 pub fn trans_switch
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
880 -> (_match
::BranchKind
, Option
<ValueRef
>) {
882 CEnum(..) | General(..) |
883 RawNullablePointer { .. }
| StructWrappedNullablePointer { .. }
=> {
884 (_match
::Switch
, Some(trans_get_discr(bcx
, r
, scrutinee
, None
,
888 // N.B.: Univariant means <= 1 enum variants (*not* == 1 variants).
889 (_match
::Single
, None
)
894 pub fn is_discr_signed
<'tcx
>(r
: &Repr
<'tcx
>) -> bool
{
896 CEnum(ity
, _
, _
) => ity
.is_signed(),
897 General(ity
, _
, _
) => ity
.is_signed(),
898 Univariant(..) => false,
899 RawNullablePointer { .. }
=> false,
900 StructWrappedNullablePointer { .. }
=> false,
904 /// Obtain the actual discriminant of a value.
905 pub fn trans_get_discr
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>, r
: &Repr
<'tcx
>,
906 scrutinee
: ValueRef
, cast_to
: Option
<Type
>,
909 debug
!("trans_get_discr r: {:?}", r
);
911 CEnum(ity
, min
, max
) => {
912 load_discr(bcx
, ity
, scrutinee
, min
, max
, range_assert
)
914 General(ity
, ref cases
, _
) => {
915 let ptr
= StructGEP(bcx
, scrutinee
, 0);
916 load_discr(bcx
, ity
, ptr
, Disr(0), Disr(cases
.len() as u64 - 1),
919 Univariant(..) => C_u8(bcx
.ccx(), 0),
920 RawNullablePointer { nndiscr, nnty, .. }
=> {
921 let cmp
= if nndiscr
== Disr(0) { IntEQ }
else { IntNE }
;
922 let llptrty
= type_of
::sizing_type_of(bcx
.ccx(), nnty
);
923 ICmp(bcx
, cmp
, Load(bcx
, scrutinee
), C_null(llptrty
), DebugLoc
::None
)
925 StructWrappedNullablePointer { nndiscr, ref discrfield, .. }
=> {
926 struct_wrapped_nullable_bitdiscr(bcx
, nndiscr
, discrfield
, scrutinee
)
931 Some(llty
) => if is_discr_signed(r
) { SExt(bcx, val, llty) }
else { ZExt(bcx, val, llty) }
935 fn struct_wrapped_nullable_bitdiscr(bcx
: Block
, nndiscr
: Disr
, discrfield
: &DiscrField
,
936 scrutinee
: ValueRef
) -> ValueRef
{
937 let llptrptr
= GEPi(bcx
, scrutinee
, &discrfield
[..]);
938 let llptr
= Load(bcx
, llptrptr
);
939 let cmp
= if nndiscr
== Disr(0) { IntEQ }
else { IntNE }
;
940 ICmp(bcx
, cmp
, llptr
, C_null(val_ty(llptr
)), DebugLoc
::None
)
943 /// Helper for cases where the discriminant is simply loaded.
944 fn load_discr(bcx
: Block
, ity
: IntType
, ptr
: ValueRef
, min
: Disr
, max
: Disr
,
947 let llty
= ll_inttype(bcx
.ccx(), ity
);
948 assert_eq
!(val_ty(ptr
), llty
.ptr_to());
949 let bits
= machine
::llbitsize_of_real(bcx
.ccx(), llty
);
951 let bits
= bits
as usize;
952 let mask
= Disr(!0u64 >> (64 - bits
));
953 // For a (max) discr of -1, max will be `-1 as usize`, which overflows.
954 // However, that is fine here (it would still represent the full range),
955 if max
.wrapping_add(Disr(1)) & mask
== min
& mask
|| !range_assert
{
956 // i.e., if the range is everything. The lo==hi case would be
957 // rejected by the LLVM verifier (it would mean either an
958 // empty set, which is impossible, or the entire range of the
959 // type, which is pointless).
962 // llvm::ConstantRange can deal with ranges that wrap around,
963 // so an overflow on (max + 1) is fine.
964 LoadRangeAssert(bcx
, ptr
, min
.0, max
.0.wrapping_add(1), /* signed: */ True
)
968 /// Yield information about how to dispatch a case of the
969 /// discriminant-like value returned by `trans_switch`.
971 /// This should ideally be less tightly tied to `_match`.
972 pub fn trans_case
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>, r
: &Repr
, discr
: Disr
)
975 CEnum(ity
, _
, _
) => {
976 C_integral(ll_inttype(bcx
.ccx(), ity
), discr
.0, true)
978 General(ity
, _
, _
) => {
979 C_integral(ll_inttype(bcx
.ccx(), ity
), discr
.0, true)
982 bcx
.ccx().sess().bug("no cases for univariants or structs")
984 RawNullablePointer { .. }
|
985 StructWrappedNullablePointer { .. }
=> {
986 assert
!(discr
== Disr(0) || discr
== Disr(1));
987 C_bool(bcx
.ccx(), discr
!= Disr(0))
992 /// Set the discriminant for a new value of the given case of the given
994 pub fn trans_set_discr
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>, r
: &Repr
<'tcx
>,
995 val
: ValueRef
, discr
: Disr
) {
997 CEnum(ity
, min
, max
) => {
998 assert_discr_in_range(ity
, min
, max
, discr
);
999 Store(bcx
, C_integral(ll_inttype(bcx
.ccx(), ity
), discr
.0, true),
1002 General(ity
, ref cases
, dtor
) => {
1003 if dtor_active(dtor
) {
1004 let ptr
= trans_field_ptr(bcx
, r
, MaybeSizedValue
::sized(val
), discr
,
1005 cases
[discr
.0 as usize].fields
.len() - 2);
1006 Store(bcx
, C_u8(bcx
.ccx(), DTOR_NEEDED
), ptr
);
1008 Store(bcx
, C_integral(ll_inttype(bcx
.ccx(), ity
), discr
.0, true),
1009 StructGEP(bcx
, val
, 0));
1011 Univariant(ref st
, dtor
) => {
1012 assert_eq
!(discr
, Disr(0));
1013 if dtor_active(dtor
) {
1014 Store(bcx
, C_u8(bcx
.ccx(), DTOR_NEEDED
),
1015 StructGEP(bcx
, val
, st
.fields
.len() - 1));
1018 RawNullablePointer { nndiscr, nnty, ..}
=> {
1019 if discr
!= nndiscr
{
1020 let llptrty
= type_of
::sizing_type_of(bcx
.ccx(), nnty
);
1021 Store(bcx
, C_null(llptrty
), val
);
1024 StructWrappedNullablePointer { nndiscr, ref discrfield, .. }
=> {
1025 if discr
!= nndiscr
{
1026 let llptrptr
= GEPi(bcx
, val
, &discrfield
[..]);
1027 let llptrty
= val_ty(llptrptr
).element_type();
1028 Store(bcx
, C_null(llptrty
), llptrptr
);
1034 fn assert_discr_in_range(ity
: IntType
, min
: Disr
, max
: Disr
, discr
: Disr
) {
1036 attr
::UnsignedInt(_
) => {
1037 assert
!(min
<= discr
);
1038 assert
!(discr
<= max
)
1040 attr
::SignedInt(_
) => {
1041 assert
!(min
.0 as i64 <= discr
.0 as i64);
1042 assert
!(discr
.0 as i64 <= max
.0 as i64);
1047 /// The number of fields in a given case; for use when obtaining this
1048 /// information from the type or definition is less convenient.
1049 pub fn num_args(r
: &Repr
, discr
: Disr
) -> usize {
1052 Univariant(ref st
, dtor
) => {
1053 assert_eq
!(discr
, Disr(0));
1054 st
.fields
.len() - (if dtor_active(dtor
) { 1 }
else { 0 }
)
1056 General(_
, ref cases
, dtor
) => {
1057 cases
[discr
.0 as usize].fields
.len() - 1 - (if dtor_active(dtor
) { 1 }
else { 0 }
)
1059 RawNullablePointer { nndiscr, ref nullfields, .. }
=> {
1060 if discr
== nndiscr { 1 }
else { nullfields.len() }
1062 StructWrappedNullablePointer
{ ref nonnull
, nndiscr
,
1063 ref nullfields
, .. } => {
1064 if discr
== nndiscr { nonnull.fields.len() }
else { nullfields.len() }
1069 /// Access a field, at a point when the value's case is known.
1070 pub fn trans_field_ptr
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>, r
: &Repr
<'tcx
>,
1071 val
: MaybeSizedValue
, discr
: Disr
, ix
: usize) -> ValueRef
{
1072 // Note: if this ever needs to generate conditionals (e.g., if we
1073 // decide to do some kind of cdr-coding-like non-unique repr
1074 // someday), it will need to return a possibly-new bcx as well.
1077 bcx
.ccx().sess().bug("element access in C-like enum")
1079 Univariant(ref st
, _dtor
) => {
1080 assert_eq
!(discr
, Disr(0));
1081 struct_field_ptr(bcx
, st
, val
, ix
, false)
1083 General(_
, ref cases
, _
) => {
1084 struct_field_ptr(bcx
, &cases
[discr
.0 as usize], val
, ix
+ 1, true)
1086 RawNullablePointer { nndiscr, ref nullfields, .. }
|
1087 StructWrappedNullablePointer { nndiscr, ref nullfields, .. }
if discr
!= nndiscr
=> {
1088 // The unit-like case might have a nonzero number of unit-like fields.
1089 // (e.d., Result of Either with (), as one side.)
1090 let ty
= type_of
::type_of(bcx
.ccx(), nullfields
[ix
]);
1091 assert_eq
!(machine
::llsize_of_alloc(bcx
.ccx(), ty
), 0);
1092 // The contents of memory at this pointer can't matter, but use
1093 // the value that's "reasonable" in case of pointer comparison.
1094 PointerCast(bcx
, val
.value
, ty
.ptr_to())
1096 RawNullablePointer { nndiscr, nnty, .. }
=> {
1098 assert_eq
!(discr
, nndiscr
);
1099 let ty
= type_of
::type_of(bcx
.ccx(), nnty
);
1100 PointerCast(bcx
, val
.value
, ty
.ptr_to())
1102 StructWrappedNullablePointer { ref nonnull, nndiscr, .. }
=> {
1103 assert_eq
!(discr
, nndiscr
);
1104 struct_field_ptr(bcx
, nonnull
, val
, ix
, false)
1109 pub fn struct_field_ptr
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>, st
: &Struct
<'tcx
>, val
: MaybeSizedValue
,
1110 ix
: usize, needs_cast
: bool
) -> ValueRef
{
1111 let ccx
= bcx
.ccx();
1112 let ptr_val
= if needs_cast
{
1113 let fields
= st
.fields
.iter().map(|&ty
| {
1114 type_of
::in_memory_type_of(ccx
, ty
)
1115 }).collect
::<Vec
<_
>>();
1116 let real_ty
= Type
::struct_(ccx
, &fields
[..], st
.packed
);
1117 PointerCast(bcx
, val
.value
, real_ty
.ptr_to())
1122 let fty
= st
.fields
[ix
];
1123 // Simple case - we can just GEP the field
1124 // * First field - Always aligned properly
1125 // * Packed struct - There is no alignment padding
1126 // * Field is sized - pointer is properly aligned already
1127 if ix
== 0 || st
.packed
|| type_is_sized(bcx
.tcx(), fty
) {
1128 return StructGEP(bcx
, ptr_val
, ix
);
1131 // If the type of the last field is [T] or str, then we don't need to do
1134 ty
::TySlice(..) | ty
::TyStr
=> {
1135 return StructGEP(bcx
, ptr_val
, ix
);
1140 // There's no metadata available, log the case and just do the GEP.
1141 if !val
.has_meta() {
1142 debug
!("Unsized field `{}`, of `{}` has no metadata for adjustment",
1144 bcx
.val_to_string(ptr_val
));
1145 return StructGEP(bcx
, ptr_val
, ix
);
1148 let dbloc
= DebugLoc
::None
;
1150 // We need to get the pointer manually now.
1151 // We do this by casting to a *i8, then offsetting it by the appropriate amount.
1152 // We do this instead of, say, simply adjusting the pointer from the result of a GEP
1153 // because the field may have an arbitrary alignment in the LLVM representation
1157 // struct Foo<T: ?Sized> {
1162 // The type Foo<Foo<Trait>> is represented in LLVM as { u16, { u16, u8 }}, meaning that
1163 // the `y` field has 16-bit alignment.
1165 let meta
= val
.meta
;
1167 // Calculate the unaligned offset of the unsized field.
1169 for &ty
in &st
.fields
[0..ix
] {
1170 let llty
= type_of
::sizing_type_of(ccx
, ty
);
1171 let type_align
= type_of
::align_of(ccx
, ty
);
1172 offset
= roundup(offset
, type_align
);
1173 offset
+= machine
::llsize_of_alloc(ccx
, llty
);
1175 let unaligned_offset
= C_uint(bcx
.ccx(), offset
);
1177 // Get the alignment of the field
1178 let (_
, align
) = glue
::size_and_align_of_dst(bcx
, fty
, meta
);
1180 // Bump the unaligned offset up to the appropriate alignment using the
1181 // following expression:
1183 // (unaligned offset + (align - 1)) & -align
1186 let align_sub_1
= Sub(bcx
, align
, C_uint(bcx
.ccx(), 1u64), dbloc
);
1187 let offset
= And(bcx
,
1188 Add(bcx
, unaligned_offset
, align_sub_1
, dbloc
),
1189 Neg(bcx
, align
, dbloc
),
1192 debug
!("struct_field_ptr: DST field offset: {}",
1193 bcx
.val_to_string(offset
));
1195 // Cast and adjust pointer
1196 let byte_ptr
= PointerCast(bcx
, ptr_val
, Type
::i8p(bcx
.ccx()));
1197 let byte_ptr
= GEP(bcx
, byte_ptr
, &[offset
]);
1199 // Finally, cast back to the type expected
1200 let ll_fty
= type_of
::in_memory_type_of(bcx
.ccx(), fty
);
1201 debug
!("struct_field_ptr: Field type is {}", ll_fty
.to_string());
1202 PointerCast(bcx
, byte_ptr
, ll_fty
.ptr_to())
1205 pub fn fold_variants
<'blk
, 'tcx
, F
>(bcx
: Block
<'blk
, 'tcx
>,
1209 -> Block
<'blk
, 'tcx
> where
1210 F
: FnMut(Block
<'blk
, 'tcx
>, &Struct
<'tcx
>, ValueRef
) -> Block
<'blk
, 'tcx
>,
1214 Univariant(ref st
, _
) => {
1217 General(ity
, ref cases
, _
) => {
1218 let ccx
= bcx
.ccx();
1220 // See the comments in trans/base.rs for more information (inside
1221 // iter_structural_ty), but the gist here is that if the enum's
1222 // discriminant is *not* in the range that we're expecting (in which
1223 // case we'll take the fall-through branch on the switch
1224 // instruction) then we can't just optimize this to an Unreachable
1227 // Currently we still have filling drop, so this means that the drop
1228 // glue for enums may be called when the enum has been paved over
1229 // with the "I've been dropped" value. In this case the default
1230 // branch of the switch instruction will actually be taken at
1231 // runtime, so the basic block isn't actually unreachable, so we
1232 // need to make it do something with defined behavior. In this case
1233 // we just return early from the function.
1235 // Note that this is also why the `trans_get_discr` below has
1236 // `false` to indicate that loading the discriminant should
1237 // not have a range assert.
1238 let ret_void_cx
= fcx
.new_temp_block("enum-variant-iter-ret-void");
1239 RetVoid(ret_void_cx
, DebugLoc
::None
);
1241 let discr_val
= trans_get_discr(bcx
, r
, value
, None
, false);
1242 let llswitch
= Switch(bcx
, discr_val
, ret_void_cx
.llbb
, cases
.len());
1243 let bcx_next
= fcx
.new_temp_block("enum-variant-iter-next");
1245 for (discr
, case
) in cases
.iter().enumerate() {
1246 let mut variant_cx
= fcx
.new_temp_block(
1247 &format
!("enum-variant-iter-{}", &discr
.to_string())
1249 let rhs_val
= C_integral(ll_inttype(ccx
, ity
), discr
as u64, true);
1250 AddCase(llswitch
, rhs_val
, variant_cx
.llbb
);
1252 let fields
= case
.fields
.iter().map(|&ty
|
1253 type_of
::type_of(bcx
.ccx(), ty
)).collect
::<Vec
<_
>>();
1254 let real_ty
= Type
::struct_(ccx
, &fields
[..], case
.packed
);
1255 let variant_value
= PointerCast(variant_cx
, value
, real_ty
.ptr_to());
1257 variant_cx
= f(variant_cx
, case
, variant_value
);
1258 Br(variant_cx
, bcx_next
.llbb
, DebugLoc
::None
);
1267 /// Access the struct drop flag, if present.
1268 pub fn trans_drop_flag_ptr
<'blk
, 'tcx
>(mut bcx
: Block
<'blk
, 'tcx
>,
1271 -> datum
::DatumBlock
<'blk
, 'tcx
, datum
::Expr
>
1273 let tcx
= bcx
.tcx();
1274 let ptr_ty
= bcx
.tcx().mk_imm_ptr(tcx
.dtor_type());
1276 Univariant(ref st
, dtor
) if dtor_active(dtor
) => {
1277 let flag_ptr
= StructGEP(bcx
, val
, st
.fields
.len() - 1);
1278 datum
::immediate_rvalue_bcx(bcx
, flag_ptr
, ptr_ty
).to_expr_datumblock()
1280 General(_
, _
, dtor
) if dtor_active(dtor
) => {
1282 let custom_cleanup_scope
= fcx
.push_custom_cleanup_scope();
1283 let scratch
= unpack_datum
!(bcx
, datum
::lvalue_scratch_datum(
1284 bcx
, tcx
.dtor_type(), "drop_flag",
1285 InitAlloca
::Uninit("drop flag itself has no dtor"),
1286 cleanup
::CustomScope(custom_cleanup_scope
), (), |_
, bcx
, _
| {
1287 debug
!("no-op populate call for trans_drop_flag_ptr on dtor_type={:?}",
1292 bcx
= fold_variants(bcx
, r
, val
, |variant_cx
, st
, value
| {
1293 let ptr
= struct_field_ptr(variant_cx
, st
, MaybeSizedValue
::sized(value
),
1294 (st
.fields
.len() - 1), false);
1295 datum
::Datum
::new(ptr
, ptr_ty
, datum
::Lvalue
::new("adt::trans_drop_flag_ptr"))
1296 .store_to(variant_cx
, scratch
.val
)
1298 let expr_datum
= scratch
.to_expr_datum();
1299 fcx
.pop_custom_cleanup_scope(custom_cleanup_scope
);
1300 datum
::DatumBlock
::new(bcx
, expr_datum
)
1302 _
=> bcx
.ccx().sess().bug("tried to get drop flag of non-droppable type")
1306 /// Construct a constant value, suitable for initializing a
1307 /// GlobalVariable, given a case and constant values for its fields.
1308 /// Note that this may have a different LLVM type (and different
1309 /// alignment!) from the representation's `type_of`, so it needs a
1310 /// pointer cast before use.
1312 /// The LLVM type system does not directly support unions, and only
1313 /// pointers can be bitcast, so a constant (and, by extension, the
1314 /// GlobalVariable initialized by it) will have a type that can vary
1315 /// depending on which case of an enum it is.
1317 /// To understand the alignment situation, consider `enum E { V64(u64),
1318 /// V32(u32, u32) }` on Windows. The type has 8-byte alignment to
1319 /// accommodate the u64, but `V32(x, y)` would have LLVM type `{i32,
1320 /// i32, i32}`, which is 4-byte aligned.
1322 /// Currently the returned value has the same size as the type, but
1323 /// this could be changed in the future to avoid allocating unnecessary
1324 /// space after values of shorter-than-maximum cases.
1325 pub fn trans_const
<'a
, 'tcx
>(ccx
: &CrateContext
<'a
, 'tcx
>, r
: &Repr
<'tcx
>, discr
: Disr
,
1326 vals
: &[ValueRef
]) -> ValueRef
{
1328 CEnum(ity
, min
, max
) => {
1329 assert_eq
!(vals
.len(), 0);
1330 assert_discr_in_range(ity
, min
, max
, discr
);
1331 C_integral(ll_inttype(ccx
, ity
), discr
.0, true)
1333 General(ity
, ref cases
, _
) => {
1334 let case
= &cases
[discr
.0 as usize];
1335 let (max_sz
, _
) = union_size_and_align(&cases
[..]);
1336 let lldiscr
= C_integral(ll_inttype(ccx
, ity
), discr
.0 as u64, true);
1337 let mut f
= vec
![lldiscr
];
1338 f
.extend_from_slice(vals
);
1339 let mut contents
= build_const_struct(ccx
, case
, &f
[..]);
1340 contents
.extend_from_slice(&[padding(ccx
, max_sz
- case
.size
)]);
1341 C_struct(ccx
, &contents
[..], false)
1343 Univariant(ref st
, _dro
) => {
1344 assert_eq
!(discr
, Disr(0));
1345 let contents
= build_const_struct(ccx
, st
, vals
);
1346 C_struct(ccx
, &contents
[..], st
.packed
)
1348 RawNullablePointer { nndiscr, nnty, .. }
=> {
1349 if discr
== nndiscr
{
1350 assert_eq
!(vals
.len(), 1);
1353 C_null(type_of
::sizing_type_of(ccx
, nnty
))
1356 StructWrappedNullablePointer { ref nonnull, nndiscr, .. }
=> {
1357 if discr
== nndiscr
{
1358 C_struct(ccx
, &build_const_struct(ccx
,
1363 let vals
= nonnull
.fields
.iter().map(|&ty
| {
1364 // Always use null even if it's not the `discrfield`th
1365 // field; see #8506.
1366 C_null(type_of
::sizing_type_of(ccx
, ty
))
1367 }).collect
::<Vec
<ValueRef
>>();
1368 C_struct(ccx
, &build_const_struct(ccx
,
1377 /// Compute struct field offsets relative to struct begin.
1378 fn compute_struct_field_offsets
<'a
, 'tcx
>(ccx
: &CrateContext
<'a
, 'tcx
>,
1379 st
: &Struct
<'tcx
>) -> Vec
<u64> {
1380 let mut offsets
= vec
!();
1383 for &ty
in &st
.fields
{
1384 let llty
= type_of
::sizing_type_of(ccx
, ty
);
1386 let type_align
= type_of
::align_of(ccx
, ty
);
1387 offset
= roundup(offset
, type_align
);
1389 offsets
.push(offset
);
1390 offset
+= machine
::llsize_of_alloc(ccx
, llty
);
1392 assert_eq
!(st
.fields
.len(), offsets
.len());
1396 /// Building structs is a little complicated, because we might need to
1397 /// insert padding if a field's value is less aligned than its type.
1399 /// Continuing the example from `trans_const`, a value of type `(u32,
1400 /// E)` should have the `E` at offset 8, but if that field's
1401 /// initializer is 4-byte aligned then simply translating the tuple as
1402 /// a two-element struct will locate it at offset 4, and accesses to it
1403 /// will read the wrong memory.
1404 fn build_const_struct
<'a
, 'tcx
>(ccx
: &CrateContext
<'a
, 'tcx
>,
1405 st
: &Struct
<'tcx
>, vals
: &[ValueRef
])
1407 assert_eq
!(vals
.len(), st
.fields
.len());
1409 let target_offsets
= compute_struct_field_offsets(ccx
, st
);
1411 // offset of current value
1413 let mut cfields
= Vec
::new();
1414 for (&val
, target_offset
) in vals
.iter().zip(target_offsets
) {
1416 let val_align
= machine
::llalign_of_min(ccx
, val_ty(val
));
1417 offset
= roundup(offset
, val_align
);
1419 if offset
!= target_offset
{
1420 cfields
.push(padding(ccx
, target_offset
- offset
));
1421 offset
= target_offset
;
1423 assert
!(!is_undef(val
));
1425 offset
+= machine
::llsize_of_alloc(ccx
, val_ty(val
));
1428 assert
!(st
.sized
&& offset
<= st
.size
);
1429 if offset
!= st
.size
{
1430 cfields
.push(padding(ccx
, st
.size
- offset
));
1436 fn padding(ccx
: &CrateContext
, size
: u64) -> ValueRef
{
1437 C_undef(Type
::array(&Type
::i8(ccx
), size
))
// FIXME this utility routine should be somewhere more general
/// Round `x` up to the next multiple of `a` (a must be non-zero).
#[inline]
fn roundup(x: u64, a: u32) -> u64 {
    let align = u64::from(a);
    ((x + align - 1) / align) * align
}
1444 /// Get the discriminant of a constant value.
1445 pub fn const_get_discrim(ccx
: &CrateContext
, r
: &Repr
, val
: ValueRef
) -> Disr
{
1447 CEnum(ity
, _
, _
) => {
1449 attr
::SignedInt(..) => Disr(const_to_int(val
) as u64),
1450 attr
::UnsignedInt(..) => Disr(const_to_uint(val
)),
1453 General(ity
, _
, _
) => {
1455 attr
::SignedInt(..) => Disr(const_to_int(const_get_elt(ccx
, val
, &[0])) as u64),
1456 attr
::UnsignedInt(..) => Disr(const_to_uint(const_get_elt(ccx
, val
, &[0])))
1459 Univariant(..) => Disr(0),
1460 RawNullablePointer { .. }
| StructWrappedNullablePointer { .. }
=> {
1461 ccx
.sess().bug("const discrim access of non c-like enum")
1466 /// Extract a field of a constant value, as appropriate for its
1469 /// (Not to be confused with `common::const_get_elt`, which operates on
1470 /// raw LLVM-level structs and arrays.)
1471 pub fn const_get_field(ccx
: &CrateContext
, r
: &Repr
, val
: ValueRef
,
1472 _discr
: Disr
, ix
: usize) -> ValueRef
{
1474 CEnum(..) => ccx
.sess().bug("element access in C-like enum const"),
1475 Univariant(..) => const_struct_field(ccx
, val
, ix
),
1476 General(..) => const_struct_field(ccx
, val
, ix
+ 1),
1477 RawNullablePointer { .. }
=> {
1481 StructWrappedNullablePointer{ .. }
=> const_struct_field(ccx
, val
, ix
)
1485 /// Extract field of struct-like const, skipping our alignment padding.
1486 fn const_struct_field(ccx
: &CrateContext
, val
: ValueRef
, ix
: usize) -> ValueRef
{
1487 // Get the ix-th non-undef element of the struct.
1488 let mut real_ix
= 0; // actual position in the struct
1489 let mut ix
= ix
; // logical index relative to real_ix
1493 field
= const_get_elt(ccx
, val
, &[real_ix
]);
1494 if !is_undef(field
) {
1497 real_ix
= real_ix
+ 1;
1503 real_ix
= real_ix
+ 1;