// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! # Representation of Algebraic Data Types
//!
//! This module determines how to represent enums, structs, and tuples
//! based on their monomorphized types; it is responsible both for
//! choosing a representation and translating basic operations on
//! values of those types. (Note: exporting the representations for
//! debuggers is handled in debuginfo.rs, not here.)
//!
//! Note that the interface treats everything as a general case of an
//! enum, so structs/tuples/etc. have one pseudo-variant with
//! discriminant 0; i.e., as if they were a univariant enum.
//!
//! Having everything in one place will enable improvements to data
//! structure representation; possibilities include:
//!
//! - User-specified alignment (e.g., cacheline-aligning parts of
//!   concurrently accessed data structures); LLVM can't represent this
//!   directly, so we'd have to insert padding fields in any structure
//!   that might contain one and adjust GEP indices accordingly. See
//!   issue #4578.
//!
//! - Store nested enums' discriminants in the same word. Rather, if
//!   some variants start with enums, and those enums' representations
//!   have unused alignment padding between discriminant and body, the
//!   outer enum's discriminant can be stored there and those variants
//!   can start at offset 0. Kind of fancy, and might need work to
//!   make copies of the inner enum type cooperate, but it could help
//!   with `Option` or `Result` wrapped around another enum.
//!
//! - Tagged pointers would be neat, but given that any type can be
//!   used unboxed and any field can have pointers (including mutable)
//!   taken to it, implementing them for Rust seems difficult.

pub use self::Repr::*;

use std::rc::Rc;

use llvm::{ValueRef, True, IntEQ, IntNE};
use back::abi::FAT_PTR_ADDR;
use middle::subst;
use middle::ty::{self, Ty};
use middle::ty::Disr;
use syntax::ast;
use syntax::attr;
use syntax::attr::IntType;
use trans::_match;
use trans::build::*;
use trans::cleanup;
use trans::cleanup::CleanupMethods;
use trans::common::*;
use trans::datum;
use trans::debuginfo::DebugLoc;
use trans::machine;
use trans::monomorphize;
use trans::type_::Type;
use trans::type_of;

type Hint = attr::ReprAttr;

/// Representations.
#[derive(Eq, PartialEq, Debug)]
pub enum Repr<'tcx> {
    /// C-like enums; basically an int.
    CEnum(IntType, Disr, Disr), // discriminant range (signedness based on the IntType)
    /// Single-case variants, and structs/tuples/records.
    ///
    /// Structs with destructors need a dynamic destroyedness flag to
    /// avoid running the destructor too many times; this is included
    /// in the `Struct` if present.
    /// (The flag, if nonzero, represents the initialization value to use;
    /// if zero, then use no flag at all.)
    Univariant(Struct<'tcx>, u8),
    /// General-case enums: for each case there is a struct, and they
    /// all start with a field for the discriminant.
    ///
    /// Types with destructors need a dynamic destroyedness flag to
    /// avoid running the destructor too many times; the last argument
    /// indicates whether such a flag is present.
    /// (The flag, if nonzero, represents the initialization value to use;
    /// if zero, then use no flag at all.)
    General(IntType, Vec<Struct<'tcx>>, u8),
    /// Two cases distinguished by a nullable pointer: the case with discriminant
    /// `nndiscr` must have a single field which is known to be nonnull due to its type.
    /// The other case is known to be zero sized. Hence we represent the enum
    /// as simply a nullable pointer: if not null it indicates the `nndiscr` variant,
    /// otherwise it indicates the other case.
    RawNullablePointer {
        nndiscr: Disr,
        nnty: Ty<'tcx>,
        nullfields: Vec<Ty<'tcx>>
    },
    /// Two cases distinguished by a nullable pointer: the case with discriminant
    /// `nndiscr` is represented by the struct `nonnull`, where the `discrfield`th
    /// field is known to be nonnull due to its type; if that field is null, then
    /// it represents the other case, which is inhabited by at most one value
    /// (and all other fields are undefined/unused).
    ///
    /// For example, `std::option::Option` instantiated at a safe pointer type
    /// is represented such that `None` is a null pointer and `Some` is the
    /// identity function.
    StructWrappedNullablePointer {
        nonnull: Struct<'tcx>,
        nndiscr: Disr,
        discrfield: DiscrField,
        nullfields: Vec<Ty<'tcx>>,
    }
}
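
// Roughly, and ignoring `#[repr]` hints and drop flags, the mapping chosen by
// `represent_type_uncached` below looks like this (illustrative only):
//
//   enum Ordering { Less, Equal, Greater }      -> CEnum (u8-sized discriminant)
//   struct Point { x: i32, y: i32 }, tuples,
//   closures                                    -> Univariant (pseudo-discriminant 0)
//   enum Shape { Circle(f64), Rect(f64, f64) }  -> General (one struct per variant,
//                                                  discriminant field first)
//   Option<&u32>                                -> RawNullablePointer (None == null)
//   Option<(u8, Box<u32>)>                      -> StructWrappedNullablePointer
//                                                  (null-ness of the Box encodes None)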

/// For structs, and struct-like parts of anything fancier.
#[derive(Eq, PartialEq, Debug)]
pub struct Struct<'tcx> {
    // If the struct is DST, then the size and alignment do not take into
    // account the unsized fields of the struct.
    pub size: u64,
    pub align: u32,
    pub sized: bool,
    pub packed: bool,
    pub fields: Vec<Ty<'tcx>>
}

/// Convenience for `represent_type`. There should probably be more of
/// these, for places in trans where the `Ty` isn't directly
/// available.
pub fn represent_node<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                  node: ast::NodeId) -> Rc<Repr<'tcx>> {
    represent_type(bcx.ccx(), node_id_type(bcx, node))
}

/// Decides how to represent a given type.
pub fn represent_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
                                t: Ty<'tcx>)
                                -> Rc<Repr<'tcx>> {
    debug!("Representing: {}", t);
    match cx.adt_reprs().borrow().get(&t) {
        Some(repr) => return repr.clone(),
        None => {}
    }

    let repr = Rc::new(represent_type_uncached(cx, t));
    debug!("Represented as: {:?}", repr);
    cx.adt_reprs().borrow_mut().insert(t, repr.clone());
    repr
}

macro_rules! repeat_u8_as_u32 {
    ($name:expr) => { (($name as u32) << 24 |
                       ($name as u32) << 16 |
                       ($name as u32) << 8 |
                       ($name as u32)) }
}
macro_rules! repeat_u8_as_u64 {
    ($name:expr) => { ((repeat_u8_as_u32!($name) as u64) << 32 |
                       (repeat_u8_as_u32!($name) as u64)) }
}
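
// For example, with `DTOR_NEEDED` (0xd4, defined below):
//   repeat_u8_as_u32!(DTOR_NEEDED) == 0xd4d4d4d4
//   repeat_u8_as_u64!(DTOR_NEEDED) == 0xd4d4d4d4d4d4d4d4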

/// `DTOR_NEEDED_HINT` is a stack-local hint that just means
/// "we do not know whether the destructor has run or not; check the
/// drop-flag embedded in the value itself."
pub const DTOR_NEEDED_HINT: u8 = 0x3d;

/// `DTOR_MOVED_HINT` is a stack-local hint that means "this value has
/// definitely been moved; you do not need to run its destructor."
///
/// (However, for now, such values may still end up being explicitly
/// zeroed by the generated code; this is the distinction between
/// `datum::DropFlagInfo::ZeroAndMaintain` versus
/// `datum::DropFlagInfo::DontZeroJustUse`.)
pub const DTOR_MOVED_HINT: u8 = 0x2d;

pub const DTOR_NEEDED: u8 = 0xd4;
pub const DTOR_NEEDED_U32: u32 = repeat_u8_as_u32!(DTOR_NEEDED);
pub const DTOR_NEEDED_U64: u64 = repeat_u8_as_u64!(DTOR_NEEDED);
#[allow(dead_code)]
pub fn dtor_needed_usize(ccx: &CrateContext) -> usize {
    match &ccx.tcx().sess.target.target.target_pointer_width[..] {
        "32" => DTOR_NEEDED_U32 as usize,
        "64" => DTOR_NEEDED_U64 as usize,
        tws => panic!("Unsupported target word size for int: {}", tws),
    }
}

pub const DTOR_DONE: u8 = 0x1d;
pub const DTOR_DONE_U32: u32 = repeat_u8_as_u32!(DTOR_DONE);
pub const DTOR_DONE_U64: u64 = repeat_u8_as_u64!(DTOR_DONE);
#[allow(dead_code)]
pub fn dtor_done_usize(ccx: &CrateContext) -> usize {
    match &ccx.tcx().sess.target.target.target_pointer_width[..] {
        "32" => DTOR_DONE_U32 as usize,
        "64" => DTOR_DONE_U64 as usize,
        tws => panic!("Unsupported target word size for int: {}", tws),
    }
}

fn dtor_to_init_u8(dtor: bool) -> u8 {
    if dtor { DTOR_NEEDED } else { 0 }
}

pub trait GetDtorType<'tcx> { fn dtor_type(&self) -> Ty<'tcx>; }
impl<'tcx> GetDtorType<'tcx> for ty::ctxt<'tcx> {
    fn dtor_type(&self) -> Ty<'tcx> { self.types.u8 }
}

fn dtor_active(flag: u8) -> bool {
    flag != 0
}

fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
                                     t: Ty<'tcx>) -> Repr<'tcx> {
    match t.sty {
        ty::TyTuple(ref elems) => {
            Univariant(mk_struct(cx, &elems[..], false, t), 0)
        }
        ty::TyStruct(def_id, substs) => {
            let fields = cx.tcx().lookup_struct_fields(def_id);
            let mut ftys = fields.iter().map(|field| {
                let fty = cx.tcx().lookup_field_type(def_id, field.id, substs);
                monomorphize::normalize_associated_type(cx.tcx(), &fty)
            }).collect::<Vec<_>>();
            let packed = cx.tcx().lookup_packed(def_id);
            let dtor = cx.tcx().ty_dtor(def_id).has_drop_flag();
            if dtor {
                ftys.push(cx.tcx().dtor_type());
            }

            Univariant(mk_struct(cx, &ftys[..], packed, t), dtor_to_init_u8(dtor))
        }
        ty::TyClosure(_, ref substs) => {
            Univariant(mk_struct(cx, &substs.upvar_tys, false, t), 0)
        }
        ty::TyEnum(def_id, substs) => {
            let cases = get_cases(cx.tcx(), def_id, substs);
            let hint = *cx.tcx().lookup_repr_hints(def_id).get(0)
                .unwrap_or(&attr::ReprAny);

            let dtor = cx.tcx().ty_dtor(def_id).has_drop_flag();

            if cases.is_empty() {
                // Uninhabitable; represent as unit
                // (Typechecking will reject discriminant-sizing attrs.)
                assert_eq!(hint, attr::ReprAny);
                let ftys = if dtor { vec!(cx.tcx().dtor_type()) } else { vec!() };
                return Univariant(mk_struct(cx, &ftys[..], false, t),
                                  dtor_to_init_u8(dtor));
            }

            if !dtor && cases.iter().all(|c| c.tys.is_empty()) {
                // All bodies empty -> intlike
                let discrs: Vec<u64> = cases.iter().map(|c| c.discr).collect();
                let bounds = IntBounds {
                    ulo: *discrs.iter().min().unwrap(),
                    uhi: *discrs.iter().max().unwrap(),
                    slo: discrs.iter().map(|n| *n as i64).min().unwrap(),
                    shi: discrs.iter().map(|n| *n as i64).max().unwrap()
                };
                return mk_cenum(cx, hint, &bounds);
            }

            // Since there's at least one
            // non-empty body, explicit discriminants should have
            // been rejected by a checker before this point.
            if !cases.iter().enumerate().all(|(i,c)| c.discr == (i as Disr)) {
                cx.sess().bug(&format!("non-C-like enum {} with specified \
                                        discriminants",
                                       cx.tcx().item_path_str(def_id)));
            }

            if cases.len() == 1 {
                // Equivalent to a struct/tuple/newtype.
                // (Typechecking will reject discriminant-sizing attrs.)
                assert_eq!(hint, attr::ReprAny);
                let mut ftys = cases[0].tys.clone();
                if dtor { ftys.push(cx.tcx().dtor_type()); }
                return Univariant(mk_struct(cx, &ftys[..], false, t),
                                  dtor_to_init_u8(dtor));
            }

            if !dtor && cases.len() == 2 && hint == attr::ReprAny {
                // Nullable pointer optimization
                let mut discr = 0;
                while discr < 2 {
                    if cases[1 - discr].is_zerolen(cx, t) {
                        let st = mk_struct(cx, &cases[discr].tys,
                                           false, t);
                        match cases[discr].find_ptr(cx) {
                            Some(ref df) if df.len() == 1 && st.fields.len() == 1 => {
                                return RawNullablePointer {
                                    nndiscr: discr as Disr,
                                    nnty: st.fields[0],
                                    nullfields: cases[1 - discr].tys.clone()
                                };
                            }
                            Some(mut discrfield) => {
                                discrfield.push(0);
                                discrfield.reverse();
                                return StructWrappedNullablePointer {
                                    nndiscr: discr as Disr,
                                    nonnull: st,
                                    discrfield: discrfield,
                                    nullfields: cases[1 - discr].tys.clone()
                                };
                            }
                            None => {}
                        }
                    }
                    discr += 1;
                }
            }

            // The general case.
            assert!((cases.len() - 1) as i64 >= 0);
            let bounds = IntBounds { ulo: 0, uhi: (cases.len() - 1) as u64,
                                     slo: 0, shi: (cases.len() - 1) as i64 };
            let min_ity = range_to_inttype(cx, hint, &bounds);

            // Create the set of structs that represent each variant
            // Use the minimum integer type we figured out above
            let fields : Vec<_> = cases.iter().map(|c| {
                let mut ftys = vec!(ty_of_inttype(cx.tcx(), min_ity));
                ftys.push_all(&c.tys);
                if dtor { ftys.push(cx.tcx().dtor_type()); }
                mk_struct(cx, &ftys, false, t)
            }).collect();


            // Check to see if we should use a different type for the
            // discriminant. If the overall alignment of the type is
            // the same as the first field in each variant, we can safely use
            // an alignment-sized type.
            // We increase the size of the discriminant to avoid LLVM copying
            // padding when it doesn't need to. This normally causes unaligned
            // load/stores and excessive memcpy/memset operations. By using a
            // bigger integer size, LLVM can be sure about its contents and
            // won't be so conservative.
            // This check is needed to avoid increasing the size of types when
            // the alignment of the first field is smaller than the overall
            // alignment of the type.
            let (_, align) = union_size_and_align(&fields);
            let mut use_align = true;
            for st in &fields {
                // Get the first non-zero-sized field
                let field = st.fields.iter().skip(1).filter(|ty| {
                    let t = type_of::sizing_type_of(cx, **ty);
                    machine::llsize_of_real(cx, t) != 0 ||
                    // This case is only relevant for zero-sized types with large alignment
                    machine::llalign_of_min(cx, t) != 1
                }).next();

                if let Some(field) = field {
                    let field_align = type_of::align_of(cx, *field);
                    if field_align != align {
                        use_align = false;
                        break;
                    }
                }
            }
            let ity = if use_align {
                // Use the overall alignment
                match align {
                    1 => attr::UnsignedInt(ast::TyU8),
                    2 => attr::UnsignedInt(ast::TyU16),
                    4 => attr::UnsignedInt(ast::TyU32),
                    8 if machine::llalign_of_min(cx, Type::i64(cx)) == 8 =>
                        attr::UnsignedInt(ast::TyU64),
                    _ => min_ity // use min_ity as a fallback
                }
            } else {
                min_ity
            };

            let fields : Vec<_> = cases.iter().map(|c| {
                let mut ftys = vec!(ty_of_inttype(cx.tcx(), ity));
                ftys.push_all(&c.tys);
                if dtor { ftys.push(cx.tcx().dtor_type()); }
                mk_struct(cx, &ftys[..], false, t)
            }).collect();

            ensure_enum_fits_in_address_space(cx, &fields[..], t);

            General(ity, fields, dtor_to_init_u8(dtor))
        }
        _ => cx.sess().bug(&format!("adt::represent_type called on non-ADT type: {}", t))
    }
}

// this should probably all be in ty
struct Case<'tcx> {
    discr: Disr,
    tys: Vec<Ty<'tcx>>
}

/// This represents the (GEP) indices to follow to get to the discriminant field
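///
/// For example (illustrative): for `Option<(u8, Box<u32>)>` represented as a
/// `StructWrappedNullablePointer`, the path built below comes out as
/// `[0, 0, 1]`: index 0 through the pointer to the value, field 0 of the
/// `Some` struct (the tuple), then element 1 of the tuple (the `Box`), whose
/// null-ness encodes `None`.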
pub type DiscrField = Vec<usize>;

fn find_discr_field_candidate<'tcx>(tcx: &ty::ctxt<'tcx>,
                                    ty: Ty<'tcx>,
                                    mut path: DiscrField) -> Option<DiscrField> {
    match ty.sty {
        // Fat &T/&mut T/Box<T> i.e. T is [T], str, or Trait
        ty::TyRef(_, ty::TypeAndMut { ty, .. }) | ty::TyBox(ty) if !type_is_sized(tcx, ty) => {
            path.push(FAT_PTR_ADDR);
            Some(path)
        },

        // Regular thin pointer: &T/&mut T/Box<T>
        ty::TyRef(..) | ty::TyBox(..) => Some(path),

        // Functions are just pointers
        ty::TyBareFn(..) => Some(path),

        // Is this the NonZero lang item wrapping a pointer or integer type?
        ty::TyStruct(did, substs) if Some(did) == tcx.lang_items.non_zero() => {
            let nonzero_fields = tcx.lookup_struct_fields(did);
            assert_eq!(nonzero_fields.len(), 1);
            let nonzero_field = tcx.lookup_field_type(did, nonzero_fields[0].id, substs);
            match nonzero_field.sty {
                ty::TyRawPtr(ty::TypeAndMut { ty, .. }) if !type_is_sized(tcx, ty) => {
                    path.push_all(&[0, FAT_PTR_ADDR]);
                    Some(path)
                },
                ty::TyRawPtr(..) | ty::TyInt(..) | ty::TyUint(..) => {
                    path.push(0);
                    Some(path)
                },
                _ => None
            }
        },

        // Perhaps one of the fields of this struct is non-zero
        // let's recurse and find out
        ty::TyStruct(def_id, substs) => {
            let fields = tcx.lookup_struct_fields(def_id);
            for (j, field) in fields.iter().enumerate() {
                let field_ty = tcx.lookup_field_type(def_id, field.id, substs);
                if let Some(mut fpath) = find_discr_field_candidate(tcx, field_ty, path.clone()) {
                    fpath.push(j);
                    return Some(fpath);
                }
            }
            None
        },

        // Perhaps one of the upvars of this closure is non-zero
        // Let's recurse and find out!
        ty::TyClosure(_, ref substs) => {
            for (j, &ty) in substs.upvar_tys.iter().enumerate() {
                if let Some(mut fpath) = find_discr_field_candidate(tcx, ty, path.clone()) {
                    fpath.push(j);
                    return Some(fpath);
                }
            }
            None
        },

        // Can we use one of the fields in this tuple?
        ty::TyTuple(ref tys) => {
            for (j, &ty) in tys.iter().enumerate() {
                if let Some(mut fpath) = find_discr_field_candidate(tcx, ty, path.clone()) {
                    fpath.push(j);
                    return Some(fpath);
                }
            }
            None
        },

        // Is this a fixed-size array of something non-zero
        // with at least one element?
        ty::TyArray(ety, d) if d > 0 => {
            if let Some(mut vpath) = find_discr_field_candidate(tcx, ety, path) {
                vpath.push(0);
                Some(vpath)
            } else {
                None
            }
        },

        // Anything else is not a pointer
        _ => None
    }
}

impl<'tcx> Case<'tcx> {
    fn is_zerolen<'a>(&self, cx: &CrateContext<'a, 'tcx>, scapegoat: Ty<'tcx>) -> bool {
        mk_struct(cx, &self.tys, false, scapegoat).size == 0
    }

    fn find_ptr<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Option<DiscrField> {
        for (i, &ty) in self.tys.iter().enumerate() {
            if let Some(mut path) = find_discr_field_candidate(cx.tcx(), ty, vec![]) {
                path.push(i);
                return Some(path);
            }
        }
        None
    }
}

fn get_cases<'tcx>(tcx: &ty::ctxt<'tcx>,
                   def_id: ast::DefId,
                   substs: &subst::Substs<'tcx>)
                   -> Vec<Case<'tcx>> {
    tcx.enum_variants(def_id).iter().map(|vi| {
        let arg_tys = vi.args.iter().map(|&raw_ty| {
            monomorphize::apply_param_substs(tcx, substs, &raw_ty)
        }).collect();
        Case { discr: vi.disr_val, tys: arg_tys }
    }).collect()
}

fn mk_struct<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
                       tys: &[Ty<'tcx>], packed: bool,
                       scapegoat: Ty<'tcx>)
                       -> Struct<'tcx> {
    let sized = tys.iter().all(|&ty| type_is_sized(cx.tcx(), ty));
    let lltys : Vec<Type> = if sized {
        tys.iter().map(|&ty| type_of::sizing_type_of(cx, ty)).collect()
    } else {
        tys.iter().filter(|&ty| type_is_sized(cx.tcx(), *ty))
           .map(|&ty| type_of::sizing_type_of(cx, ty)).collect()
    };

    ensure_struct_fits_in_address_space(cx, &lltys[..], packed, scapegoat);

    let llty_rec = Type::struct_(cx, &lltys[..], packed);
    Struct {
        size: machine::llsize_of_alloc(cx, llty_rec),
        align: machine::llalign_of_min(cx, llty_rec),
        sized: sized,
        packed: packed,
        fields: tys.to_vec(),
    }
}

#[derive(Debug)]
struct IntBounds {
    slo: i64,
    shi: i64,
    ulo: u64,
    uhi: u64
}

fn mk_cenum<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
                      hint: Hint, bounds: &IntBounds)
                      -> Repr<'tcx> {
    let it = range_to_inttype(cx, hint, bounds);
    match it {
        attr::SignedInt(_) => CEnum(it, bounds.slo as Disr, bounds.shi as Disr),
        attr::UnsignedInt(_) => CEnum(it, bounds.ulo, bounds.uhi)
    }
}

fn range_to_inttype(cx: &CrateContext, hint: Hint, bounds: &IntBounds) -> IntType {
    debug!("range_to_inttype: {:?} {:?}", hint, bounds);
    // Lists of sizes to try. u64 is always allowed as a fallback.
    #[allow(non_upper_case_globals)]
    const choose_shortest: &'static [IntType] = &[
        attr::UnsignedInt(ast::TyU8), attr::SignedInt(ast::TyI8),
        attr::UnsignedInt(ast::TyU16), attr::SignedInt(ast::TyI16),
        attr::UnsignedInt(ast::TyU32), attr::SignedInt(ast::TyI32)];
    #[allow(non_upper_case_globals)]
    const at_least_32: &'static [IntType] = &[
        attr::UnsignedInt(ast::TyU32), attr::SignedInt(ast::TyI32)];

    let attempts;
    match hint {
        attr::ReprInt(span, ity) => {
            if !bounds_usable(cx, ity, bounds) {
                cx.sess().span_bug(span, "representation hint insufficient for discriminant range")
            }
            return ity;
        }
        attr::ReprExtern => {
            attempts = match &cx.sess().target.target.arch[..] {
                // WARNING: the ARM EABI has two variants; the one corresponding to `at_least_32`
                // appears to be used on Linux and NetBSD, but some systems may use the variant
                // corresponding to `choose_shortest`. However, we don't run on those yet...?
                "arm" => at_least_32,
                _ => at_least_32,
            }
        }
        attr::ReprAny => {
            attempts = choose_shortest;
        },
        attr::ReprPacked => {
            cx.tcx().sess.bug("range_to_inttype: found ReprPacked on an enum");
        }
    }
    for &ity in attempts {
        if bounds_usable(cx, ity, bounds) {
            return ity;
        }
    }
    return attr::UnsignedInt(ast::TyU64);
}
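
// For example, with the default `ReprAny` hint, a C-like enum whose
// discriminants span 0..300 fails the u8/i8 attempts and gets
// `UnsignedInt(TyU16)`, while one spanning -1..1 fits in `SignedInt(TyI8)`.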

pub fn ll_inttype(cx: &CrateContext, ity: IntType) -> Type {
    match ity {
        attr::SignedInt(t) => Type::int_from_ty(cx, t),
        attr::UnsignedInt(t) => Type::uint_from_ty(cx, t)
    }
}

fn bounds_usable(cx: &CrateContext, ity: IntType, bounds: &IntBounds) -> bool {
    debug!("bounds_usable: {:?} {:?}", ity, bounds);
    match ity {
        attr::SignedInt(_) => {
            let lllo = C_integral(ll_inttype(cx, ity), bounds.slo as u64, true);
            let llhi = C_integral(ll_inttype(cx, ity), bounds.shi as u64, true);
            bounds.slo == const_to_int(lllo) as i64 && bounds.shi == const_to_int(llhi) as i64
        }
        attr::UnsignedInt(_) => {
            let lllo = C_integral(ll_inttype(cx, ity), bounds.ulo, false);
            let llhi = C_integral(ll_inttype(cx, ity), bounds.uhi, false);
            bounds.ulo == const_to_uint(lllo) as u64 && bounds.uhi == const_to_uint(llhi) as u64
        }
    }
}

pub fn ty_of_inttype<'tcx>(tcx: &ty::ctxt<'tcx>, ity: IntType) -> Ty<'tcx> {
    match ity {
        attr::SignedInt(t) => tcx.mk_mach_int(t),
        attr::UnsignedInt(t) => tcx.mk_mach_uint(t)
    }
}

// LLVM doesn't like types that don't fit in the address space
fn ensure_struct_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                                 fields: &[Type],
                                                 packed: bool,
                                                 scapegoat: Ty<'tcx>) {
    let mut offset = 0;
    for &llty in fields {
        // Invariant: offset < ccx.obj_size_bound() <= 1<<61
        if !packed {
            let type_align = machine::llalign_of_min(ccx, llty);
            offset = roundup(offset, type_align);
        }
        // type_align is a power-of-2, so still offset < ccx.obj_size_bound()
        // llsize_of_alloc(ccx, llty) is also less than ccx.obj_size_bound()
        // so the sum is less than 1<<62 (and therefore can't overflow).
        offset += machine::llsize_of_alloc(ccx, llty);

        if offset >= ccx.obj_size_bound() {
            ccx.report_overbig_object(scapegoat);
        }
    }
}

fn union_size_and_align(sts: &[Struct]) -> (machine::llsize, machine::llalign) {
    let size = sts.iter().map(|st| st.size).max().unwrap();
    let align = sts.iter().map(|st| st.align).max().unwrap();
    (roundup(size, align), align)
}
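
// For example, variant structs of (size 12, align 4) and (size 13, align 8)
// yield (roundup(13, 8), 8) == (16, 8).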

fn ensure_enum_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                               fields: &[Struct],
                                               scapegoat: Ty<'tcx>) {
    let (total_size, _) = union_size_and_align(fields);

    if total_size >= ccx.obj_size_bound() {
        ccx.report_overbig_object(scapegoat);
    }
}


/// LLVM-level types are a little complicated.
///
/// C-like enums need to be actual ints, not wrapped in a struct,
/// because that changes the ABI on some platforms (see issue #10308).
///
/// For nominal types, in some cases, we need to use LLVM named structs
/// and fill in the actual contents in a second pass to prevent
/// unbounded recursion; see also the comments in `trans::type_of`.
pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>) -> Type {
    generic_type_of(cx, r, None, false, false)
}
// Pass dst=true if the type you are passing is a DST. Yes, we could figure
// this out, but if you call this on an unsized type without realising it, you
// are going to get the wrong type (it will not include the unsized parts of it).
pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
                                r: &Repr<'tcx>, dst: bool) -> Type {
    generic_type_of(cx, r, None, true, dst)
}
pub fn incomplete_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
                                    r: &Repr<'tcx>, name: &str) -> Type {
    generic_type_of(cx, r, Some(name), false, false)
}
pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
                                r: &Repr<'tcx>, llty: &mut Type) {
    match *r {
        CEnum(..) | General(..) | RawNullablePointer { .. } => { }
        Univariant(ref st, _) | StructWrappedNullablePointer { nonnull: ref st, .. } =>
            llty.set_struct_body(&struct_llfields(cx, st, false, false),
                                 st.packed)
    }
}

fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
                             r: &Repr<'tcx>,
                             name: Option<&str>,
                             sizing: bool,
                             dst: bool) -> Type {
    match *r {
        CEnum(ity, _, _) => ll_inttype(cx, ity),
        RawNullablePointer { nnty, .. } => type_of::sizing_type_of(cx, nnty),
        Univariant(ref st, _) | StructWrappedNullablePointer { nonnull: ref st, .. } => {
            match name {
                None => {
                    Type::struct_(cx, &struct_llfields(cx, st, sizing, dst),
                                  st.packed)
                }
                Some(name) => { assert_eq!(sizing, false); Type::named_struct(cx, name) }
            }
        }
        General(ity, ref sts, _) => {
            // We need a representation that has:
            // * The alignment of the most-aligned field
            // * The size of the largest variant (rounded up to that alignment)
            // * No alignment padding anywhere any variant has actual data
            //   (currently matters only for enums small enough to be immediate)
            // * The discriminant in an obvious place.
            //
            // So we start with the discriminant, pad it up to the alignment with
            // more of its own type, then use alignment-sized ints to get the rest
            // of the size.
            //
            // FIXME #10604: this breaks when vector types are present.
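            //
            // For example (illustrative): with a u8 discriminant and a
            // largest variant of LLVM type {i8, i8, i64} (size 16, align 8),
            // the fields below come out as [i8, [i8; 7], [i64; 1]]:
            // the discriminant, padding up to the alignment in the
            // discriminant's own type, then alignment-sized fill.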
            let (size, align) = union_size_and_align(&sts[..]);
            let align_s = align as u64;
            assert_eq!(size % align_s, 0);
            let align_units = size / align_s - 1;

            let discr_ty = ll_inttype(cx, ity);
            let discr_size = machine::llsize_of_alloc(cx, discr_ty);
            let fill_ty = match align_s {
                1 => Type::array(&Type::i8(cx), align_units),
                2 => Type::array(&Type::i16(cx), align_units),
                4 => Type::array(&Type::i32(cx), align_units),
                8 if machine::llalign_of_min(cx, Type::i64(cx)) == 8 =>
                    Type::array(&Type::i64(cx), align_units),
                a if a.count_ones() == 1 => Type::array(&Type::vector(&Type::i32(cx), a / 4),
                                                        align_units),
                _ => panic!("unsupported enum alignment: {}", align)
            };
            assert_eq!(machine::llalign_of_min(cx, fill_ty), align);
            assert_eq!(align_s % discr_size, 0);
            let fields = [discr_ty,
                          Type::array(&discr_ty, align_s / discr_size - 1),
                          fill_ty];
            match name {
                None => Type::struct_(cx, &fields[..], false),
                Some(name) => {
                    let mut llty = Type::named_struct(cx, name);
                    llty.set_struct_body(&fields[..], false);
                    llty
                }
            }
        }
    }
}

fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, st: &Struct<'tcx>,
                             sizing: bool, dst: bool) -> Vec<Type> {
    if sizing {
        st.fields.iter().filter(|&ty| !dst || type_is_sized(cx.tcx(), *ty))
            .map(|&ty| type_of::sizing_type_of(cx, ty)).collect()
    } else {
        st.fields.iter().map(|&ty| type_of::in_memory_type_of(cx, ty)).collect()
    }
}

/// Obtain a representation of the discriminant sufficient to translate
/// destructuring; this may or may not involve the actual discriminant.
///
/// This should ideally be less tightly tied to `_match`.
pub fn trans_switch<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                r: &Repr<'tcx>, scrutinee: ValueRef)
                                -> (_match::BranchKind, Option<ValueRef>) {
    match *r {
        CEnum(..) | General(..) |
        RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => {
            (_match::Switch, Some(trans_get_discr(bcx, r, scrutinee, None)))
        }
        Univariant(..) => {
            // N.B.: Univariant means <= 1 enum variants (*not* == 1 variants).
            (_match::Single, None)
        }
    }
}

pub fn is_discr_signed<'tcx>(r: &Repr<'tcx>) -> bool {
    match *r {
        CEnum(ity, _, _) => ity.is_signed(),
        General(ity, _, _) => ity.is_signed(),
        Univariant(..) => false,
        RawNullablePointer { .. } => false,
        StructWrappedNullablePointer { .. } => false,
    }
}

/// Obtain the actual discriminant of a value.
pub fn trans_get_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
                                   scrutinee: ValueRef, cast_to: Option<Type>)
                                   -> ValueRef {
    debug!("trans_get_discr r: {:?}", r);
    let val = match *r {
        CEnum(ity, min, max) => load_discr(bcx, ity, scrutinee, min, max),
        General(ity, ref cases, _) => {
            let ptr = GEPi(bcx, scrutinee, &[0, 0]);
            load_discr(bcx, ity, ptr, 0, (cases.len() - 1) as Disr)
        }
        Univariant(..) => C_u8(bcx.ccx(), 0),
        RawNullablePointer { nndiscr, nnty, .. } => {
            let cmp = if nndiscr == 0 { IntEQ } else { IntNE };
            let llptrty = type_of::sizing_type_of(bcx.ccx(), nnty);
            ICmp(bcx, cmp, Load(bcx, scrutinee), C_null(llptrty), DebugLoc::None)
        }
        StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
            struct_wrapped_nullable_bitdiscr(bcx, nndiscr, discrfield, scrutinee)
        }
    };
    match cast_to {
        None => val,
        Some(llty) => if is_discr_signed(r) { SExt(bcx, val, llty) } else { ZExt(bcx, val, llty) }
    }
}

fn struct_wrapped_nullable_bitdiscr(bcx: Block, nndiscr: Disr, discrfield: &DiscrField,
                                    scrutinee: ValueRef) -> ValueRef {
    let llptrptr = GEPi(bcx, scrutinee, &discrfield[..]);
    let llptr = Load(bcx, llptrptr);
    let cmp = if nndiscr == 0 { IntEQ } else { IntNE };
    ICmp(bcx, cmp, llptr, C_null(val_ty(llptr)), DebugLoc::None)
}

/// Helper for cases where the discriminant is simply loaded.
fn load_discr(bcx: Block, ity: IntType, ptr: ValueRef, min: Disr, max: Disr)
              -> ValueRef {
    let llty = ll_inttype(bcx.ccx(), ity);
    assert_eq!(val_ty(ptr), llty.ptr_to());
    let bits = machine::llbitsize_of_real(bcx.ccx(), llty);
    assert!(bits <= 64);
    let bits = bits as usize;
    let mask = (!0u64 >> (64 - bits)) as Disr;
    // For a (max) discr of -1, max will be `-1 as usize`, which overflows.
    // However, that is fine here (it would still represent the full range),
    if (max.wrapping_add(1)) & mask == min & mask {
        // i.e., if the range is everything. The lo==hi case would be
        // rejected by the LLVM verifier (it would mean either an
        // empty set, which is impossible, or the entire range of the
        // type, which is pointless).
        Load(bcx, ptr)
    } else {
        // llvm::ConstantRange can deal with ranges that wrap around,
        // so an overflow on (max + 1) is fine.
        LoadRangeAssert(bcx, ptr, min, (max.wrapping_add(1)), /* signed: */ True)
    }
}

/// Yield information about how to dispatch a case of the
/// discriminant-like value returned by `trans_switch`.
///
/// This should ideally be less tightly tied to `_match`.
pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr, discr: Disr)
                              -> _match::OptResult<'blk, 'tcx> {
    match *r {
        CEnum(ity, _, _) => {
            _match::SingleResult(Result::new(bcx, C_integral(ll_inttype(bcx.ccx(), ity),
                                                             discr as u64, true)))
        }
        General(ity, _, _) => {
            _match::SingleResult(Result::new(bcx, C_integral(ll_inttype(bcx.ccx(), ity),
                                                             discr as u64, true)))
        }
        Univariant(..) => {
            bcx.ccx().sess().bug("no cases for univariants or structs")
        }
        RawNullablePointer { .. } |
        StructWrappedNullablePointer { .. } => {
            assert!(discr == 0 || discr == 1);
            _match::SingleResult(Result::new(bcx, C_bool(bcx.ccx(), discr != 0)))
        }
    }
}

/// Set the discriminant for a new value of the given case of the given
/// representation.
pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
                                   val: ValueRef, discr: Disr) {
    match *r {
        CEnum(ity, min, max) => {
            assert_discr_in_range(ity, min, max, discr);
            Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr as u64, true),
                  val);
        }
        General(ity, ref cases, dtor) => {
            if dtor_active(dtor) {
                let ptr = trans_field_ptr(bcx, r, val, discr,
                                          cases[discr as usize].fields.len() - 2);
                Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED as usize), ptr);
            }
            Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr as u64, true),
                  GEPi(bcx, val, &[0, 0]));
        }
        Univariant(ref st, dtor) => {
            assert_eq!(discr, 0);
            if dtor_active(dtor) {
                Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED as usize),
                      GEPi(bcx, val, &[0, st.fields.len() - 1]));
            }
        }
        RawNullablePointer { nndiscr, nnty, ..} => {
            if discr != nndiscr {
                let llptrty = type_of::sizing_type_of(bcx.ccx(), nnty);
                Store(bcx, C_null(llptrty), val);
            }
        }
        StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
            if discr != nndiscr {
                let llptrptr = GEPi(bcx, val, &discrfield[..]);
                let llptrty = val_ty(llptrptr).element_type();
                Store(bcx, C_null(llptrty), llptrptr);
            }
        }
    }
}

fn assert_discr_in_range(ity: IntType, min: Disr, max: Disr, discr: Disr) {
    match ity {
        attr::UnsignedInt(_) => assert!(min <= discr && discr <= max),
        attr::SignedInt(_) => assert!(min as i64 <= discr as i64 && discr as i64 <= max as i64)
    }
}

/// The number of fields in a given case; for use when obtaining this
/// information from the type or definition is less convenient.
pub fn num_args(r: &Repr, discr: Disr) -> usize {
    match *r {
        CEnum(..) => 0,
        Univariant(ref st, dtor) => {
            assert_eq!(discr, 0);
            st.fields.len() - (if dtor_active(dtor) { 1 } else { 0 })
        }
        General(_, ref cases, dtor) => {
            cases[discr as usize].fields.len() - 1 - (if dtor_active(dtor) { 1 } else { 0 })
        }
        RawNullablePointer { nndiscr, ref nullfields, .. } => {
            if discr == nndiscr { 1 } else { nullfields.len() }
        }
        StructWrappedNullablePointer { ref nonnull, nndiscr,
                                       ref nullfields, .. } => {
            if discr == nndiscr { nonnull.fields.len() } else { nullfields.len() }
        }
    }
}

/// Access a field, at a point when the value's case is known.
pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
                                   val: ValueRef, discr: Disr, ix: usize) -> ValueRef {
    // Note: if this ever needs to generate conditionals (e.g., if we
    // decide to do some kind of cdr-coding-like non-unique repr
    // someday), it will need to return a possibly-new bcx as well.
    match *r {
        CEnum(..) => {
            bcx.ccx().sess().bug("element access in C-like enum")
        }
        Univariant(ref st, _dtor) => {
            assert_eq!(discr, 0);
            struct_field_ptr(bcx, st, val, ix, false)
        }
        General(_, ref cases, _) => {
            struct_field_ptr(bcx, &cases[discr as usize], val, ix + 1, true)
        }
        RawNullablePointer { nndiscr, ref nullfields, .. } |
        StructWrappedNullablePointer { nndiscr, ref nullfields, .. } if discr != nndiscr => {
            // The unit-like case might have a nonzero number of unit-like fields.
            // (e.g., Result or Either with () as one side.)
            let ty = type_of::type_of(bcx.ccx(), nullfields[ix]);
            assert_eq!(machine::llsize_of_alloc(bcx.ccx(), ty), 0);
            // The contents of memory at this pointer can't matter, but use
            // the value that's "reasonable" in case of pointer comparison.
            PointerCast(bcx, val, ty.ptr_to())
        }
        RawNullablePointer { nndiscr, nnty, .. } => {
            assert_eq!(ix, 0);
            assert_eq!(discr, nndiscr);
            let ty = type_of::type_of(bcx.ccx(), nnty);
            PointerCast(bcx, val, ty.ptr_to())
        }
        StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
            assert_eq!(discr, nndiscr);
            struct_field_ptr(bcx, nonnull, val, ix, false)
        }
    }
}

pub fn struct_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, st: &Struct<'tcx>, val: ValueRef,
                                    ix: usize, needs_cast: bool) -> ValueRef {
    let val = if needs_cast {
        let ccx = bcx.ccx();
        let fields = st.fields.iter().map(|&ty| type_of::type_of(ccx, ty)).collect::<Vec<_>>();
        let real_ty = Type::struct_(ccx, &fields[..], st.packed);
        PointerCast(bcx, val, real_ty.ptr_to())
    } else {
        val
    };

    GEPi(bcx, val, &[0, ix])
}

pub fn fold_variants<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
                                    r: &Repr<'tcx>,
                                    value: ValueRef,
                                    mut f: F)
                                    -> Block<'blk, 'tcx> where
    F: FnMut(Block<'blk, 'tcx>, &Struct<'tcx>, ValueRef) -> Block<'blk, 'tcx>,
{
    let fcx = bcx.fcx;
    match *r {
        Univariant(ref st, _) => {
            f(bcx, st, value)
        }
        General(ity, ref cases, _) => {
            let ccx = bcx.ccx();

            // See the comments in trans/base.rs for more information (inside
            // iter_structural_ty), but the gist here is that if the enum's
            // discriminant is *not* in the range that we're expecting (in which
            // case we'll take the fall-through branch on the switch
            // instruction) then we can't just optimize this to an Unreachable
            // block.
            //
            // Currently we still have filling drop, so this means that the drop
            // glue for enums may be called when the enum has been paved over
            // with the "I've been dropped" value. In this case the default
            // branch of the switch instruction will actually be taken at
            // runtime, so the basic block isn't actually unreachable, so we
            // need to make it do something with defined behavior. In this case
            // we just return early from the function.
            let ret_void_cx = fcx.new_temp_block("enum-variant-iter-ret-void");
            RetVoid(ret_void_cx, DebugLoc::None);

            let discr_val = trans_get_discr(bcx, r, value, None);
            let llswitch = Switch(bcx, discr_val, ret_void_cx.llbb, cases.len());
            let bcx_next = fcx.new_temp_block("enum-variant-iter-next");

            for (discr, case) in cases.iter().enumerate() {
                let mut variant_cx = fcx.new_temp_block(
                    &format!("enum-variant-iter-{}", &discr.to_string())
                );
                let rhs_val = C_integral(ll_inttype(ccx, ity), discr as u64, true);
                AddCase(llswitch, rhs_val, variant_cx.llbb);

                let fields = case.fields.iter().map(|&ty|
                    type_of::type_of(bcx.ccx(), ty)).collect::<Vec<_>>();
                let real_ty = Type::struct_(ccx, &fields[..], case.packed);
                let variant_value = PointerCast(variant_cx, value, real_ty.ptr_to());

                variant_cx = f(variant_cx, case, variant_value);
                Br(variant_cx, bcx_next.llbb, DebugLoc::None);
            }

            bcx_next
        }
        _ => unreachable!()
    }
}

/// Access the struct drop flag, if present.
pub fn trans_drop_flag_ptr<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                                       r: &Repr<'tcx>,
                                       val: ValueRef)
                                       -> datum::DatumBlock<'blk, 'tcx, datum::Expr>
{
    let tcx = bcx.tcx();
    let ptr_ty = bcx.tcx().mk_imm_ptr(tcx.dtor_type());
    match *r {
        Univariant(ref st, dtor) if dtor_active(dtor) => {
            let flag_ptr = GEPi(bcx, val, &[0, st.fields.len() - 1]);
            datum::immediate_rvalue_bcx(bcx, flag_ptr, ptr_ty).to_expr_datumblock()
        }
        General(_, _, dtor) if dtor_active(dtor) => {
            let fcx = bcx.fcx;
            let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
            let scratch = unpack_datum!(bcx, datum::lvalue_scratch_datum(
                bcx, tcx.dtor_type(), "drop_flag",
                cleanup::CustomScope(custom_cleanup_scope), (), |_, bcx, _| bcx
            ));
            bcx = fold_variants(bcx, r, val, |variant_cx, st, value| {
                let ptr = struct_field_ptr(variant_cx, st, value, (st.fields.len() - 1), false);
                datum::Datum::new(ptr, ptr_ty, datum::Lvalue::new("adt::trans_drop_flag_ptr"))
                    .store_to(variant_cx, scratch.val)
            });
            let expr_datum = scratch.to_expr_datum();
            fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
            datum::DatumBlock::new(bcx, expr_datum)
        }
        _ => bcx.ccx().sess().bug("tried to get drop flag of non-droppable type")
    }
}

/// Construct a constant value, suitable for initializing a
/// GlobalVariable, given a case and constant values for its fields.
/// Note that this may have a different LLVM type (and different
/// alignment!) from the representation's `type_of`, so it needs a
/// pointer cast before use.
///
/// The LLVM type system does not directly support unions, and only
/// pointers can be bitcast, so a constant (and, by extension, the
/// GlobalVariable initialized by it) will have a type that can vary
/// depending on which case of an enum it is.
///
/// To understand the alignment situation, consider `enum E { V64(u64),
/// V32(u32, u32) }` on Windows. The type has 8-byte alignment to
/// accommodate the u64, but `V32(x, y)` would have LLVM type `{i32,
/// i32, i32}`, which is 4-byte aligned.
///
/// Currently the returned value has the same size as the type, but
/// this could be changed in the future to avoid allocating unnecessary
/// space after values of shorter-than-maximum cases.
pub fn trans_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>, discr: Disr,
                             vals: &[ValueRef]) -> ValueRef {
    match *r {
        CEnum(ity, min, max) => {
            assert_eq!(vals.len(), 0);
            assert_discr_in_range(ity, min, max, discr);
            C_integral(ll_inttype(ccx, ity), discr as u64, true)
        }
        General(ity, ref cases, _) => {
            let case = &cases[discr as usize];
            let (max_sz, _) = union_size_and_align(&cases[..]);
            let lldiscr = C_integral(ll_inttype(ccx, ity), discr as u64, true);
            let mut f = vec![lldiscr];
            f.push_all(vals);
            let mut contents = build_const_struct(ccx, case, &f[..]);
            contents.push_all(&[padding(ccx, max_sz - case.size)]);
            C_struct(ccx, &contents[..], false)
        }
        Univariant(ref st, _dro) => {
            assert!(discr == 0);
            let contents = build_const_struct(ccx, st, vals);
            C_struct(ccx, &contents[..], st.packed)
        }
        RawNullablePointer { nndiscr, nnty, .. } => {
            if discr == nndiscr {
                assert_eq!(vals.len(), 1);
                vals[0]
            } else {
                C_null(type_of::sizing_type_of(ccx, nnty))
            }
        }
        StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
            if discr == nndiscr {
                C_struct(ccx, &build_const_struct(ccx,
                                                  nonnull,
                                                  vals),
                         false)
            } else {
                let vals = nonnull.fields.iter().map(|&ty| {
                    // Always use null even if it's not the `discrfield`th
                    // field; see #8506.
                    C_null(type_of::sizing_type_of(ccx, ty))
                }).collect::<Vec<ValueRef>>();
                C_struct(ccx, &build_const_struct(ccx,
                                                  nonnull,
                                                  &vals[..]),
                         false)
            }
        }
    }
}

/// Compute struct field offsets relative to struct begin.
fn compute_struct_field_offsets<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                          st: &Struct<'tcx>) -> Vec<u64> {
    let mut offsets = vec!();

    let mut offset = 0;
    for &ty in &st.fields {
        let llty = type_of::sizing_type_of(ccx, ty);
        if !st.packed {
            let type_align = type_of::align_of(ccx, ty);
            offset = roundup(offset, type_align);
        }
        offsets.push(offset);
        offset += machine::llsize_of_alloc(ccx, llty);
    }
    assert_eq!(st.fields.len(), offsets.len());
    offsets
}
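
// For example, a non-packed struct with fields [u8, u32, u8] gets offsets
// [0, 4, 8] on a target where u32 is 4-byte aligned.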

/// Building structs is a little complicated, because we might need to
/// insert padding if a field's value is less aligned than its type.
///
/// Continuing the example from `trans_const`, a value of type `(u32,
/// E)` should have the `E` at offset 8, but if that field's
/// initializer is 4-byte aligned then simply translating the tuple as
/// a two-element struct will locate it at offset 4, and accesses to it
/// will read the wrong memory.
fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                st: &Struct<'tcx>, vals: &[ValueRef])
                                -> Vec<ValueRef> {
    assert_eq!(vals.len(), st.fields.len());

    let target_offsets = compute_struct_field_offsets(ccx, st);

    // offset of current value
    let mut offset = 0;
    let mut cfields = Vec::new();
    for (&val, target_offset) in vals.iter().zip(target_offsets) {
        if !st.packed {
            let val_align = machine::llalign_of_min(ccx, val_ty(val));
            offset = roundup(offset, val_align);
        }
        if offset != target_offset {
            cfields.push(padding(ccx, target_offset - offset));
            offset = target_offset;
        }
        assert!(!is_undef(val));
        cfields.push(val);
        offset += machine::llsize_of_alloc(ccx, val_ty(val));
    }

    assert!(st.sized && offset <= st.size);
    if offset != st.size {
        cfields.push(padding(ccx, st.size - offset));
    }

    cfields
}

fn padding(ccx: &CrateContext, size: u64) -> ValueRef {
    C_undef(Type::array(&Type::i8(ccx), size))
}

// FIXME this utility routine should be somewhere more general
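// e.g., roundup(10, 4) == 12 and roundup(8, 4) == 8.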
#[inline]
fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a }

/// Get the discriminant of a constant value.
pub fn const_get_discrim(ccx: &CrateContext, r: &Repr, val: ValueRef) -> Disr {
    match *r {
        CEnum(ity, _, _) => {
            match ity {
                attr::SignedInt(..) => const_to_int(val) as Disr,
                attr::UnsignedInt(..) => const_to_uint(val) as Disr
            }
        }
        General(ity, _, _) => {
            match ity {
                attr::SignedInt(..) => const_to_int(const_get_elt(ccx, val, &[0])) as Disr,
                attr::UnsignedInt(..) => const_to_uint(const_get_elt(ccx, val, &[0])) as Disr
            }
        }
        Univariant(..) => 0,
        RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => {
            ccx.sess().bug("const discrim access of non c-like enum")
        }
    }
}

/// Extract a field of a constant value, as appropriate for its
/// representation.
///
/// (Not to be confused with `common::const_get_elt`, which operates on
/// raw LLVM-level structs and arrays.)
pub fn const_get_field(ccx: &CrateContext, r: &Repr, val: ValueRef,
                       _discr: Disr, ix: usize) -> ValueRef {
    match *r {
        CEnum(..) => ccx.sess().bug("element access in C-like enum const"),
        Univariant(..) => const_struct_field(ccx, val, ix),
        General(..) => const_struct_field(ccx, val, ix + 1),
        RawNullablePointer { .. } => {
            assert_eq!(ix, 0);
            val
        },
        StructWrappedNullablePointer{ .. } => const_struct_field(ccx, val, ix)
    }
}

/// Extract field of struct-like const, skipping our alignment padding.
fn const_struct_field(ccx: &CrateContext, val: ValueRef, ix: usize) -> ValueRef {
    // Get the ix-th non-undef element of the struct.
    let mut real_ix = 0; // actual position in the struct
    let mut ix = ix; // logical index relative to real_ix
    let mut field;
    loop {
        loop {
            field = const_get_elt(ccx, val, &[real_ix]);
            if !is_undef(field) {
                break;
            }
            real_ix = real_ix + 1;
        }
        if ix == 0 {
            return field;
        }
        ix = ix - 1;
        real_ix = real_ix + 1;
    }
}