Commit | Line | Data |
---|---|---|
1a4d82fc JJ |
1 | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT |
2 | // file at the top-level directory of this distribution and at | |
3 | // http://rust-lang.org/COPYRIGHT. | |
4 | // | |
5 | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or | |
6 | // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license | |
7 | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your | |
8 | // option. This file may not be copied, modified, or distributed | |
9 | // except according to those terms. | |
10 | ||
11 | //! # Representation of Algebraic Data Types | |
12 | //! | |
13 | //! This module determines how to represent enums, structs, and tuples | |
14 | //! based on their monomorphized types; it is responsible both for | |
15 | //! choosing a representation and translating basic operations on | |
16 | //! values of those types. (Note: exporting the representations for | |
17 | //! debuggers is handled in debuginfo.rs, not here.) | |
18 | //! | |
19 | //! Note that the interface treats everything as a general case of an | |
20 | //! enum, so structs/tuples/etc. have one pseudo-variant with | |
21 | //! discriminant 0; i.e., as if they were a univariant enum. | |
22 | //! | |
23 | //! Having everything in one place will enable improvements to data | |
24 | //! structure representation; possibilities include: | |
25 | //! | |
26 | //! - User-specified alignment (e.g., cacheline-aligning parts of | |
27 | //! concurrently accessed data structures); LLVM can't represent this | |
28 | //! directly, so we'd have to insert padding fields in any structure | |
29 | //! that might contain one and adjust GEP indices accordingly. See | |
30 | //! issue #4578. | |
31 | //! | |
32 | //! - Store nested enums' discriminants in the same word. Rather, if | |
33 | //! some variants start with enums, and those enums' representations | |
34 | //! have unused alignment padding between discriminant and body, the | |
35 | //! outer enum's discriminant can be stored there and those variants | |
36 | //! can start at offset 0. Kind of fancy, and might need work to | |
37 | //! make copies of the inner enum type cooperate, but it could help | |
38 | //! with `Option` or `Result` wrapped around another enum. | |
39 | //! | |
40 | //! - Tagged pointers would be neat, but given that any type can be | |
41 | //! used unboxed and any field can have pointers (including mutable) | |
42 | //! taken to it, implementing them for Rust seems difficult. | |
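//!
//! As a rough, illustrative sketch (the real choice is made by
//! `represent_type_uncached` below), here are a few source-level types and
//! the `Repr` variants they typically map to:
//!
//! ```ignore
//! struct Point { x: i32, y: i32 }        // Univariant: one pseudo-variant, discriminant 0
//! enum Color { Red, Green, Blue }        // CEnum: just an integer
//! enum Link<'a> { Nil, Cons(&'a u32) }   // RawNullablePointer: `Nil` is the null pointer
//! enum Shape { Circle(f64), Rect(f64, f64) } // General: discriminant plus per-variant struct
//! ```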
43 | ||
1a4d82fc | 44 | pub use self::Repr::*; |
9cc50fc6 | 45 | use super::Disr; |
1a4d82fc | 46 | |
92a42be0 | 47 | use std; |
1a4d82fc JJ |
48 | use std::rc::Rc; |
49 | ||
50 | use llvm::{ValueRef, True, IntEQ, IntNE}; | |
51 | use back::abi::FAT_PTR_ADDR; | |
52 | use middle::subst; | |
c1a9b12d | 53 | use middle::ty::{self, Ty}; |
1a4d82fc | 54 | use syntax::ast; |
b039eaaf SL |
55 | use syntax::attr; |
56 | use syntax::attr::IntType; | |
1a4d82fc | 57 | use trans::_match; |
9cc50fc6 | 58 | use trans::base::InitAlloca; |
1a4d82fc JJ |
59 | use trans::build::*; |
60 | use trans::cleanup; | |
61 | use trans::cleanup::CleanupMethods; | |
62 | use trans::common::*; | |
63 | use trans::datum; | |
85aaf69f | 64 | use trans::debuginfo::DebugLoc; |
92a42be0 | 65 | use trans::glue; |
1a4d82fc JJ |
66 | use trans::machine; |
67 | use trans::monomorphize; | |
68 | use trans::type_::Type; | |
69 | use trans::type_of; | |
1a4d82fc JJ |
70 | |
71 | type Hint = attr::ReprAttr; | |
72 | ||
e9174d1e SL |
73 | // Representation of the context surrounding an unsized type. I want |
74 | // to be able to track the drop flags that are injected by trans. | |
75 | #[derive(Clone, Copy, PartialEq, Debug)] | |
76 | pub struct TypeContext { | |
77 | prefix: Type, | |
78 | needs_drop_flag: bool, | |
79 | } | |
80 | ||
81 | impl TypeContext { | |
82 | pub fn prefix(&self) -> Type { self.prefix } | |
83 | pub fn needs_drop_flag(&self) -> bool { self.needs_drop_flag } | |
84 | ||
85 | fn direct(t: Type) -> TypeContext { | |
86 | TypeContext { prefix: t, needs_drop_flag: false } | |
87 | } | |
88 | fn may_need_drop_flag(t: Type, needs_drop_flag: bool) -> TypeContext { | |
89 | TypeContext { prefix: t, needs_drop_flag: needs_drop_flag } | |
90 | } | |
91 | pub fn to_string(self) -> String { | |
92 | let TypeContext { prefix, needs_drop_flag } = self; | |
93 | format!("TypeContext {{ prefix: {}, needs_drop_flag: {} }}", | |
94 | prefix.to_string(), needs_drop_flag) | |
95 | } | |
96 | } | |
97 | ||
1a4d82fc | 98 | /// Representations. |
85aaf69f | 99 | #[derive(Eq, PartialEq, Debug)] |
1a4d82fc JJ |
100 | pub enum Repr<'tcx> { |
101 | /// C-like enums; basically an int. | |
102 | CEnum(IntType, Disr, Disr), // discriminant range (signedness based on the IntType) | |
103 | /// Single-case variants, and structs/tuples/records. | |
104 | /// | |
105 | /// Structs with destructors need a dynamic destroyedness flag to | |
106 | /// avoid running the destructor too many times; this is included | |
107 | /// in the `Struct` if present. | |
c34b1796 AL |
108 | /// (The flag, if nonzero, represents the initialization value to use; |
109 | /// if zero, then use no flag at all.) | |
110 | Univariant(Struct<'tcx>, u8), | |
1a4d82fc JJ |
111 | /// General-case enums: for each case there is a struct, and they |
112 | /// all start with a field for the discriminant. | |
113 | /// | |
114 | /// Types with destructors need a dynamic destroyedness flag to | |
115 | /// avoid running the destructor too many times; the last argument | |
116 | /// indicates whether such a flag is present. | |
c34b1796 AL |
117 | /// (The flag, if nonzero, represents the initialization value to use; |
118 | /// if zero, then use no flag at all.) | |
119 | General(IntType, Vec<Struct<'tcx>>, u8), | |
1a4d82fc JJ |
120 | /// Two cases distinguished by a nullable pointer: the case with discriminant |
121 | /// `nndiscr` must have a single field which is known to be nonnull due to its type. | |
122 | /// The other case is known to be zero sized. Hence we represent the enum | |
123 | /// as simply a nullable pointer: if not null it indicates the `nndiscr` variant, | |
124 | /// otherwise it indicates the other case. | |
125 | RawNullablePointer { | |
126 | nndiscr: Disr, | |
127 | nnty: Ty<'tcx>, | |
128 | nullfields: Vec<Ty<'tcx>> | |
129 | }, | |
130 | /// Two cases distinguished by a nullable pointer: the case with discriminant | |
131 | /// `nndiscr` is represented by the struct `nonnull`, where the `discrfield`th | |
132 | /// field is known to be nonnull due to its type; if that field is null, then | |
133 | /// it represents the other case, which is inhabited by at most one value | |
134 | /// (and all other fields are undefined/unused). | |
135 | /// | |
136 | /// For example, `std::option::Option` instantiated at a safe pointer type | |
137 | /// is represented such that `None` is a null pointer and `Some` is the | |
138 | /// identity function. | |
139 | StructWrappedNullablePointer { | |
140 | nonnull: Struct<'tcx>, | |
141 | nndiscr: Disr, | |
142 | discrfield: DiscrField, | |
143 | nullfields: Vec<Ty<'tcx>>, | |
144 | } | |
145 | } | |
146 | ||
147 | /// For structs, and struct-like parts of anything fancier. | |
85aaf69f | 148 | #[derive(Eq, PartialEq, Debug)] |
1a4d82fc JJ |
149 | pub struct Struct<'tcx> { |
150 | // If the struct is a DST, then the size and alignment do not take into | |
151 | // account the unsized fields of the struct. | |
152 | pub size: u64, | |
153 | pub align: u32, | |
154 | pub sized: bool, | |
155 | pub packed: bool, | |
e9174d1e | 156 | pub fields: Vec<Ty<'tcx>>, |
1a4d82fc JJ |
157 | } |
158 | ||
92a42be0 SL |
159 | #[derive(Copy, Clone)] |
160 | pub struct MaybeSizedValue { | |
161 | pub value: ValueRef, | |
162 | pub meta: ValueRef, | |
163 | } | |
164 | ||
165 | impl MaybeSizedValue { | |
166 | pub fn sized(value: ValueRef) -> MaybeSizedValue { | |
167 | MaybeSizedValue { | |
168 | value: value, | |
169 | meta: std::ptr::null_mut() | |
170 | } | |
171 | } | |
172 | ||
173 | pub fn unsized_(value: ValueRef, meta: ValueRef) -> MaybeSizedValue { | |
174 | MaybeSizedValue { | |
175 | value: value, | |
176 | meta: meta | |
177 | } | |
178 | } | |
179 | ||
180 | pub fn has_meta(&self) -> bool { | |
181 | !self.meta.is_null() | |
182 | } | |
183 | } | |
184 | ||
1a4d82fc JJ |
185 | /// Convenience for `represent_type`. There should probably be more of |
186 | /// these, for places in trans where the `Ty` isn't directly | |
187 | /// available. | |
188 | pub fn represent_node<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, | |
189 | node: ast::NodeId) -> Rc<Repr<'tcx>> { | |
190 | represent_type(bcx.ccx(), node_id_type(bcx, node)) | |
191 | } | |
192 | ||
193 | /// Decides how to represent a given type. | |
194 | pub fn represent_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, | |
d9579d0f AL |
195 | t: Ty<'tcx>) |
196 | -> Rc<Repr<'tcx>> { | |
62682a34 | 197 | debug!("Representing: {}", t); |
1a4d82fc JJ |
198 | match cx.adt_reprs().borrow().get(&t) { |
199 | Some(repr) => return repr.clone(), | |
200 | None => {} | |
201 | } | |
202 | ||
203 | let repr = Rc::new(represent_type_uncached(cx, t)); | |
204 | debug!("Represented as: {:?}", repr); | |
205 | cx.adt_reprs().borrow_mut().insert(t, repr.clone()); | |
206 | repr | |
207 | } | |
208 | ||
e9174d1e SL |
209 | const fn repeat_u8_as_u32(val: u8) -> u32 { |
210 | (val as u32) << 24 | (val as u32) << 16 | (val as u32) << 8 | val as u32 | |
c34b1796 | 211 | } |
e9174d1e SL |
212 | |
213 | const fn repeat_u8_as_u64(val: u8) -> u64 { | |
214 | (repeat_u8_as_u32(val) as u64) << 32 | repeat_u8_as_u32(val) as u64 | |
c34b1796 AL |
215 | } |
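// For example, repeat_u8_as_u32(0xd4) == 0xd4d4_d4d4 and
// repeat_u8_as_u64(0xd4) == 0xd4d4_d4d4_d4d4_d4d4; this is how the byte-sized
// drop-flag sentinels below are widened into the u64 constants
// `DTOR_NEEDED_U64` and `DTOR_DONE_U64`.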
216 | ||
c1a9b12d SL |
217 | /// `DTOR_NEEDED_HINT` is a stack-local hint that just means |
218 | /// "we do not know whether the destructor has run or not; check the | |
219 | /// drop-flag embedded in the value itself." | |
220 | pub const DTOR_NEEDED_HINT: u8 = 0x3d; | |
221 | ||
222 | /// `DTOR_MOVED_HINT` is a stack-local hint that means "this value has | |
223 | /// definitely been moved; you do not need to run its destructor." | |
224 | /// | |
225 | /// (However, for now, such values may still end up being explicitly | |
226 | /// zeroed by the generated code; this is the distinction between | |
227 | /// `datum::DropFlagInfo::ZeroAndMaintain` versus | |
228 | /// `datum::DropFlagInfo::DontZeroJustUse`.) | |
229 | pub const DTOR_MOVED_HINT: u8 = 0x2d; | |
230 | ||
c34b1796 | 231 | pub const DTOR_NEEDED: u8 = 0xd4; |
c34b1796 | 232 | #[allow(dead_code)] |
7453a54e | 233 | pub const DTOR_NEEDED_U64: u64 = repeat_u8_as_u64(DTOR_NEEDED); |
c34b1796 AL |
234 | |
235 | pub const DTOR_DONE: u8 = 0x1d; | |
c34b1796 | 236 | #[allow(dead_code)] |
7453a54e | 237 | pub const DTOR_DONE_U64: u64 = repeat_u8_as_u64(DTOR_DONE); |
c34b1796 AL |
238 | |
239 | fn dtor_to_init_u8(dtor: bool) -> u8 { | |
240 | if dtor { DTOR_NEEDED } else { 0 } | |
241 | } | |
242 | ||
243 | pub trait GetDtorType<'tcx> { fn dtor_type(&self) -> Ty<'tcx>; } | |
244 | impl<'tcx> GetDtorType<'tcx> for ty::ctxt<'tcx> { | |
245 | fn dtor_type(&self) -> Ty<'tcx> { self.types.u8 } | |
246 | } | |
247 | ||
248 | fn dtor_active(flag: u8) -> bool { | |
249 | flag != 0 | |
250 | } | |
251 | ||
1a4d82fc JJ |
252 | fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, |
253 | t: Ty<'tcx>) -> Repr<'tcx> { | |
254 | match t.sty { | |
62682a34 | 255 | ty::TyTuple(ref elems) => { |
c34b1796 | 256 | Univariant(mk_struct(cx, &elems[..], false, t), 0) |
1a4d82fc | 257 | } |
e9174d1e SL |
258 | ty::TyStruct(def, substs) => { |
259 | let mut ftys = def.struct_variant().fields.iter().map(|field| { | |
260 | monomorphize::field_ty(cx.tcx(), substs, field) | |
1a4d82fc | 261 | }).collect::<Vec<_>>(); |
e9174d1e | 262 | let packed = cx.tcx().lookup_packed(def.did); |
92a42be0 SL |
263 | // FIXME(16758) don't add a drop flag to unsized structs, as it |
264 | // won't actually be in the location we say it is because it'll be after | |
265 | // the unsized field. Several other pieces of code assume that the unsized | |
266 | // field is definitely the last one. | |
267 | let dtor = def.dtor_kind().has_drop_flag() && type_is_sized(cx.tcx(), t); | |
d9579d0f AL |
268 | if dtor { |
269 | ftys.push(cx.tcx().dtor_type()); | |
270 | } | |
1a4d82fc | 271 | |
c34b1796 | 272 | Univariant(mk_struct(cx, &ftys[..], packed, t), dtor_to_init_u8(dtor)) |
1a4d82fc | 273 | } |
c1a9b12d SL |
274 | ty::TyClosure(_, ref substs) => { |
275 | Univariant(mk_struct(cx, &substs.upvar_tys, false, t), 0) | |
1a4d82fc | 276 | } |
e9174d1e SL |
277 | ty::TyEnum(def, substs) => { |
278 | let cases = get_cases(cx.tcx(), def, substs); | |
279 | let hint = *cx.tcx().lookup_repr_hints(def.did).get(0) | |
1a4d82fc JJ |
280 | .unwrap_or(&attr::ReprAny); |
281 | ||
e9174d1e | 282 | let dtor = def.dtor_kind().has_drop_flag(); |
1a4d82fc | 283 | |
9346a6ac | 284 | if cases.is_empty() { |
1a4d82fc JJ |
285 | // Uninhabitable; represent as unit |
286 | // (Typechecking will reject discriminant-sizing attrs.) | |
287 | assert_eq!(hint, attr::ReprAny); | |
c34b1796 | 288 | let ftys = if dtor { vec!(cx.tcx().dtor_type()) } else { vec!() }; |
85aaf69f | 289 | return Univariant(mk_struct(cx, &ftys[..], false, t), |
c34b1796 | 290 | dtor_to_init_u8(dtor)); |
1a4d82fc JJ |
291 | } |
292 | ||
9346a6ac | 293 | if !dtor && cases.iter().all(|c| c.tys.is_empty()) { |
1a4d82fc | 294 | // All bodies empty -> intlike |
9cc50fc6 | 295 | let discrs: Vec<_> = cases.iter().map(|c| Disr::from(c.discr)).collect(); |
1a4d82fc | 296 | let bounds = IntBounds { |
9cc50fc6 SL |
297 | ulo: discrs.iter().min().unwrap().0, |
298 | uhi: discrs.iter().max().unwrap().0, | |
299 | slo: discrs.iter().map(|n| n.0 as i64).min().unwrap(), | |
300 | shi: discrs.iter().map(|n| n.0 as i64).max().unwrap() | |
1a4d82fc JJ |
301 | }; |
302 | return mk_cenum(cx, hint, &bounds); | |
303 | } | |
304 | ||
305 | // Since there's at least one | |
306 | // non-empty body, explicit discriminants should have | |
307 | // been rejected by a checker before this point. | |
9cc50fc6 | 308 | if !cases.iter().enumerate().all(|(i,c)| c.discr == Disr::from(i)) { |
1a4d82fc | 309 | cx.sess().bug(&format!("non-C-like enum {} with specified \ |
c1a9b12d | 310 | discriminants", |
e9174d1e | 311 | cx.tcx().item_path_str(def.did))); |
1a4d82fc JJ |
312 | } |
313 | ||
314 | if cases.len() == 1 { | |
315 | // Equivalent to a struct/tuple/newtype. | |
316 | // (Typechecking will reject discriminant-sizing attrs.) | |
317 | assert_eq!(hint, attr::ReprAny); | |
318 | let mut ftys = cases[0].tys.clone(); | |
c34b1796 | 319 | if dtor { ftys.push(cx.tcx().dtor_type()); } |
85aaf69f | 320 | return Univariant(mk_struct(cx, &ftys[..], false, t), |
c34b1796 | 321 | dtor_to_init_u8(dtor)); |
1a4d82fc JJ |
322 | } |
323 | ||
324 | if !dtor && cases.len() == 2 && hint == attr::ReprAny { | |
325 | // Nullable pointer optimization | |
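// (Illustrative: `Option<&u32>` has a zero-sized `None` and a `Some` whose
// single field can never be null, so the whole enum is lowered to just the
// pointer (`RawNullablePointer`). Something like `Option<(bool, Box<u32>)>`
// instead keeps the nonnull variant's struct layout and records which field
// to test for null (`StructWrappedNullablePointer`).)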
326 | let mut discr = 0; | |
327 | while discr < 2 { | |
328 | if cases[1 - discr].is_zerolen(cx, t) { | |
c34b1796 | 329 | let st = mk_struct(cx, &cases[discr].tys, |
1a4d82fc JJ |
330 | false, t); |
331 | match cases[discr].find_ptr(cx) { | |
332 | Some(ref df) if df.len() == 1 && st.fields.len() == 1 => { | |
333 | return RawNullablePointer { | |
9cc50fc6 | 334 | nndiscr: Disr::from(discr), |
1a4d82fc JJ |
335 | nnty: st.fields[0], |
336 | nullfields: cases[1 - discr].tys.clone() | |
337 | }; | |
338 | } | |
339 | Some(mut discrfield) => { | |
340 | discrfield.push(0); | |
341 | discrfield.reverse(); | |
342 | return StructWrappedNullablePointer { | |
9cc50fc6 | 343 | nndiscr: Disr::from(discr), |
1a4d82fc JJ |
344 | nonnull: st, |
345 | discrfield: discrfield, | |
346 | nullfields: cases[1 - discr].tys.clone() | |
347 | }; | |
348 | } | |
349 | None => {} | |
350 | } | |
351 | } | |
352 | discr += 1; | |
353 | } | |
354 | } | |
355 | ||
356 | // The general case. | |
357 | assert!((cases.len() - 1) as i64 >= 0); | |
358 | let bounds = IntBounds { ulo: 0, uhi: (cases.len() - 1) as u64, | |
359 | slo: 0, shi: (cases.len() - 1) as i64 }; | |
360 | let min_ity = range_to_inttype(cx, hint, &bounds); | |
361 | ||
362 | // Create the set of structs that represent each variant | |
363 | // Use the minimum integer type we figured out above | |
364 | let fields : Vec<_> = cases.iter().map(|c| { | |
365 | let mut ftys = vec!(ty_of_inttype(cx.tcx(), min_ity)); | |
92a42be0 | 366 | ftys.extend_from_slice(&c.tys); |
c34b1796 | 367 | if dtor { ftys.push(cx.tcx().dtor_type()); } |
85aaf69f | 368 | mk_struct(cx, &ftys, false, t) |
1a4d82fc JJ |
369 | }).collect(); |
370 | ||
371 | ||
372 | // Check to see if we should use a different type for the | |
373 | // discriminant. If the overall alignment of the type is | |
374 | // the same as the first field in each variant, we can safely use | |
375 | // an alignment-sized type. | |
376 | // We increase the size of the discriminant to avoid LLVM copying | |
377 | // padding when it doesn't need to. This normally causes unaligned | |
378 | // load/stores and excessive memcpy/memset operations. By using a | |
379 | // bigger integer size, LLVM can be sure about its contents and | |
380 | // won't be so conservative. | |
381 | // This check is needed to avoid increasing the size of types when | |
382 | // the alignment of the first field is smaller than the overall | |
383 | // alignment of the type. | |
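// (Illustrative, on a typical 64-bit target: for `enum E { A(u64), B(u64) }`
// every variant's first payload field is 8-aligned, matching the enum's
// overall alignment, so the u8 discriminant is widened to u64 without
// growing the type. For `enum F { A(u8, u64), B }` the first payload field
// of `A` is only 1-aligned, so the minimal u8 discriminant is kept.)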
85aaf69f | 384 | let (_, align) = union_size_and_align(&fields); |
1a4d82fc | 385 | let mut use_align = true; |
85aaf69f | 386 | for st in &fields { |
1a4d82fc JJ |
387 | // Get the first non-zero-sized field |
388 | let field = st.fields.iter().skip(1).filter(|ty| { | |
389 | let t = type_of::sizing_type_of(cx, **ty); | |
390 | machine::llsize_of_real(cx, t) != 0 || | |
391 | // This case is only relevant for zero-sized types with large alignment | |
392 | machine::llalign_of_min(cx, t) != 1 | |
393 | }).next(); | |
394 | ||
395 | if let Some(field) = field { | |
396 | let field_align = type_of::align_of(cx, *field); | |
397 | if field_align != align { | |
398 | use_align = false; | |
399 | break; | |
400 | } | |
401 | } | |
402 | } | |
403 | let ity = if use_align { | |
404 | // Use the overall alignment | |
405 | match align { | |
7453a54e SL |
406 | 1 => attr::UnsignedInt(ast::UintTy::U8), |
407 | 2 => attr::UnsignedInt(ast::UintTy::U16), | |
408 | 4 => attr::UnsignedInt(ast::UintTy::U32), | |
1a4d82fc | 409 | 8 if machine::llalign_of_min(cx, Type::i64(cx)) == 8 => |
7453a54e | 410 | attr::UnsignedInt(ast::UintTy::U64), |
1a4d82fc JJ |
411 | _ => min_ity // use min_ity as a fallback |
412 | } | |
413 | } else { | |
414 | min_ity | |
415 | }; | |
416 | ||
417 | let fields : Vec<_> = cases.iter().map(|c| { | |
418 | let mut ftys = vec!(ty_of_inttype(cx.tcx(), ity)); | |
92a42be0 | 419 | ftys.extend_from_slice(&c.tys); |
c34b1796 | 420 | if dtor { ftys.push(cx.tcx().dtor_type()); } |
85aaf69f | 421 | mk_struct(cx, &ftys[..], false, t) |
1a4d82fc JJ |
422 | }).collect(); |
423 | ||
85aaf69f | 424 | ensure_enum_fits_in_address_space(cx, &fields[..], t); |
1a4d82fc | 425 | |
c34b1796 | 426 | General(ity, fields, dtor_to_init_u8(dtor)) |
1a4d82fc | 427 | } |
62682a34 | 428 | _ => cx.sess().bug(&format!("adt::represent_type called on non-ADT type: {}", t)) |
1a4d82fc JJ |
429 | } |
430 | } | |
431 | ||
432 | // this should probably all be in ty | |
433 | struct Case<'tcx> { | |
434 | discr: Disr, | |
435 | tys: Vec<Ty<'tcx>> | |
436 | } | |
437 | ||
438 | /// This represents the (GEP) indices to follow to get to the discriminant field | |
c34b1796 | 439 | pub type DiscrField = Vec<usize>; |
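// For instance, a DiscrField of `[0, 0, 1]` means: GEP through the outer
// pointer (index 0), then into field 0 of the nonnull variant's struct, then
// into field 1 of that field, which is where the possibly-null pointer lives.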
1a4d82fc JJ |
440 | |
441 | fn find_discr_field_candidate<'tcx>(tcx: &ty::ctxt<'tcx>, | |
442 | ty: Ty<'tcx>, | |
443 | mut path: DiscrField) -> Option<DiscrField> { | |
444 | match ty.sty { | |
445 | // Fat &T/&mut T/Box<T> i.e. T is [T], str, or Trait | |
c1a9b12d | 446 | ty::TyRef(_, ty::TypeAndMut { ty, .. }) | ty::TyBox(ty) if !type_is_sized(tcx, ty) => { |
1a4d82fc JJ |
447 | path.push(FAT_PTR_ADDR); |
448 | Some(path) | |
449 | }, | |
450 | ||
451 | // Regular thin pointer: &T/&mut T/Box<T> | |
62682a34 | 452 | ty::TyRef(..) | ty::TyBox(..) => Some(path), |
1a4d82fc JJ |
453 | |
454 | // Functions are just pointers | |
62682a34 | 455 | ty::TyBareFn(..) => Some(path), |
1a4d82fc JJ |
456 | |
457 | // Is this the NonZero lang item wrapping a pointer or integer type? | |
e9174d1e SL |
458 | ty::TyStruct(def, substs) if Some(def.did) == tcx.lang_items.non_zero() => { |
459 | let nonzero_fields = &def.struct_variant().fields; | |
1a4d82fc | 460 | assert_eq!(nonzero_fields.len(), 1); |
e9174d1e SL |
461 | let field_ty = monomorphize::field_ty(tcx, substs, &nonzero_fields[0]); |
462 | match field_ty.sty { | |
c1a9b12d | 463 | ty::TyRawPtr(ty::TypeAndMut { ty, .. }) if !type_is_sized(tcx, ty) => { |
92a42be0 | 464 | path.extend_from_slice(&[0, FAT_PTR_ADDR]); |
d9579d0f AL |
465 | Some(path) |
466 | }, | |
62682a34 | 467 | ty::TyRawPtr(..) | ty::TyInt(..) | ty::TyUint(..) => { |
1a4d82fc JJ |
468 | path.push(0); |
469 | Some(path) | |
470 | }, | |
471 | _ => None | |
472 | } | |
473 | }, | |
474 | ||
475 | // Perhaps one of the fields of this struct is non-zero | |
476 | // let's recurse and find out | |
e9174d1e SL |
477 | ty::TyStruct(def, substs) => { |
478 | for (j, field) in def.struct_variant().fields.iter().enumerate() { | |
479 | let field_ty = monomorphize::field_ty(tcx, substs, field); | |
1a4d82fc JJ |
480 | if let Some(mut fpath) = find_discr_field_candidate(tcx, field_ty, path.clone()) { |
481 | fpath.push(j); | |
482 | return Some(fpath); | |
483 | } | |
484 | } | |
485 | None | |
486 | }, | |
487 | ||
d9579d0f AL |
488 | // Perhaps one of the upvars of this struct is non-zero |
489 | // Let's recurse and find out! | |
c1a9b12d SL |
490 | ty::TyClosure(_, ref substs) => { |
491 | for (j, &ty) in substs.upvar_tys.iter().enumerate() { | |
d9579d0f AL |
492 | if let Some(mut fpath) = find_discr_field_candidate(tcx, ty, path.clone()) { |
493 | fpath.push(j); | |
494 | return Some(fpath); | |
495 | } | |
496 | } | |
497 | None | |
498 | }, | |
499 | ||
1a4d82fc | 500 | // Can we use one of the fields in this tuple? |
62682a34 | 501 | ty::TyTuple(ref tys) => { |
1a4d82fc JJ |
502 | for (j, &ty) in tys.iter().enumerate() { |
503 | if let Some(mut fpath) = find_discr_field_candidate(tcx, ty, path.clone()) { | |
504 | fpath.push(j); | |
505 | return Some(fpath); | |
506 | } | |
507 | } | |
508 | None | |
509 | }, | |
510 | ||
511 | // Is this a fixed-size array of something non-zero | |
512 | // with at least one element? | |
62682a34 | 513 | ty::TyArray(ety, d) if d > 0 => { |
1a4d82fc JJ |
514 | if let Some(mut vpath) = find_discr_field_candidate(tcx, ety, path) { |
515 | vpath.push(0); | |
516 | Some(vpath) | |
517 | } else { | |
518 | None | |
519 | } | |
520 | }, | |
521 | ||
522 | // Anything else is not a pointer | |
523 | _ => None | |
524 | } | |
525 | } | |
526 | ||
527 | impl<'tcx> Case<'tcx> { | |
528 | fn is_zerolen<'a>(&self, cx: &CrateContext<'a, 'tcx>, scapegoat: Ty<'tcx>) -> bool { | |
c34b1796 | 529 | mk_struct(cx, &self.tys, false, scapegoat).size == 0 |
1a4d82fc JJ |
530 | } |
531 | ||
532 | fn find_ptr<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Option<DiscrField> { | |
533 | for (i, &ty) in self.tys.iter().enumerate() { | |
534 | if let Some(mut path) = find_discr_field_candidate(cx.tcx(), ty, vec![]) { | |
535 | path.push(i); | |
536 | return Some(path); | |
537 | } | |
538 | } | |
539 | None | |
540 | } | |
541 | } | |
542 | ||
543 | fn get_cases<'tcx>(tcx: &ty::ctxt<'tcx>, | |
e9174d1e | 544 | adt: ty::AdtDef<'tcx>, |
1a4d82fc JJ |
545 | substs: &subst::Substs<'tcx>) |
546 | -> Vec<Case<'tcx>> { | |
e9174d1e SL |
547 | adt.variants.iter().map(|vi| { |
548 | let field_tys = vi.fields.iter().map(|field| { | |
549 | monomorphize::field_ty(tcx, substs, field) | |
1a4d82fc | 550 | }).collect(); |
9cc50fc6 | 551 | Case { discr: Disr::from(vi.disr_val), tys: field_tys } |
1a4d82fc JJ |
552 | }).collect() |
553 | } | |
554 | ||
555 | fn mk_struct<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, | |
556 | tys: &[Ty<'tcx>], packed: bool, | |
557 | scapegoat: Ty<'tcx>) | |
558 | -> Struct<'tcx> { | |
559 | let sized = tys.iter().all(|&ty| type_is_sized(cx.tcx(), ty)); | |
560 | let lltys : Vec<Type> = if sized { | |
d9579d0f | 561 | tys.iter().map(|&ty| type_of::sizing_type_of(cx, ty)).collect() |
1a4d82fc JJ |
562 | } else { |
563 | tys.iter().filter(|&ty| type_is_sized(cx.tcx(), *ty)) | |
564 | .map(|&ty| type_of::sizing_type_of(cx, ty)).collect() | |
565 | }; | |
566 | ||
85aaf69f | 567 | ensure_struct_fits_in_address_space(cx, &lltys[..], packed, scapegoat); |
1a4d82fc | 568 | |
85aaf69f | 569 | let llty_rec = Type::struct_(cx, &lltys[..], packed); |
1a4d82fc JJ |
570 | Struct { |
571 | size: machine::llsize_of_alloc(cx, llty_rec), | |
572 | align: machine::llalign_of_min(cx, llty_rec), | |
573 | sized: sized, | |
574 | packed: packed, | |
575 | fields: tys.to_vec(), | |
576 | } | |
577 | } | |
578 | ||
85aaf69f | 579 | #[derive(Debug)] |
1a4d82fc JJ |
580 | struct IntBounds { |
581 | slo: i64, | |
582 | shi: i64, | |
583 | ulo: u64, | |
584 | uhi: u64 | |
585 | } | |
586 | ||
587 | fn mk_cenum<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, | |
588 | hint: Hint, bounds: &IntBounds) | |
589 | -> Repr<'tcx> { | |
590 | let it = range_to_inttype(cx, hint, bounds); | |
591 | match it { | |
9cc50fc6 SL |
592 | attr::SignedInt(_) => CEnum(it, Disr(bounds.slo as u64), Disr(bounds.shi as u64)), |
593 | attr::UnsignedInt(_) => CEnum(it, Disr(bounds.ulo), Disr(bounds.uhi)) | |
1a4d82fc JJ |
594 | } |
595 | } | |
596 | ||
597 | fn range_to_inttype(cx: &CrateContext, hint: Hint, bounds: &IntBounds) -> IntType { | |
598 | debug!("range_to_inttype: {:?} {:?}", hint, bounds); | |
599 | // Lists of sizes to try. u64 is always allowed as a fallback. | |
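// (e.g., for `enum E { A = 0, B = 300 }` with `ReprAny`, u8 and i8 cannot
// hold 300, so `bounds_usable` rejects them and u16 is chosen.)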
600 | #[allow(non_upper_case_globals)] | |
c34b1796 | 601 | const choose_shortest: &'static [IntType] = &[ |
7453a54e SL |
602 | attr::UnsignedInt(ast::UintTy::U8), attr::SignedInt(ast::IntTy::I8), |
603 | attr::UnsignedInt(ast::UintTy::U16), attr::SignedInt(ast::IntTy::I16), | |
604 | attr::UnsignedInt(ast::UintTy::U32), attr::SignedInt(ast::IntTy::I32)]; | |
1a4d82fc | 605 | #[allow(non_upper_case_globals)] |
c34b1796 | 606 | const at_least_32: &'static [IntType] = &[ |
7453a54e | 607 | attr::UnsignedInt(ast::UintTy::U32), attr::SignedInt(ast::IntTy::I32)]; |
1a4d82fc JJ |
608 | |
609 | let attempts; | |
610 | match hint { | |
611 | attr::ReprInt(span, ity) => { | |
612 | if !bounds_usable(cx, ity, bounds) { | |
613 | cx.sess().span_bug(span, "representation hint insufficient for discriminant range") | |
614 | } | |
615 | return ity; | |
616 | } | |
617 | attr::ReprExtern => { | |
c34b1796 | 618 | attempts = match &cx.sess().target.target.arch[..] { |
1a4d82fc JJ |
619 | // WARNING: the ARM EABI has two variants; the one corresponding to `at_least_32` |
620 | // appears to be used on Linux and NetBSD, but some systems may use the variant | |
621 | // corresponding to `choose_shortest`. However, we don't run on those yet...? | |
622 | "arm" => at_least_32, | |
623 | _ => at_least_32, | |
624 | } | |
625 | } | |
626 | attr::ReprAny => { | |
627 | attempts = choose_shortest; | |
628 | }, | |
629 | attr::ReprPacked => { | |
630 | cx.tcx().sess.bug("range_to_inttype: found ReprPacked on an enum"); | |
631 | } | |
e9174d1e SL |
632 | attr::ReprSimd => { |
633 | cx.tcx().sess.bug("range_to_inttype: found ReprSimd on an enum"); | |
634 | } | |
1a4d82fc | 635 | } |
85aaf69f | 636 | for &ity in attempts { |
1a4d82fc JJ |
637 | if bounds_usable(cx, ity, bounds) { |
638 | return ity; | |
639 | } | |
640 | } | |
7453a54e | 641 | return attr::UnsignedInt(ast::UintTy::U64); |
1a4d82fc JJ |
642 | } |
643 | ||
644 | pub fn ll_inttype(cx: &CrateContext, ity: IntType) -> Type { | |
645 | match ity { | |
646 | attr::SignedInt(t) => Type::int_from_ty(cx, t), | |
647 | attr::UnsignedInt(t) => Type::uint_from_ty(cx, t) | |
648 | } | |
649 | } | |
650 | ||
651 | fn bounds_usable(cx: &CrateContext, ity: IntType, bounds: &IntBounds) -> bool { | |
652 | debug!("bounds_usable: {:?} {:?}", ity, bounds); | |
653 | match ity { | |
654 | attr::SignedInt(_) => { | |
655 | let lllo = C_integral(ll_inttype(cx, ity), bounds.slo as u64, true); | |
656 | let llhi = C_integral(ll_inttype(cx, ity), bounds.shi as u64, true); | |
657 | bounds.slo == const_to_int(lllo) as i64 && bounds.shi == const_to_int(llhi) as i64 | |
658 | } | |
659 | attr::UnsignedInt(_) => { | |
660 | let lllo = C_integral(ll_inttype(cx, ity), bounds.ulo, false); | |
661 | let llhi = C_integral(ll_inttype(cx, ity), bounds.uhi, false); | |
662 | bounds.ulo == const_to_uint(lllo) as u64 && bounds.uhi == const_to_uint(llhi) as u64 | |
663 | } | |
664 | } | |
665 | } | |
666 | ||
667 | pub fn ty_of_inttype<'tcx>(tcx: &ty::ctxt<'tcx>, ity: IntType) -> Ty<'tcx> { | |
668 | match ity { | |
c1a9b12d SL |
669 | attr::SignedInt(t) => tcx.mk_mach_int(t), |
670 | attr::UnsignedInt(t) => tcx.mk_mach_uint(t) | |
1a4d82fc JJ |
671 | } |
672 | } | |
673 | ||
674 | // LLVM doesn't like types that don't fit in the address space | |
675 | fn ensure_struct_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, | |
676 | fields: &[Type], | |
677 | packed: bool, | |
678 | scapegoat: Ty<'tcx>) { | |
679 | let mut offset = 0; | |
85aaf69f | 680 | for &llty in fields { |
1a4d82fc JJ |
681 | // Invariant: offset < ccx.obj_size_bound() <= 1<<61 |
682 | if !packed { | |
683 | let type_align = machine::llalign_of_min(ccx, llty); | |
684 | offset = roundup(offset, type_align); | |
685 | } | |
686 | // type_align is a power-of-2, so still offset < ccx.obj_size_bound() | |
687 | // llsize_of_alloc(ccx, llty) is also less than ccx.obj_size_bound() | |
688 | // so the sum is less than 1<<62 (and therefore can't overflow). | |
689 | offset += machine::llsize_of_alloc(ccx, llty); | |
690 | ||
691 | if offset >= ccx.obj_size_bound() { | |
692 | ccx.report_overbig_object(scapegoat); | |
693 | } | |
694 | } | |
695 | } | |
696 | ||
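// For example, variant structs of sizes 12 and 16 with alignments 4 and 8
// give (roundup(16, 8), 8) == (16, 8): the union is as large as its largest
// member, rounded up to the strictest member alignment.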
697 | fn union_size_and_align(sts: &[Struct]) -> (machine::llsize, machine::llalign) { | |
698 | let size = sts.iter().map(|st| st.size).max().unwrap(); | |
85aaf69f SL |
699 | let align = sts.iter().map(|st| st.align).max().unwrap(); |
700 | (roundup(size, align), align) | |
1a4d82fc JJ |
701 | } |
702 | ||
703 | fn ensure_enum_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, | |
1a4d82fc JJ |
704 | fields: &[Struct], |
705 | scapegoat: Ty<'tcx>) { | |
85aaf69f | 706 | let (total_size, _) = union_size_and_align(fields); |
1a4d82fc JJ |
707 | |
708 | if total_size >= ccx.obj_size_bound() { | |
709 | ccx.report_overbig_object(scapegoat); | |
710 | } | |
711 | } | |
712 | ||
713 | ||
714 | /// LLVM-level types are a little complicated. | |
715 | /// | |
716 | /// C-like enums need to be actual ints, not wrapped in a struct, | |
717 | /// because that changes the ABI on some platforms (see issue #10308). | |
718 | /// | |
719 | /// For nominal types, in some cases, we need to use LLVM named structs | |
720 | /// and fill in the actual contents in a second pass to prevent | |
721 | /// unbounded recursion; see also the comments in `trans::type_of`. | |
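/// (Illustrative sketch: a recursive type such as
/// `struct List { next: Option<Box<List>> }` is first given an opaque named
/// struct via `incomplete_type_of`, and its body is filled in afterwards by
/// `finish_type_of` below.)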
722 | pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>) -> Type { | |
e9174d1e SL |
723 | let c = generic_type_of(cx, r, None, false, false, false); |
724 | assert!(!c.needs_drop_flag); | |
725 | c.prefix | |
1a4d82fc | 726 | } |
e9174d1e SL |
727 | |
728 | ||
1a4d82fc JJ |
729 | // Pass dst=true if the type you are passing is a DST. Yes, we could figure |
730 | // this out, but if you call this on an unsized type without realising it, you | |
731 | // are going to get the wrong type (it will not include the unsized parts of it). | |
732 | pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, | |
733 | r: &Repr<'tcx>, dst: bool) -> Type { | |
e9174d1e SL |
734 | let c = generic_type_of(cx, r, None, true, dst, false); |
735 | assert!(!c.needs_drop_flag); | |
736 | c.prefix | |
737 | } | |
738 | pub fn sizing_type_context_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, | |
739 | r: &Repr<'tcx>, dst: bool) -> TypeContext { | |
740 | generic_type_of(cx, r, None, true, dst, true) | |
1a4d82fc JJ |
741 | } |
742 | pub fn incomplete_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, | |
743 | r: &Repr<'tcx>, name: &str) -> Type { | |
e9174d1e SL |
744 | let c = generic_type_of(cx, r, Some(name), false, false, false); |
745 | assert!(!c.needs_drop_flag); | |
746 | c.prefix | |
1a4d82fc JJ |
747 | } |
748 | pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, | |
749 | r: &Repr<'tcx>, llty: &mut Type) { | |
750 | match *r { | |
751 | CEnum(..) | General(..) | RawNullablePointer { .. } => { } | |
752 | Univariant(ref st, _) | StructWrappedNullablePointer { nonnull: ref st, .. } => | |
c34b1796 | 753 | llty.set_struct_body(&struct_llfields(cx, st, false, false), |
1a4d82fc JJ |
754 | st.packed) |
755 | } | |
756 | } | |
757 | ||
758 | fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, | |
759 | r: &Repr<'tcx>, | |
760 | name: Option<&str>, | |
761 | sizing: bool, | |
e9174d1e SL |
762 | dst: bool, |
763 | delay_drop_flag: bool) -> TypeContext { | |
764 | debug!("adt::generic_type_of r: {:?} name: {:?} sizing: {} dst: {} delay_drop_flag: {}", | |
765 | r, name, sizing, dst, delay_drop_flag); | |
1a4d82fc | 766 | match *r { |
e9174d1e SL |
767 | CEnum(ity, _, _) => TypeContext::direct(ll_inttype(cx, ity)), |
768 | RawNullablePointer { nnty, .. } => | |
769 | TypeContext::direct(type_of::sizing_type_of(cx, nnty)), | |
770 | StructWrappedNullablePointer { nonnull: ref st, .. } => { | |
1a4d82fc JJ |
771 | match name { |
772 | None => { | |
e9174d1e SL |
773 | TypeContext::direct( |
774 | Type::struct_(cx, &struct_llfields(cx, st, sizing, dst), | |
775 | st.packed)) | |
776 | } | |
777 | Some(name) => { | |
778 | assert_eq!(sizing, false); | |
779 | TypeContext::direct(Type::named_struct(cx, name)) | |
1a4d82fc | 780 | } |
1a4d82fc JJ |
781 | } |
782 | } | |
e9174d1e SL |
783 | Univariant(ref st, dtor_needed) => { |
784 | let dtor_needed = dtor_needed != 0; | |
785 | match name { | |
786 | None => { | |
787 | let mut fields = struct_llfields(cx, st, sizing, dst); | |
788 | if delay_drop_flag && dtor_needed { | |
789 | fields.pop(); | |
790 | } | |
791 | TypeContext::may_need_drop_flag( | |
792 | Type::struct_(cx, &fields, | |
793 | st.packed), | |
794 | delay_drop_flag && dtor_needed) | |
795 | } | |
796 | Some(name) => { | |
797 | // Hypothesis: `named_struct`s can never need a | |
798 | // drop flag. (... needs validation.) | |
799 | assert_eq!(sizing, false); | |
800 | TypeContext::direct(Type::named_struct(cx, name)) | |
801 | } | |
802 | } | |
803 | } | |
804 | General(ity, ref sts, dtor_needed) => { | |
805 | let dtor_needed = dtor_needed != 0; | |
1a4d82fc JJ |
806 | // We need a representation that has: |
807 | // * The alignment of the most-aligned field | |
808 | // * The size of the largest variant (rounded up to that alignment) | |
809 | // * No alignment padding anywhere any variant has actual data | |
810 | // (currently matters only for enums small enough to be immediate) | |
811 | // * The discriminant in an obvious place. | |
812 | // | |
813 | // So we start with the discriminant, pad it up to the alignment with | |
814 | // more of its own type, then use alignment-sized ints to get the rest | |
815 | // of the size. | |
816 | // | |
817 | // FIXME #10604: this breaks when vector types are present. | |
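// (Illustrative, on a typical 64-bit target: with a u8 discriminant, 8-byte
// alignment and a 16-byte largest variant, the general type becomes
// { i8, [7 x i8], [1 x i64] }: the discriminant, padding of its own type up
// to one alignment unit, then alignment-sized fill for the rest.)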
85aaf69f | 818 | let (size, align) = union_size_and_align(&sts[..]); |
1a4d82fc | 819 | let align_s = align as u64; |
85aaf69f SL |
820 | assert_eq!(size % align_s, 0); |
821 | let align_units = size / align_s - 1; | |
822 | ||
1a4d82fc JJ |
823 | let discr_ty = ll_inttype(cx, ity); |
824 | let discr_size = machine::llsize_of_alloc(cx, discr_ty); | |
1a4d82fc JJ |
825 | let fill_ty = match align_s { |
826 | 1 => Type::array(&Type::i8(cx), align_units), | |
827 | 2 => Type::array(&Type::i16(cx), align_units), | |
828 | 4 => Type::array(&Type::i32(cx), align_units), | |
829 | 8 if machine::llalign_of_min(cx, Type::i64(cx)) == 8 => | |
830 | Type::array(&Type::i64(cx), align_units), | |
831 | a if a.count_ones() == 1 => Type::array(&Type::vector(&Type::i32(cx), a / 4), | |
832 | align_units), | |
833 | _ => panic!("unsupported enum alignment: {}", align) | |
834 | }; | |
835 | assert_eq!(machine::llalign_of_min(cx, fill_ty), align); | |
836 | assert_eq!(align_s % discr_size, 0); | |
e9174d1e SL |
837 | let mut fields: Vec<Type> = |
838 | [discr_ty, | |
839 | Type::array(&discr_ty, align_s / discr_size - 1), | |
840 | fill_ty].iter().cloned().collect(); | |
841 | if delay_drop_flag && dtor_needed { | |
842 | fields.pop(); | |
843 | } | |
1a4d82fc | 844 | match name { |
e9174d1e SL |
845 | None => { |
846 | TypeContext::may_need_drop_flag( | |
847 | Type::struct_(cx, &fields[..], false), | |
848 | delay_drop_flag && dtor_needed) | |
849 | } | |
1a4d82fc JJ |
850 | Some(name) => { |
851 | let mut llty = Type::named_struct(cx, name); | |
85aaf69f | 852 | llty.set_struct_body(&fields[..], false); |
e9174d1e SL |
853 | TypeContext::may_need_drop_flag( |
854 | llty, | |
855 | delay_drop_flag && dtor_needed) | |
1a4d82fc JJ |
856 | } |
857 | } | |
858 | } | |
859 | } | |
860 | } | |
861 | ||
862 | fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, st: &Struct<'tcx>, | |
863 | sizing: bool, dst: bool) -> Vec<Type> { | |
864 | if sizing { | |
865 | st.fields.iter().filter(|&ty| !dst || type_is_sized(cx.tcx(), *ty)) | |
866 | .map(|&ty| type_of::sizing_type_of(cx, ty)).collect() | |
867 | } else { | |
c34b1796 | 868 | st.fields.iter().map(|&ty| type_of::in_memory_type_of(cx, ty)).collect() |
1a4d82fc JJ |
869 | } |
870 | } | |
871 | ||
872 | /// Obtain a representation of the discriminant sufficient to translate | |
873 | /// destructuring; this may or may not involve the actual discriminant. | |
874 | /// | |
875 | /// This should ideally be less tightly tied to `_match`. | |
876 | pub fn trans_switch<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, | |
7453a54e SL |
877 | r: &Repr<'tcx>, |
878 | scrutinee: ValueRef, | |
879 | range_assert: bool) | |
1a4d82fc JJ |
880 | -> (_match::BranchKind, Option<ValueRef>) { |
881 | match *r { | |
882 | CEnum(..) | General(..) | | |
883 | RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => { | |
7453a54e SL |
884 | (_match::Switch, Some(trans_get_discr(bcx, r, scrutinee, None, |
885 | range_assert))) | |
1a4d82fc JJ |
886 | } |
887 | Univariant(..) => { | |
d9579d0f | 888 | // N.B.: Univariant means <= 1 enum variants (*not* == 1 variants). |
1a4d82fc JJ |
889 | (_match::Single, None) |
890 | } | |
891 | } | |
892 | } | |
893 | ||
62682a34 SL |
894 | pub fn is_discr_signed<'tcx>(r: &Repr<'tcx>) -> bool { |
895 | match *r { | |
896 | CEnum(ity, _, _) => ity.is_signed(), | |
897 | General(ity, _, _) => ity.is_signed(), | |
898 | Univariant(..) => false, | |
899 | RawNullablePointer { .. } => false, | |
900 | StructWrappedNullablePointer { .. } => false, | |
901 | } | |
902 | } | |
1a4d82fc JJ |
903 | |
904 | /// Obtain the actual discriminant of a value. | |
905 | pub fn trans_get_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>, | |
7453a54e SL |
906 | scrutinee: ValueRef, cast_to: Option<Type>, |
907 | range_assert: bool) | |
1a4d82fc | 908 | -> ValueRef { |
1a4d82fc | 909 | debug!("trans_get_discr r: {:?}", r); |
62682a34 | 910 | let val = match *r { |
7453a54e SL |
911 | CEnum(ity, min, max) => { |
912 | load_discr(bcx, ity, scrutinee, min, max, range_assert) | |
913 | } | |
1a4d82fc | 914 | General(ity, ref cases, _) => { |
e9174d1e | 915 | let ptr = StructGEP(bcx, scrutinee, 0); |
7453a54e SL |
916 | load_discr(bcx, ity, ptr, Disr(0), Disr(cases.len() as u64 - 1), |
917 | range_assert) | |
1a4d82fc | 918 | } |
62682a34 | 919 | Univariant(..) => C_u8(bcx.ccx(), 0), |
1a4d82fc | 920 | RawNullablePointer { nndiscr, nnty, .. } => { |
9cc50fc6 | 921 | let cmp = if nndiscr == Disr(0) { IntEQ } else { IntNE }; |
1a4d82fc | 922 | let llptrty = type_of::sizing_type_of(bcx.ccx(), nnty); |
62682a34 | 923 | ICmp(bcx, cmp, Load(bcx, scrutinee), C_null(llptrty), DebugLoc::None) |
1a4d82fc JJ |
924 | } |
925 | StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => { | |
62682a34 | 926 | struct_wrapped_nullable_bitdiscr(bcx, nndiscr, discrfield, scrutinee) |
1a4d82fc | 927 | } |
62682a34 | 928 | }; |
1a4d82fc JJ |
929 | match cast_to { |
930 | None => val, | |
62682a34 | 931 | Some(llty) => if is_discr_signed(r) { SExt(bcx, val, llty) } else { ZExt(bcx, val, llty) } |
1a4d82fc JJ |
932 | } |
933 | } | |
934 | ||
935 | fn struct_wrapped_nullable_bitdiscr(bcx: Block, nndiscr: Disr, discrfield: &DiscrField, | |
936 | scrutinee: ValueRef) -> ValueRef { | |
85aaf69f | 937 | let llptrptr = GEPi(bcx, scrutinee, &discrfield[..]); |
1a4d82fc | 938 | let llptr = Load(bcx, llptrptr); |
9cc50fc6 | 939 | let cmp = if nndiscr == Disr(0) { IntEQ } else { IntNE }; |
85aaf69f | 940 | ICmp(bcx, cmp, llptr, C_null(val_ty(llptr)), DebugLoc::None) |
1a4d82fc JJ |
941 | } |
942 | ||
943 | /// Helper for cases where the discriminant is simply loaded. | |
7453a54e SL |
944 | fn load_discr(bcx: Block, ity: IntType, ptr: ValueRef, min: Disr, max: Disr, |
945 | range_assert: bool) | |
1a4d82fc JJ |
946 | -> ValueRef { |
947 | let llty = ll_inttype(bcx.ccx(), ity); | |
948 | assert_eq!(val_ty(ptr), llty.ptr_to()); | |
949 | let bits = machine::llbitsize_of_real(bcx.ccx(), llty); | |
950 | assert!(bits <= 64); | |
9cc50fc6 SL |
951 | let bits = bits as usize; |
952 | let mask = Disr(!0u64 >> (64 - bits)); | |
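// e.g. for an 8-bit discriminant, bits == 8 and mask == Disr(0xff), so the
// wrap-around comparison below is performed modulo the discriminant's width.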
c34b1796 AL |
953 | // For a (max) discr of -1, max will be `-1 as usize`, which overflows. |
954 | // However, that is fine here (it would still represent the full range), | |
7453a54e | 955 | if max.wrapping_add(Disr(1)) & mask == min & mask || !range_assert { |
1a4d82fc JJ |
956 | // i.e., if the range is everything. The lo==hi case would be |
957 | // rejected by the LLVM verifier (it would mean either an | |
958 | // empty set, which is impossible, or the entire range of the | |
959 | // type, which is pointless). | |
960 | Load(bcx, ptr) | |
961 | } else { | |
962 | // llvm::ConstantRange can deal with ranges that wrap around, | |
963 | // so an overflow on (max + 1) is fine. | |
9cc50fc6 | 964 | LoadRangeAssert(bcx, ptr, min.0, max.0.wrapping_add(1), /* signed: */ True) |
1a4d82fc JJ |
965 | } |
966 | } | |
967 | ||
968 | /// Yield information about how to dispatch a case of the | |
969 | /// discriminant-like value returned by `trans_switch`. | |
970 | /// | |
971 | /// This should ideally be less tightly tied to `_match`. | |
972 | pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr, discr: Disr) | |
92a42be0 | 973 | -> ValueRef { |
1a4d82fc JJ |
974 | match *r { |
975 | CEnum(ity, _, _) => { | |
9cc50fc6 | 976 | C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true) |
1a4d82fc JJ |
977 | } |
978 | General(ity, _, _) => { | |
9cc50fc6 | 979 | C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true) |
1a4d82fc JJ |
980 | } |
981 | Univariant(..) => { | |
982 | bcx.ccx().sess().bug("no cases for univariants or structs") | |
983 | } | |
984 | RawNullablePointer { .. } | | |
985 | StructWrappedNullablePointer { .. } => { | |
9cc50fc6 SL |
986 | assert!(discr == Disr(0) || discr == Disr(1)); |
987 | C_bool(bcx.ccx(), discr != Disr(0)) | |
1a4d82fc JJ |
988 | } |
989 | } | |
990 | } | |
991 | ||
992 | /// Set the discriminant for a new value of the given case of the given | |
993 | /// representation. | |
994 | pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>, | |
995 | val: ValueRef, discr: Disr) { | |
996 | match *r { | |
997 | CEnum(ity, min, max) => { | |
998 | assert_discr_in_range(ity, min, max, discr); | |
9cc50fc6 | 999 | Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true), |
d9579d0f | 1000 | val); |
1a4d82fc JJ |
1001 | } |
1002 | General(ity, ref cases, dtor) => { | |
c34b1796 | 1003 | if dtor_active(dtor) { |
92a42be0 | 1004 | let ptr = trans_field_ptr(bcx, r, MaybeSizedValue::sized(val), discr, |
9cc50fc6 | 1005 | cases[discr.0 as usize].fields.len() - 2); |
e9174d1e | 1006 | Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED), ptr); |
1a4d82fc | 1007 | } |
9cc50fc6 | 1008 | Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true), |
e9174d1e | 1009 | StructGEP(bcx, val, 0)); |
1a4d82fc JJ |
1010 | } |
1011 | Univariant(ref st, dtor) => { | |
9cc50fc6 | 1012 | assert_eq!(discr, Disr(0)); |
c34b1796 | 1013 | if dtor_active(dtor) { |
e9174d1e SL |
1014 | Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED), |
1015 | StructGEP(bcx, val, st.fields.len() - 1)); | |
1a4d82fc JJ |
1016 | } |
1017 | } | |
1018 | RawNullablePointer { nndiscr, nnty, ..} => { | |
1019 | if discr != nndiscr { | |
1020 | let llptrty = type_of::sizing_type_of(bcx.ccx(), nnty); | |
d9579d0f | 1021 | Store(bcx, C_null(llptrty), val); |
1a4d82fc JJ |
1022 | } |
1023 | } | |
1024 | StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => { | |
1025 | if discr != nndiscr { | |
85aaf69f | 1026 | let llptrptr = GEPi(bcx, val, &discrfield[..]); |
1a4d82fc | 1027 | let llptrty = val_ty(llptrptr).element_type(); |
d9579d0f | 1028 | Store(bcx, C_null(llptrty), llptrptr); |
1a4d82fc JJ |
1029 | } |
1030 | } | |
1031 | } | |
1032 | } | |
1033 | ||
1034 | fn assert_discr_in_range(ity: IntType, min: Disr, max: Disr, discr: Disr) { | |
1035 | match ity { | |
9cc50fc6 SL |
1036 | attr::UnsignedInt(_) => { |
1037 | assert!(min <= discr); | |
1038 | assert!(discr <= max) | |
1039 | }, | |
1040 | attr::SignedInt(_) => { | |
1041 | assert!(min.0 as i64 <= discr.0 as i64); | |
1042 | assert!(discr.0 as i64 <= max.0 as i64); | |
1043 | }, | |
1a4d82fc JJ |
1044 | } |
1045 | } | |
1046 | ||
1047 | /// The number of fields in a given case; for use when obtaining this | |
1048 | /// information from the type or definition is less convenient. | |
c34b1796 | 1049 | pub fn num_args(r: &Repr, discr: Disr) -> usize { |
1a4d82fc JJ |
1050 | match *r { |
1051 | CEnum(..) => 0, | |
1052 | Univariant(ref st, dtor) => { | |
9cc50fc6 | 1053 | assert_eq!(discr, Disr(0)); |
c34b1796 | 1054 | st.fields.len() - (if dtor_active(dtor) { 1 } else { 0 }) |
1a4d82fc JJ |
1055 | } |
1056 | General(_, ref cases, dtor) => { | |
9cc50fc6 | 1057 | cases[discr.0 as usize].fields.len() - 1 - (if dtor_active(dtor) { 1 } else { 0 }) |
1a4d82fc JJ |
1058 | } |
1059 | RawNullablePointer { nndiscr, ref nullfields, .. } => { | |
1060 | if discr == nndiscr { 1 } else { nullfields.len() } | |
1061 | } | |
1062 | StructWrappedNullablePointer { ref nonnull, nndiscr, | |
1063 | ref nullfields, .. } => { | |
1064 | if discr == nndiscr { nonnull.fields.len() } else { nullfields.len() } | |
1065 | } | |
1066 | } | |
1067 | } | |
1068 | ||
1069 | /// Access a field, at a point when the value's case is known. | |
1070 | pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>, | |
92a42be0 | 1071 | val: MaybeSizedValue, discr: Disr, ix: usize) -> ValueRef { |
1a4d82fc JJ |
1072 | // Note: if this ever needs to generate conditionals (e.g., if we |
1073 | // decide to do some kind of cdr-coding-like non-unique repr | |
1074 | // someday), it will need to return a possibly-new bcx as well. | |
1075 | match *r { | |
1076 | CEnum(..) => { | |
1077 | bcx.ccx().sess().bug("element access in C-like enum") | |
1078 | } | |
1079 | Univariant(ref st, _dtor) => { | |
9cc50fc6 | 1080 | assert_eq!(discr, Disr(0)); |
1a4d82fc JJ |
1081 | struct_field_ptr(bcx, st, val, ix, false) |
1082 | } | |
1083 | General(_, ref cases, _) => { | |
9cc50fc6 | 1084 | struct_field_ptr(bcx, &cases[discr.0 as usize], val, ix + 1, true) |
1a4d82fc JJ |
1085 | } |
1086 | RawNullablePointer { nndiscr, ref nullfields, .. } | | |
1087 | StructWrappedNullablePointer { nndiscr, ref nullfields, .. } if discr != nndiscr => { | |
1088 | // The unit-like case might have a nonzero number of unit-like fields. | |
1089 | // (e.g., `Result` or `Either` with `()` as one side.) | |
1090 | let ty = type_of::type_of(bcx.ccx(), nullfields[ix]); | |
1091 | assert_eq!(machine::llsize_of_alloc(bcx.ccx(), ty), 0); | |
1092 | // The contents of memory at this pointer can't matter, but use | |
1093 | // the value that's "reasonable" in case of pointer comparison. | |
92a42be0 | 1094 | PointerCast(bcx, val.value, ty.ptr_to()) |
1a4d82fc JJ |
1095 | } |
1096 | RawNullablePointer { nndiscr, nnty, .. } => { | |
1097 | assert_eq!(ix, 0); | |
1098 | assert_eq!(discr, nndiscr); | |
1099 | let ty = type_of::type_of(bcx.ccx(), nnty); | |
92a42be0 | 1100 | PointerCast(bcx, val.value, ty.ptr_to()) |
1a4d82fc JJ |
1101 | } |
1102 | StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { | |
1103 | assert_eq!(discr, nndiscr); | |
1104 | struct_field_ptr(bcx, nonnull, val, ix, false) | |
1105 | } | |
1106 | } | |
1107 | } | |
1108 | ||
92a42be0 | 1109 | pub fn struct_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, st: &Struct<'tcx>, val: MaybeSizedValue, |
c34b1796 | 1110 | ix: usize, needs_cast: bool) -> ValueRef { |
92a42be0 SL |
1111 | let ccx = bcx.ccx(); |
1112 | let ptr_val = if needs_cast { | |
1113 | let fields = st.fields.iter().map(|&ty| { | |
1114 | type_of::in_memory_type_of(ccx, ty) | |
1115 | }).collect::<Vec<_>>(); | |
85aaf69f | 1116 | let real_ty = Type::struct_(ccx, &fields[..], st.packed); |
92a42be0 | 1117 | PointerCast(bcx, val.value, real_ty.ptr_to()) |
1a4d82fc | 1118 | } else { |
92a42be0 | 1119 | val.value |
1a4d82fc JJ |
1120 | }; |
1121 | ||
92a42be0 SL |
1122 | let fty = st.fields[ix]; |
1123 | // Simple case - we can just GEP the field | |
1124 | // * First field - Always aligned properly | |
1125 | // * Packed struct - There is no alignment padding | |
1126 | // * Field is sized - pointer is properly aligned already | |
1127 | if ix == 0 || st.packed || type_is_sized(bcx.tcx(), fty) { | |
1128 | return StructGEP(bcx, ptr_val, ix); | |
1129 | } | |
1130 | ||
1131 | // If the type of the last field is [T] or str, then we don't need to do | |
1132 | // any adjustments | |
1133 | match fty.sty { | |
1134 | ty::TySlice(..) | ty::TyStr => { | |
1135 | return StructGEP(bcx, ptr_val, ix); | |
1136 | } | |
1137 | _ => () | |
1138 | } | |
1139 | ||
1140 | // There's no metadata available, log the case and just do the GEP. | |
1141 | if !val.has_meta() { | |
1142 | debug!("Unsized field `{}`, of `{}` has no metadata for adjustment", | |
1143 | ix, | |
1144 | bcx.val_to_string(ptr_val)); | |
1145 | return StructGEP(bcx, ptr_val, ix); | |
1146 | } | |
1147 | ||
1148 | let dbloc = DebugLoc::None; | |
1149 | ||
1150 | // We need to get the pointer manually now. | |
1151 | // We do this by casting to a *i8, then offsetting it by the appropriate amount. | |
1152 | // We do this instead of, say, simply adjusting the pointer from the result of a GEP | |
1153 | // because the field may have an arbitrary alignment in the LLVM representation | |
1154 | // anyway. | |
1155 | // | |
1156 | // To demonstrate: | |
1157 | // struct Foo<T: ?Sized> { | |
1158 | // x: u16, | |
1159 | // y: T | |
1160 | // } | |
1161 | // | |
1162 | // The type Foo<Foo<Trait>> is represented in LLVM as { u16, { u16, u8 }}, meaning that | |
1163 | // the `y` field has 16-bit alignment. | |
1164 | ||
1165 | let meta = val.meta; | |
1166 | ||
9cc50fc6 | 1167 | // Calculate the unaligned offset of the unsized field. |
92a42be0 SL |
1168 | let mut offset = 0; |
1169 | for &ty in &st.fields[0..ix] { | |
1170 | let llty = type_of::sizing_type_of(ccx, ty); | |
1171 | let type_align = type_of::align_of(ccx, ty); | |
1172 | offset = roundup(offset, type_align); | |
1173 | offset += machine::llsize_of_alloc(ccx, llty); | |
1174 | } | |
1175 | let unaligned_offset = C_uint(bcx.ccx(), offset); | |
1176 | ||
1177 | // Get the alignment of the field | |
1178 | let (_, align) = glue::size_and_align_of_dst(bcx, fty, meta); | |
1179 | ||
1180 | // Bump the unaligned offset up to the appropriate alignment using the | |
1181 | // following expression: | |
1182 | // | |
1183 | // (unaligned offset + (align - 1)) & -align | |
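// For example, an unaligned offset of 2 with an alignment of 4 gives
// (2 + 3) & -4 == 4, bumping the offset up to the next multiple of 4.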
1184 | ||
1185 | // Calculate offset | |
1186 | let align_sub_1 = Sub(bcx, align, C_uint(bcx.ccx(), 1u64), dbloc); | |
1187 | let offset = And(bcx, | |
1188 | Add(bcx, unaligned_offset, align_sub_1, dbloc), | |
1189 | Neg(bcx, align, dbloc), | |
1190 | dbloc); | |
1191 | ||
1192 | debug!("struct_field_ptr: DST field offset: {}", | |
1193 | bcx.val_to_string(offset)); | |
1194 | ||
1195 | // Cast and adjust pointer | |
1196 | let byte_ptr = PointerCast(bcx, ptr_val, Type::i8p(bcx.ccx())); | |
1197 | let byte_ptr = GEP(bcx, byte_ptr, &[offset]); | |
1198 | ||
1199 | // Finally, cast back to the type expected | |
1200 | let ll_fty = type_of::in_memory_type_of(bcx.ccx(), fty); | |
1201 | debug!("struct_field_ptr: Field type is {}", ll_fty.to_string()); | |
1202 | PointerCast(bcx, byte_ptr, ll_fty.ptr_to()) | |
1a4d82fc JJ |
1203 | } |
1204 | ||
1205 | pub fn fold_variants<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, | |
1206 | r: &Repr<'tcx>, | |
1207 | value: ValueRef, | |
1208 | mut f: F) | |
1209 | -> Block<'blk, 'tcx> where | |
1210 | F: FnMut(Block<'blk, 'tcx>, &Struct<'tcx>, ValueRef) -> Block<'blk, 'tcx>, | |
1211 | { | |
1212 | let fcx = bcx.fcx; | |
1213 | match *r { | |
1214 | Univariant(ref st, _) => { | |
1215 | f(bcx, st, value) | |
1216 | } | |
1217 | General(ity, ref cases, _) => { | |
1218 | let ccx = bcx.ccx(); | |
62682a34 SL |
1219 | |
1220 | // See the comments in trans/base.rs for more information (inside | |
1221 | // iter_structural_ty), but the gist here is that if the enum's | |
1222 | // discriminant is *not* in the range that we're expecting (in which | |
1223 | // case we'll take the fall-through branch on the switch | |
1224 | // instruction) then we can't just optimize this to an Unreachable | |
1225 | // block. | |
1226 | // | |
1227 | // Currently we still have filling drop, so this means that the drop | |
1228 | // glue for enums may be called when the enum has been paved over | |
1229 | // with the "I've been dropped" value. In this case the default | |
1230 | // branch of the switch instruction will actually be taken at | |
1231 | // runtime, so the basic block isn't actually unreachable, so we | |
1232 | // need to make it do something with defined behavior. In this case | |
1233 | // we just return early from the function. | |
7453a54e SL |
1234 | // |
1235 | // Note that this is also why the `trans_get_discr` below has | |
1236 | // `false` to indicate that loading the discriminant should | |
1237 | // not have a range assert. | |
62682a34 SL |
1238 | let ret_void_cx = fcx.new_temp_block("enum-variant-iter-ret-void"); |
1239 | RetVoid(ret_void_cx, DebugLoc::None); | |
1a4d82fc | 1240 | |
7453a54e | 1241 | let discr_val = trans_get_discr(bcx, r, value, None, false); |
62682a34 | 1242 | let llswitch = Switch(bcx, discr_val, ret_void_cx.llbb, cases.len()); |
1a4d82fc JJ |
1243 | let bcx_next = fcx.new_temp_block("enum-variant-iter-next"); |
1244 | ||
1245 | for (discr, case) in cases.iter().enumerate() { | |
1246 | let mut variant_cx = fcx.new_temp_block( | |
c34b1796 | 1247 | &format!("enum-variant-iter-{}", &discr.to_string()) |
1a4d82fc JJ |
1248 | ); |
1249 | let rhs_val = C_integral(ll_inttype(ccx, ity), discr as u64, true); | |
1250 | AddCase(llswitch, rhs_val, variant_cx.llbb); | |
1251 | ||
1252 | let fields = case.fields.iter().map(|&ty| | |
1253 | type_of::type_of(bcx.ccx(), ty)).collect::<Vec<_>>(); | |
85aaf69f | 1254 | let real_ty = Type::struct_(ccx, &fields[..], case.packed); |
1a4d82fc JJ |
1255 | let variant_value = PointerCast(variant_cx, value, real_ty.ptr_to()); |
1256 | ||
1257 | variant_cx = f(variant_cx, case, variant_value); | |
85aaf69f | 1258 | Br(variant_cx, bcx_next.llbb, DebugLoc::None); |
1a4d82fc JJ |
1259 | } |
1260 | ||
1261 | bcx_next | |
1262 | } | |
1263 | _ => unreachable!() | |
1264 | } | |
1265 | } | |
1266 | ||
1267 | /// Access the struct drop flag, if present. | |
d9579d0f AL |
1268 | pub fn trans_drop_flag_ptr<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, |
1269 | r: &Repr<'tcx>, | |
1270 | val: ValueRef) | |
1a4d82fc JJ |
1271 | -> datum::DatumBlock<'blk, 'tcx, datum::Expr> |
1272 | { | |
1273 | let tcx = bcx.tcx(); | |
c1a9b12d | 1274 | let ptr_ty = bcx.tcx().mk_imm_ptr(tcx.dtor_type()); |
1a4d82fc | 1275 | match *r { |
c34b1796 | 1276 | Univariant(ref st, dtor) if dtor_active(dtor) => { |
e9174d1e | 1277 | let flag_ptr = StructGEP(bcx, val, st.fields.len() - 1); |
1a4d82fc JJ |
1278 | datum::immediate_rvalue_bcx(bcx, flag_ptr, ptr_ty).to_expr_datumblock() |
1279 | } | |
c34b1796 | 1280 | General(_, _, dtor) if dtor_active(dtor) => { |
1a4d82fc JJ |
1281 | let fcx = bcx.fcx; |
1282 | let custom_cleanup_scope = fcx.push_custom_cleanup_scope(); | |
1283 | let scratch = unpack_datum!(bcx, datum::lvalue_scratch_datum( | |
c34b1796 | 1284 | bcx, tcx.dtor_type(), "drop_flag", |
9cc50fc6 SL |
1285 | InitAlloca::Uninit("drop flag itself has no dtor"), |
1286 | cleanup::CustomScope(custom_cleanup_scope), (), |_, bcx, _| { | |
1287 | debug!("no-op populate call for trans_drop_flag_ptr on dtor_type={:?}", | |
1288 | tcx.dtor_type()); | |
1289 | bcx | |
1290 | } | |
1a4d82fc JJ |
1291 | )); |
1292 | bcx = fold_variants(bcx, r, val, |variant_cx, st, value| { | |
92a42be0 SL |
1293 | let ptr = struct_field_ptr(variant_cx, st, MaybeSizedValue::sized(value), |
1294 | (st.fields.len() - 1), false); | |
c1a9b12d | 1295 | datum::Datum::new(ptr, ptr_ty, datum::Lvalue::new("adt::trans_drop_flag_ptr")) |
1a4d82fc JJ |
1296 | .store_to(variant_cx, scratch.val) |
1297 | }); | |
1298 | let expr_datum = scratch.to_expr_datum(); | |
1299 | fcx.pop_custom_cleanup_scope(custom_cleanup_scope); | |
1300 | datum::DatumBlock::new(bcx, expr_datum) | |
1301 | } | |
1302 | _ => bcx.ccx().sess().bug("tried to get drop flag of non-droppable type") | |
1303 | } | |
1304 | } | |
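// Editorial note: under the old drop-flag scheme assumed by this code, a
// droppable value carries its flag as an extra trailing field, so a
// `Univariant` struct with a dtor is laid out roughly as
//   { field_0, ..., field_n, drop_flag }
// and the pointer returned above is simply a GEP to that last slot. For
// `General` enums the flag sits at the end of each variant's struct, which
// is why the access has to go through `fold_variants`.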
1305 | ||
1306 | /// Construct a constant value, suitable for initializing a | |
1307 | /// GlobalVariable, given a case and constant values for its fields. | |
1308 | /// Note that this may have a different LLVM type (and different | |
1309 | /// alignment!) from the representation's `type_of`, so it needs a | |
1310 | /// pointer cast before use. | |
1311 | /// | |
1312 | /// The LLVM type system does not directly support unions, and only | |
1313 | /// pointers can be bitcast, so a constant (and, by extension, the | |
1314 | /// GlobalVariable initialized by it) will have a type that can vary | |
1315 | /// depending on which case of an enum it is. | |
1316 | /// | |
1317 | /// To understand the alignment situation, consider `enum E { V64(u64), | |
1318 | /// V32(u32, u32) }` on Windows. The type has 8-byte alignment to | |
1319 | /// accommodate the u64, but `V32(x, y)` would have LLVM type `{i32, | |
1320 | /// i32, i32}`, which is 4-byte aligned. | |
1321 | /// | |
1322 | /// Currently the returned value has the same size as the type, but | |
1323 | /// this could be changed in the future to avoid allocating unnecessary | |
1324 | /// space after values of shorter-than-maximum cases. | |
1325 | pub fn trans_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>, discr: Disr, | |
1326 | vals: &[ValueRef]) -> ValueRef { | |
1327 | match *r { | |
1328 | CEnum(ity, min, max) => { | |
1329 | assert_eq!(vals.len(), 0); | |
1330 | assert_discr_in_range(ity, min, max, discr); | |
9cc50fc6 | 1331 | C_integral(ll_inttype(ccx, ity), discr.0, true) |
1a4d82fc JJ |
1332 | } |
1333 | General(ity, ref cases, _) => { | |
9cc50fc6 | 1334 | let case = &cases[discr.0 as usize]; |
85aaf69f | 1335 | let (max_sz, _) = union_size_and_align(&cases[..]); |
9cc50fc6 | 1336 | let lldiscr = C_integral(ll_inttype(ccx, ity), discr.0 as u64, true); |
1a4d82fc | 1337 | let mut f = vec![lldiscr]; |
92a42be0 | 1338 | f.extend_from_slice(vals); |
85aaf69f | 1339 | let mut contents = build_const_struct(ccx, case, &f[..]); |
92a42be0 | 1340 | contents.extend_from_slice(&[padding(ccx, max_sz - case.size)]); |
85aaf69f | 1341 | C_struct(ccx, &contents[..], false) |
1a4d82fc JJ |
1342 | } |
1343 | Univariant(ref st, _dro) => { | |
9cc50fc6 | 1344 | assert_eq!(discr, Disr(0)); |
1a4d82fc | 1345 | let contents = build_const_struct(ccx, st, vals); |
85aaf69f | 1346 | C_struct(ccx, &contents[..], st.packed) |
1a4d82fc JJ |
1347 | } |
1348 | RawNullablePointer { nndiscr, nnty, .. } => { | |
1349 | if discr == nndiscr { | |
1350 | assert_eq!(vals.len(), 1); | |
1351 | vals[0] | |
1352 | } else { | |
1353 | C_null(type_of::sizing_type_of(ccx, nnty)) | |
1354 | } | |
1355 | } | |
1356 | StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { | |
1357 | if discr == nndiscr { | |
1358 | C_struct(ccx, &build_const_struct(ccx, | |
1359 | nonnull, | |
c34b1796 | 1360 | vals), |
1a4d82fc JJ |
1361 | false) |
1362 | } else { | |
1363 | let vals = nonnull.fields.iter().map(|&ty| { | |
1364 | // Always use null even if it's not the `discrfield`th | |
1365 | // field; see #8506. | |
1366 | C_null(type_of::sizing_type_of(ccx, ty)) | |
1367 | }).collect::<Vec<ValueRef>>(); | |
1368 | C_struct(ccx, &build_const_struct(ccx, | |
1369 | nonnull, | |
c34b1796 | 1370 | &vals[..]), |
1a4d82fc JJ |
1371 | false) |
1372 | } | |
1373 | } | |
1374 | } | |
1375 | } | |
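// Editorial sketch: for the `enum E { V64(u64), V32(u32, u32) }` example
// from the doc comment above, the constants built here look roughly like
//   V64(x)    ->  { <discr>, i64 x }
//   V32(x, y) ->  { <discr>, i32 x, i32 y, [pad x i8] undef }
// (the exact discriminant type and padding size depend on the chosen
// representation and target), so different cases of the same enum get
// different LLVM types and must be pointer-cast before use.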
1376 | ||
1377 | /// Compute struct field offsets relative to the start of the struct. | |
1378 | fn compute_struct_field_offsets<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, | |
1379 | st: &Struct<'tcx>) -> Vec<u64> { | |
1380 | let mut offsets = vec!(); | |
1381 | ||
1382 | let mut offset = 0; | |
85aaf69f | 1383 | for &ty in &st.fields { |
1a4d82fc JJ |
1384 | let llty = type_of::sizing_type_of(ccx, ty); |
1385 | if !st.packed { | |
1386 | let type_align = type_of::align_of(ccx, ty); | |
1387 | offset = roundup(offset, type_align); | |
1388 | } | |
1389 | offsets.push(offset); | |
1390 | offset += machine::llsize_of_alloc(ccx, llty); | |
1391 | } | |
1392 | assert_eq!(st.fields.len(), offsets.len()); | |
1393 | offsets | |
1394 | } | |
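// Editorial sketch (self-contained, no rustc internals): the same
// round-up-then-advance computation applied to hand-written (size, align)
// pairs. For fields of (4, 4), (8, 8) and (1, 1) it yields offsets
// [0, 8, 16], mirroring the loop above.
fn offsets_for(fields: &[(u64, u64)], packed: bool) -> Vec<u64> {
    let mut offsets = Vec::new();
    let mut offset = 0u64;
    for &(size, align) in fields {
        if !packed {
            // same rounding as `roundup` below
            offset = ((offset + align - 1) / align) * align;
        }
        offsets.push(offset);
        offset += size;
    }
    offsets
}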
1395 | ||
1396 | /// Building structs is a little complicated, because we might need to | |
1397 | /// insert padding if a field's value is less aligned than its type. | |
1398 | /// | |
1399 | /// Continuing the example from `trans_const`, a value of type `(u32, | |
1400 | /// E)` should have the `E` at offset 8, but if that field's | |
1401 | /// initializer is 4-byte aligned then simply translating the tuple as | |
1402 | /// a two-element struct will locate it at offset 4, and accesses to it | |
1403 | /// will read the wrong memory. | |
1404 | fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, | |
1405 | st: &Struct<'tcx>, vals: &[ValueRef]) | |
1406 | -> Vec<ValueRef> { | |
1407 | assert_eq!(vals.len(), st.fields.len()); | |
1408 | ||
1409 | let target_offsets = compute_struct_field_offsets(ccx, st); | |
1410 | ||
1411 | // offset of current value | |
1412 | let mut offset = 0; | |
1413 | let mut cfields = Vec::new(); | |
62682a34 | 1414 | for (&val, target_offset) in vals.iter().zip(target_offsets) { |
1a4d82fc JJ |
1415 | if !st.packed { |
1416 | let val_align = machine::llalign_of_min(ccx, val_ty(val)); | |
1417 | offset = roundup(offset, val_align); | |
1418 | } | |
1419 | if offset != target_offset { | |
1420 | cfields.push(padding(ccx, target_offset - offset)); | |
1421 | offset = target_offset; | |
1422 | } | |
1423 | assert!(!is_undef(val)); | |
1424 | cfields.push(val); | |
1425 | offset += machine::llsize_of_alloc(ccx, val_ty(val)); | |
1426 | } | |
1427 | ||
1428 | assert!(st.sized && offset <= st.size); | |
1429 | if offset != st.size { | |
1430 | cfields.push(padding(ccx, st.size - offset)); | |
1431 | } | |
1432 | ||
1433 | cfields | |
1434 | } | |
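// Editorial sketch: continuing the `(u32, E)` example from the doc comment
// above, if the `E` initializer is only 4-byte aligned while its target
// offset is 8, the constant is emitted roughly as
//   { u32 value, [4 x i8] undef, <E value> }
// so the `E` field still lands at offset 8; any gap between the last
// field's end and the struct's declared size is filled the same way.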
1435 | ||
1436 | fn padding(ccx: &CrateContext, size: u64) -> ValueRef { | |
1437 | C_undef(Type::array(&Type::i8(ccx), size)) | |
1438 | } | |
1439 | ||
1440 | // FIXME this utility routine should be somewhere more general | |
1441 | #[inline] | |
1442 | fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a } | |
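// Editorial examples: roundup(0, 4) == 0, roundup(5, 4) == 8,
// roundup(8, 4) == 8, roundup(9, 8) == 16. (`a` must be non-zero.)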
1443 | ||
1444 | /// Get the discriminant of a constant value. | |
1445 | pub fn const_get_discrim(ccx: &CrateContext, r: &Repr, val: ValueRef) -> Disr { | |
1446 | match *r { | |
1447 | CEnum(ity, _, _) => { | |
1448 | match ity { | |
9cc50fc6 SL |
1449 | attr::SignedInt(..) => Disr(const_to_int(val) as u64), |
1450 | attr::UnsignedInt(..) => Disr(const_to_uint(val)), | |
1a4d82fc JJ |
1451 | } |
1452 | } | |
1453 | General(ity, _, _) => { | |
1454 | match ity { | |
9cc50fc6 SL |
1455 | attr::SignedInt(..) => Disr(const_to_int(const_get_elt(ccx, val, &[0])) as u64), |
1456 | attr::UnsignedInt(..) => Disr(const_to_uint(const_get_elt(ccx, val, &[0]))) | |
1a4d82fc JJ |
1457 | } |
1458 | } | |
9cc50fc6 | 1459 | Univariant(..) => Disr(0), |
1a4d82fc JJ |
1460 | RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => { |
1461 | ccx.sess().bug("const discrim access of non c-like enum") | |
1462 | } | |
1463 | } | |
1464 | } | |
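// Editorial note: for `CEnum` the constant value *is* the discriminant,
// while for `General` the discriminant is element 0 of the constant struct
// built by `trans_const`, which is why it is read back with
// `const_get_elt(ccx, val, &[0])`.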
1465 | ||
1466 | /// Extract a field of a constant value, as appropriate for its | |
1467 | /// representation. | |
1468 | /// | |
1469 | /// (Not to be confused with `common::const_get_elt`, which operates on | |
1470 | /// raw LLVM-level structs and arrays.) | |
1471 | pub fn const_get_field(ccx: &CrateContext, r: &Repr, val: ValueRef, | |
c34b1796 | 1472 | _discr: Disr, ix: usize) -> ValueRef { |
1a4d82fc JJ |
1473 | match *r { |
1474 | CEnum(..) => ccx.sess().bug("element access in C-like enum const"), | |
1475 | Univariant(..) => const_struct_field(ccx, val, ix), | |
1476 | General(..) => const_struct_field(ccx, val, ix + 1), | |
1477 | RawNullablePointer { .. } => { | |
1478 | assert_eq!(ix, 0); | |
1479 | val | |
1480 | }, | |
1481 | StructWrappedNullablePointer{ .. } => const_struct_field(ccx, val, ix) | |
1482 | } | |
1483 | } | |
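// Editorial note: for `General`, element 0 of the constant is the
// discriminant (see `trans_const`), so payload field `ix` lives at struct
// element `ix + 1`; the nullable-pointer representations have no such shift.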
1484 | ||
1485 | /// Extract a field of a struct-like const, skipping our alignment padding. | |
c34b1796 | 1486 | fn const_struct_field(ccx: &CrateContext, val: ValueRef, ix: usize) -> ValueRef { |
1a4d82fc JJ |
1487 | // Get the ix-th non-undef element of the struct. |
1488 | let mut real_ix = 0; // actual position in the struct | |
1489 | let mut ix = ix; // logical index relative to real_ix | |
1490 | let mut field; | |
1491 | loop { | |
1492 | loop { | |
1493 | field = const_get_elt(ccx, val, &[real_ix]); | |
1494 | if !is_undef(field) { | |
1495 | break; | |
1496 | } | |
1497 | real_ix = real_ix + 1; | |
1498 | } | |
1499 | if ix == 0 { | |
1500 | return field; | |
1501 | } | |
1502 | ix = ix - 1; | |
1503 | real_ix = real_ix + 1; | |
1504 | } | |
1505 | } |
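// Editorial sketch: if a constant is laid out as
//   { field0, [pad] undef, field1, field2 }
// then logical index 1 skips the undef padding element and returns
// `field1` at real index 2. A self-contained replica of that walk, with
// `None` standing in for an undef padding slot:
fn nth_non_padding<T: Copy>(elems: &[Option<T>], ix: usize) -> T {
    // panics if `ix` runs past the real elements, like walking off the
    // end of the underlying struct would
    elems.iter().filter_map(|e| *e).nth(ix).unwrap()
}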