// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use llvm::ValueRef;
use rustc::ty::{self, layout, Ty, TypeFoldable};
use rustc::mir;
use rustc::mir::tcx::LvalueTy;
use rustc_data_structures::indexed_vec::Idx;
use adt;
use builder::Builder;
use common::{self, CrateContext, C_uint};
use consts;
use machine;
use type_of;
use type_::Type;
use value::Value;
use glue;

use std::ptr;
use std::ops;

use super::{MirContext, LocalRef};
use super::operand::OperandValue;
/// Tracks whether an lvalue is known to be aligned according to its
/// layout's ABI alignment, or may live inside a `#[repr(packed)]`
/// aggregate (in which case accesses must assume 1-byte alignment).
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Alignment {
    Packed,
    AbiAligned,
}

impl ops::BitOr for Alignment {
    type Output = Self;

    /// Combines two alignment facts. `Packed` is "contagious": if either
    /// side may be under-aligned, the combination must assume so too.
    fn bitor(self, rhs: Self) -> Self {
        match (self, rhs) {
            (Alignment::Packed, _) => Alignment::Packed,
            (Alignment::AbiAligned, a) => a,
        }
    }
}

impl Alignment {
    /// Maps a struct layout's `packed` flag to an `Alignment`.
    pub fn from_packed(packed: bool) -> Self {
        if packed {
            Alignment::Packed
        } else {
            Alignment::AbiAligned
        }
    }

    /// Returns the explicit alignment (in bytes) to pass to load/store
    /// instructions, or `None` to let the type's ABI alignment apply.
    pub fn to_align(self) -> Option<u32> {
        match self {
            Alignment::Packed => Some(1),
            Alignment::AbiAligned => None,
        }
    }

    /// Like `to_align`, but in the ABI-aligned case returns the given
    /// known alignment instead of `None`.
    pub fn min_with(self, align: u32) -> Option<u32> {
        match self {
            Alignment::Packed => Some(1),
            Alignment::AbiAligned => Some(align),
        }
    }
}
3157f602 | 73 | #[derive(Copy, Clone, Debug)] |
92a42be0 SL |
74 | pub struct LvalueRef<'tcx> { |
75 | /// Pointer to the contents of the lvalue | |
76 | pub llval: ValueRef, | |
77 | ||
78 | /// This lvalue's extra data if it is unsized, or null | |
79 | pub llextra: ValueRef, | |
80 | ||
81 | /// Monomorphized type of this lvalue, including variant information | |
82 | pub ty: LvalueTy<'tcx>, | |
32a655c1 SL |
83 | |
84 | /// Whether this lvalue is known to be aligned according to its layout | |
85 | pub alignment: Alignment, | |
92a42be0 SL |
86 | } |
88 | impl<'a, 'tcx> LvalueRef<'tcx> { |
89 | pub fn new_sized(llval: ValueRef, lvalue_ty: LvalueTy<'tcx>, | |
90 | alignment: Alignment) -> LvalueRef<'tcx> { | |
91 | LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty, alignment: alignment } | |
92a42be0 SL |
92 | } |
93 | ||
32a655c1 SL |
94 | pub fn new_sized_ty(llval: ValueRef, ty: Ty<'tcx>, alignment: Alignment) -> LvalueRef<'tcx> { |
95 | LvalueRef::new_sized(llval, LvalueTy::from_ty(ty), alignment) | |
92a42be0 | 96 | } |
a7813a04 | 97 | |
32a655c1 SL |
98 | pub fn new_unsized_ty(llval: ValueRef, llextra: ValueRef, ty: Ty<'tcx>, alignment: Alignment) |
99 | -> LvalueRef<'tcx> { | |
100 | LvalueRef { | |
101 | llval: llval, | |
102 | llextra: llextra, | |
103 | ty: LvalueTy::from_ty(ty), | |
104 | alignment: alignment, | |
105 | } | |
106 | } | |
107 | ||
108 | pub fn alloca(bcx: &Builder<'a, 'tcx>, ty: Ty<'tcx>, name: &str) -> LvalueRef<'tcx> { | |
109 | debug!("alloca({:?}: {:?})", name, ty); | |
110 | let tmp = bcx.alloca(type_of::type_of(bcx.ccx, ty), name); | |
111 | assert!(!ty.has_param_types()); | |
112 | Self::new_sized_ty(tmp, ty, Alignment::AbiAligned) | |
113 | } | |
114 | ||
115 | pub fn len(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef { | |
a7813a04 XL |
116 | let ty = self.ty.to_ty(ccx.tcx()); |
117 | match ty.sty { | |
118 | ty::TyArray(_, n) => common::C_uint(ccx, n), | |
119 | ty::TySlice(_) | ty::TyStr => { | |
120 | assert!(self.llextra != ptr::null_mut()); | |
121 | self.llextra | |
122 | } | |
123 | _ => bug!("unexpected type `{}` in LvalueRef::len", ty) | |
124 | } | |
125 | } | |
32a655c1 SL |
126 | |
127 | pub fn has_extra(&self) -> bool { | |
128 | !self.llextra.is_null() | |
129 | } | |
130 | ||
131 | fn struct_field_ptr( | |
132 | self, | |
133 | bcx: &Builder<'a, 'tcx>, | |
134 | st: &layout::Struct, | |
135 | fields: &Vec<Ty<'tcx>>, | |
136 | ix: usize, | |
137 | needs_cast: bool | |
138 | ) -> (ValueRef, Alignment) { | |
139 | let fty = fields[ix]; | |
140 | let ccx = bcx.ccx; | |
141 | ||
142 | let alignment = self.alignment | Alignment::from_packed(st.packed); | |
143 | ||
144 | let ptr_val = if needs_cast { | |
145 | let fields = st.field_index_by_increasing_offset().map(|i| { | |
146 | type_of::in_memory_type_of(ccx, fields[i]) | |
147 | }).collect::<Vec<_>>(); | |
148 | let real_ty = Type::struct_(ccx, &fields[..], st.packed); | |
149 | bcx.pointercast(self.llval, real_ty.ptr_to()) | |
150 | } else { | |
151 | self.llval | |
152 | }; | |
153 | ||
154 | // Simple case - we can just GEP the field | |
155 | // * First field - Always aligned properly | |
156 | // * Packed struct - There is no alignment padding | |
157 | // * Field is sized - pointer is properly aligned already | |
158 | if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed || | |
159 | bcx.ccx.shared().type_is_sized(fty) { | |
160 | return (bcx.struct_gep(ptr_val, st.memory_index[ix] as usize), alignment); | |
161 | } | |
162 | ||
163 | // If the type of the last field is [T] or str, then we don't need to do | |
164 | // any adjusments | |
165 | match fty.sty { | |
166 | ty::TySlice(..) | ty::TyStr => { | |
167 | return (bcx.struct_gep(ptr_val, st.memory_index[ix] as usize), alignment); | |
168 | } | |
169 | _ => () | |
170 | } | |
171 | ||
172 | // There's no metadata available, log the case and just do the GEP. | |
173 | if !self.has_extra() { | |
174 | debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment", | |
175 | ix, Value(ptr_val)); | |
176 | return (bcx.struct_gep(ptr_val, ix), alignment); | |
177 | } | |
178 | ||
179 | // We need to get the pointer manually now. | |
180 | // We do this by casting to a *i8, then offsetting it by the appropriate amount. | |
181 | // We do this instead of, say, simply adjusting the pointer from the result of a GEP | |
182 | // because the field may have an arbitrary alignment in the LLVM representation | |
183 | // anyway. | |
184 | // | |
185 | // To demonstrate: | |
186 | // struct Foo<T: ?Sized> { | |
187 | // x: u16, | |
188 | // y: T | |
189 | // } | |
190 | // | |
191 | // The type Foo<Foo<Trait>> is represented in LLVM as { u16, { u16, u8 }}, meaning that | |
192 | // the `y` field has 16-bit alignment. | |
193 | ||
194 | let meta = self.llextra; | |
195 | ||
196 | ||
197 | let offset = st.offsets[ix].bytes(); | |
198 | let unaligned_offset = C_uint(bcx.ccx, offset); | |
199 | ||
200 | // Get the alignment of the field | |
201 | let (_, align) = glue::size_and_align_of_dst(bcx, fty, meta); | |
202 | ||
203 | // Bump the unaligned offset up to the appropriate alignment using the | |
204 | // following expression: | |
205 | // | |
206 | // (unaligned offset + (align - 1)) & -align | |
207 | ||
208 | // Calculate offset | |
209 | let align_sub_1 = bcx.sub(align, C_uint(bcx.ccx, 1u64)); | |
210 | let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1), | |
211 | bcx.neg(align)); | |
212 | ||
213 | debug!("struct_field_ptr: DST field offset: {:?}", Value(offset)); | |
214 | ||
215 | // Cast and adjust pointer | |
216 | let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(bcx.ccx)); | |
217 | let byte_ptr = bcx.gep(byte_ptr, &[offset]); | |
218 | ||
219 | // Finally, cast back to the type expected | |
220 | let ll_fty = type_of::in_memory_type_of(bcx.ccx, fty); | |
221 | debug!("struct_field_ptr: Field type is {:?}", ll_fty); | |
222 | (bcx.pointercast(byte_ptr, ll_fty.ptr_to()), alignment) | |
223 | } | |
224 | ||
225 | /// Access a field, at a point when the value's case is known. | |
226 | pub fn trans_field_ptr(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> (ValueRef, Alignment) { | |
227 | let discr = match self.ty { | |
228 | LvalueTy::Ty { .. } => 0, | |
229 | LvalueTy::Downcast { variant_index, .. } => variant_index, | |
230 | }; | |
231 | let t = self.ty.to_ty(bcx.tcx()); | |
232 | let l = bcx.ccx.layout_of(t); | |
233 | // Note: if this ever needs to generate conditionals (e.g., if we | |
234 | // decide to do some kind of cdr-coding-like non-unique repr | |
235 | // someday), it will need to return a possibly-new bcx as well. | |
236 | match *l { | |
237 | layout::Univariant { ref variant, .. } => { | |
238 | assert_eq!(discr, 0); | |
239 | self.struct_field_ptr(bcx, &variant, | |
240 | &adt::compute_fields(bcx.ccx, t, 0, false), ix, false) | |
241 | } | |
242 | layout::Vector { count, .. } => { | |
243 | assert_eq!(discr, 0); | |
244 | assert!((ix as u64) < count); | |
245 | (bcx.struct_gep(self.llval, ix), self.alignment) | |
246 | } | |
247 | layout::General { discr: d, ref variants, .. } => { | |
248 | let mut fields = adt::compute_fields(bcx.ccx, t, discr, false); | |
249 | fields.insert(0, d.to_ty(&bcx.tcx(), false)); | |
250 | self.struct_field_ptr(bcx, &variants[discr], &fields, ix + 1, true) | |
251 | } | |
252 | layout::UntaggedUnion { ref variants } => { | |
253 | let fields = adt::compute_fields(bcx.ccx, t, 0, false); | |
254 | let ty = type_of::in_memory_type_of(bcx.ccx, fields[ix]); | |
255 | (bcx.pointercast(self.llval, ty.ptr_to()), | |
256 | self.alignment | Alignment::from_packed(variants.packed)) | |
257 | } | |
258 | layout::RawNullablePointer { nndiscr, .. } | | |
259 | layout::StructWrappedNullablePointer { nndiscr, .. } if discr as u64 != nndiscr => { | |
260 | let nullfields = adt::compute_fields(bcx.ccx, t, (1-nndiscr) as usize, false); | |
261 | // The unit-like case might have a nonzero number of unit-like fields. | |
262 | // (e.d., Result of Either with (), as one side.) | |
263 | let ty = type_of::type_of(bcx.ccx, nullfields[ix]); | |
264 | assert_eq!(machine::llsize_of_alloc(bcx.ccx, ty), 0); | |
265 | (bcx.pointercast(self.llval, ty.ptr_to()), Alignment::Packed) | |
266 | } | |
267 | layout::RawNullablePointer { nndiscr, .. } => { | |
268 | let nnty = adt::compute_fields(bcx.ccx, t, nndiscr as usize, false)[0]; | |
269 | assert_eq!(ix, 0); | |
270 | assert_eq!(discr as u64, nndiscr); | |
271 | let ty = type_of::type_of(bcx.ccx, nnty); | |
272 | (bcx.pointercast(self.llval, ty.ptr_to()), self.alignment) | |
273 | } | |
274 | layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { | |
275 | assert_eq!(discr as u64, nndiscr); | |
276 | self.struct_field_ptr(bcx, &nonnull, | |
277 | &adt::compute_fields(bcx.ccx, t, discr, false), ix, false) | |
278 | } | |
279 | _ => bug!("element access in type without elements: {} represented as {:#?}", t, l) | |
280 | } | |
281 | } | |
92a42be0 SL |
282 | } |
32a655c1 | 284 | impl<'a, 'tcx> MirContext<'a, 'tcx> { |
92a42be0 | 285 | pub fn trans_lvalue(&mut self, |
32a655c1 | 286 | bcx: &Builder<'a, 'tcx>, |
92a42be0 SL |
287 | lvalue: &mir::Lvalue<'tcx>) |
288 | -> LvalueRef<'tcx> { | |
289 | debug!("trans_lvalue(lvalue={:?})", lvalue); | |
290 | ||
32a655c1 SL |
291 | let ccx = bcx.ccx; |
292 | let tcx = ccx.tcx(); | |
3157f602 | 293 | |
c30ab7b3 | 294 | if let mir::Lvalue::Local(index) = *lvalue { |
3157f602 XL |
295 | match self.locals[index] { |
296 | LocalRef::Lvalue(lvalue) => { | |
297 | return lvalue; | |
298 | } | |
299 | LocalRef::Operand(..) => { | |
300 | bug!("using operand local {:?} as lvalue", lvalue); | |
301 | } | |
302 | } | |
303 | } | |
304 | ||
305 | let result = match *lvalue { | |
c30ab7b3 | 306 | mir::Lvalue::Local(_) => bug!(), // handled above |
8bb4bdeb | 307 | mir::Lvalue::Static(box mir::Static { def_id, ty }) => { |
9e0c209e | 308 | LvalueRef::new_sized(consts::get_static(ccx, def_id), |
8bb4bdeb | 309 | LvalueTy::from_ty(self.monomorphize(&ty)), |
32a655c1 | 310 | Alignment::AbiAligned) |
92a42be0 | 311 | }, |
3157f602 XL |
312 | mir::Lvalue::Projection(box mir::Projection { |
313 | ref base, | |
314 | elem: mir::ProjectionElem::Deref | |
315 | }) => { | |
316 | // Load the pointer from its location. | |
317 | let ptr = self.trans_consume(bcx, base); | |
318 | let projected_ty = LvalueTy::from_ty(ptr.ty) | |
319 | .projection_ty(tcx, &mir::ProjectionElem::Deref); | |
32a655c1 | 320 | let projected_ty = self.monomorphize(&projected_ty); |
3157f602 XL |
321 | let (llptr, llextra) = match ptr.val { |
322 | OperandValue::Immediate(llptr) => (llptr, ptr::null_mut()), | |
323 | OperandValue::Pair(llptr, llextra) => (llptr, llextra), | |
32a655c1 | 324 | OperandValue::Ref(..) => bug!("Deref of by-Ref type {:?}", ptr.ty) |
9cc50fc6 | 325 | }; |
3157f602 XL |
326 | LvalueRef { |
327 | llval: llptr, | |
328 | llextra: llextra, | |
329 | ty: projected_ty, | |
32a655c1 | 330 | alignment: Alignment::AbiAligned, |
3157f602 XL |
331 | } |
332 | } | |
92a42be0 SL |
333 | mir::Lvalue::Projection(ref projection) => { |
334 | let tr_base = self.trans_lvalue(bcx, &projection.base); | |
335 | let projected_ty = tr_base.ty.projection_ty(tcx, &projection.elem); | |
32a655c1 SL |
336 | let projected_ty = self.monomorphize(&projected_ty); |
337 | let align = tr_base.alignment; | |
54a0048b SL |
338 | |
339 | let project_index = |llindex| { | |
340 | let element = if let ty::TySlice(_) = tr_base.ty.to_ty(tcx).sty { | |
341 | // Slices already point to the array element type. | |
342 | bcx.inbounds_gep(tr_base.llval, &[llindex]) | |
343 | } else { | |
32a655c1 | 344 | let zero = common::C_uint(bcx.ccx, 0u64); |
54a0048b SL |
345 | bcx.inbounds_gep(tr_base.llval, &[zero, llindex]) |
346 | }; | |
32a655c1 | 347 | (element, align) |
54a0048b SL |
348 | }; |
349 | ||
32a655c1 | 350 | let ((llprojected, align), llextra) = match projection.elem { |
3157f602 | 351 | mir::ProjectionElem::Deref => bug!(), |
7453a54e | 352 | mir::ProjectionElem::Field(ref field, _) => { |
32a655c1 | 353 | let llextra = if self.ccx.shared().type_is_sized(projected_ty.to_ty(tcx)) { |
7453a54e SL |
354 | ptr::null_mut() |
355 | } else { | |
356 | tr_base.llextra | |
357 | }; | |
32a655c1 | 358 | (tr_base.trans_field_ptr(bcx, field.index()), llextra) |
92a42be0 SL |
359 | } |
360 | mir::ProjectionElem::Index(ref index) => { | |
361 | let index = self.trans_operand(bcx, index); | |
3157f602 | 362 | (project_index(self.prepare_index(bcx, index.immediate())), ptr::null_mut()) |
92a42be0 SL |
363 | } |
364 | mir::ProjectionElem::ConstantIndex { offset, | |
365 | from_end: false, | |
366 | min_length: _ } => { | |
32a655c1 | 367 | let lloffset = C_uint(bcx.ccx, offset); |
3157f602 | 368 | (project_index(lloffset), ptr::null_mut()) |
92a42be0 SL |
369 | } |
370 | mir::ProjectionElem::ConstantIndex { offset, | |
371 | from_end: true, | |
372 | min_length: _ } => { | |
32a655c1 SL |
373 | let lloffset = C_uint(bcx.ccx, offset); |
374 | let lllen = tr_base.len(bcx.ccx); | |
7453a54e | 375 | let llindex = bcx.sub(lllen, lloffset); |
3157f602 XL |
376 | (project_index(llindex), ptr::null_mut()) |
377 | } | |
378 | mir::ProjectionElem::Subslice { from, to } => { | |
32a655c1 SL |
379 | let llindex = C_uint(bcx.ccx, from); |
380 | let (llbase, align) = project_index(llindex); | |
3157f602 XL |
381 | |
382 | let base_ty = tr_base.ty.to_ty(bcx.tcx()); | |
383 | match base_ty.sty { | |
384 | ty::TyArray(..) => { | |
385 | // must cast the lvalue pointer type to the new | |
386 | // array type (*[%_; new_len]). | |
5bcae85e | 387 | let base_ty = self.monomorphized_lvalue_ty(lvalue); |
32a655c1 | 388 | let llbasety = type_of::type_of(bcx.ccx, base_ty).ptr_to(); |
3157f602 | 389 | let llbase = bcx.pointercast(llbase, llbasety); |
32a655c1 | 390 | ((llbase, align), ptr::null_mut()) |
3157f602 XL |
391 | } |
392 | ty::TySlice(..) => { | |
393 | assert!(tr_base.llextra != ptr::null_mut()); | |
394 | let lllen = bcx.sub(tr_base.llextra, | |
32a655c1 SL |
395 | C_uint(bcx.ccx, from+to)); |
396 | ((llbase, align), lllen) | |
3157f602 XL |
397 | } |
398 | _ => bug!("unexpected type {:?} in Subslice", base_ty) | |
399 | } | |
92a42be0 SL |
400 | } |
401 | mir::ProjectionElem::Downcast(..) => { | |
32a655c1 | 402 | ((tr_base.llval, align), tr_base.llextra) |
92a42be0 SL |
403 | } |
404 | }; | |
405 | LvalueRef { | |
406 | llval: llprojected, | |
407 | llextra: llextra, | |
408 | ty: projected_ty, | |
32a655c1 | 409 | alignment: align, |
92a42be0 SL |
410 | } |
411 | } | |
3157f602 XL |
412 | }; |
413 | debug!("trans_lvalue(lvalue={:?}) => {:?}", lvalue, result); | |
414 | result | |
92a42be0 SL |
415 | } |
416 | ||
417 | /// Adjust the bitwidth of an index since LLVM is less forgiving | |
418 | /// than we are. | |
419 | /// | |
420 | /// nmatsakis: is this still necessary? Not sure. | |
32a655c1 SL |
421 | fn prepare_index(&mut self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef) -> ValueRef { |
422 | let index_size = machine::llbitsize_of_real(bcx.ccx, common::val_ty(llindex)); | |
423 | let int_size = machine::llbitsize_of_real(bcx.ccx, bcx.ccx.int_type()); | |
92a42be0 | 424 | if index_size < int_size { |
32a655c1 | 425 | bcx.zext(llindex, bcx.ccx.int_type()) |
92a42be0 | 426 | } else if index_size > int_size { |
32a655c1 | 427 | bcx.trunc(llindex, bcx.ccx.int_type()) |
92a42be0 SL |
428 | } else { |
429 | llindex | |
430 | } | |
431 | } | |
3157f602 | 432 | |
5bcae85e | 433 | pub fn monomorphized_lvalue_ty(&self, lvalue: &mir::Lvalue<'tcx>) -> Ty<'tcx> { |
32a655c1 | 434 | let tcx = self.ccx.tcx(); |
5bcae85e | 435 | let lvalue_ty = lvalue.ty(&self.mir, tcx); |
32a655c1 | 436 | self.monomorphize(&lvalue_ty.to_ty(tcx)) |
3157f602 | 437 | } |
92a42be0 | 438 | } |