use super::operand::OperandValue;
use super::{FunctionCx, LocalRef};

use crate::common::IntPredicate;
use crate::glue;
use crate::traits::*;
use crate::MemFlags;

use rustc_middle::mir;
use rustc_middle::mir::tcx::PlaceTy;
use rustc_middle::ty::layout::{HasTyCtxt, TyAndLayout};
use rustc_middle::ty::{self, Ty};
use rustc_target::abi::{Abi, Align, FieldsShape, Int, TagEncoding};
use rustc_target::abi::{LayoutOf, VariantIdx, Variants};

#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
    /// A pointer to the contents of the place.
    pub llval: V,

    /// This place's extra data if it is unsized, or `None` if null.
    pub llextra: Option<V>,

    /// The monomorphized type of this place, including variant information.
    pub layout: TyAndLayout<'tcx>,

    /// The alignment we know for this place.
    pub align: Align,
}

impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    pub fn new_sized(llval: V, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
        assert!(!layout.is_unsized());
        PlaceRef { llval, llextra: None, layout, align: layout.align.abi }
    }

    pub fn new_sized_aligned(
        llval: V,
        layout: TyAndLayout<'tcx>,
        align: Align,
    ) -> PlaceRef<'tcx, V> {
        assert!(!layout.is_unsized());
        PlaceRef { llval, llextra: None, layout, align }
    }

    // FIXME(eddyb) pass something else for the name so no work is done
    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
    pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
        let tmp = bx.alloca(bx.cx().backend_type(layout), layout.align.abi);
        Self::new_sized(tmp, layout)
    }

    /// Returns a place for an indirect reference to an unsized place.
    // FIXME(eddyb) pass something else for the name so no work is done
    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
    pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
        let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
        let ptr_layout = bx.cx().layout_of(ptr_ty);
        Self::alloca(bx, ptr_layout)
    }

    pub fn len<Cx: ConstMethods<'tcx, Value = V>>(&self, cx: &Cx) -> V {
        if let FieldsShape::Array { count, .. } = self.layout.fields {
            if self.layout.is_unsized() {
                assert_eq!(count, 0);
                self.llextra.unwrap()
            } else {
                cx.const_usize(count)
            }
        } else {
            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
        }
    }
}

impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    /// Access a field, at a point when the value's case is known.
    pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        ix: usize,
    ) -> Self {
        let field = self.layout.field(bx.cx(), ix);
        let offset = self.layout.fields.offset(ix);
        let effective_field_align = self.align.restrict_for_offset(offset);

        let mut simple = || {
            let llval = match self.layout.abi {
                _ if offset.bytes() == 0 => {
                    // Unions and newtypes only use an offset of 0.
                    // Also handles the first field of Scalar, ScalarPair, and Vector layouts.
                    self.llval
                }
                Abi::ScalarPair(ref a, ref b)
                    if offset == a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi) =>
                {
                    // Offset matches second field.
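                    // Illustrative only (scalar sizes assumed, not taken from this layout):
                    // if `a` is an 8-bit scalar and `b` a 32-bit scalar, the second field
                    // sits at offset `1.align_to(4) == 4` bytes, which is what the guard on
                    // this arm compares `offset` against.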
                    bx.struct_gep(self.llval, 1)
                }
                Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
                    // ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer.
                    let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
                    bx.gep(byte_ptr, &[bx.const_usize(offset.bytes())])
                }
                Abi::Scalar(_) | Abi::ScalarPair(..) => {
                    // All fields of Scalar and ScalarPair layouts must have been handled by this point.
                    // Vector layouts have additional fields for each element of the vector, so don't panic in that case.
                    bug!(
                        "offset of non-ZST field `{:?}` does not match layout `{:#?}`",
                        field,
                        self.layout
                    );
                }
                _ => bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix)),
            };
            PlaceRef {
                // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
                llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
                llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None },
                layout: field,
                align: effective_field_align,
            }
        };

        // Simple cases, which don't need DST adjustment:
        //   * no metadata available - just log the case
        //   * known alignment - sized types, `[T]`, `str` or a foreign type
        //   * packed struct - there is no alignment padding
        match field.ty.kind() {
            _ if self.llextra.is_none() => {
                debug!(
                    "unsized field `{}`, of `{:?}` has no metadata for adjustment",
                    ix, self.llval
                );
                return simple();
            }
            _ if !field.is_unsized() => return simple(),
            ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
            ty::Adt(def, _) => {
                if def.repr.packed() {
                    // FIXME(eddyb) generalize the adjustment when we
                    // start supporting packing to larger alignments.
                    assert_eq!(self.layout.align.abi.bytes(), 1);
                    return simple();
                }
            }
            _ => {}
        }

        // We need to get the pointer manually now.
        // We do this by casting to a `*i8`, then offsetting it by the appropriate amount.
        // We do this instead of, say, simply adjusting the pointer from the result of a GEP
        // because the field may have an arbitrary alignment in the LLVM representation
        // anyway.
        //
        // To demonstrate:
        //
        //     struct Foo<T: ?Sized> {
        //         x: u16,
        //         y: T
        //     }
        //
        // The type `Foo<Foo<Trait>>` is represented in LLVM as `{ u16, { u16, u8 }}`, meaning that
        // the `y` field has 16-bit alignment.
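        //
        // Worked example (the runtime alignment is hypothetical): the static offset of `y`
        // above is 2 bytes, but if the unsized tail turns out to need 8-byte alignment at
        // runtime, the field actually lives at `round_up(2, 8) == 8`, which is why the
        // offset is recomputed dynamically below.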

        let meta = self.llextra;

        let unaligned_offset = bx.cx().const_usize(offset.bytes());

        // Get the alignment of the field
        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);

        // Bump the unaligned offset up to the appropriate alignment
        let offset = round_up_const_value_to_alignment(bx, unaligned_offset, unsized_align);

        debug!("struct_field_ptr: DST field offset: {:?}", offset);

        // Cast and adjust pointer.
        let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
        let byte_ptr = bx.gep(byte_ptr, &[offset]);

        // Finally, cast back to the type expected.
        let ll_fty = bx.cx().backend_type(field);
        debug!("struct_field_ptr: Field type is {:?}", ll_fty);

        PlaceRef {
            llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
            llextra: self.llextra,
            layout: field,
            align: effective_field_align,
        }
    }

    /// Obtain the actual discriminant of a value.
    pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        cast_to: Ty<'tcx>,
    ) -> V {
        let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
        if self.layout.abi.is_uninhabited() {
            return bx.cx().const_undef(cast_to);
        }
        let (tag_scalar, tag_encoding, tag_field) = match self.layout.variants {
            Variants::Single { index } => {
                let discr_val = self
                    .layout
                    .ty
                    .discriminant_for_variant(bx.cx().tcx(), index)
                    .map_or(index.as_u32() as u128, |discr| discr.val);
                return bx.cx().const_uint_big(cast_to, discr_val);
            }
            Variants::Multiple { ref tag, ref tag_encoding, tag_field, .. } => {
                (tag, tag_encoding, tag_field)
            }
        };

        // Read the tag/niche-encoded discriminant from memory.
        let tag = self.project_field(bx, tag_field);
        let tag = bx.load_operand(tag);

        // Decode the discriminant (specifically if it's niche-encoded).
        match *tag_encoding {
            TagEncoding::Direct => {
                let signed = match tag_scalar.value {
                    // We use `i1` for bytes that are always `0` or `1`,
                    // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                    // let LLVM interpret the `i1` as signed, because
                    // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
                    Int(_, signed) => !tag_scalar.is_bool() && signed,
                    _ => false,
                };
                bx.intcast(tag.immediate(), cast_to, signed)
            }
            TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
                // Rebase from niche values to discriminants, and check
                // whether the result is in range for the niche variants.
                let niche_llty = bx.cx().immediate_backend_type(tag.layout);
                let tag = tag.immediate();

                // We first compute the "relative discriminant" (wrt `niche_variants`),
                // that is, if `n = niche_variants.end() - niche_variants.start()`,
                // we remap `niche_start..=niche_start + n` (which may wrap around)
                // to (non-wrap-around) `0..=n`, to be able to check whether the
                // discriminant corresponds to a niche variant with one comparison.
                // We also can't go directly to the (variant index) discriminant
                // and check that it is in the range `niche_variants`, because
                // that might not fit in the same type, on top of needing an extra
                // comparison (see also the comment on `let niche_discr`).
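                // Purely illustrative (hypothetical numbers): with `niche_start == 251`
                // and three niche variants, the raw tags 251, 252, 253 map to the
                // relative values 0, 1, 2, so a single unsigned `<= 2` comparison
                // decides whether the tag encodes a niche variant.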
                let relative_discr = if niche_start == 0 {
                    // Avoid subtracting `0`, which wouldn't work for pointers.
                    // FIXME(eddyb) check the actual primitive type here.
                    tag
                } else {
                    bx.sub(tag, bx.cx().const_uint_big(niche_llty, niche_start))
                };
                let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
                let is_niche = if relative_max == 0 {
                    // Avoid calling `const_uint`, which wouldn't work for pointers.
                    // Also use canonical == 0 instead of non-canonical u<= 0.
                    // FIXME(eddyb) check the actual primitive type here.
                    bx.icmp(IntPredicate::IntEQ, relative_discr, bx.cx().const_null(niche_llty))
                } else {
                    let relative_max = bx.cx().const_uint(niche_llty, relative_max as u64);
                    bx.icmp(IntPredicate::IntULE, relative_discr, relative_max)
                };

                // NOTE(eddyb) this addition needs to be performed on the final
                // type, in case the niche itself can't represent all variant
                // indices (e.g. `u8` niche with more than `256` variants,
                // but enough uninhabited variants so that the remaining variants
                // fit in the niche).
                // In other words, `niche_variants.end - niche_variants.start`
                // is representable in the niche, but `niche_variants.end`
                // might not be, in extreme cases.
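                // For instance (hypothetical counts): with a `u8` tag, a relative range
                // of `0..=100` fits in the niche even if the absolute variant indices
                // reach 400, so the value is cast to `cast_to` first and only then is
                // `niche_variants.start()` added back.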
                let niche_discr = {
                    let relative_discr = if relative_max == 0 {
                        // HACK(eddyb) since we have only one niche, we know which
                        // one it is, and we can avoid having a dynamic value here.
                        bx.cx().const_uint(cast_to, 0)
                    } else {
                        bx.intcast(relative_discr, cast_to, false)
                    };
                    bx.add(
                        relative_discr,
                        bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
                    )
                };

                bx.select(
                    is_niche,
                    niche_discr,
                    bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64),
                )
            }
        }
    }

    /// Sets the discriminant for a new value of the given case of the given
    /// representation.
    pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx,
    ) {
        if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
            // We play it safe by using a well-defined `abort`, but we could go for immediate UB
            // if that turns out to be helpful.
            bx.abort();
            return;
        }
        match self.layout.variants {
            Variants::Single { index } => {
                assert_eq!(index, variant_index);
            }
            Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } => {
                let ptr = self.project_field(bx, tag_field);
                let to =
                    self.layout.ty.discriminant_for_variant(bx.tcx(), variant_index).unwrap().val;
                bx.store(
                    bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
                    ptr.llval,
                    ptr.align,
                );
            }
            Variants::Multiple {
                tag_encoding:
                    TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
                tag_field,
                ..
            } => {
                if variant_index != dataful_variant {
                    if bx.cx().sess().target.arch == "arm"
                        || bx.cx().sess().target.arch == "aarch64"
                    {
                        // FIXME(#34427): as workaround for LLVM bug on ARM,
                        // use memset of 0 before assigning niche value.
                        let fill_byte = bx.cx().const_u8(0);
                        let size = bx.cx().const_usize(self.layout.size.bytes());
                        bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty());
                    }

                    let niche = self.project_field(bx, tag_field);
                    let niche_llty = bx.cx().immediate_backend_type(niche.layout);
                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
                    let niche_value = (niche_value as u128).wrapping_add(niche_start);
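                    // This mirrors the decoding in `codegen_get_discr`: with the same
                    // hypothetical `niche_start == 251`, the second niche variant
                    // (relative value 1) is stored as the raw tag 252.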
                    // FIXME(eddyb): check the actual primitive type here.
                    let niche_llval = if niche_value == 0 {
                        // HACK(eddyb): using `c_null` as it works on all types.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint_big(niche_llty, niche_value)
                    };
                    OperandValue::Immediate(niche_llval).store(bx, niche);
                }
            }
        }
    }

    pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        llindex: V,
    ) -> Self {
        // Statically compute the offset if we can, otherwise just use the element size,
        // as this will yield the lowest alignment.
        let layout = self.layout.field(bx, 0);
        let offset = if let Some(llindex) = bx.const_to_opt_uint(llindex) {
            layout.size.checked_mul(llindex, bx).unwrap_or(layout.size)
        } else {
            layout.size
        };

        PlaceRef {
            llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]),
            llextra: None,
            layout,
            align: self.align.restrict_for_offset(offset),
        }
    }

    pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx,
    ) -> Self {
        let mut downcast = *self;
        downcast.layout = self.layout.for_variant(bx.cx(), variant_index);

        // Cast to the appropriate variant struct type.
        let variant_ty = bx.cx().backend_type(downcast.layout);
        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));

        downcast
    }

    pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_start(self.llval, self.layout.size);
    }

    pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_end(self.llval, self.layout.size);
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn codegen_place(
        &mut self,
        bx: &mut Bx,
        place_ref: mir::PlaceRef<'tcx>,
    ) -> PlaceRef<'tcx, Bx::Value> {
        debug!("codegen_place(place_ref={:?})", place_ref);
        let cx = self.cx;
        let tcx = self.cx.tcx();

        let result = match place_ref {
            mir::PlaceRef { local, projection: [] } => match self.locals[local] {
                LocalRef::Place(place) => {
                    return place;
                }
                LocalRef::UnsizedPlace(place) => {
                    return bx.load_operand(place).deref(cx);
                }
                LocalRef::Operand(..) => {
                    bug!("using operand local {:?} as place", place_ref);
                }
            },
            mir::PlaceRef { local, projection: [proj_base @ .., mir::ProjectionElem::Deref] } => {
                // Load the pointer from its location.
                self.codegen_consume(bx, mir::PlaceRef { local, projection: proj_base })
                    .deref(bx.cx())
            }
            mir::PlaceRef { local, projection: &[ref proj_base @ .., elem] } => {
                // FIXME turn this recursion into iteration
                let cg_base =
                    self.codegen_place(bx, mir::PlaceRef { local, projection: proj_base });

                match elem {
                    mir::ProjectionElem::Deref => bug!(),
                    mir::ProjectionElem::Field(ref field, _) => {
                        cg_base.project_field(bx, field.index())
                    }
                    mir::ProjectionElem::Index(index) => {
                        let index = &mir::Operand::Copy(mir::Place::from(index));
                        let index = self.codegen_operand(bx, index);
                        let llindex = index.immediate();
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::ConstantIndex {
                        offset,
                        from_end: false,
                        min_length: _,
                    } => {
                        let lloffset = bx.cx().const_usize(offset as u64);
                        cg_base.project_index(bx, lloffset)
                    }
                    mir::ProjectionElem::ConstantIndex {
                        offset,
                        from_end: true,
                        min_length: _,
                    } => {
                        let lloffset = bx.cx().const_usize(offset as u64);
                        let lllen = cg_base.len(bx.cx());
                        let llindex = bx.sub(lllen, lloffset);
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::Subslice { from, to, from_end } => {
                        let mut subslice =
                            cg_base.project_index(bx, bx.cx().const_usize(from as u64));
                        let projected_ty =
                            PlaceTy::from_ty(cg_base.layout.ty).projection_ty(tcx, elem).ty;
                        subslice.layout = bx.cx().layout_of(self.monomorphize(projected_ty));

                        if subslice.layout.is_unsized() {
                            assert!(from_end, "slice subslices should be `from_end`");
                            subslice.llextra = Some(bx.sub(
                                cg_base.llextra.unwrap(),
                                bx.cx().const_usize((from as u64) + (to as u64)),
                            ));
                        }

                        // Cast the place pointer type to the new
                        // array or slice type (`*[%_; new_len]`).
                        subslice.llval = bx.pointercast(
                            subslice.llval,
                            bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)),
                        );

                        subslice
                    }
                    mir::ProjectionElem::Downcast(_, v) => cg_base.project_downcast(bx, v),
                }
            }
        };
        debug!("codegen_place(place={:?}) => {:?}", place_ref, result);
        result
    }

    pub fn monomorphized_place_ty(&self, place_ref: mir::PlaceRef<'tcx>) -> Ty<'tcx> {
        let tcx = self.cx.tcx();
        let place_ty = place_ref.ty(self.mir, tcx);
        self.monomorphize(place_ty.ty)
    }
}

fn round_up_const_value_to_alignment<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    value: Bx::Value,
    align: Bx::Value,
) -> Bx::Value {
    // In pseudo code:
    //
    //     if value & (align - 1) == 0 {
    //         value
    //     } else {
    //         (value & !(align - 1)) + align
    //     }
    //
    // Usually this is written without branches as
    //
    //     (value + align - 1) & !(align - 1)
    //
    // But this formula cannot take advantage of a constant `value`. E.g. if `value` is known
    // at compile time to be `1`, this expression should be optimized to `align`. However,
    // that optimization only holds if `align` is a power of two. Since the optimizer doesn't
    // know that `align` is a power of two, it cannot perform this optimization.
    //
    // Instead we use
    //
    //     value + (-value & (align - 1))
    //
    // Since `align` is used only once, the expression can be optimized. For `value = 0`
    // it is optimized to `0` even in debug mode.
    //
    // NB: The previous version of this code used
    //
    //     (value + align - 1) & -align
    //
    // Even though `-align == !(align - 1)`, LLVM failed to optimize this even for
    // `value = 0`. Bug report: https://bugs.llvm.org/show_bug.cgi?id=48559
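    //
    // Quick sanity check with concrete numbers (illustrative only): for `value = 5` and
    // `align = 8`, `-5 & 7 == 3`, so the result is `5 + 3 == 8`, matching the classic
    // `(5 + 8 - 1) & !7 == 8`.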
    let one = bx.const_usize(1);
    let align_minus_1 = bx.sub(align, one);
    let neg_value = bx.neg(value);
    let offset = bx.and(neg_value, align_minus_1);
    bx.add(value, offset)
}