]>
Commit | Line | Data |
---|---|---|
60c5eb7d | 1 | use super::operand::OperandValue; |
dfeec247 | 2 | use super::{FunctionCx, LocalRef}; |
60c5eb7d | 3 | |
9fa01778 XL |
4 | use crate::common::IntPredicate; |
5 | use crate::glue; | |
9fa01778 | 6 | use crate::traits::*; |
dfeec247 | 7 | use crate::MemFlags; |
ff7c6d11 | 8 | |
ba9703b0 XL |
9 | use rustc_middle::mir; |
10 | use rustc_middle::mir::tcx::PlaceTy; | |
c295e0f8 | 11 | use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout}; |
ba9703b0 | 12 | use rustc_middle::ty::{self, Ty}; |
f035d41b | 13 | use rustc_target::abi::{Abi, Align, FieldsShape, Int, TagEncoding}; |
c295e0f8 | 14 | use rustc_target::abi::{VariantIdx, Variants}; |
ff7c6d11 XL |
15 | |
/// The runtime location (pointer + metadata) and layout of an MIR place,
/// as materialized in the backend IR. `V` is the backend value type
/// (e.g. an LLVM value handle).
#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
    /// A pointer to the contents of the place.
    pub llval: V,

    /// This place's extra data if it is unsized, or `None` if null.
    /// (E.g. the length of a slice, or the vtable pointer of a trait object.)
    pub llextra: Option<V>,

    /// The monomorphized type of this place, including variant information.
    pub layout: TyAndLayout<'tcx>,

    /// The alignment we know for this place.
    /// May be smaller than `layout.align.abi` (e.g. a field of a packed struct).
    pub align: Align,
}
30 | ||
impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    /// Creates a `PlaceRef` for a sized value at `llval`, taking the alignment
    /// from the layout's ABI alignment. Panics if `layout` is unsized.
    pub fn new_sized(llval: V, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
        assert!(!layout.is_unsized());
        PlaceRef { llval, llextra: None, layout, align: layout.align.abi }
    }

    /// Like `new_sized`, but with an explicit (possibly reduced) alignment,
    /// e.g. for a place inside a packed struct.
    pub fn new_sized_aligned(
        llval: V,
        layout: TyAndLayout<'tcx>,
        align: Align,
    ) -> PlaceRef<'tcx, V> {
        assert!(!layout.is_unsized());
        PlaceRef { llval, llextra: None, layout, align }
    }

    /// Allocates a stack slot for a sized value of the given layout and
    /// returns a place pointing at it.
    // FIXME(eddyb) pass something else for the name so no work is done
    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
    pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
        let tmp = bx.alloca(bx.cx().backend_type(layout), layout.align.abi);
        Self::new_sized(tmp, layout)
    }

    /// Returns a place for an indirect reference to an unsized place.
    /// The returned place holds a `*mut layout.ty` (a fat pointer), since the
    /// unsized value itself cannot be stack-allocated directly.
    // FIXME(eddyb) pass something else for the name so no work is done
    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
    pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
        let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
        let ptr_layout = bx.cx().layout_of(ptr_ty);
        Self::alloca(bx, ptr_layout)
    }

    /// Returns the number of elements of this array/slice place as a backend
    /// value: the static count for arrays, or the `llextra` length metadata
    /// for slices. Panics (`bug!`) if the layout is not array-shaped.
    pub fn len<Cx: ConstMethods<'tcx, Value = V>>(&self, cx: &Cx) -> V {
        if let FieldsShape::Array { count, .. } = self.layout.fields {
            if self.layout.is_unsized() {
                // Unsized arrays have a static count of 0; the real length
                // lives in the metadata.
                assert_eq!(count, 0);
                self.llextra.unwrap()
            } else {
                cx.const_usize(count)
            }
        } else {
            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
        }
    }
}
ff7c6d11 | 83 | |
dc9dc135 | 84 | impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { |
    /// Access a field, at a point when the value's case is known.
    ///
    /// Returns a place for field `ix` of `self`. For most fields this is a
    /// simple struct GEP plus a pointer cast; the slow path at the bottom
    /// handles unsized fields whose offset depends on the dynamic alignment
    /// of the unsized tail (computed via `glue::size_and_align_of_dst`).
    pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        ix: usize,
    ) -> Self {
        let field = self.layout.field(bx.cx(), ix);
        let offset = self.layout.fields.offset(ix);
        // The field can never be more aligned than what we know of the
        // enclosing place at this offset.
        let effective_field_align = self.align.restrict_for_offset(offset);

        // Fast path: field offset is statically known.
        let mut simple = || {
            let llval = match self.layout.abi {
                _ if offset.bytes() == 0 => {
                    // Unions and newtypes only use an offset of 0.
                    // Also handles the first field of Scalar, ScalarPair, and Vector layouts.
                    self.llval
                }
                Abi::ScalarPair(a, b)
                    if offset == a.size(bx.cx()).align_to(b.align(bx.cx()).abi) =>
                {
                    // Offset matches second field.
                    let ty = bx.backend_type(self.layout);
                    bx.struct_gep(ty, self.llval, 1)
                }
                Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
                    // ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer.
                    let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
                    bx.gep(bx.cx().type_i8(), byte_ptr, &[bx.const_usize(offset.bytes())])
                }
                Abi::Scalar(_) | Abi::ScalarPair(..) => {
                    // All fields of Scalar and ScalarPair layouts must have been handled by this point.
                    // Vector layouts have additional fields for each element of the vector, so don't panic in that case.
                    bug!(
                        "offset of non-ZST field `{:?}` does not match layout `{:#?}`",
                        field,
                        self.layout
                    );
                }
                _ => {
                    // Aggregate layouts: index via the backend's field mapping,
                    // which may differ from the source field order.
                    let ty = bx.backend_type(self.layout);
                    bx.struct_gep(ty, self.llval, bx.cx().backend_field_index(self.layout, ix))
                }
            };
            PlaceRef {
                // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
                llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
                // Metadata is inherited only if the field type itself needs it.
                llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None },
                layout: field,
                align: effective_field_align,
            }
        };

        // Simple cases, which don't need DST adjustment:
        //   * no metadata available - just log the case
        //   * known alignment - sized types, `[T]`, `str` or a foreign type
        //   * packed struct - there is no alignment padding
        match field.ty.kind() {
            _ if self.llextra.is_none() => {
                debug!(
                    "unsized field `{}`, of `{:?}` has no metadata for adjustment",
                    ix, self.llval
                );
                return simple();
            }
            _ if !field.is_unsized() => return simple(),
            ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
            ty::Adt(def, _) => {
                if def.repr().packed() {
                    // FIXME(eddyb) generalize the adjustment when we
                    // start supporting packing to larger alignments.
                    assert_eq!(self.layout.align.abi.bytes(), 1);
                    return simple();
                }
            }
            _ => {}
        }

        // We need to get the pointer manually now.
        // We do this by casting to a `*i8`, then offsetting it by the appropriate amount.
        // We do this instead of, say, simply adjusting the pointer from the result of a GEP
        // because the field may have an arbitrary alignment in the LLVM representation
        // anyway.
        //
        // To demonstrate:
        //
        //     struct Foo<T: ?Sized> {
        //         x: u16,
        //         y: T
        //     }
        //
        // The type `Foo<Foo<Trait>>` is represented in LLVM as `{ u16, { u16, u8 }}`, meaning that
        // the `y` field has 16-bit alignment.

        let meta = self.llextra;

        let unaligned_offset = bx.cx().const_usize(offset.bytes());

        // Get the alignment of the field
        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);

        // Bump the unaligned offset up to the appropriate alignment
        let offset = round_up_const_value_to_alignment(bx, unaligned_offset, unsized_align);

        debug!("struct_field_ptr: DST field offset: {:?}", offset);

        // Cast and adjust pointer.
        let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
        let byte_ptr = bx.gep(bx.cx().type_i8(), byte_ptr, &[offset]);

        // Finally, cast back to the type expected.
        let ll_fty = bx.cx().backend_type(field);
        debug!("struct_field_ptr: Field type is {:?}", ll_fty);

        PlaceRef {
            llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
            llextra: self.llextra,
            layout: field,
            align: effective_field_align,
        }
    }
205 | ||
    /// Obtain the actual discriminant of a value.
    ///
    /// Emits code that loads the tag of this (enum) place and converts it to
    /// the discriminant as a value of `cast_to`. Handles all three layouts:
    /// single-variant (constant), directly-tagged, and niche-encoded.
    pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        cast_to: Ty<'tcx>,
    ) -> V {
        let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
        if self.layout.abi.is_uninhabited() {
            // This code is unreachable; any value will do.
            return bx.cx().const_undef(cast_to);
        }
        let (tag_scalar, tag_encoding, tag_field) = match self.layout.variants {
            Variants::Single { index } => {
                // Only one variant: the discriminant is a compile-time constant.
                let discr_val = self
                    .layout
                    .ty
                    .discriminant_for_variant(bx.cx().tcx(), index)
                    .map_or(index.as_u32() as u128, |discr| discr.val);
                return bx.cx().const_uint_big(cast_to, discr_val);
            }
            Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
                (tag, tag_encoding, tag_field)
            }
        };

        // Read the tag/niche-encoded discriminant from memory.
        let tag = self.project_field(bx, tag_field);
        let tag = bx.load_operand(tag);

        // Decode the discriminant (specifically if it's niche-encoded).
        match *tag_encoding {
            TagEncoding::Direct => {
                let signed = match tag_scalar.primitive() {
                    // We use `i1` for bytes that are always `0` or `1`,
                    // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                    // let LLVM interpret the `i1` as signed, because
                    // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
                    Int(_, signed) => !tag_scalar.is_bool() && signed,
                    _ => false,
                };
                bx.intcast(tag.immediate(), cast_to, signed)
            }
            TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
                // Rebase from niche values to discriminants, and check
                // whether the result is in range for the niche variants.
                let niche_llty = bx.cx().immediate_backend_type(tag.layout);
                let tag = tag.immediate();

                // We first compute the "relative discriminant" (wrt `niche_variants`),
                // that is, if `n = niche_variants.end() - niche_variants.start()`,
                // we remap `niche_start..=niche_start + n` (which may wrap around)
                // to (non-wrap-around) `0..=n`, to be able to check whether the
                // discriminant corresponds to a niche variant with one comparison.
                // We also can't go directly to the (variant index) discriminant
                // and check that it is in the range `niche_variants`, because
                // that might not fit in the same type, on top of needing an extra
                // comparison (see also the comment on `let niche_discr`).
                let relative_discr = if niche_start == 0 {
                    // Avoid subtracting `0`, which wouldn't work for pointers.
                    // FIXME(eddyb) check the actual primitive type here.
                    tag
                } else {
                    bx.sub(tag, bx.cx().const_uint_big(niche_llty, niche_start))
                };
                let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
                let is_niche = if relative_max == 0 {
                    // Avoid calling `const_uint`, which wouldn't work for pointers.
                    // Also use canonical == 0 instead of non-canonical u<= 0.
                    // FIXME(eddyb) check the actual primitive type here.
                    bx.icmp(IntPredicate::IntEQ, relative_discr, bx.cx().const_null(niche_llty))
                } else {
                    let relative_max = bx.cx().const_uint(niche_llty, relative_max as u64);
                    bx.icmp(IntPredicate::IntULE, relative_discr, relative_max)
                };

                // NOTE(eddyb) this addition needs to be performed on the final
                // type, in case the niche itself can't represent all variant
                // indices (e.g. `u8` niche with more than `256` variants,
                // but enough uninhabited variants so that the remaining variants
                // fit in the niche).
                // In other words, `niche_variants.end - niche_variants.start`
                // is representable in the niche, but `niche_variants.end`
                // might not be, in extreme cases.
                let niche_discr = {
                    let relative_discr = if relative_max == 0 {
                        // HACK(eddyb) since we have only one niche, we know which
                        // one it is, and we can avoid having a dynamic value here.
                        bx.cx().const_uint(cast_to, 0)
                    } else {
                        bx.intcast(relative_discr, cast_to, false)
                    };
                    bx.add(
                        relative_discr,
                        bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
                    )
                };

                // Niche hit => decoded niche discriminant, otherwise the
                // untagged ("dataful") variant's index.
                bx.select(
                    is_niche,
                    niche_discr,
                    bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64),
                )
            }
        }
    }
310 | ||
    /// Sets the discriminant for a new value of the given case of the given
    /// representation.
    ///
    /// For `Variants::Single` this is a no-op (just an assertion); for a
    /// direct tag it stores the discriminant into the tag field; for a
    /// niche encoding it stores the niche value only when the chosen variant
    /// is not the untagged ("dataful") one.
    pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx,
    ) {
        if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
            // We play it safe by using a well-defined `abort`, but we could go for immediate UB
            // if that turns out to be helpful.
            bx.abort();
            return;
        }
        match self.layout.variants {
            Variants::Single { index } => {
                assert_eq!(index, variant_index);
            }
            Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } => {
                let ptr = self.project_field(bx, tag_field);
                let to =
                    self.layout.ty.discriminant_for_variant(bx.tcx(), variant_index).unwrap().val;
                bx.store(
                    bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
                    ptr.llval,
                    ptr.align,
                );
            }
            Variants::Multiple {
                tag_encoding:
                    TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
                tag_field,
                ..
            } => {
                if variant_index != dataful_variant {
                    if bx.cx().sess().target.arch == "arm"
                        || bx.cx().sess().target.arch == "aarch64"
                    {
                        // FIXME(#34427): as workaround for LLVM bug on ARM,
                        // use memset of 0 before assigning niche value.
                        let fill_byte = bx.cx().const_u8(0);
                        let size = bx.cx().const_usize(self.layout.size.bytes());
                        bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty());
                    }

                    let niche = self.project_field(bx, tag_field);
                    let niche_llty = bx.cx().immediate_backend_type(niche.layout);
                    // Map the variant index into the niche's value range;
                    // wrapping add mirrors the wrap-around decoding in
                    // `codegen_get_discr`.
                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
                    let niche_value = (niche_value as u128).wrapping_add(niche_start);
                    // FIXME(eddyb): check the actual primitive type here.
                    let niche_llval = if niche_value == 0 {
                        // HACK(eddyb): using `c_null` as it works on all types.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint_big(niche_llty, niche_value)
                    };
                    OperandValue::Immediate(niche_llval).store(bx, niche);
                }
            }
        }
    }
371 | ||
a1dfa0c6 XL |
    /// Returns a place for element `llindex` of this array/slice place,
    /// via an inbounds GEP `[0, llindex]`. The result carries no metadata
    /// (elements are sized).
    pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        llindex: V,
    ) -> Self {
        // Statically compute the offset if we can, otherwise just use the element size,
        // as this will yield the lowest alignment.
        let layout = self.layout.field(bx, 0);
        let offset = if let Some(llindex) = bx.const_to_opt_uint(llindex) {
            // On multiplication overflow, fall back to the element size
            // (conservative for the alignment restriction below).
            layout.size.checked_mul(llindex, bx).unwrap_or(layout.size)
        } else {
            layout.size
        };

        PlaceRef {
            llval: bx.inbounds_gep(
                bx.cx().backend_type(self.layout),
                self.llval,
                &[bx.cx().const_usize(0), llindex],
            ),
            llextra: None,
            layout,
            align: self.align.restrict_for_offset(offset),
        }
    }
397 | ||
a1dfa0c6 XL |
    /// Returns this place reinterpreted as the given enum variant: same
    /// pointer and alignment, but with the variant's layout and the pointer
    /// cast to the variant's struct type.
    pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx,
    ) -> Self {
        let mut downcast = *self;
        downcast.layout = self.layout.for_variant(bx.cx(), variant_index);

        // Cast to the appropriate variant struct type.
        let variant_ty = bx.cx().backend_type(downcast.layout);
        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));

        downcast
    }
412 | ||
    /// Emits a lifetime-start marker for this place's storage.
    pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_start(self.llval, self.layout.size);
    }
416 | ||
    /// Emits a lifetime-end marker for this place's storage.
    pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_end(self.llval, self.layout.size);
    }
420 | } | |
421 | ||
dc9dc135 | 422 | impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { |
a1dfa0c6 XL |
    /// Lowers a MIR place (local + projection chain) to a backend `PlaceRef`
    /// by resolving the base local and then applying each projection element
    /// in order.
    pub fn codegen_place(
        &mut self,
        bx: &mut Bx,
        place_ref: mir::PlaceRef<'tcx>,
    ) -> PlaceRef<'tcx, Bx::Value> {
        debug!("codegen_place(place_ref={:?})", place_ref);
        let cx = self.cx;
        let tcx = self.cx.tcx();

        // Index into `place_ref.projection` from which the loop below starts;
        // stays 0 unless the operand-local case below consumes a prefix.
        let mut base = 0;
        let mut cg_base = match self.locals[place_ref.local] {
            LocalRef::Place(place) => place,
            LocalRef::UnsizedPlace(place) => bx.load_operand(place).deref(cx),
            LocalRef::Operand(..) => {
                // An operand local can only be used as a place through a
                // `Deref` projection: evaluate everything up to the first
                // `Deref` as an operand, then dereference it.
                if let Some(elem) = place_ref
                    .projection
                    .iter()
                    .enumerate()
                    .find(|elem| matches!(elem.1, mir::ProjectionElem::Deref))
                {
                    base = elem.0 + 1;
                    let cg_base = self.codegen_consume(
                        bx,
                        mir::PlaceRef { projection: &place_ref.projection[..elem.0], ..place_ref },
                    );

                    cg_base.deref(bx.cx())
                } else {
                    bug!("using operand local {:?} as place", place_ref);
                }
            }
        };
        for elem in place_ref.projection[base..].iter() {
            cg_base = match *elem {
                mir::ProjectionElem::Deref => bx.load_operand(cg_base).deref(bx.cx()),
                mir::ProjectionElem::Field(ref field, _) => {
                    cg_base.project_field(bx, field.index())
                }
                mir::ProjectionElem::Index(index) => {
                    let index = &mir::Operand::Copy(mir::Place::from(index));
                    let index = self.codegen_operand(bx, index);
                    let llindex = index.immediate();
                    cg_base.project_index(bx, llindex)
                }
                mir::ProjectionElem::ConstantIndex { offset, from_end: false, min_length: _ } => {
                    let lloffset = bx.cx().const_usize(offset as u64);
                    cg_base.project_index(bx, lloffset)
                }
                mir::ProjectionElem::ConstantIndex { offset, from_end: true, min_length: _ } => {
                    // `from_end` indexing: element at `len - offset`.
                    let lloffset = bx.cx().const_usize(offset as u64);
                    let lllen = cg_base.len(bx.cx());
                    let llindex = bx.sub(lllen, lloffset);
                    cg_base.project_index(bx, llindex)
                }
                mir::ProjectionElem::Subslice { from, to, from_end } => {
                    let mut subslice = cg_base.project_index(bx, bx.cx().const_usize(from as u64));
                    let projected_ty =
                        PlaceTy::from_ty(cg_base.layout.ty).projection_ty(tcx, *elem).ty;
                    subslice.layout = bx.cx().layout_of(self.monomorphize(projected_ty));

                    if subslice.layout.is_unsized() {
                        assert!(from_end, "slice subslices should be `from_end`");
                        // New length = old length - (elements dropped at both ends).
                        subslice.llextra = Some(bx.sub(
                            cg_base.llextra.unwrap(),
                            bx.cx().const_usize((from as u64) + (to as u64)),
                        ));
                    }

                    // Cast the place pointer type to the new
                    // array or slice type (`*[%_; new_len]`).
                    subslice.llval = bx.pointercast(
                        subslice.llval,
                        bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)),
                    );

                    subslice
                }
                mir::ProjectionElem::Downcast(_, v) => cg_base.project_downcast(bx, v),
            };
        }
        debug!("codegen_place(place={:?}) => {:?}", place_ref, cg_base);
        cg_base
    }
506 | ||
    /// Returns the monomorphized type of the given MIR place
    /// (projection-adjusted type of the place, with generics substituted).
    pub fn monomorphized_place_ty(&self, place_ref: mir::PlaceRef<'tcx>) -> Ty<'tcx> {
        let tcx = self.cx.tcx();
        let place_ty = place_ref.ty(self.mir, tcx);
        self.monomorphize(place_ty.ty)
    }
512 | } | |
5869c6ff XL |
513 | |
/// Emits code rounding `value` up to the next multiple of `align`
/// (both runtime `usize` values; `align` is assumed to be a power of two,
/// as produced by `size_and_align_of_dst` — TODO confirm at call sites).
fn round_up_const_value_to_alignment<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    value: Bx::Value,
    align: Bx::Value,
) -> Bx::Value {
    // In pseudo code:
    //
    //     if value & (align - 1) == 0 {
    //         value
    //     } else {
    //         (value & !(align - 1)) + align
    //     }
    //
    // Usually this is written without branches as
    //
    //     (value + align - 1) & !(align - 1)
    //
    // But this formula cannot take advantage of constant `value`. E.g. if `value` is known
    // at compile time to be `1`, this expression should be optimized to `align`. However,
    // optimization only holds if `align` is a power of two. Since the optimizer doesn't know
    // that `align` is a power of two, it cannot perform this optimization.
    //
    // Instead we use
    //
    //     value + (-value & (align - 1))
    //
    // Since `align` is used only once, the expression can be optimized. For `value = 0`
    // its optimized to `0` even in debug mode.
    //
    // NB: The previous version of this code used
    //
    //     (value + align - 1) & -align
    //
    // Even though `-align == !(align - 1)`, LLVM failed to optimize this even for
    // `value = 0`. Bug report: https://bugs.llvm.org/show_bug.cgi?id=48559
    let one = bx.const_usize(1);
    let align_minus_1 = bx.sub(align, one);
    let neg_value = bx.neg(value);
    let offset = bx.and(neg_value, align_minus_1);
    bx.add(value, offset)
}