// compiler/rustc_codegen_ssa/src/mir/place.rs (rustc 1.69.0)
use super::operand::OperandValue;
use super::{FunctionCx, LocalRef};

use crate::common::IntPredicate;
use crate::glue;
use crate::traits::*;

use rustc_middle::mir;
use rustc_middle::mir::tcx::PlaceTy;
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Ty};
use rustc_target::abi::{Abi, Align, FieldsShape, Int, Pointer, TagEncoding};
use rustc_target::abi::{VariantIdx, Variants};

#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
    /// A pointer to the contents of the place.
    pub llval: V,

    /// This place's extra data if it is unsized, or `None` if it is sized
    /// (e.g. the length for a `[T]` place, or the vtable pointer for a `dyn Trait` place).
    pub llextra: Option<V>,

    /// The monomorphized type of this place, including variant information.
    pub layout: TyAndLayout<'tcx>,

    /// The alignment we know for this place.
    pub align: Align,
}

impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    pub fn new_sized(llval: V, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
        assert!(layout.is_sized());
        PlaceRef { llval, llextra: None, layout, align: layout.align.abi }
    }

    pub fn new_sized_aligned(
        llval: V,
        layout: TyAndLayout<'tcx>,
        align: Align,
    ) -> PlaceRef<'tcx, V> {
        assert!(layout.is_sized());
        PlaceRef { llval, llextra: None, layout, align }
    }

    // FIXME(eddyb) pass something else for the name so no work is done
    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
    pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        assert!(layout.is_sized(), "tried to statically allocate unsized place");
        let tmp = bx.alloca(bx.cx().backend_type(layout), layout.align.abi);
        Self::new_sized(tmp, layout)
    }

    /// Returns a place for an indirect reference to an unsized place.
    // FIXME(eddyb) pass something else for the name so no work is done
    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
    pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
        let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
        let ptr_layout = bx.cx().layout_of(ptr_ty);
        Self::alloca(bx, ptr_layout)
    }

    pub fn len<Cx: ConstMethods<'tcx, Value = V>>(&self, cx: &Cx) -> V {
        if let FieldsShape::Array { count, .. } = self.layout.fields {
            if self.layout.is_unsized() {
                assert_eq!(count, 0);
                self.llextra.unwrap()
            } else {
                cx.const_usize(count)
            }
        } else {
            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
        }
    }
}

impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    /// Access a field, at a point when the value's case is known.
    pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        ix: usize,
    ) -> Self {
        let field = self.layout.field(bx.cx(), ix);
        let offset = self.layout.fields.offset(ix);
        let effective_field_align = self.align.restrict_for_offset(offset);

        let mut simple = || {
            let llval = match self.layout.abi {
                _ if offset.bytes() == 0 => {
                    // Unions and newtypes only use an offset of 0.
                    // Also handles the first field of Scalar, ScalarPair, and Vector layouts.
                    self.llval
                }
                Abi::ScalarPair(a, b)
                    if offset == a.size(bx.cx()).align_to(b.align(bx.cx()).abi) =>
                {
                    // Offset matches second field.
                    let ty = bx.backend_type(self.layout);
                    bx.struct_gep(ty, self.llval, 1)
                }
                Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
                    // ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer.
                    let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
                    bx.gep(bx.cx().type_i8(), byte_ptr, &[bx.const_usize(offset.bytes())])
                }
                Abi::Scalar(_) | Abi::ScalarPair(..) => {
                    // All fields of Scalar and ScalarPair layouts must have been handled by this point.
                    // Vector layouts have additional fields for each element of the vector, so don't panic in that case.
                    bug!(
                        "offset of non-ZST field `{:?}` does not match layout `{:#?}`",
                        field,
                        self.layout
                    );
                }
                _ => {
                    let ty = bx.backend_type(self.layout);
                    bx.struct_gep(ty, self.llval, bx.cx().backend_field_index(self.layout, ix))
                }
            };
            PlaceRef {
                // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
                llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
                llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None },
                layout: field,
                align: effective_field_align,
            }
        };

        // Simple cases, which don't need DST adjustment:
        //   * no metadata available - just log the case
        //   * known alignment - sized types, `[T]`, `str` or a foreign type
        //   * packed struct - there is no alignment padding
        match field.ty.kind() {
            _ if self.llextra.is_none() => {
                debug!(
                    "unsized field `{}` of `{:?}` has no metadata for adjustment",
                    ix, self.llval
                );
                return simple();
            }
            _ if field.is_sized() => return simple(),
            ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
            ty::Adt(def, _) => {
                if def.repr().packed() {
                    // FIXME(eddyb) generalize the adjustment when we
                    // start supporting packing to larger alignments.
                    assert_eq!(self.layout.align.abi.bytes(), 1);
                    return simple();
                }
            }
            _ => {}
        }

        // We need to get the pointer manually now.
        // We do this by casting to a `*i8`, then offsetting it by the appropriate amount.
        // We do this instead of, say, simply adjusting the pointer from the result of a GEP
        // because the field may have an arbitrary alignment in the LLVM representation
        // anyway.
        //
        // To demonstrate:
        //
        //     struct Foo<T: ?Sized> {
        //         x: u16,
        //         y: T
        //     }
        //
        // The type `Foo<Foo<Trait>>` is represented in LLVM as `{ u16, { u16, u8 }}`, meaning that
        // the `y` field has 16-bit alignment.

        let meta = self.llextra;

        let unaligned_offset = bx.cx().const_usize(offset.bytes());

        // Get the alignment of the field
        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);

        // Bump the unaligned offset up to the appropriate alignment
        let offset = round_up_const_value_to_alignment(bx, unaligned_offset, unsized_align);

        debug!("struct_field_ptr: DST field offset: {:?}", offset);

        // Cast and adjust pointer.
        let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
        let byte_ptr = bx.gep(bx.cx().type_i8(), byte_ptr, &[offset]);

        // Finally, cast back to the type expected.
        let ll_fty = bx.cx().backend_type(field);
        debug!("struct_field_ptr: Field type is {:?}", ll_fty);

        PlaceRef {
            llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
            llextra: self.llextra,
            layout: field,
            align: effective_field_align,
        }
    }

    /// Obtain the actual discriminant of a value.
    #[instrument(level = "trace", skip(bx))]
    pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        cast_to: Ty<'tcx>,
    ) -> V {
        let dl = &bx.tcx().data_layout;
        let cast_to_layout = bx.cx().layout_of(cast_to);
        let cast_to_size = cast_to_layout.layout.size();
        let cast_to = bx.cx().immediate_backend_type(cast_to_layout);
        if self.layout.abi.is_uninhabited() {
            return bx.cx().const_undef(cast_to);
        }
        let (tag_scalar, tag_encoding, tag_field) = match self.layout.variants {
            Variants::Single { index } => {
                let discr_val = self
                    .layout
                    .ty
                    .discriminant_for_variant(bx.cx().tcx(), index)
                    .map_or(index.as_u32() as u128, |discr| discr.val);
                return bx.cx().const_uint_big(cast_to, discr_val);
            }
            Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
                (tag, tag_encoding, tag_field)
            }
        };

        // Read the tag/niche-encoded discriminant from memory.
        let tag = self.project_field(bx, tag_field);
        let tag_op = bx.load_operand(tag);
        let tag_imm = tag_op.immediate();

        // Decode the discriminant (specifically if it's niche-encoded).
        match *tag_encoding {
            TagEncoding::Direct => {
                let signed = match tag_scalar.primitive() {
                    // We use `i1` for bytes that are always `0` or `1`,
                    // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                    // let LLVM interpret the `i1` as signed, because
                    // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
                    Int(_, signed) => !tag_scalar.is_bool() && signed,
                    _ => false,
                };
                bx.intcast(tag_imm, cast_to, signed)
            }
            TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
                // Cast to an integer so we don't have to treat a pointer as a
                // special case.
                let (tag, tag_llty) = match tag_scalar.primitive() {
                    // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
                    Pointer(_) => {
                        let t = bx.type_from_integer(dl.ptr_sized_integer());
                        let tag = bx.ptrtoint(tag_imm, t);
                        (tag, t)
                    }
                    _ => (tag_imm, bx.cx().immediate_backend_type(tag_op.layout)),
                };

                let tag_size = tag_scalar.size(bx.cx());
                let max_unsigned = tag_size.unsigned_int_max();
                let max_signed = tag_size.signed_int_max() as u128;
                let min_signed = max_signed + 1;
                let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
                let niche_end = niche_start.wrapping_add(relative_max as u128) & max_unsigned;
                let range = tag_scalar.valid_range(bx.cx());

                let sle = |lhs: u128, rhs: u128| -> bool {
                    // Signed and unsigned comparisons give the same results,
                    // except that in signed comparisons an integer with the
                    // sign bit set is less than one with the sign bit clear.
                    // Toggle the sign bit to do a signed comparison.
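                    // Illustrative check with a one-byte tag: sle(0x80, 0x01)
                    // should be true (-128 <= 1 as signed); toggling the sign
                    // bit compares 0x00 <= 0x81, which is indeed true.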
                    (lhs ^ min_signed) <= (rhs ^ min_signed)
                };

                // We have a subrange `niche_start..=niche_end` inside `range`.
                // If the value of the tag is inside this subrange, it's a
                // "niche value", an increment of the discriminant. Otherwise it
                // indicates the untagged variant.
                // A general algorithm to extract the discriminant from the tag
                // is:
                //     relative_tag = tag - niche_start
                //     is_niche = relative_tag <= (ule) relative_max
                //     discr = if is_niche {
                //         cast(relative_tag) + niche_variants.start()
                //     } else {
                //         untagged_variant
                //     }
                // However, we will likely be able to emit simpler code.
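                //
                // For a concrete picture: given an enum like
                // `enum E { A, B, C(bool) }`, the `bool` payload of `C` has
                // spare bit patterns, so `A` and `B` can be encoded as
                // otherwise-invalid tag values (say 2 and 3, i.e.
                // niche_start = 2, relative_max = 1) while `C` is the
                // untagged variant; a tag of 3 then decodes to
                // relative_tag = 1, i.e. the discriminant of `B`.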

                // Find the least and greatest values in `range`, considered
                // both as signed and unsigned.
                let (low_unsigned, high_unsigned) = if range.start <= range.end {
                    (range.start, range.end)
                } else {
                    (0, max_unsigned)
                };
                let (low_signed, high_signed) = if sle(range.start, range.end) {
                    (range.start, range.end)
                } else {
                    (min_signed, max_signed)
                };

                let niches_ule = niche_start <= niche_end;
                let niches_sle = sle(niche_start, niche_end);
                let cast_smaller = cast_to_size <= tag_size;

                // In the algorithm above, we can change
                //     cast(relative_tag) + niche_variants.start()
                // into
                //     cast(tag + (niche_variants.start() - niche_start))
                // if either the casted type is no larger than the original
                // type, or if the niche values are contiguous (in either the
                // signed or unsigned sense).
                let can_incr = cast_smaller || niches_ule || niches_sle;

                let data_for_boundary_niche = || -> Option<(IntPredicate, u128)> {
                    if !can_incr {
                        None
                    } else if niche_start == low_unsigned {
                        Some((IntPredicate::IntULE, niche_end))
                    } else if niche_end == high_unsigned {
                        Some((IntPredicate::IntUGE, niche_start))
                    } else if niche_start == low_signed {
                        Some((IntPredicate::IntSLE, niche_end))
                    } else if niche_end == high_signed {
                        Some((IntPredicate::IntSGE, niche_start))
                    } else {
                        None
                    }
                };

                let (is_niche, tagged_discr, delta) = if relative_max == 0 {
                    // Best case scenario: only one tagged variant. This will
                    // likely become just a comparison and a jump.
                    // The algorithm is:
                    //     is_niche = tag == niche_start
                    //     discr = if is_niche {
                    //         niche_variants.start()
                    //     } else {
                    //         untagged_variant
                    //     }
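                    // For instance, `Option<bool>` stores `None` as the niche
                    // byte value 2, so this decodes to
                    // `if tag == 2 { 0 } else { 1 }` before the cast to `cast_to`.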
                    let niche_start = bx.cx().const_uint_big(tag_llty, niche_start);
                    let is_niche = bx.icmp(IntPredicate::IntEQ, tag, niche_start);
                    let tagged_discr =
                        bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64);
                    (is_niche, tagged_discr, 0)
                } else if let Some((predicate, constant)) = data_for_boundary_niche() {
                    // The niche values are either the lowest or the highest in
                    // `range`. We can avoid the first subtraction in the
                    // algorithm.
                    // The algorithm is now this:
                    //     is_niche = tag <= niche_end
                    //     discr = if is_niche {
                    //         cast(tag + (niche_variants.start() - niche_start))
                    //     } else {
                    //         untagged_variant
                    //     }
                    // (the first line may instead be tag >= niche_start,
                    // and may be a signed or unsigned comparison)
                    // The arithmetic must be done before the cast, so we can
                    // have the correct wrapping behavior. See issue #104519 for
                    // the consequences of getting this wrong.
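                    // In other words, when the niche values sit at one end of
                    // the tag's valid range, `is_niche` is a single comparison
                    // against that boundary, and the `- niche_start` term is
                    // folded into the constant `delta` added below.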
                    let is_niche =
                        bx.icmp(predicate, tag, bx.cx().const_uint_big(tag_llty, constant));
                    let delta = (niche_variants.start().as_u32() as u128).wrapping_sub(niche_start);
                    let incr_tag = if delta == 0 {
                        tag
                    } else {
                        bx.add(tag, bx.cx().const_uint_big(tag_llty, delta))
                    };

                    let cast_tag = if cast_smaller {
                        bx.intcast(incr_tag, cast_to, false)
                    } else if niches_ule {
                        bx.zext(incr_tag, cast_to)
                    } else {
                        bx.sext(incr_tag, cast_to)
                    };

                    (is_niche, cast_tag, 0)
                } else {
                    // The special cases don't apply, so we'll have to go with
                    // the general algorithm.
                    let relative_discr = bx.sub(tag, bx.cx().const_uint_big(tag_llty, niche_start));
                    let cast_tag = bx.intcast(relative_discr, cast_to, false);
                    let is_niche = bx.icmp(
                        IntPredicate::IntULE,
                        relative_discr,
                        bx.cx().const_uint(tag_llty, relative_max as u64),
                    );
                    (is_niche, cast_tag, niche_variants.start().as_u32() as u128)
                };

                let tagged_discr = if delta == 0 {
                    tagged_discr
                } else {
                    bx.add(tagged_discr, bx.cx().const_uint_big(cast_to, delta))
                };

                let discr = bx.select(
                    is_niche,
                    tagged_discr,
                    bx.cx().const_uint(cast_to, untagged_variant.as_u32() as u64),
                );

                // In principle we could insert assumes on the possible range of `discr`, but
                // currently in LLVM this seems to be a pessimization.

                discr
            }
        }
    }

    /// Sets the discriminant for a new value of the given case of the given
    /// representation.
    pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx,
    ) {
        if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
            // We play it safe by using a well-defined `abort`, but we could go for immediate UB
            // if that turns out to be helpful.
            bx.abort();
            return;
        }
        match self.layout.variants {
            Variants::Single { index } => {
                assert_eq!(index, variant_index);
            }
            Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } => {
                let ptr = self.project_field(bx, tag_field);
                let to =
                    self.layout.ty.discriminant_for_variant(bx.tcx(), variant_index).unwrap().val;
                bx.store(
                    bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
                    ptr.llval,
                    ptr.align,
                );
            }
            Variants::Multiple {
                tag_encoding:
                    TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
                tag_field,
                ..
            } => {
                if variant_index != untagged_variant {
                    let niche = self.project_field(bx, tag_field);
                    let niche_llty = bx.cx().immediate_backend_type(niche.layout);
                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
                    let niche_value = (niche_value as u128).wrapping_add(niche_start);
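                    // E.g. for `Option<&T>`, setting the discriminant to `None`
                    // computes a niche value of 0, so the store below writes a
                    // null pointer into the payload slot.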
                    // FIXME(eddyb): check the actual primitive type here.
                    let niche_llval = if niche_value == 0 {
                        // HACK(eddyb): using `c_null` as it works on all types.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint_big(niche_llty, niche_value)
                    };
                    OperandValue::Immediate(niche_llval).store(bx, niche);
                }
            }
        }
    }

    pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        llindex: V,
    ) -> Self {
        // Statically compute the offset if we can, otherwise just use the element size,
        // as this will yield the lowest alignment.
        let layout = self.layout.field(bx, 0);
        let offset = if let Some(llindex) = bx.const_to_opt_uint(llindex) {
            layout.size.checked_mul(llindex, bx).unwrap_or(layout.size)
        } else {
            layout.size
        };
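        // E.g. for an 8-aligned `[u32; 4]` place, a constant index of 2 gives a
        // statically known 8-byte offset, so the element place keeps 8-byte
        // alignment; with a dynamic index we only know the offset is a multiple
        // of the element size, which restricts the alignment to 4.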

        PlaceRef {
            llval: bx.inbounds_gep(
                bx.cx().backend_type(self.layout),
                self.llval,
                &[bx.cx().const_usize(0), llindex],
            ),
            llextra: None,
            layout,
            align: self.align.restrict_for_offset(offset),
        }
    }

    pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx,
    ) -> Self {
        let mut downcast = *self;
        downcast.layout = self.layout.for_variant(bx.cx(), variant_index);

        // Cast to the appropriate variant struct type.
        let variant_ty = bx.cx().backend_type(downcast.layout);
        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));

        downcast
    }

    pub fn project_type<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        ty: Ty<'tcx>,
    ) -> Self {
        let mut downcast = *self;
        downcast.layout = bx.cx().layout_of(ty);

        // Cast to the appropriate type.
        let variant_ty = bx.cx().backend_type(downcast.layout);
        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));

        downcast
    }

    pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_start(self.llval, self.layout.size);
    }

    pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_end(self.llval, self.layout.size);
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    #[instrument(level = "trace", skip(self, bx))]
    pub fn codegen_place(
        &mut self,
        bx: &mut Bx,
        place_ref: mir::PlaceRef<'tcx>,
    ) -> PlaceRef<'tcx, Bx::Value> {
        let cx = self.cx;
        let tcx = self.cx.tcx();

        let mut base = 0;
        let mut cg_base = match self.locals[place_ref.local] {
            LocalRef::Place(place) => place,
            LocalRef::UnsizedPlace(place) => bx.load_operand(place).deref(cx),
            LocalRef::Operand(..) => {
                if place_ref.has_deref() {
                    base = 1;
                    let cg_base = self.codegen_consume(
                        bx,
                        mir::PlaceRef { projection: &place_ref.projection[..0], ..place_ref },
                    );
                    cg_base.deref(bx.cx())
                } else {
                    bug!("using operand local {:?} as place", place_ref);
                }
            }
        };
        for elem in place_ref.projection[base..].iter() {
            cg_base = match *elem {
                mir::ProjectionElem::Deref => bx.load_operand(cg_base).deref(bx.cx()),
                mir::ProjectionElem::Field(ref field, _) => {
                    cg_base.project_field(bx, field.index())
                }
                mir::ProjectionElem::OpaqueCast(ty) => cg_base.project_type(bx, ty),
                mir::ProjectionElem::Index(index) => {
                    let index = &mir::Operand::Copy(mir::Place::from(index));
                    let index = self.codegen_operand(bx, index);
                    let llindex = index.immediate();
                    cg_base.project_index(bx, llindex)
                }
                mir::ProjectionElem::ConstantIndex { offset, from_end: false, min_length: _ } => {
                    let lloffset = bx.cx().const_usize(offset as u64);
                    cg_base.project_index(bx, lloffset)
                }
                mir::ProjectionElem::ConstantIndex { offset, from_end: true, min_length: _ } => {
                    let lloffset = bx.cx().const_usize(offset as u64);
                    let lllen = cg_base.len(bx.cx());
                    let llindex = bx.sub(lllen, lloffset);
                    cg_base.project_index(bx, llindex)
                }
                mir::ProjectionElem::Subslice { from, to, from_end } => {
                    let mut subslice = cg_base.project_index(bx, bx.cx().const_usize(from as u64));
                    let projected_ty =
                        PlaceTy::from_ty(cg_base.layout.ty).projection_ty(tcx, *elem).ty;
                    subslice.layout = bx.cx().layout_of(self.monomorphize(projected_ty));

                    if subslice.layout.is_unsized() {
                        assert!(from_end, "slice subslices should be `from_end`");
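                        // The new length drops `from` elements at the front and
                        // `to` at the back, e.g. from = 2 and to = 3 shortens
                        // the slice by 5 elements.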
                        subslice.llextra = Some(bx.sub(
                            cg_base.llextra.unwrap(),
                            bx.cx().const_usize((from as u64) + (to as u64)),
                        ));
                    }

                    // Cast the place pointer type to the new
                    // array or slice type (`*[%_; new_len]`).
                    subslice.llval = bx.pointercast(
                        subslice.llval,
                        bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)),
                    );

                    subslice
                }
                mir::ProjectionElem::Downcast(_, v) => cg_base.project_downcast(bx, v),
            };
        }
        debug!("codegen_place(place={:?}) => {:?}", place_ref, cg_base);
        cg_base
    }

    pub fn monomorphized_place_ty(&self, place_ref: mir::PlaceRef<'tcx>) -> Ty<'tcx> {
        let tcx = self.cx.tcx();
        let place_ty = place_ref.ty(self.mir, tcx);
        self.monomorphize(place_ty.ty)
    }
}

fn round_up_const_value_to_alignment<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    value: Bx::Value,
    align: Bx::Value,
) -> Bx::Value {
    // In pseudo code:
    //
    //     if value & (align - 1) == 0 {
    //         value
    //     } else {
    //         (value & !(align - 1)) + align
    //     }
    //
    // Usually this is written without branches as
    //
    //     (value + align - 1) & !(align - 1)
    //
    // But this formula cannot take advantage of constant `value`. E.g. if `value` is known
    // at compile time to be `1`, this expression should be optimized to `align`. However,
    // that optimization only holds if `align` is a power of two. Since the optimizer doesn't
    // know that `align` is a power of two, it cannot perform this optimization.
    //
    // Instead we use
    //
    //     value + (-value & (align - 1))
    //
    // Since `align` is used only once, the expression can be optimized. For `value = 0`
    // it's optimized to `0` even in debug mode.
    //
    // NB: The previous version of this code used
    //
    //     (value + align - 1) & -align
    //
    // Even though `-align == !(align - 1)`, LLVM failed to optimize this even for
    // `value = 0`. Bug report: https://bugs.llvm.org/show_bug.cgi?id=48559
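    //
    // Worked example: value = 5, align = 4 gives -5 & 3 == 3 and 5 + 3 == 8,
    // i.e. 5 rounded up to the next multiple of 4.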
    let one = bx.const_usize(1);
    let align_minus_1 = bx.sub(align, one);
    let neg_value = bx.neg(value);
    let offset = bx.and(neg_value, align_minus_1);
    bx.add(value, offset)
}