// compiler/rustc_codegen_ssa/src/mir/place.rs (rustc.git, upstream 1.61.0+dfsg1)
use super::operand::OperandValue;
use super::{FunctionCx, LocalRef};

use crate::common::IntPredicate;
use crate::glue;
use crate::traits::*;
use crate::MemFlags;

use rustc_middle::mir;
use rustc_middle::mir::tcx::PlaceTy;
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Ty};
use rustc_target::abi::{Abi, Align, FieldsShape, Int, TagEncoding};
use rustc_target::abi::{VariantIdx, Variants};

#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
    /// A pointer to the contents of the place.
    pub llval: V,

    /// This place's extra data if it is unsized, or `None` otherwise.
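    ///
    /// For example, for an unsized place such as `[u8]` or `dyn Trait` this is typically
    /// the slice length or the vtable pointer, respectively; for a sized place such as
    /// `u32` it is `None`.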
    pub llextra: Option<V>,

    /// The monomorphized type of this place, including variant information.
    pub layout: TyAndLayout<'tcx>,

    /// The alignment we know for this place.
    pub align: Align,
}

impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    pub fn new_sized(llval: V, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
        assert!(!layout.is_unsized());
        PlaceRef { llval, llextra: None, layout, align: layout.align.abi }
    }

    pub fn new_sized_aligned(
        llval: V,
        layout: TyAndLayout<'tcx>,
        align: Align,
    ) -> PlaceRef<'tcx, V> {
        assert!(!layout.is_unsized());
        PlaceRef { llval, llextra: None, layout, align }
    }

    // FIXME(eddyb) pass something else for the name so no work is done
    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
    pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
        let tmp = bx.alloca(bx.cx().backend_type(layout), layout.align.abi);
        Self::new_sized(tmp, layout)
    }

    /// Returns a place for an indirect reference to an unsized place.
    // FIXME(eddyb) pass something else for the name so no work is done
    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
    pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
        let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
        let ptr_layout = bx.cx().layout_of(ptr_ty);
        Self::alloca(bx, ptr_layout)
    }

    pub fn len<Cx: ConstMethods<'tcx, Value = V>>(&self, cx: &Cx) -> V {
        if let FieldsShape::Array { count, .. } = self.layout.fields {
            if self.layout.is_unsized() {
                assert_eq!(count, 0);
                self.llextra.unwrap()
            } else {
                cx.const_usize(count)
            }
        } else {
            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
        }
    }
}

impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    /// Access a field, at a point when the value's case is known.
    pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        ix: usize,
    ) -> Self {
        let field = self.layout.field(bx.cx(), ix);
        let offset = self.layout.fields.offset(ix);
        let effective_field_align = self.align.restrict_for_offset(offset);

        let mut simple = || {
            let llval = match self.layout.abi {
                _ if offset.bytes() == 0 => {
                    // Unions and newtypes only use an offset of 0.
                    // Also handles the first field of Scalar, ScalarPair, and Vector layouts.
                    self.llval
                }
                Abi::ScalarPair(a, b)
                    if offset == a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi) =>
                {
                    // Offset matches second field.
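                    // E.g. if scalar `a` occupies 1 byte and `b` is 4-byte aligned, the
                    // second scalar lives at `align_to(1, 4) == 4`, which is exactly the
                    // offset this guard matches.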
                    let ty = bx.backend_type(self.layout);
                    bx.struct_gep(ty, self.llval, 1)
                }
                Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
                    // ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer.
                    let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
                    bx.gep(bx.cx().type_i8(), byte_ptr, &[bx.const_usize(offset.bytes())])
                }
                Abi::Scalar(_) | Abi::ScalarPair(..) => {
                    // All fields of Scalar and ScalarPair layouts must have been handled by this point.
                    // Vector layouts have additional fields for each element of the vector, so don't panic in that case.
                    bug!(
                        "offset of non-ZST field `{:?}` does not match layout `{:#?}`",
                        field,
                        self.layout
                    );
                }
                _ => {
                    let ty = bx.backend_type(self.layout);
                    bx.struct_gep(ty, self.llval, bx.cx().backend_field_index(self.layout, ix))
                }
            };
            PlaceRef {
                // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
                llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
                llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None },
                layout: field,
                align: effective_field_align,
            }
        };

        // Simple cases, which don't need DST adjustment:
        //   * no metadata available - just log the case
        //   * known alignment - sized types, `[T]`, `str` or a foreign type
        //   * packed struct - there is no alignment padding
        match field.ty.kind() {
            _ if self.llextra.is_none() => {
                debug!(
                    "unsized field `{}`, of `{:?}` has no metadata for adjustment",
                    ix, self.llval
                );
                return simple();
            }
            _ if !field.is_unsized() => return simple(),
            ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
            ty::Adt(def, _) => {
                if def.repr().packed() {
                    // FIXME(eddyb) generalize the adjustment when we
                    // start supporting packing to larger alignments.
                    assert_eq!(self.layout.align.abi.bytes(), 1);
                    return simple();
                }
            }
            _ => {}
        }

        // We need to get the pointer manually now.
        // We do this by casting to a `*i8`, then offsetting it by the appropriate amount.
        // We do this instead of, say, simply adjusting the pointer from the result of a GEP
        // because the field may have an arbitrary alignment in the LLVM representation
        // anyway.
        //
        // To demonstrate:
        //
        //     struct Foo<T: ?Sized> {
        //         x: u16,
        //         y: T
        //     }
        //
        // The type `Foo<Foo<Trait>>` is represented in LLVM as `{ u16, { u16, u8 }}`, meaning that
        // the `y` field has 16-bit alignment.
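        //
        // Concretely (illustrative numbers): in `Foo<dyn Trait>` the statically known offset
        // of `y` is 2, but the alignment of the concrete pointee is only known at runtime
        // (from the vtable). If it turns out to be 8, the real offset of `y` is
        // `align_to(2, 8) == 8`, which is what the code below computes dynamically.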

        let meta = self.llextra;

        let unaligned_offset = bx.cx().const_usize(offset.bytes());

        // Get the alignment of the field
        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);

        // Bump the unaligned offset up to the appropriate alignment
        let offset = round_up_const_value_to_alignment(bx, unaligned_offset, unsized_align);

        debug!("struct_field_ptr: DST field offset: {:?}", offset);

        // Cast and adjust pointer.
        let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
        let byte_ptr = bx.gep(bx.cx().type_i8(), byte_ptr, &[offset]);

        // Finally, cast back to the type expected.
        let ll_fty = bx.cx().backend_type(field);
        debug!("struct_field_ptr: Field type is {:?}", ll_fty);

        PlaceRef {
            llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
            llextra: self.llextra,
            layout: field,
            align: effective_field_align,
        }
    }

    /// Obtain the actual discriminant of a value.
    pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        cast_to: Ty<'tcx>,
    ) -> V {
        let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
        if self.layout.abi.is_uninhabited() {
            return bx.cx().const_undef(cast_to);
        }
        let (tag_scalar, tag_encoding, tag_field) = match self.layout.variants {
            Variants::Single { index } => {
                let discr_val = self
                    .layout
                    .ty
                    .discriminant_for_variant(bx.cx().tcx(), index)
                    .map_or(index.as_u32() as u128, |discr| discr.val);
                return bx.cx().const_uint_big(cast_to, discr_val);
            }
            Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
                (tag, tag_encoding, tag_field)
            }
        };

        // Read the tag/niche-encoded discriminant from memory.
        let tag = self.project_field(bx, tag_field);
        let tag = bx.load_operand(tag);

        // Decode the discriminant (specifically if it's niche-encoded).
        match *tag_encoding {
            TagEncoding::Direct => {
                let signed = match tag_scalar.value {
                    // We use `i1` for bytes that are always `0` or `1`,
                    // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                    // let LLVM interpret the `i1` as signed, because
                    // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
                    Int(_, signed) => !tag_scalar.is_bool() && signed,
                    _ => false,
                };
                bx.intcast(tag.immediate(), cast_to, signed)
            }
            TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
                // Rebase from niche values to discriminants, and check
                // whether the result is in range for the niche variants.
                let niche_llty = bx.cx().immediate_backend_type(tag.layout);
                let tag = tag.immediate();

                // We first compute the "relative discriminant" (wrt `niche_variants`),
                // that is, if `n = niche_variants.end() - niche_variants.start()`,
                // we remap `niche_start..=niche_start + n` (which may wrap around)
                // to (non-wrap-around) `0..=n`, to be able to check whether the
                // discriminant corresponds to a niche variant with one comparison.
                // We also can't go directly to the (variant index) discriminant
                // and check that it is in the range `niche_variants`, because
                // that might not fit in the same type, on top of needing an extra
                // comparison (see also the comment on `let niche_discr`).
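                //
                // For example (illustrative layout): `Option<bool>` stores `None` as the
                // niche byte `2`, so `dataful_variant` is `Some`, `niche_variants` covers
                // only `None`, and `niche_start == 2`. The code below then computes
                // `relative_discr = tag - 2`, checks it against `0`, and selects between
                // the `None` and `Some` variant indices.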
                let relative_discr = if niche_start == 0 {
                    // Avoid subtracting `0`, which wouldn't work for pointers.
                    // FIXME(eddyb) check the actual primitive type here.
                    tag
                } else {
                    bx.sub(tag, bx.cx().const_uint_big(niche_llty, niche_start))
                };
                let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
                let is_niche = if relative_max == 0 {
                    // Avoid calling `const_uint`, which wouldn't work for pointers.
                    // Also use canonical == 0 instead of non-canonical u<= 0.
                    // FIXME(eddyb) check the actual primitive type here.
                    bx.icmp(IntPredicate::IntEQ, relative_discr, bx.cx().const_null(niche_llty))
                } else {
                    let relative_max = bx.cx().const_uint(niche_llty, relative_max as u64);
                    bx.icmp(IntPredicate::IntULE, relative_discr, relative_max)
                };

                // NOTE(eddyb) this addition needs to be performed on the final
                // type, in case the niche itself can't represent all variant
                // indices (e.g. `u8` niche with more than `256` variants,
                // but enough uninhabited variants so that the remaining variants
                // fit in the niche).
                // In other words, `niche_variants.end - niche_variants.start`
                // is representable in the niche, but `niche_variants.end`
                // might not be, in extreme cases.
                let niche_discr = {
                    let relative_discr = if relative_max == 0 {
                        // HACK(eddyb) since we have only one niche, we know which
                        // one it is, and we can avoid having a dynamic value here.
                        bx.cx().const_uint(cast_to, 0)
                    } else {
                        bx.intcast(relative_discr, cast_to, false)
                    };
                    bx.add(
                        relative_discr,
                        bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
                    )
                };

                bx.select(
                    is_niche,
                    niche_discr,
                    bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64),
                )
            }
        }
    }

    /// Sets the discriminant for a new value of the given case of the given
    /// representation.
    pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx,
    ) {
        if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
            // We play it safe by using a well-defined `abort`, but we could go for immediate UB
            // if that turns out to be helpful.
            bx.abort();
            return;
        }
        match self.layout.variants {
            Variants::Single { index } => {
                assert_eq!(index, variant_index);
            }
            Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } => {
                let ptr = self.project_field(bx, tag_field);
                let to =
                    self.layout.ty.discriminant_for_variant(bx.tcx(), variant_index).unwrap().val;
                bx.store(
                    bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
                    ptr.llval,
                    ptr.align,
                );
            }
            Variants::Multiple {
                tag_encoding:
                    TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
                tag_field,
                ..
            } => {
                if variant_index != dataful_variant {
                    if bx.cx().sess().target.arch == "arm"
                        || bx.cx().sess().target.arch == "aarch64"
                    {
                        // FIXME(#34427): as workaround for LLVM bug on ARM,
                        // use memset of 0 before assigning niche value.
                        let fill_byte = bx.cx().const_u8(0);
                        let size = bx.cx().const_usize(self.layout.size.bytes());
                        bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty());
                    }

                    let niche = self.project_field(bx, tag_field);
                    let niche_llty = bx.cx().immediate_backend_type(niche.layout);
                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
                    let niche_value = (niche_value as u128).wrapping_add(niche_start);
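                    // E.g. for a layout like `Option<bool>` (illustrative), where `None` is
                    // niched as the byte `2`: writing `None` gives
                    // `niche_value = (0 - 0) + 2 == 2`, and that byte is stored into the
                    // tag field.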
                    // FIXME(eddyb): check the actual primitive type here.
                    let niche_llval = if niche_value == 0 {
                        // HACK(eddyb): using `c_null` as it works on all types.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint_big(niche_llty, niche_value)
                    };
                    OperandValue::Immediate(niche_llval).store(bx, niche);
                }
            }
        }
    }

    pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        llindex: V,
    ) -> Self {
        // Statically compute the offset if we can, otherwise just use the element size,
        // as this will yield the lowest alignment.
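        // (E.g. with an element size of 4 in a place aligned to 16: a constant index of 2
        // yields offset 8 and thus a known 8-byte alignment, while an unknown index only
        // guarantees the element size of 4.)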
        let layout = self.layout.field(bx, 0);
        let offset = if let Some(llindex) = bx.const_to_opt_uint(llindex) {
            layout.size.checked_mul(llindex, bx).unwrap_or(layout.size)
        } else {
            layout.size
        };

        PlaceRef {
            llval: bx.inbounds_gep(
                bx.cx().backend_type(self.layout),
                self.llval,
                &[bx.cx().const_usize(0), llindex],
            ),
            llextra: None,
            layout,
            align: self.align.restrict_for_offset(offset),
        }
    }

    pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx,
    ) -> Self {
        let mut downcast = *self;
        downcast.layout = self.layout.for_variant(bx.cx(), variant_index);

        // Cast to the appropriate variant struct type.
        let variant_ty = bx.cx().backend_type(downcast.layout);
        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));

        downcast
    }

    pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_start(self.llval, self.layout.size);
    }

    pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_end(self.llval, self.layout.size);
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn codegen_place(
        &mut self,
        bx: &mut Bx,
        place_ref: mir::PlaceRef<'tcx>,
    ) -> PlaceRef<'tcx, Bx::Value> {
        debug!("codegen_place(place_ref={:?})", place_ref);
        let cx = self.cx;
        let tcx = self.cx.tcx();

        let mut base = 0;
        let mut cg_base = match self.locals[place_ref.local] {
            LocalRef::Place(place) => place,
            LocalRef::UnsizedPlace(place) => bx.load_operand(place).deref(cx),
            LocalRef::Operand(..) => {
                if let Some(elem) = place_ref
                    .projection
                    .iter()
                    .enumerate()
                    .find(|elem| matches!(elem.1, mir::ProjectionElem::Deref))
                {
                    base = elem.0 + 1;
                    let cg_base = self.codegen_consume(
                        bx,
                        mir::PlaceRef { projection: &place_ref.projection[..elem.0], ..place_ref },
                    );

                    // a box with a non-zst allocator should not be directly dereferenced
                    if cg_base.layout.ty.is_box() && !cg_base.layout.field(cx, 1).is_zst() {
                        let ptr = cg_base.extract_field(bx, 0).extract_field(bx, 0);

                        ptr.deref(bx.cx())
                    } else {
                        cg_base.deref(bx.cx())
                    }
                } else {
                    bug!("using operand local {:?} as place", place_ref);
                }
            }
        };
        for elem in place_ref.projection[base..].iter() {
            cg_base = match elem.clone() {
                mir::ProjectionElem::Deref => {
                    // a box with a non-zst allocator should not be directly dereferenced
                    if cg_base.layout.ty.is_box() && !cg_base.layout.field(cx, 1).is_zst() {
                        let ptr = cg_base.project_field(bx, 0).project_field(bx, 0);

                        bx.load_operand(ptr).deref(bx.cx())
                    } else {
                        bx.load_operand(cg_base).deref(bx.cx())
                    }
                }
                mir::ProjectionElem::Field(ref field, _) => {
                    cg_base.project_field(bx, field.index())
                }
                mir::ProjectionElem::Index(index) => {
                    let index = &mir::Operand::Copy(mir::Place::from(index));
                    let index = self.codegen_operand(bx, index);
                    let llindex = index.immediate();
                    cg_base.project_index(bx, llindex)
                }
                mir::ProjectionElem::ConstantIndex { offset, from_end: false, min_length: _ } => {
                    let lloffset = bx.cx().const_usize(offset as u64);
                    cg_base.project_index(bx, lloffset)
                }
                mir::ProjectionElem::ConstantIndex { offset, from_end: true, min_length: _ } => {
                    let lloffset = bx.cx().const_usize(offset as u64);
                    let lllen = cg_base.len(bx.cx());
                    let llindex = bx.sub(lllen, lloffset);
                    cg_base.project_index(bx, llindex)
                }
                mir::ProjectionElem::Subslice { from, to, from_end } => {
                    let mut subslice = cg_base.project_index(bx, bx.cx().const_usize(from as u64));
                    let projected_ty =
                        PlaceTy::from_ty(cg_base.layout.ty).projection_ty(tcx, *elem).ty;
                    subslice.layout = bx.cx().layout_of(self.monomorphize(projected_ty));

                    if subslice.layout.is_unsized() {
                        assert!(from_end, "slice subslices should be `from_end`");
                        subslice.llextra = Some(bx.sub(
                            cg_base.llextra.unwrap(),
                            bx.cx().const_usize((from as u64) + (to as u64)),
                        ));
                    }

                    // Cast the place pointer type to the new
                    // array or slice type (`*[%_; new_len]`).
                    subslice.llval = bx.pointercast(
                        subslice.llval,
                        bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)),
                    );

                    subslice
                }
                mir::ProjectionElem::Downcast(_, v) => cg_base.project_downcast(bx, v),
            };
        }
        debug!("codegen_place(place={:?}) => {:?}", place_ref, cg_base);
        cg_base
    }

    pub fn monomorphized_place_ty(&self, place_ref: mir::PlaceRef<'tcx>) -> Ty<'tcx> {
        let tcx = self.cx.tcx();
        let place_ty = place_ref.ty(self.mir, tcx);
        self.monomorphize(place_ty.ty)
    }
}

fn round_up_const_value_to_alignment<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    value: Bx::Value,
    align: Bx::Value,
) -> Bx::Value {
    // In pseudo code:
    //
    //     if value & (align - 1) == 0 {
    //         value
    //     } else {
    //         (value & !(align - 1)) + align
    //     }
    //
    // Usually this is written without branches as
    //
    //     (value + align - 1) & !(align - 1)
    //
    // But this formula cannot take advantage of constant `value`. E.g. if `value` is known
    // at compile time to be `1`, this expression should be optimized to `align`. However,
    // the optimization only holds if `align` is a power of two. Since the optimizer doesn't
    // know that `align` is a power of two, it cannot perform this optimization.
    //
    // Instead we use
    //
    //     value + (-value & (align - 1))
    //
    // Since `align` is used only once, the expression can be optimized. For `value = 0`
    // it's optimized to `0` even in debug mode.
    //
    // NB: The previous version of this code used
    //
    //     (value + align - 1) & -align
    //
    // Even though `-align == !(align - 1)`, LLVM failed to optimize this even for
    // `value = 0`. Bug report: https://bugs.llvm.org/show_bug.cgi?id=48559
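    //
    // Worked example of the chosen formula: for `value = 5` and `align = 4`,
    // `-5 & 3 == 3`, so the result is `5 + 3 == 8`; for `value = 8`, `-8 & 3 == 0`
    // and the value is returned unchanged.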
    let one = bx.const_usize(1);
    let align_minus_1 = bx.sub(align, one);
    let neg_value = bx.neg(value);
    let offset = bx.and(neg_value, align_minus_1);
    bx.add(value, offset)
}