use super::operand::OperandValue;
use super::{FunctionCx, LocalRef};

use crate::common::IntPredicate;
use crate::glue;
use crate::traits::*;
use crate::MemFlags;

use rustc_middle::mir;
use rustc_middle::mir::tcx::PlaceTy;
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Ty};
use rustc_target::abi::{Abi, Align, FieldsShape, Int, TagEncoding};
use rustc_target::abi::{VariantIdx, Variants};

#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
    /// A pointer to the contents of the place.
    pub llval: V,

    /// This place's extra data if it is unsized, or `None` if null.
    pub llextra: Option<V>,

    /// The monomorphized type of this place, including variant information.
    pub layout: TyAndLayout<'tcx>,

    /// The alignment we know for this place.
    pub align: Align,
}

impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    pub fn new_sized(llval: V, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
        assert!(!layout.is_unsized());
        PlaceRef { llval, llextra: None, layout, align: layout.align.abi }
    }

    pub fn new_sized_aligned(
        llval: V,
        layout: TyAndLayout<'tcx>,
        align: Align,
    ) -> PlaceRef<'tcx, V> {
        assert!(!layout.is_unsized());
        PlaceRef { llval, llextra: None, layout, align }
    }

    // FIXME(eddyb) pass something else for the name so no work is done
    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
    pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
        let tmp = bx.alloca(bx.cx().backend_type(layout), layout.align.abi);
        Self::new_sized(tmp, layout)
    }

    /// Returns a place for an indirect reference to an unsized place.
    // FIXME(eddyb) pass something else for the name so no work is done
    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
    pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
        let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
        let ptr_layout = bx.cx().layout_of(ptr_ty);
        Self::alloca(bx, ptr_layout)
    }

    pub fn len<Cx: ConstMethods<'tcx, Value = V>>(&self, cx: &Cx) -> V {
        if let FieldsShape::Array { count, .. } = self.layout.fields {
            if self.layout.is_unsized() {
                assert_eq!(count, 0);
                self.llextra.unwrap()
            } else {
                cx.const_usize(count)
            }
        } else {
            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
        }
    }
}

impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    /// Access a field, at a point when the value's case is known.
    pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        ix: usize,
    ) -> Self {
        let field = self.layout.field(bx.cx(), ix);
        let offset = self.layout.fields.offset(ix);
        let effective_field_align = self.align.restrict_for_offset(offset);

        let mut simple = || {
            let llval = match self.layout.abi {
                _ if offset.bytes() == 0 => {
                    // Unions and newtypes only use an offset of 0.
                    // Also handles the first field of Scalar, ScalarPair, and Vector layouts.
                    self.llval
                }
                Abi::ScalarPair(a, b)
                    if offset == a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi) =>
                {
                    // Offset matches second field.
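                    // For example (illustrative types): for a pair of `i32` and `i64`,
                    // the second field starts at `size_of(i32).align_to(align_of(i64))`,
                    // i.e. byte offset `4.align_to(8) == 8`.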
                    let ty = bx.backend_type(self.layout);
                    bx.struct_gep(ty, self.llval, 1)
                }
                Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
                    // ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer.
                    let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
                    bx.gep(bx.cx().type_i8(), byte_ptr, &[bx.const_usize(offset.bytes())])
                }
                Abi::Scalar(_) | Abi::ScalarPair(..) => {
                    // All fields of Scalar and ScalarPair layouts must have been handled by this point.
                    // Vector layouts have additional fields for each element of the vector, so don't panic in that case.
                    bug!(
                        "offset of non-ZST field `{:?}` does not match layout `{:#?}`",
                        field,
                        self.layout
                    );
                }
                _ => {
                    let ty = bx.backend_type(self.layout);
                    bx.struct_gep(ty, self.llval, bx.cx().backend_field_index(self.layout, ix))
                }
            };
            PlaceRef {
                // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
                llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
                llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None },
                layout: field,
                align: effective_field_align,
            }
        };

        // Simple cases, which don't need DST adjustment:
        //   * no metadata available - just log the case
        //   * known alignment - sized types, `[T]`, `str` or a foreign type
        //   * packed struct - there is no alignment padding
        match field.ty.kind() {
            _ if self.llextra.is_none() => {
                debug!(
                    "unsized field `{}`, of `{:?}` has no metadata for adjustment",
                    ix, self.llval
                );
                return simple();
            }
            _ if !field.is_unsized() => return simple(),
            ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
            ty::Adt(def, _) => {
                if def.repr().packed() {
                    // FIXME(eddyb) generalize the adjustment when we
                    // start supporting packing to larger alignments.
                    assert_eq!(self.layout.align.abi.bytes(), 1);
                    return simple();
                }
            }
            _ => {}
        }

        // We need to get the pointer manually now.
        // We do this by casting to a `*i8`, then offsetting it by the appropriate amount.
        // We do this instead of, say, simply adjusting the pointer from the result of a GEP
        // because the field may have an arbitrary alignment in the LLVM representation
        // anyway.
        //
        // To demonstrate:
        //
        //     struct Foo<T: ?Sized> {
        //         x: u16,
        //         y: T
        //     }
        //
        // The type `Foo<Foo<Trait>>` is represented in LLVM as `{ u16, { u16, u8 }}`, meaning that
        // the `y` field has 16-bit alignment.

        let meta = self.llextra;

        let unaligned_offset = bx.cx().const_usize(offset.bytes());

        // Get the alignment of the field
        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);

        // Bump the unaligned offset up to the appropriate alignment
        let offset = round_up_const_value_to_alignment(bx, unaligned_offset, unsized_align);

        debug!("struct_field_ptr: DST field offset: {:?}", offset);

        // Cast and adjust pointer.
        let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
        let byte_ptr = bx.gep(bx.cx().type_i8(), byte_ptr, &[offset]);

        // Finally, cast back to the type expected.
        let ll_fty = bx.cx().backend_type(field);
        debug!("struct_field_ptr: Field type is {:?}", ll_fty);

        PlaceRef {
            llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
            llextra: self.llextra,
            layout: field,
            align: effective_field_align,
        }
    }

    /// Obtain the actual discriminant of a value.
    pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        cast_to: Ty<'tcx>,
    ) -> V {
        let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
        if self.layout.abi.is_uninhabited() {
            return bx.cx().const_undef(cast_to);
        }
        let (tag_scalar, tag_encoding, tag_field) = match self.layout.variants {
            Variants::Single { index } => {
                let discr_val = self
                    .layout
                    .ty
                    .discriminant_for_variant(bx.cx().tcx(), index)
                    .map_or(index.as_u32() as u128, |discr| discr.val);
                return bx.cx().const_uint_big(cast_to, discr_val);
            }
            Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
                (tag, tag_encoding, tag_field)
            }
        };

        // Read the tag/niche-encoded discriminant from memory.
        let tag = self.project_field(bx, tag_field);
        let tag = bx.load_operand(tag);

        // Decode the discriminant (specifically if it's niche-encoded).
        match *tag_encoding {
            TagEncoding::Direct => {
                let signed = match tag_scalar.value {
                    // We use `i1` for bytes that are always `0` or `1`,
                    // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                    // let LLVM interpret the `i1` as signed, because
                    // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
                    Int(_, signed) => !tag_scalar.is_bool() && signed,
                    _ => false,
                };
                bx.intcast(tag.immediate(), cast_to, signed)
            }
            TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
                // Rebase from niche values to discriminants, and check
                // whether the result is in range for the niche variants.
                let niche_llty = bx.cx().immediate_backend_type(tag.layout);
                let tag = tag.immediate();

                // We first compute the "relative discriminant" (wrt `niche_variants`),
                // that is, if `n = niche_variants.end() - niche_variants.start()`,
                // we remap `niche_start..=niche_start + n` (which may wrap around)
                // to (non-wrap-around) `0..=n`, to be able to check whether the
                // discriminant corresponds to a niche variant with one comparison.
                // We also can't go directly to the (variant index) discriminant
                // and check that it is in the range `niche_variants`, because
                // that might not fit in the same type, on top of needing an extra
                // comparison (see also the comment on `let niche_discr`).
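                // For example (illustrative numbers): with a `u8` tag, `niche_start = 255`
                // and `niche_variants = 1..=2` (so `n = 1`), the tag values `255` and `0`
                // (wrapping around) are remapped to relative discriminants `0` and `1`,
                // and the range check becomes the single comparison `relative_discr <= 1`.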
                let relative_discr = if niche_start == 0 {
                    // Avoid subtracting `0`, which wouldn't work for pointers.
                    // FIXME(eddyb) check the actual primitive type here.
                    tag
                } else {
                    bx.sub(tag, bx.cx().const_uint_big(niche_llty, niche_start))
                };
                let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
                let is_niche = if relative_max == 0 {
                    // Avoid calling `const_uint`, which wouldn't work for pointers.
                    // Also use canonical == 0 instead of non-canonical u<= 0.
                    // FIXME(eddyb) check the actual primitive type here.
                    bx.icmp(IntPredicate::IntEQ, relative_discr, bx.cx().const_null(niche_llty))
                } else {
                    let relative_max = bx.cx().const_uint(niche_llty, relative_max as u64);
                    bx.icmp(IntPredicate::IntULE, relative_discr, relative_max)
                };

                // NOTE(eddyb) this addition needs to be performed on the final
                // type, in case the niche itself can't represent all variant
                // indices (e.g. `u8` niche with more than `256` variants,
                // but enough uninhabited variants so that the remaining variants
                // fit in the niche).
                // In other words, `niche_variants.end - niche_variants.start`
                // is representable in the niche, but `niche_variants.end`
                // might not be, in extreme cases.
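                // (Illustrative: if the niche variants have indices `100..=300`, then
                // `relative_max = 200` fits in a `u8` niche, while
                // `niche_variants.end() = 300` does not.)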
                let niche_discr = {
                    let relative_discr = if relative_max == 0 {
                        // HACK(eddyb) since we have only one niche, we know which
                        // one it is, and we can avoid having a dynamic value here.
                        bx.cx().const_uint(cast_to, 0)
                    } else {
                        bx.intcast(relative_discr, cast_to, false)
                    };
                    bx.add(
                        relative_discr,
                        bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
                    )
                };

                bx.select(
                    is_niche,
                    niche_discr,
                    bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64),
                )
            }
        }
    }

    /// Sets the discriminant for a new value of the given case of the given
    /// representation.
    pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx,
    ) {
        if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
            // We play it safe by using a well-defined `abort`, but we could go for immediate UB
            // if that turns out to be helpful.
            bx.abort();
            return;
        }
        match self.layout.variants {
            Variants::Single { index } => {
                assert_eq!(index, variant_index);
            }
            Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } => {
                let ptr = self.project_field(bx, tag_field);
                let to =
                    self.layout.ty.discriminant_for_variant(bx.tcx(), variant_index).unwrap().val;
                bx.store(
                    bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
                    ptr.llval,
                    ptr.align,
                );
            }
            Variants::Multiple {
                tag_encoding:
                    TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
                tag_field,
                ..
            } => {
                if variant_index != dataful_variant {
                    if bx.cx().sess().target.arch == "arm"
                        || bx.cx().sess().target.arch == "aarch64"
                    {
                        // FIXME(#34427): as workaround for LLVM bug on ARM,
                        // use memset of 0 before assigning niche value.
                        let fill_byte = bx.cx().const_u8(0);
                        let size = bx.cx().const_usize(self.layout.size.bytes());
                        bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty());
                    }

                    let niche = self.project_field(bx, tag_field);
                    let niche_llty = bx.cx().immediate_backend_type(niche.layout);
                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
                    let niche_value = (niche_value as u128).wrapping_add(niche_start);
                    // FIXME(eddyb): check the actual primitive type here.
                    let niche_llval = if niche_value == 0 {
                        // HACK(eddyb): using `c_null` as it works on all types.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint_big(niche_llty, niche_value)
                    };
                    OperandValue::Immediate(niche_llval).store(bx, niche);
                }
            }
        }
    }

    pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        llindex: V,
    ) -> Self {
        // Statically compute the offset if we can, otherwise just use the element size,
        // as this will yield the lowest alignment.
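        // (For example, with `u32` elements: a constant index `3` gives `offset = 12`,
        // while a dynamic index falls back to `offset = 4`, which restricts the
        // alignment at least as much as any actual multiple of `4` would.)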
        let layout = self.layout.field(bx, 0);
        let offset = if let Some(llindex) = bx.const_to_opt_uint(llindex) {
            layout.size.checked_mul(llindex, bx).unwrap_or(layout.size)
        } else {
            layout.size
        };

        PlaceRef {
            llval: bx.inbounds_gep(
                bx.cx().backend_type(self.layout),
                self.llval,
                &[bx.cx().const_usize(0), llindex],
            ),
            llextra: None,
            layout,
            align: self.align.restrict_for_offset(offset),
        }
    }

    pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx,
    ) -> Self {
        let mut downcast = *self;
        downcast.layout = self.layout.for_variant(bx.cx(), variant_index);

        // Cast to the appropriate variant struct type.
        let variant_ty = bx.cx().backend_type(downcast.layout);
        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));

        downcast
    }

    pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_start(self.llval, self.layout.size);
    }

    pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_end(self.llval, self.layout.size);
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn codegen_place(
        &mut self,
        bx: &mut Bx,
        place_ref: mir::PlaceRef<'tcx>,
    ) -> PlaceRef<'tcx, Bx::Value> {
        debug!("codegen_place(place_ref={:?})", place_ref);
        let cx = self.cx;
        let tcx = self.cx.tcx();

        let mut base = 0;
        let mut cg_base = match self.locals[place_ref.local] {
            LocalRef::Place(place) => place,
            LocalRef::UnsizedPlace(place) => bx.load_operand(place).deref(cx),
            LocalRef::Operand(..) => {
                if let Some(elem) = place_ref
                    .projection
                    .iter()
                    .enumerate()
                    .find(|elem| matches!(elem.1, mir::ProjectionElem::Deref))
                {
                    base = elem.0 + 1;
                    let cg_base = self.codegen_consume(
                        bx,
                        mir::PlaceRef { projection: &place_ref.projection[..elem.0], ..place_ref },
                    );

                    // a box with a non-zst allocator should not be directly dereferenced
                    if cg_base.layout.ty.is_box() && !cg_base.layout.field(cx, 1).is_zst() {
                        let ptr = cg_base.extract_field(bx, 0).extract_field(bx, 0);

                        ptr.deref(bx.cx())
                    } else {
                        cg_base.deref(bx.cx())
                    }
                } else {
                    bug!("using operand local {:?} as place", place_ref);
                }
            }
        };
        for elem in place_ref.projection[base..].iter() {
            cg_base = match elem.clone() {
                mir::ProjectionElem::Deref => {
                    // a box with a non-zst allocator should not be directly dereferenced
                    if cg_base.layout.ty.is_box() && !cg_base.layout.field(cx, 1).is_zst() {
                        let ptr = cg_base.project_field(bx, 0).project_field(bx, 0);

                        bx.load_operand(ptr).deref(bx.cx())
                    } else {
                        bx.load_operand(cg_base).deref(bx.cx())
                    }
                }
                mir::ProjectionElem::Field(ref field, _) => {
                    cg_base.project_field(bx, field.index())
                }
                mir::ProjectionElem::Index(index) => {
                    let index = &mir::Operand::Copy(mir::Place::from(index));
                    let index = self.codegen_operand(bx, index);
                    let llindex = index.immediate();
                    cg_base.project_index(bx, llindex)
                }
                mir::ProjectionElem::ConstantIndex { offset, from_end: false, min_length: _ } => {
                    let lloffset = bx.cx().const_usize(offset as u64);
                    cg_base.project_index(bx, lloffset)
                }
                mir::ProjectionElem::ConstantIndex { offset, from_end: true, min_length: _ } => {
                    let lloffset = bx.cx().const_usize(offset as u64);
                    let lllen = cg_base.len(bx.cx());
                    let llindex = bx.sub(lllen, lloffset);
                    cg_base.project_index(bx, llindex)
                }
                mir::ProjectionElem::Subslice { from, to, from_end } => {
                    let mut subslice = cg_base.project_index(bx, bx.cx().const_usize(from as u64));
                    let projected_ty =
                        PlaceTy::from_ty(cg_base.layout.ty).projection_ty(tcx, *elem).ty;
                    subslice.layout = bx.cx().layout_of(self.monomorphize(projected_ty));

                    if subslice.layout.is_unsized() {
                        assert!(from_end, "slice subslices should be `from_end`");
                        subslice.llextra = Some(bx.sub(
                            cg_base.llextra.unwrap(),
                            bx.cx().const_usize((from as u64) + (to as u64)),
                        ));
                    }

                    // Cast the place pointer type to the new
                    // array or slice type (`*[%_; new_len]`).
                    subslice.llval = bx.pointercast(
                        subslice.llval,
                        bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)),
                    );

                    subslice
                }
                mir::ProjectionElem::Downcast(_, v) => cg_base.project_downcast(bx, v),
            };
        }
        debug!("codegen_place(place={:?}) => {:?}", place_ref, cg_base);
        cg_base
    }

    pub fn monomorphized_place_ty(&self, place_ref: mir::PlaceRef<'tcx>) -> Ty<'tcx> {
        let tcx = self.cx.tcx();
        let place_ty = place_ref.ty(self.mir, tcx);
        self.monomorphize(place_ty.ty)
    }
}

fn round_up_const_value_to_alignment<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    value: Bx::Value,
    align: Bx::Value,
) -> Bx::Value {
    // In pseudo code:
    //
    //     if value & (align - 1) == 0 {
    //         value
    //     } else {
    //         (value & !(align - 1)) + align
    //     }
    //
    // Usually this is written without branches as
    //
    //     (value + align - 1) & !(align - 1)
    //
    // But this formula cannot take advantage of constant `value`. E.g. if `value` is known
    // at compile time to be `1`, this expression should be optimized to `align`. However,
    // this optimization only holds if `align` is a power of two. Since the optimizer doesn't know
    // that `align` is a power of two, it cannot perform this optimization.
    //
    // Instead we use
    //
    //     value + (-value & (align - 1))
    //
    // Since `align` is used only once, the expression can be optimized. For `value = 0`
    // it is optimized to `0` even in debug mode.
    //
    // NB: The previous version of this code used
    //
    //     (value + align - 1) & -align
    //
    // Even though `-align == !(align - 1)`, LLVM failed to optimize this even for
    // `value = 0`. Bug report: https://bugs.llvm.org/show_bug.cgi?id=48559
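    //
    // For example (illustrative values): for `value = 13` and `align = 8`,
    // `-13 & 7 == 3`, so the result is `13 + 3 == 16`; for `value = 16`,
    // `-16 & 7 == 0` and the value is returned unchanged.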
    let one = bx.const_usize(1);
    let align_minus_1 = bx.sub(align, one);
    let neg_value = bx.neg(value);
    let offset = bx.and(neg_value, align_minus_1);
    bx.add(value, offset)
}