use super::operand::OperandValue;
use super::{FunctionCx, LocalRef};

use crate::common::IntPredicate;
use crate::glue;
use crate::traits::*;
use crate::MemFlags;

use rustc_middle::mir;
use rustc_middle::mir::tcx::PlaceTy;
use rustc_middle::ty::layout::{HasTyCtxt, TyAndLayout};
use rustc_middle::ty::{self, Ty};
use rustc_target::abi::{Abi, Align, FieldsShape, Int, TagEncoding};
use rustc_target::abi::{LayoutOf, VariantIdx, Variants};

#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
    /// A pointer to the contents of the place.
    pub llval: V,

    /// This place's extra data if it is unsized, or `None` if null.
    pub llextra: Option<V>,

    /// The monomorphized type of this place, including variant information.
    pub layout: TyAndLayout<'tcx>,

    /// The alignment we know for this place.
    pub align: Align,
}
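
// Illustrative note (not from the original source): for an unsized place such
// as a `[u8]` reached through a fat pointer, `llval` is the data pointer and
// `llextra` holds the length; for a trait object, `llextra` would hold the
// vtable pointer.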

impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    pub fn new_sized(llval: V, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
        assert!(!layout.is_unsized());
        PlaceRef { llval, llextra: None, layout, align: layout.align.abi }
    }

    pub fn new_sized_aligned(
        llval: V,
        layout: TyAndLayout<'tcx>,
        align: Align,
    ) -> PlaceRef<'tcx, V> {
        assert!(!layout.is_unsized());
        PlaceRef { llval, llextra: None, layout, align }
    }

    // FIXME(eddyb) pass something else for the name so no work is done
    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
    pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
        let tmp = bx.alloca(bx.cx().backend_type(layout), layout.align.abi);
        Self::new_sized(tmp, layout)
    }

    /// Returns a place for an indirect reference to an unsized place.
    // FIXME(eddyb) pass something else for the name so no work is done
    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
    pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
        let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
        let ptr_layout = bx.cx().layout_of(ptr_ty);
        Self::alloca(bx, ptr_layout)
    }

    pub fn len<Cx: ConstMethods<'tcx, Value = V>>(&self, cx: &Cx) -> V {
        if let FieldsShape::Array { count, .. } = self.layout.fields {
            if self.layout.is_unsized() {
                assert_eq!(count, 0);
                self.llextra.unwrap()
            } else {
                cx.const_usize(count)
            }
        } else {
            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
        }
    }
}

impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    /// Access a field, at a point when the value's case is known.
    pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        ix: usize,
    ) -> Self {
        let field = self.layout.field(bx.cx(), ix);
        let offset = self.layout.fields.offset(ix);
        let effective_field_align = self.align.restrict_for_offset(offset);

        let mut simple = || {
            let llval = match self.layout.abi {
                _ if offset.bytes() == 0 => {
                    // Unions and newtypes only use an offset of 0.
                    // Also handles the first field of Scalar, ScalarPair, and Vector layouts.
                    self.llval
                }
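                // Illustrative note (not from the original source): for a
                // `ScalarPair` whose components are `(i8, i32)`, component `a`
                // has size 1, which rounded up to `b`'s 4-byte alignment gives
                // offset 4, so the guard below identifies a field at offset 4
                // as the second component.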
                Abi::ScalarPair(ref a, ref b)
                    if offset == a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi) =>
                {
                    // Offset matches second field.
                    bx.struct_gep(self.llval, 1)
                }
                Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
                    // ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer.
                    let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
                    bx.gep(byte_ptr, &[bx.const_usize(offset.bytes())])
                }
                Abi::Scalar(_) | Abi::ScalarPair(..) => {
                    // All fields of Scalar and ScalarPair layouts must have been handled by this point.
                    // Vector layouts have additional fields for each element of the vector, so don't panic in that case.
                    bug!(
                        "offset of non-ZST field `{:?}` does not match layout `{:#?}`",
                        field,
                        self.layout
                    );
                }
                _ => bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix)),
            };
            PlaceRef {
                // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
                llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
                llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None },
                layout: field,
                align: effective_field_align,
            }
        };

        // Simple cases, which don't need DST adjustment:
        //   * no metadata available - just log the case
        //   * known alignment - sized types, `[T]`, `str` or a foreign type
        //   * packed struct - there is no alignment padding
        match field.ty.kind() {
            _ if self.llextra.is_none() => {
                debug!(
                    "unsized field `{}` of `{:?}` has no metadata for adjustment",
                    ix, self.llval
                );
                return simple();
            }
            _ if !field.is_unsized() => return simple(),
            ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
            ty::Adt(def, _) => {
                if def.repr.packed() {
                    // FIXME(eddyb) generalize the adjustment when we
                    // start supporting packing to larger alignments.
                    assert_eq!(self.layout.align.abi.bytes(), 1);
                    return simple();
                }
            }
            _ => {}
        }

        // We need to get the pointer manually now.
        // We do this by casting to a `*i8`, then offsetting it by the appropriate amount.
        // We do this instead of, say, simply adjusting the pointer from the result of a GEP
        // because the field may have an arbitrary alignment in the LLVM representation
        // anyway.
        //
        // To demonstrate:
        //
        //     struct Foo<T: ?Sized> {
        //         x: u16,
        //         y: T
        //     }
        //
        // The type `Foo<Foo<Trait>>` is represented in LLVM as `{ u16, { u16, u8 }}`, meaning that
        // the `y` field has 16-bit alignment.

        let meta = self.llextra;

        let unaligned_offset = bx.cx().const_usize(offset.bytes());

        // Get the alignment of the field
        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);

        // Bump the unaligned offset up to the appropriate alignment
        let offset = round_up_const_value_to_alignment(bx, unaligned_offset, unsized_align);

        debug!("struct_field_ptr: DST field offset: {:?}", offset);

        // Cast and adjust pointer.
        let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
        let byte_ptr = bx.gep(byte_ptr, &[offset]);

        // Finally, cast back to the type expected.
        let ll_fty = bx.cx().backend_type(field);
        debug!("struct_field_ptr: Field type is {:?}", ll_fty);

        PlaceRef {
            llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
            llextra: self.llextra,
            layout: field,
            align: effective_field_align,
        }
    }

    /// Obtain the actual discriminant of a value.
    pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        cast_to: Ty<'tcx>,
    ) -> V {
        let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
        if self.layout.abi.is_uninhabited() {
            return bx.cx().const_undef(cast_to);
        }
        let (tag_scalar, tag_encoding, tag_field) = match self.layout.variants {
            Variants::Single { index } => {
                let discr_val = self
                    .layout
                    .ty
                    .discriminant_for_variant(bx.cx().tcx(), index)
                    .map_or(index.as_u32() as u128, |discr| discr.val);
                return bx.cx().const_uint_big(cast_to, discr_val);
            }
            Variants::Multiple { ref tag, ref tag_encoding, tag_field, .. } => {
                (tag, tag_encoding, tag_field)
            }
        };

        // Read the tag/niche-encoded discriminant from memory.
        let tag = self.project_field(bx, tag_field);
        let tag = bx.load_operand(tag);

        // Decode the discriminant (specifically if it's niche-encoded).
        match *tag_encoding {
            TagEncoding::Direct => {
                let signed = match tag_scalar.value {
                    // We use `i1` for bytes that are always `0` or `1`,
                    // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                    // let LLVM interpret the `i1` as signed, because
                    // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
                    Int(_, signed) => !tag_scalar.is_bool() && signed,
                    _ => false,
                };
                bx.intcast(tag.immediate(), cast_to, signed)
            }
            TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
                // Rebase from niche values to discriminants, and check
                // whether the result is in range for the niche variants.
                let niche_llty = bx.cx().immediate_backend_type(tag.layout);
                let tag = tag.immediate();

                // We first compute the "relative discriminant" (wrt `niche_variants`),
                // that is, if `n = niche_variants.end() - niche_variants.start()`,
                // we remap `niche_start..=niche_start + n` (which may wrap around)
                // to (non-wrap-around) `0..=n`, to be able to check whether the
                // discriminant corresponds to a niche variant with one comparison.
                // We also can't go directly to the (variant index) discriminant
                // and check that it is in the range `niche_variants`, because
                // that might not fit in the same type, on top of needing an extra
                // comparison (see also the comment on `let niche_discr`).
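                //
                // An illustrative sketch, not from the original source: assume
                // `enum E { A(bool), B, C }` where `A` is the dataful variant
                // (index 0) and the niche lives in the `bool` (valid values
                // `0..=1`), so plausibly `niche_start = 2` and
                // `niche_variants = 1..=2`. Then `B` would be stored as tag `2`
                // and `C` as tag `3`; decoding computes
                // `relative_discr = tag - 2`, checks `relative_discr u<= 1`,
                // and on success yields `relative_discr + 1` (the variant
                // index), otherwise the dataful variant index `0`.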
                let relative_discr = if niche_start == 0 {
                    // Avoid subtracting `0`, which wouldn't work for pointers.
                    // FIXME(eddyb) check the actual primitive type here.
                    tag
                } else {
                    bx.sub(tag, bx.cx().const_uint_big(niche_llty, niche_start))
                };
                let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
                let is_niche = if relative_max == 0 {
                    // Avoid calling `const_uint`, which wouldn't work for pointers.
                    // Also use canonical == 0 instead of non-canonical u<= 0.
                    // FIXME(eddyb) check the actual primitive type here.
                    bx.icmp(IntPredicate::IntEQ, relative_discr, bx.cx().const_null(niche_llty))
                } else {
                    let relative_max = bx.cx().const_uint(niche_llty, relative_max as u64);
                    bx.icmp(IntPredicate::IntULE, relative_discr, relative_max)
                };

                // NOTE(eddyb) this addition needs to be performed on the final
                // type, in case the niche itself can't represent all variant
                // indices (e.g. `u8` niche with more than `256` variants,
                // but enough uninhabited variants so that the remaining variants
                // fit in the niche).
                // In other words, `niche_variants.end - niche_variants.start`
                // is representable in the niche, but `niche_variants.end`
                // might not be, in extreme cases.
                let niche_discr = {
                    let relative_discr = if relative_max == 0 {
                        // HACK(eddyb) since we have only one niche, we know which
                        // one it is, and we can avoid having a dynamic value here.
                        bx.cx().const_uint(cast_to, 0)
                    } else {
                        bx.intcast(relative_discr, cast_to, false)
                    };
                    bx.add(
                        relative_discr,
                        bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
                    )
                };

                bx.select(
                    is_niche,
                    niche_discr,
                    bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64),
                )
            }
        }
    }

    /// Sets the discriminant for a new value of the given case of the given
    /// representation.
    pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx,
    ) {
        if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
            // We play it safe by using a well-defined `abort`, but we could go for immediate UB
            // if that turns out to be helpful.
            bx.abort();
            return;
        }
        match self.layout.variants {
            Variants::Single { index } => {
                assert_eq!(index, variant_index);
            }
            Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } => {
                let ptr = self.project_field(bx, tag_field);
                let to =
                    self.layout.ty.discriminant_for_variant(bx.tcx(), variant_index).unwrap().val;
                bx.store(
                    bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
                    ptr.llval,
                    ptr.align,
                );
            }
            Variants::Multiple {
                tag_encoding:
                    TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
                tag_field,
                ..
            } => {
                if variant_index != dataful_variant {
                    if bx.cx().sess().target.arch == "arm"
                        || bx.cx().sess().target.arch == "aarch64"
                    {
                        // FIXME(#34427): as workaround for LLVM bug on ARM,
                        // use memset of 0 before assigning niche value.
                        let fill_byte = bx.cx().const_u8(0);
                        let size = bx.cx().const_usize(self.layout.size.bytes());
                        bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty());
                    }

                    let niche = self.project_field(bx, tag_field);
                    let niche_llty = bx.cx().immediate_backend_type(niche.layout);
                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
                    let niche_value = (niche_value as u128).wrapping_add(niche_start);
                    // FIXME(eddyb): check the actual primitive type here.
                    let niche_llval = if niche_value == 0 {
                        // HACK(eddyb): using `c_null` as it works on all types.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint_big(niche_llty, niche_value)
                    };
                    OperandValue::Immediate(niche_llval).store(bx, niche);
                }
            }
        }
    }

    pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        llindex: V,
    ) -> Self {
        // Statically compute the offset if we can, otherwise just use the element size,
        // as this will yield the lowest alignment.
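        // For instance (an illustrative note, not from the original source):
        // with `[u16; 8]` and a dynamic index, an element may sit at offset 2,
        // so restricting the alignment by one element size (2 bytes) gives a
        // conservative bound that holds for every element.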
        let layout = self.layout.field(bx, 0);
        let offset = if let Some(llindex) = bx.const_to_opt_uint(llindex) {
            layout.size.checked_mul(llindex, bx).unwrap_or(layout.size)
        } else {
            layout.size
        };

        PlaceRef {
            llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]),
            llextra: None,
            layout,
            align: self.align.restrict_for_offset(offset),
        }
    }

    pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx,
    ) -> Self {
        let mut downcast = *self;
        downcast.layout = self.layout.for_variant(bx.cx(), variant_index);

        // Cast to the appropriate variant struct type.
        let variant_ty = bx.cx().backend_type(downcast.layout);
        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));

        downcast
    }

    pub fn project_deref<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) -> Self {
        let target_ty = self.layout.ty.builtin_deref(true).expect("failed to deref");
        let layout = bx.layout_of(target_ty.ty);

        PlaceRef {
            llval: bx.load(self.llval, self.align),
            llextra: None,
            layout,
            align: layout.align.abi,
        }
    }

    pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_start(self.llval, self.layout.size);
    }

    pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_end(self.llval, self.layout.size);
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn codegen_place(
        &mut self,
        bx: &mut Bx,
        place_ref: mir::PlaceRef<'tcx>,
    ) -> PlaceRef<'tcx, Bx::Value> {
        debug!("codegen_place(place_ref={:?})", place_ref);
        let cx = self.cx;
        let tcx = self.cx.tcx();

        let result = match place_ref {
            mir::PlaceRef { local, projection: [] } => match self.locals[local] {
                LocalRef::Place(place) => {
                    return place;
                }
                LocalRef::UnsizedPlace(place) => {
                    return bx.load_operand(place).deref(cx);
                }
                LocalRef::Operand(..) => {
                    bug!("using operand local {:?} as place", place_ref);
                }
            },
            mir::PlaceRef { local, projection: [proj_base @ .., mir::ProjectionElem::Deref] } => {
                // Load the pointer from its location.
                self.codegen_consume(bx, mir::PlaceRef { local, projection: proj_base })
                    .deref(bx.cx())
            }
            mir::PlaceRef { local, projection: &[ref proj_base @ .., elem] } => {
                // FIXME turn this recursion into iteration
                let cg_base =
                    self.codegen_place(bx, mir::PlaceRef { local, projection: proj_base });

                match elem {
                    mir::ProjectionElem::Deref => bug!(),
                    mir::ProjectionElem::Field(ref field, _) => {
                        cg_base.project_field(bx, field.index())
                    }
                    mir::ProjectionElem::Index(index) => {
                        let index = &mir::Operand::Copy(mir::Place::from(index));
                        let index = self.codegen_operand(bx, index);
                        let llindex = index.immediate();
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::ConstantIndex {
                        offset,
                        from_end: false,
                        min_length: _,
                    } => {
                        let lloffset = bx.cx().const_usize(offset as u64);
                        cg_base.project_index(bx, lloffset)
                    }
                    mir::ProjectionElem::ConstantIndex {
                        offset,
                        from_end: true,
                        min_length: _,
                    } => {
                        let lloffset = bx.cx().const_usize(offset as u64);
                        let lllen = cg_base.len(bx.cx());
                        let llindex = bx.sub(lllen, lloffset);
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::Subslice { from, to, from_end } => {
                        let mut subslice =
                            cg_base.project_index(bx, bx.cx().const_usize(from as u64));
                        let projected_ty =
                            PlaceTy::from_ty(cg_base.layout.ty).projection_ty(tcx, elem).ty;
                        subslice.layout = bx.cx().layout_of(self.monomorphize(projected_ty));

                        if subslice.layout.is_unsized() {
                            assert!(from_end, "slice subslices should be `from_end`");
                            subslice.llextra = Some(bx.sub(
                                cg_base.llextra.unwrap(),
                                bx.cx().const_usize((from as u64) + (to as u64)),
                            ));
                        }

                        // Cast the place pointer type to the new
                        // array or slice type (`*[%_; new_len]`).
                        subslice.llval = bx.pointercast(
                            subslice.llval,
                            bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)),
                        );

                        subslice
                    }
                    mir::ProjectionElem::Downcast(_, v) => cg_base.project_downcast(bx, v),
                }
            }
        };
        debug!("codegen_place(place={:?}) => {:?}", place_ref, result);
        result
    }

    pub fn monomorphized_place_ty(&self, place_ref: mir::PlaceRef<'tcx>) -> Ty<'tcx> {
        let tcx = self.cx.tcx();
        let place_ty = place_ref.ty(self.mir, tcx);
        self.monomorphize(place_ty.ty)
    }
}

fn round_up_const_value_to_alignment<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    value: Bx::Value,
    align: Bx::Value,
) -> Bx::Value {
    // In pseudo code:
    //
    //     if value & (align - 1) == 0 {
    //         value
    //     } else {
    //         (value & !(align - 1)) + align
    //     }
    //
    // Usually this is written without branches as
    //
    //     (value + align - 1) & !(align - 1)
    //
    // But this formula cannot take advantage of constant `value`. E.g. if `value` is known
    // at compile time to be `1`, this expression should be optimized to `align`. However,
    // the optimization only holds if `align` is a power of two. Since the optimizer doesn't
    // know that `align` is a power of two, it cannot perform this optimization.
    //
    // Instead we use
    //
    //     value + (-value & (align - 1))
    //
    // Since `align` is used only once, the expression can be optimized. For `value = 0`
    // it is optimized to `0` even in debug mode.
    //
    // NB: The previous version of this code used
    //
    //     (value + align - 1) & -align
    //
    // Even though `-align == !(align - 1)`, LLVM failed to optimize this even for
    // `value = 0`. Bug report: https://bugs.llvm.org/show_bug.cgi?id=48559
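    //
    // Worked example (illustrative, not from the original source): for
    // `value = 13` and `align = 8`, `-13 & 7 == 3` in two's complement, and
    // `13 + 3 == 16`, the next multiple of 8; for `value = 16`, `-16 & 7 == 0`,
    // so the value is returned unchanged.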
    let one = bx.const_usize(1);
    let align_minus_1 = bx.sub(align, one);
    let neg_value = bx.neg(value);
    let offset = bx.and(neg_value, align_minus_1);
    bx.add(value, offset)
}