1 use super::operand
::OperandValue
;
2 use super::{FunctionCx, LocalRef}
;
4 use crate::common
::IntPredicate
;
10 use rustc
::mir
::tcx
::PlaceTy
;
11 use rustc
::ty
::layout
::{self, Align, HasTyCtxt, LayoutOf, TyLayout, VariantIdx}
;
12 use rustc
::ty
::{self, Ty}
;
14 #[derive(Copy, Clone, Debug)]
15 pub struct PlaceRef
<'tcx
, V
> {
16 /// A pointer to the contents of the place.
19 /// This place's extra data if it is unsized, or `None` if null.
20 pub llextra
: Option
<V
>,
22 /// The monomorphized type of this place, including variant information.
23 pub layout
: TyLayout
<'tcx
>,
25 /// The alignment we know for this place.
impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    /// Creates a `PlaceRef` for a sized value, with no extra metadata and the
    /// alignment taken from the layout's ABI alignment.
    pub fn new_sized(llval: V, layout: TyLayout<'tcx>) -> PlaceRef<'tcx, V> {
        // Unsized values need `llextra` metadata; callers must use the
        // unsized constructors/paths for those.
        assert!(!layout.is_unsized());
        PlaceRef { llval, llextra: None, layout, align: layout.align.abi }
    }
35 pub fn new_sized_aligned(llval
: V
, layout
: TyLayout
<'tcx
>, align
: Align
) -> PlaceRef
<'tcx
, V
> {
36 assert
!(!layout
.is_unsized());
37 PlaceRef { llval, llextra: None, layout, align }
40 // FIXME(eddyb) pass something else for the name so no work is done
41 // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
42 pub fn alloca
<Bx
: BuilderMethods
<'a
, 'tcx
, Value
= V
>>(
44 layout
: TyLayout
<'tcx
>,
46 assert
!(!layout
.is_unsized(), "tried to statically allocate unsized place");
47 let tmp
= bx
.alloca(bx
.cx().backend_type(layout
), layout
.align
.abi
);
48 Self::new_sized(tmp
, layout
)
51 /// Returns a place for an indirect reference to an unsized place.
52 // FIXME(eddyb) pass something else for the name so no work is done
53 // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
54 pub fn alloca_unsized_indirect
<Bx
: BuilderMethods
<'a
, 'tcx
, Value
= V
>>(
56 layout
: TyLayout
<'tcx
>,
58 assert
!(layout
.is_unsized(), "tried to allocate indirect place for sized values");
59 let ptr_ty
= bx
.cx().tcx().mk_mut_ptr(layout
.ty
);
60 let ptr_layout
= bx
.cx().layout_of(ptr_ty
);
61 Self::alloca(bx
, ptr_layout
)
64 pub fn len
<Cx
: ConstMethods
<'tcx
, Value
= V
>>(&self, cx
: &Cx
) -> V
{
65 if let layout
::FieldPlacement
::Array { count, .. }
= self.layout
.fields
{
66 if self.layout
.is_unsized() {
73 bug
!("unexpected layout `{:#?}` in PlaceRef::len", self.layout
)
// NOTE(review): this block is heavily truncated by the extraction (the
// `simple` closure's binding, the parameter list, and several match-arm
// interiors are missing — original line numbers embedded in the text jump,
// e.g. 91→93, 114→116, 127→134). The code is left byte-identical; only
// comments are added. Do not assume it compiles as-is.
78 impl<'a
, 'tcx
, V
: CodegenObject
> PlaceRef
<'tcx
, V
> {
79 /// Access a field, at a point when the value's case is known.
80 pub fn project_field
<Bx
: BuilderMethods
<'a
, 'tcx
, Value
= V
>>(
// Field layout, its byte offset within `self`, and the alignment known to
// hold at that offset (the place's alignment restricted by the offset).
85 let field
= self.layout
.field(bx
.cx(), ix
);
86 let offset
= self.layout
.fields
.offset(ix
);
87 let effective_field_align
= self.align
.restrict_for_offset(offset
);
// Compute the field pointer: offset-0 fields reuse `self.llval`;
// ScalarPair layouts use struct_gep index 1 for the second half; otherwise
// a struct_gep at the backend field index.
90 // Unions and newtypes only use an offset of 0.
91 let llval
= if offset
.bytes() == 0 {
93 } else if let layout
::Abi
::ScalarPair(ref a
, ref b
) = self.layout
.abi
{
94 // Offsets have to match either first or second field.
95 assert_eq
!(offset
, a
.value
.size(bx
.cx()).align_to(b
.value
.align(bx
.cx()).abi
));
96 bx
.struct_gep(self.llval
, 1)
98 bx
.struct_gep(self.llval
, bx
.cx().backend_field_index(self.layout
, ix
))
// The "simple" result: bitcast the pointer to the field type, keeping
// `llextra` only when the field type itself carries metadata.
101 // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
102 llval
: bx
.pointercast(llval
, bx
.cx().type_ptr_to(bx
.cx().backend_type(field
))),
103 llextra
: if bx
.cx().type_has_metadata(field
.ty
) { self.llextra }
else { None }
,
105 align
: effective_field_align
,
109 // Simple cases, which don't need DST adjustment:
110 // * no metadata available - just log the case
111 // * known alignment - sized types, `[T]`, `str` or a foreign type
112 // * packed struct - there is no alignment padding
113 match field
.ty
.kind
{
114 _
if self.llextra
.is_none() => {
116 "unsized field `{}`, of `{:?}` has no metadata for adjustment",
121 _
if !field
.is_unsized() => return simple(),
122 ty
::Slice(..) | ty
::Str
| ty
::Foreign(..) => return simple(),
124 if def
.repr
.packed() {
125 // FIXME(eddyb) generalize the adjustment when we
126 // start supporting packing to larger alignments.
127 assert_eq
!(self.layout
.align
.abi
.bytes(), 1);
// Slow path: a field whose offset depends on the dynamic alignment of a
// preceding unsized field — compute the pointer by hand.
134 // We need to get the pointer manually now.
135 // We do this by casting to a `*i8`, then offsetting it by the appropriate amount.
136 // We do this instead of, say, simply adjusting the pointer from the result of a GEP
137 // because the field may have an arbitrary alignment in the LLVM representation
142 // struct Foo<T: ?Sized> {
147 // The type `Foo<Foo<Trait>>` is represented in LLVM as `{ u16, { u16, u8 }}`, meaning that
148 // the `y` field has 16-bit alignment.
150 let meta
= self.llextra
;
152 let unaligned_offset
= bx
.cx().const_usize(offset
.bytes());
154 // Get the alignment of the field
155 let (_
, unsized_align
) = glue
::size_and_align_of_dst(bx
, field
.ty
, meta
);
157 // Bump the unaligned offset up to the appropriate alignment using the
158 // following expression:
160 // (unaligned offset + (align - 1)) & -align
163 let align_sub_1
= bx
.sub(unsized_align
, bx
.cx().const_usize(1u64));
164 let and_lhs
= bx
.add(unaligned_offset
, align_sub_1
);
165 let and_rhs
= bx
.neg(unsized_align
);
166 let offset
= bx
.and(and_lhs
, and_rhs
);
168 debug
!("struct_field_ptr: DST field offset: {:?}", offset
);
170 // Cast and adjust pointer.
171 let byte_ptr
= bx
.pointercast(self.llval
, bx
.cx().type_i8p());
172 let byte_ptr
= bx
.gep(byte_ptr
, &[offset
]);
174 // Finally, cast back to the type expected.
175 let ll_fty
= bx
.cx().backend_type(field
);
176 debug
!("struct_field_ptr: Field type is {:?}", ll_fty
);
// Result of the DST-adjusted path: runtime-computed pointer, metadata
// propagated unchanged.
179 llval
: bx
.pointercast(byte_ptr
, bx
.cx().type_ptr_to(ll_fty
)),
180 llextra
: self.llextra
,
182 align
: effective_field_align
,
// NOTE(review): truncated by extraction (parameter list, several closing
// braces, and the final niche `select` are missing — embedded line numbers
// jump, e.g. 197→201, 227→232, 283→290). Code left byte-identical; comments
// added only.
186 /// Obtain the actual discriminant of a value.
187 pub fn codegen_get_discr
<Bx
: BuilderMethods
<'a
, 'tcx
, Value
= V
>>(
192 let cast_to
= bx
.cx().immediate_backend_type(bx
.cx().layout_of(cast_to
));
// Uninhabited values can never be read, so any value will do.
193 if self.layout
.abi
.is_uninhabited() {
194 return bx
.cx().const_undef(cast_to
);
196 let (discr_scalar
, discr_kind
, discr_index
) = match self.layout
.variants
{
// Single-variant layouts have a statically-known discriminant: return a
// constant without reading memory.
197 layout
::Variants
::Single { index }
=> {
201 .discriminant_for_variant(bx
.cx().tcx(), index
)
202 .map_or(index
.as_u32() as u128
, |discr
| discr
.val
);
203 return bx
.cx().const_uint_big(cast_to
, discr_val
);
205 layout
::Variants
::Multiple { ref discr, ref discr_kind, discr_index, .. }
=> {
206 (discr
, discr_kind
, discr_index
)
210 // Read the tag/niche-encoded discriminant from memory.
211 let encoded_discr
= self.project_field(bx
, discr_index
);
212 let encoded_discr
= bx
.load_operand(encoded_discr
);
214 // Decode the discriminant (specifically if it's niche-encoded).
// Plain tag: just cast the loaded tag to the target type, with
// signedness derived from the tag's scalar kind.
216 layout
::DiscriminantKind
::Tag
=> {
217 let signed
= match discr_scalar
.value
{
218 // We use `i1` for bytes that are always `0` or `1`,
219 // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
220 // let LLVM interpret the `i1` as signed, because
221 // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
222 layout
::Int(_
, signed
) => !discr_scalar
.is_bool() && signed
,
225 bx
.intcast(encoded_discr
.immediate(), cast_to
, signed
)
227 layout
::DiscriminantKind
::Niche
{
232 // Rebase from niche values to discriminants, and check
233 // whether the result is in range for the niche variants.
234 let niche_llty
= bx
.cx().immediate_backend_type(encoded_discr
.layout
)
;
235 let encoded_discr
= encoded_discr
.immediate();
237 // We first compute the "relative discriminant" (wrt `niche_variants`),
238 // that is, if `n = niche_variants.end() - niche_variants.start()`,
239 // we remap `niche_start..=niche_start + n` (which may wrap around)
240 // to (non-wrap-around) `0..=n`, to be able to check whether the
241 // discriminant corresponds to a niche variant with one comparison.
242 // We also can't go directly to the (variant index) discriminant
243 // and check that it is in the range `niche_variants`, because
244 // that might not fit in the same type, on top of needing an extra
245 // comparison (see also the comment on `let niche_discr`).
246 let relative_discr
= if niche_start
== 0 {
247 // Avoid subtracting `0`, which wouldn't work for pointers.
248 // FIXME(eddyb) check the actual primitive type here.
251 bx
.sub(encoded_discr
, bx
.cx().const_uint_big(niche_llty
, niche_start
))
253 let relative_max
= niche_variants
.end().as_u32() - niche_variants
.start().as_u32();
255 let relative_max
= if relative_max
== 0 {
256 // Avoid calling `const_uint`, which wouldn't work for pointers.
257 // FIXME(eddyb) check the actual primitive type here.
258 bx
.cx().const_null(niche_llty
)
260 bx
.cx().const_uint(niche_llty
, relative_max
as u64)
// In-range check: relative_discr <= relative_max (unsigned).
262 bx
.icmp(IntPredicate
::IntULE
, relative_discr
, relative_max
)
265 // NOTE(eddyb) this addition needs to be performed on the final
266 // type, in case the niche itself can't represent all variant
267 // indices (e.g. `u8` niche with more than `256` variants,
268 // but enough uninhabited variants so that the remaining variants
269 // fit in the niche).
270 // In other words, `niche_variants.end - niche_variants.start`
271 // is representable in the niche, but `niche_variants.end`
272 // might not be, in extreme cases.
274 let relative_discr
= if relative_max
== 0 {
275 // HACK(eddyb) since we have only one niche, we know which
276 // one it is, and we can avoid having a dynamic value here.
277 bx
.cx().const_uint(cast_to
, 0)
279 bx
.intcast(relative_discr
, cast_to
, false)
// Final select between the rebased niche discriminant (when in range)
// and the dataful variant's discriminant (otherwise).
283 bx
.cx().const_uint(cast_to
, niche_variants
.start().as_u32() as u64),
290 bx
.cx().const_uint(cast_to
, dataful_variant
.as_u32() as u64),
// NOTE(review): truncated by extraction (parameter list, the abort call, the
// tag-store `bx.store(...)` wrapper, and several closing braces are missing —
// embedded line numbers jump, e.g. 305→309, 314→318, 335→337). Code left
// byte-identical; comments added only.
296 /// Sets the discriminant for a new value of the given case of the given
298 pub fn codegen_set_discr
<Bx
: BuilderMethods
<'a
, 'tcx
, Value
= V
>>(
301 variant_index
: VariantIdx
,
// Writing a discriminant for an uninhabited variant can never be
// meaningful; trap instead of emitting a store.
303 if self.layout
.for_variant(bx
.cx(), variant_index
).abi
.is_uninhabited() {
304 // We play it safe by using a well-defined `abort`, but we could go for immediate UB
305 // if that turns out to be helpful.
309 match self.layout
.variants
{
// Single-variant layouts store nothing; just sanity-check the index.
310 layout
::Variants
::Single { index }
=> {
311 assert_eq
!(index
, variant_index
);
// Plain tag: store the variant's discriminant value into the tag field.
313 layout
::Variants
::Multiple
{
314 discr_kind
: layout
::DiscriminantKind
::Tag
,
318 let ptr
= self.project_field(bx
, discr_index
);
320 self.layout
.ty
.discriminant_for_variant(bx
.tcx(), variant_index
).unwrap().val
;
322 bx
.cx().const_uint_big(bx
.cx().backend_type(ptr
.layout
), to
),
// Niche encoding: only non-dataful variants require a store.
327 layout
::Variants
::Multiple
{
329 layout
::DiscriminantKind
::Niche { dataful_variant, ref niche_variants, niche_start }
,
333 if variant_index
!= dataful_variant
{
334 if bx
.cx().sess().target
.target
.arch
== "arm"
335 || bx
.cx().sess().target
.target
.arch
== "aarch64"
337 // FIXME(#34427): as workaround for LLVM bug on ARM,
338 // use memset of 0 before assigning niche value.
339 let fill_byte
= bx
.cx().const_u8(0);
340 let size
= bx
.cx().const_usize(self.layout
.size
.bytes());
341 bx
.memset(self.llval
, fill_byte
, size
, self.align
, MemFlags
::empty());
// Encode the variant index into the niche's value range and store it.
344 let niche
= self.project_field(bx
, discr_index
);
345 let niche_llty
= bx
.cx().immediate_backend_type(niche
.layout
);
346 let niche_value
= variant_index
.as_u32() - niche_variants
.start().as_u32();
347 let niche_value
= (niche_value
as u128
).wrapping_add(niche_start
);
348 // FIXME(eddyb): check the actual primitive type here.
349 let niche_llval
= if niche_value
== 0 {
350 // HACK(eddyb): using `c_null` as it works on all types.
351 bx
.cx().const_null(niche_llty
)
353 bx
.cx().const_uint_big(niche_llty
, niche_value
)
355 OperandValue
::Immediate(niche_llval
).store(bx
, niche
);
361 pub fn project_index
<Bx
: BuilderMethods
<'a
, 'tcx
, Value
= V
>>(
366 // Statically compute the offset if we can, otherwise just use the element size,
367 // as this will yield the lowest alignment.
368 let layout
= self.layout
.field(bx
, 0);
369 let offset
= if let Some(llindex
) = bx
.const_to_opt_uint(llindex
) {
370 layout
.size
.checked_mul(llindex
, bx
).unwrap_or(layout
.size
)
376 llval
: bx
.inbounds_gep(self.llval
, &[bx
.cx().const_usize(0), llindex
]),
379 align
: self.align
.restrict_for_offset(offset
),
383 pub fn project_downcast
<Bx
: BuilderMethods
<'a
, 'tcx
, Value
= V
>>(
386 variant_index
: VariantIdx
,
388 let mut downcast
= *self;
389 downcast
.layout
= self.layout
.for_variant(bx
.cx(), variant_index
);
391 // Cast to the appropriate variant struct type.
392 let variant_ty
= bx
.cx().backend_type(downcast
.layout
);
393 downcast
.llval
= bx
.pointercast(downcast
.llval
, bx
.cx().type_ptr_to(variant_ty
));
398 pub fn storage_live
<Bx
: BuilderMethods
<'a
, 'tcx
, Value
= V
>>(&self, bx
: &mut Bx
) {
399 bx
.lifetime_start(self.llval
, self.layout
.size
);
402 pub fn storage_dead
<Bx
: BuilderMethods
<'a
, 'tcx
, Value
= V
>>(&self, bx
: &mut Bx
) {
403 bx
.lifetime_end(self.llval
, self.layout
.size
);
// NOTE(review): this block is heavily truncated by the extraction (the `bx`
// parameter, `let cg_base = ...`, the `match *elem` scrutinee, several arm
// interiors and closing braces are missing — embedded line numbers jump,
// e.g. 419→422, 437→440, 450→455). Code left byte-identical; comments added
// only.
407 impl<'a
, 'tcx
, Bx
: BuilderMethods
<'a
, 'tcx
>> FunctionCx
<'a
, 'tcx
, Bx
> {
// Lowers a MIR place (local + projection list) to a backend `PlaceRef`.
408 pub fn codegen_place(
411 place_ref
: mir
::PlaceRef
<'tcx
>,
412 ) -> PlaceRef
<'tcx
, Bx
::Value
> {
413 debug
!("codegen_place(place_ref={:?})", place_ref
);
415 let tcx
= self.cx
.tcx();
417 let result
= match place_ref
{
// Bare local (empty projection): resolve directly from `self.locals`.
418 mir
::PlaceRef { local, projection: [] }
=> match self.locals
[local
] {
419 LocalRef
::Place(place
) => {
422 LocalRef
::UnsizedPlace(place
) => {
423 return bx
.load_operand(place
).deref(cx
);
425 LocalRef
::Operand(..) => {
426 bug
!("using operand local {:?} as place", place_ref
);
// Trailing `Deref`: codegen the base as an operand (the pointer) and
// dereference it.
429 mir
::PlaceRef { local, projection: [proj_base @ .., mir::ProjectionElem::Deref] }
=> {
430 // Load the pointer from its location.
431 self.codegen_consume(bx
, mir
::PlaceRef { local, projection: proj_base }
)
// Any other trailing projection element: recurse on the base, then
// apply the element.
434 mir
::PlaceRef { local, projection: [proj_base @ .., elem] }
=> {
435 // FIXME turn this recursion into iteration
437 self.codegen_place(bx
, mir
::PlaceRef { local, projection: proj_base }
);
440 mir
::ProjectionElem
::Deref
=> bug
!(),
441 mir
::ProjectionElem
::Field(ref field
, _
) => {
442 cg_base
.project_field(bx
, field
.index())
// Dynamic index: codegen the index operand, then project.
444 mir
::ProjectionElem
::Index(index
) => {
445 let index
= &mir
::Operand
::Copy(mir
::Place
::from(*index
));
446 let index
= self.codegen_operand(bx
, index
);
447 let llindex
= index
.immediate();
448 cg_base
.project_index(bx
, llindex
)
// Constant index from the start of the array/slice.
450 mir
::ProjectionElem
::ConstantIndex
{
455 let lloffset
= bx
.cx().const_usize(*offset
as u64);
456 cg_base
.project_index(bx
, lloffset
)
// Constant index counted from the end: index = len - offset.
458 mir
::ProjectionElem
::ConstantIndex
{
463 let lloffset
= bx
.cx().const_usize(*offset
as u64);
464 let lllen
= cg_base
.len(bx
.cx());
465 let llindex
= bx
.sub(lllen
, lloffset
);
466 cg_base
.project_index(bx
, llindex
)
// Subslice: re-project to the start, fix up layout/metadata/pointer.
468 mir
::ProjectionElem
::Subslice { from, to, from_end }
=> {
470 cg_base
.project_index(bx
, bx
.cx().const_usize(*from
as u64));
472 PlaceTy
::from_ty(cg_base
.layout
.ty
).projection_ty(tcx
, elem
).ty
;
473 subslice
.layout
= bx
.cx().layout_of(self.monomorphize(&projected_ty
));
475 if subslice
.layout
.is_unsized() {
476 assert
!(from_end
, "slice subslices should be `from_end`");
// New slice length = old length - from - to.
477 subslice
.llextra
= Some(bx
.sub(
478 cg_base
.llextra
.unwrap(),
479 bx
.cx().const_usize((*from
as u64) + (*to
as u64)),
483 // Cast the place pointer type to the new
484 // array or slice type (`*[%_; new_len]`).
485 subslice
.llval
= bx
.pointercast(
487 bx
.cx().type_ptr_to(bx
.cx().backend_type(subslice
.layout
)),
492 mir
::ProjectionElem
::Downcast(_
, v
) => cg_base
.project_downcast(bx
, *v
),
496 debug
!("codegen_place(place={:?}) => {:?}", place_ref
, result
);
500 pub fn monomorphized_place_ty(&self, place_ref
: mir
::PlaceRef
<'tcx
>) -> Ty
<'tcx
> {
501 let tcx
= self.cx
.tcx();
502 let place_ty
= mir
::Place
::ty_from(place_ref
.local
, place_ref
.projection
, *self.mir
, tcx
);
503 self.monomorphize(&place_ty
.ty
)