3 use crate::type_
::Type
;
4 use rustc_codegen_ssa
::traits
::*;
6 use rustc_middle
::ty
::layout
::{FnAbiExt, TyAndLayout}
;
7 use rustc_middle
::ty
::print
::obsolete
::DefPathBasedNames
;
8 use rustc_middle
::ty
::{self, Ty, TypeFoldable}
;
9 use rustc_target
::abi
::{Abi, AddressSpace, Align, FieldsShape}
;
10 use rustc_target
::abi
::{Int, Pointer, F32, F64}
;
11 use rustc_target
::abi
::{LayoutOf, PointeeInfo, Scalar, Size, TyAndLayoutMethods, Variants}
;
// Builds the LLVM type for `layout` when it is not found in the context's
// type caches. NOTE(review): this chunk's text is garbled (original lines
// split and their line numbers fused in) and several lines are missing;
// the comments below annotate the visible fragments without altering them.
16 fn uncached_llvm_type
<'a
, 'tcx
>(
// `cx`: codegen context used to create LLVM types.
17 cx
: &CodegenCx
<'a
, 'tcx
>,
// `layout`: the type-and-layout pair being lowered.
18 layout
: TyAndLayout
<'tcx
>,
// `defer`: out-slot for a named struct whose body must be filled in later
// by the caller via `struct_llfields` (allows recursive types).
19 defer
: &mut Option
<(&'a Type
, TyAndLayout
<'tcx
>)>,
// Single scalars are handled by the caller, never here.
22 Abi
::Scalar(_
) => bug
!("handled elsewhere"),
23 Abi
::Vector { ref element, count }
=> {
24 // LLVM has a separate type for 64-bit SIMD vectors on X86 called
25 // `x86_mmx` which is needed for some SIMD operations. As a bit of a
26 // hack (all SIMD definitions are super unstable anyway) we
27 // recognize any one-element SIMD vector as "this should be an
28 // x86_mmx" type. In general there shouldn't be a need for other
29 // one-element SIMD vectors, so it's assumed this won't clash with
// A 64-bit one-element vector on x86/x86_64 is lowered to `x86_mmx`.
31 let use_x86_mmx
= count
== 1
32 && layout
.size
.bits() == 64
33 && (cx
.sess().target
.target
.arch
== "x86"
34 || cx
.sess().target
.target
.arch
== "x86_64");
36 return cx
.type_x86_mmx();
// Otherwise lower the element scalar and build an LLVM vector of it.
38 let element
= layout
.scalar_llvm_type_at(cx
, element
, Size
::ZERO
);
39 return cx
.type_vector(element
, count
);
// Scalar pairs become an anonymous two-field LLVM struct (both elements
// in their memory form, `immediate = false`).
42 Abi
::ScalarPair(..) => {
43 return cx
.type_struct(
45 layout
.scalar_pair_element_llvm_type(cx
, 0, false),
46 layout
.scalar_pair_element_llvm_type(cx
, 1, false),
// Uninhabited and aggregate ABIs fall through to the shape-based lowering.
51 Abi
::Uninhabited
| Abi
::Aggregate { .. }
=> {}
// Compute an optional readable name for the aggregate's LLVM struct.
54 let name
= match layout
.ty
.kind
{
58 // FIXME(eddyb) producing readable type names for trait objects can result
59 // in problematically distinct types due to HRTB and subtyping (see #47638).
63 let mut name
= String
::with_capacity(32);
64 let printer
= DefPathBasedNames
::new(cx
.tcx
, true, true);
65 printer
.push_type_name(layout
.ty
, &mut name
, false);
// For an enum layout pinned to a single variant, append `::VariantName`.
66 if let (&ty
::Adt(def
, _
), &Variants
::Single { index }
)
67 = (&layout
.ty
.kind
, &layout
.variants
)
69 if def
.is_enum() && !def
.variants
.is_empty() {
70 write
!(&mut name
, "::{}", def
.variants
[index
].ident
).unwrap();
// Likewise append the synthetic variant name for a generator state.
73 if let (&ty
::Generator(_
, _
, _
), &Variants
::Single { index }
)
74 = (&layout
.ty
.kind
, &layout
.variants
)
76 write
!(&mut name
, "::{}", ty
::GeneratorSubsts
::variant_name(index
)).unwrap();
// Primitives/unions: a single opaque filler field of the right size/align.
84 FieldsShape
::Primitive
| FieldsShape
::Union(_
) => {
85 let fill
= cx
.type_padding_filler(layout
.size
, layout
.align
.abi
);
// Unnamed: anonymous struct; named: create the named struct and set its body.
88 None
=> cx
.type_struct(&[fill
], packed
),
90 let llty
= cx
.type_named_struct(name
);
91 cx
.set_struct_body(llty
, &[fill
], packed
);
// Arrays lower to an LLVM array of the element's LLVM type.
96 FieldsShape
::Array { count, .. }
=> cx
.type_array(layout
.field(cx
, 0).llvm_type(cx
), count
),
97 FieldsShape
::Arbitrary { .. }
=> match name
{
// Unnamed arbitrary layout: compute the field list eagerly.
99 let (llfields
, packed
) = struct_llfields(cx
, layout
);
100 cx
.type_struct(&llfields
, packed
)
// Named: create the opaque named struct now and defer filling its body,
// so (possibly recursive) types can refer to it before it is complete.
103 let llty
= cx
.type_named_struct(name
);
104 *defer
= Some((llty
, layout
));
// Computes the LLVM field list for an aggregate layout, inserting explicit
// padding-filler fields, and returns `(fields, packed)` where `packed`
// records whether any field needed below-natural alignment.
// NOTE(review): text is garbled/incomplete in this chunk; comments annotate
// the visible fragments only.
111 fn struct_llfields
<'a
, 'tcx
>(
112 cx
: &CodegenCx
<'a
, 'tcx
>,
113 layout
: TyAndLayout
<'tcx
>,
114 ) -> (Vec
<&'a Type
>, bool
) {
115 debug
!("struct_llfields: {:#?}", layout
);
116 let field_count
= layout
.fields
.count();
// Becomes true when a field's effective alignment drops below its ABI align.
118 let mut packed
= false;
// Running offset of the end of the last emitted field.
119 let mut offset
= Size
::ZERO
;
120 let mut prev_effective_align
= layout
.align
.abi
;
// Worst case: one padding filler per field plus a trailing filler.
121 let mut result
: Vec
<_
> = Vec
::with_capacity(1 + field_count
* 2);
// Walk fields in increasing-offset (memory) order, not declaration order.
122 for i
in layout
.fields
.index_by_increasing_offset() {
123 let target_offset
= layout
.fields
.offset(i
as usize);
124 let field
= layout
.field(cx
, i
);
// Effective alignment: capped by the struct's align and restricted by
// what the field's actual offset can support.
125 let effective_field_align
=
126 layout
.align
.abi
.min(field
.align
.abi
).restrict_for_offset(target_offset
);
127 packed
|= effective_field_align
< field
.align
.abi
;
130 "struct_llfields: {}: {:?} offset: {:?} target_offset: {:?} \
131 effective_field_align: {}",
136 effective_field_align
.bytes()
// Emit an explicit padding filler up to this field's target offset.
138 assert
!(target_offset
>= offset
);
139 let padding
= target_offset
- offset
;
140 let padding_align
= prev_effective_align
.min(effective_field_align
);
141 assert_eq
!(offset
.align_to(padding_align
) + padding
, target_offset
);
142 result
.push(cx
.type_padding_filler(padding
, padding_align
));
143 debug
!(" padding before: {:?}", padding
);
// The field itself, then advance the running offset past it.
145 result
.push(field
.llvm_type(cx
));
146 offset
= target_offset
+ field
.size
;
147 prev_effective_align
= effective_field_align
;
// For sized layouts with fields, pad out to the type's full stride.
149 if !layout
.is_unsized() && field_count
> 0 {
150 if offset
> layout
.size
{
151 bug
!("layout: {:#?} stride: {:?} offset: {:?}", layout
, layout
.size
, offset
);
153 let padding
= layout
.size
- offset
;
154 let padding_align
= prev_effective_align
;
155 assert_eq
!(offset
.align_to(padding_align
) + padding
, layout
.size
);
157 "struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}",
158 padding
, offset
, layout
.size
160 result
.push(cx
.type_padding_filler(padding
, padding_align
));
// Sanity check: exactly one filler before each field plus the trailer.
161 assert_eq
!(result
.len(), 1 + field_count
* 2);
163 debug
!("struct_llfields: offset: {:?} stride: {:?}", offset
, layout
.size
);
// Convenience layout queries on the codegen context, each a thin wrapper
// around `layout_of`.
169 impl<'a
, 'tcx
> CodegenCx
<'a
, 'tcx
> {
// ABI alignment of `ty`.
170 pub fn align_of(&self, ty
: Ty
<'tcx
>) -> Align
{
171 self.layout_of(ty
).align
.abi
// Size of `ty`.
174 pub fn size_of(&self, ty
: Ty
<'tcx
>) -> Size
{
175 self.layout_of(ty
).size
// Size and ABI alignment of `ty`, from a single layout query.
178 pub fn size_and_align_of(&self, ty
: Ty
<'tcx
>) -> (Size
, Align
) {
179 let layout
= self.layout_of(ty
);
180 (layout
.size
, layout
.align
.abi
)
// Extension trait adding LLVM-specific queries to `TyAndLayout`.
// NOTE(review): several parameter lines of the method signatures are missing
// from this garbled chunk; comments describe only what is visible here and
// in the trait's impl below.
184 pub trait LayoutLlvmExt
<'tcx
> {
// Whether values of this layout are handled as a single LLVM immediate.
185 fn is_llvm_immediate(&self) -> bool
;
// Whether values of this layout are handled as a pair of immediates.
186 fn is_llvm_scalar_pair(&self) -> bool
;
// The in-memory LLVM type for this layout.
187 fn llvm_type
<'a
>(&self, cx
: &CodegenCx
<'a
, 'tcx
>) -> &'a Type
;
// The LLVM type used when the value is held as an immediate (per the impl,
// `bool` scalars get special treatment here).
188 fn immediate_llvm_type
<'a
>(&self, cx
: &CodegenCx
<'a
, 'tcx
>) -> &'a Type
;
// LLVM type for a single scalar component (offset parameter not visible
// in this chunk — see the impl, which takes a `Size` offset).
189 fn scalar_llvm_type_at
<'a
>(
191 cx
: &CodegenCx
<'a
, 'tcx
>,
// LLVM type for one element (index 0 or 1) of a scalar pair.
195 fn scalar_pair_element_llvm_type
<'a
>(
197 cx
: &CodegenCx
<'a
, 'tcx
>,
// Maps a layout field index to the corresponding LLVM struct field index.
201 fn llvm_field_index(&self, index
: usize) -> u64;
// Pointee metadata for a pointer found at `offset` within `self`, if known.
202 fn pointee_info_at
<'a
>(&self, cx
: &CodegenCx
<'a
, 'tcx
>, offset
: Size
) -> Option
<PointeeInfo
>;
205 impl<'tcx
> LayoutLlvmExt
<'tcx
> for TyAndLayout
<'tcx
> {
// Scalars and vectors are immediates; scalar pairs are not; uninhabited
// and aggregate layouts count only when zero-sized.
206 fn is_llvm_immediate(&self) -> bool
{
208 Abi
::Scalar(_
) | Abi
::Vector { .. }
=> true,
209 Abi
::ScalarPair(..) => false,
210 Abi
::Uninhabited
| Abi
::Aggregate { .. }
=> self.is_zst(),
// Only `Abi::ScalarPair` layouts are scalar pairs; everything else is not.
214 fn is_llvm_scalar_pair(&self) -> bool
{
216 Abi
::ScalarPair(..) => true,
217 Abi
::Uninhabited
| Abi
::Scalar(_
) | Abi
::Vector { .. }
| Abi
::Aggregate { .. }
=> false,
221 /// Gets the LLVM type corresponding to a Rust type, i.e., `rustc_middle::ty::Ty`.
222 /// The pointee type of the pointer in `PlaceRef` is always this type.
223 /// For sized types, it is also the right LLVM type for an `alloca`
224 /// containing a value of that type, and most immediates (except `bool`).
225 /// Unsized types, however, are represented by a "minimal unit", e.g.
226 /// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this
227 /// is useful for indexing slices, as `&[T]`'s data pointer is `T*`.
228 /// If the type is an unsized struct, the regular layout is generated,
229 /// with the inner-most trailing unsized field using the "minimal unit"
230 /// of that field's type - this is useful for taking the address of
231 /// that field and ensuring the struct has the right alignment.
232 fn llvm_type
<'a
>(&self, cx
: &CodegenCx
<'a
, 'tcx
>) -> &'a Type
{
233 if let Abi
::Scalar(ref scalar
) = self.abi
{
234 // Use a different cache for scalars because pointers to DSTs
235 // can be either fat or thin (data pointers of fat pointers).
236 if let Some(&llty
) = cx
.scalar_lltypes
.borrow().get(&self.ty
) {
// Scalar-cache miss: compute the LLVM type from the Rust type's kind.
239 let llty
= match self.ty
.kind
{
// References and raw pointers become pointers to the pointee's LLVM type.
240 ty
::Ref(_
, ty
, _
) | ty
::RawPtr(ty
::TypeAndMut { ty, .. }
) => {
241 cx
.type_ptr_to(cx
.layout_of(ty
).llvm_type(cx
))
// `Box<T>` is likewise a pointer to `T`'s LLVM type.
243 ty
::Adt(def
, _
) if def
.is_box() => {
244 cx
.type_ptr_to(cx
.layout_of(self.ty
.boxed_ty()).llvm_type(cx
))
// Function pointers use the backend's fn-pointer type from the fn ABI.
246 ty
::FnPtr(sig
) => cx
.fn_ptr_backend_type(&FnAbi
::of_fn_ptr(cx
, sig
, &[])),
// Any other scalar is lowered from its `Scalar` ABI description.
247 _
=> self.scalar_llvm_type_at(cx
, scalar
, Size
::ZERO
),
// Memoize in the scalar-specific cache.
249 cx
.scalar_lltypes
.borrow_mut().insert(self.ty
, llty
);
// Non-scalar path: the cache is keyed by (type, optional variant index).
254 let variant_index
= match self.variants
{
255 Variants
::Single { index }
=> Some(index
),
258 if let Some(&llty
) = cx
.lltypes
.borrow().get(&(self.ty
, variant_index
)) {
262 debug
!("llvm_type({:#?})", self);
264 assert
!(!self.ty
.has_escaping_bound_vars(), "{:?} has escaping bound vars", self.ty
);
266 // Make sure lifetimes are erased, to avoid generating distinct LLVM
267 // types for Rust types that only differ in the choice of lifetimes.
268 let normal_ty
= cx
.tcx
.erase_regions(&self.ty
);
270 let mut defer
= None
;
271 let llty
= if self.ty
!= normal_ty
{
// Region erasure changed the type: recurse through the normalized
// layout, restoring the variant selection first.
272 let mut layout
= cx
.layout_of(normal_ty
);
273 if let Some(v
) = variant_index
{
274 layout
= layout
.for_variant(cx
, v
);
// Already normalized: build the type directly (possibly deferring the
// struct body — see `defer` above).
278 uncached_llvm_type(cx
, *self, &mut defer
)
280 debug
!("--> mapped {:#?} to llty={:?}", self, llty
);
// Insert into the cache before filling deferred bodies, so recursive
// types find themselves on re-entry.
282 cx
.lltypes
.borrow_mut().insert((self.ty
, variant_index
), llty
);
// Fill in the body of a named struct whose creation was deferred.
284 if let Some((llty
, layout
)) = defer
{
285 let (llfields
, packed
) = struct_llfields(cx
, layout
);
286 cx
.set_struct_body(llty
, &llfields
, packed
)
// Immediate form of `llvm_type`. The visible fragment special-cases `bool`
// scalars; the body of that branch (and the fallthrough) is missing from
// this garbled chunk.
292 fn immediate_llvm_type
<'a
>(&self, cx
: &CodegenCx
<'a
, 'tcx
>) -> &'a Type
{
293 if let Abi
::Scalar(ref scalar
) = self.abi
{
294 if scalar
.is_bool() {
// Lowers a single `Scalar` ABI component (at `offset` within `self`) to an
// LLVM type. NOTE(review): the scalar/offset parameter lines are missing
// from this garbled chunk.
301 fn scalar_llvm_type_at
<'a
>(
303 cx
: &CodegenCx
<'a
, 'tcx
>,
// Integers and floats map directly to LLVM primitive types.
308 Int(i
, _
) => cx
.type_from_integer(i
),
309 F32
=> cx
.type_f32(),
310 F64
=> cx
.type_f64(),
312 // If we know the alignment, pick something better than i8.
313 let (pointee
, address_space
) =
314 if let Some(pointee
) = self.pointee_info_at(cx
, offset
) {
315 (cx
.type_pointee_for_align(pointee
.align
), pointee
.address_space
)
// Unknown pointee: fall back to `i8` in the default (DATA) address space.
317 (cx
.type_i8(), AddressSpace
::DATA
)
319 cx
.type_ptr_to_ext(pointee
, address_space
)
// LLVM type of one element (`index` 0 or 1) of a scalar-pair layout;
// `immediate` selects the immediate form (per the comment below, `bool`
// elements differ between memory and immediate representations).
324 fn scalar_pair_element_llvm_type
<'a
>(
326 cx
: &CodegenCx
<'a
, 'tcx
>,
330 // HACK(eddyb) special-case fat pointers until LLVM removes
331 // pointee types, to avoid bitcasting every `OperandRef::deref`.
333 ty
::Ref(..) | ty
::RawPtr(_
) => {
334 return self.field(cx
, index
).llvm_type(cx
);
// `Box<T>` delegates to the equivalent `*mut T` pair element.
336 ty
::Adt(def
, _
) if def
.is_box() => {
337 let ptr_ty
= cx
.tcx
.mk_mut_ptr(self.ty
.boxed_ty());
338 return cx
.layout_of(ptr_ty
).scalar_pair_element_llvm_type(cx
, index
, immediate
);
// Only scalar pairs are valid beyond this point.
343 let (a
, b
) = match self.abi
{
344 Abi
::ScalarPair(ref a
, ref b
) => (a
, b
),
345 _
=> bug
!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self),
347 let scalar
= [a
, b
][index
];
349 // Make sure to return the same type `immediate_llvm_type` would when
350 // dealing with an immediate pair. This means that `(bool, bool)` is
351 // effectively represented as `{i8, i8}` in memory and two `i1`s as an
352 // immediate, just like `bool` is typically `i8` in memory and only `i1`
353 // when immediate. We need to load/store `bool` as `i8` to avoid
354 // crippling LLVM optimizations or triggering other LLVM bugs with `i1`.
355 if immediate
&& scalar
.is_bool() {
// Element 1's offset is element 0's size rounded up to element 1's align.
360 if index
== 0 { Size::ZERO }
else { a.value.size(cx).align_to(b.value.align(cx).abi) }
;
361 self.scalar_llvm_type_at(cx
, scalar
, offset
)
// Maps a layout field index to the corresponding LLVM struct field index.
// Not applicable to scalar/scalar-pair ABIs or primitive/union field shapes.
364 fn llvm_field_index(&self, index
: usize) -> u64 {
366 Abi
::Scalar(_
) | Abi
::ScalarPair(..) => {
367 bug
!("TyAndLayout::llvm_field_index({:?}): not applicable", self)
372 FieldsShape
::Primitive
| FieldsShape
::Union(_
) => {
373 bug
!("TyAndLayout::llvm_field_index({:?}): not applicable", self)
// Arrays map 1:1.
376 FieldsShape
::Array { .. }
=> index
as u64,
// Structs interleave padding fillers: field `i` lives at LLVM index
// `1 + memory_index(i) * 2` (matches the layout built by `struct_llfields`).
378 FieldsShape
::Arbitrary { .. }
=> 1 + (self.fields
.memory_index(index
) as u64) * 2,
// Memoized pointee-info lookup for a pointer found at `offset` within `self`.
// NOTE(review): the trailing return of `result` is cut off in this chunk.
382 fn pointee_info_at
<'a
>(&self, cx
: &CodegenCx
<'a
, 'tcx
>, offset
: Size
) -> Option
<PointeeInfo
> {
// Fast path: previously computed and cached on the codegen context.
383 if let Some(&pointee
) = cx
.pointee_infos
.borrow().get(&(self.ty
, offset
)) {
// Slow path: delegate to the layout-level computation.
387 let result
= Ty
::pointee_info_at(*self, cx
, offset
);
// Cache the result (whether Some or None) for subsequent queries.
389 cx
.pointee_infos
.borrow_mut().insert((self.ty
, offset
), result
);