1 // The classification code for the x86_64 ABI is taken from the clay language
2 // https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
4 use crate::abi
::call
::{ArgAbi, CastTarget, FnAbi, Reg, RegKind}
;
5 use crate::abi
::{self, Abi, HasDataLayout, LayoutOf, Size, TyAndLayout, TyAndLayoutMethods}
;
/// Classification of "eightbyte" components.
// N.B., the order of the variants is from general to specific,
// such that `unify(a, b)` is the "smaller" of `a` and `b` — the code
// below relies on `Ord` (`old.min(c)`) to merge classes.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum Class {
    /// Passed in a general-purpose (integer) register.
    Int,
    /// Passed in (the low half of) an SSE register.
    Sse,
    /// Continuation of a preceding `Sse` eightbyte — the upper part of
    /// the same vector register, consuming no extra register.
    SseUp,
}

/// Marker "error" type: the value does not fit in registers and must be
/// passed in memory (on the stack / via hidden pointer).
#[derive(Clone, Copy, Debug)]
struct Memory;
// Currently supported vector size (AVX-512).
const LARGEST_VECTOR_SIZE: usize = 512;
// Maximum number of 8-byte ("eightbyte") components a single argument can
// span: one per 64 bits of the largest vector register (512 / 64 = 8).
const MAX_EIGHTBYTES: usize = LARGEST_VECTOR_SIZE / 64;
24 fn classify_arg
<'a
, Ty
, C
>(
27 ) -> Result
<[Option
<Class
>; MAX_EIGHTBYTES
], Memory
>
29 Ty
: TyAndLayoutMethods
<'a
, C
> + Copy
,
30 C
: LayoutOf
<Ty
= Ty
, TyAndLayout
= TyAndLayout
<'a
, Ty
>> + HasDataLayout
,
32 fn classify
<'a
, Ty
, C
>(
34 layout
: TyAndLayout
<'a
, Ty
>,
35 cls
: &mut [Option
<Class
>],
37 ) -> Result
<(), Memory
>
39 Ty
: TyAndLayoutMethods
<'a
, C
> + Copy
,
40 C
: LayoutOf
<Ty
= Ty
, TyAndLayout
= TyAndLayout
<'a
, Ty
>> + HasDataLayout
,
42 if !off
.is_aligned(layout
.align
.abi
) {
49 let mut c
= match layout
.abi
{
50 Abi
::Uninhabited
=> return Ok(()),
52 Abi
::Scalar(ref scalar
) => match scalar
.value
{
53 abi
::Int(..) | abi
::Pointer
=> Class
::Int
,
54 abi
::F32
| abi
::F64
=> Class
::Sse
,
57 Abi
::Vector { .. }
=> Class
::Sse
,
59 Abi
::ScalarPair(..) | Abi
::Aggregate { .. }
=> {
60 for i
in 0..layout
.fields
.count() {
61 let field_off
= off
+ layout
.fields
.offset(i
);
62 classify(cx
, layout
.field(cx
, i
), cls
, field_off
)?
;
65 match &layout
.variants
{
66 abi
::Variants
::Single { .. }
=> {}
67 abi
::Variants
::Multiple { variants, .. }
=> {
68 // Treat enum variants like union members.
69 for variant_idx
in variants
.indices() {
70 classify(cx
, layout
.for_variant(cx
, variant_idx
), cls
, off
)?
;
79 // Fill in `cls` for scalars (Int/Sse) and vectors (Sse).
80 let first
= (off
.bytes() / 8) as usize;
81 let last
= ((off
.bytes() + layout
.size
.bytes() - 1) / 8) as usize;
82 for cls
in &mut cls
[first
..=last
] {
83 *cls
= Some(cls
.map_or(c
, |old
| old
.min(c
)));
85 // Everything after the first Sse "eightbyte"
86 // component is the upper half of a register.
95 let n
= ((arg
.layout
.size
.bytes() + 7) / 8) as usize;
96 if n
> MAX_EIGHTBYTES
{
100 let mut cls
= [None
; MAX_EIGHTBYTES
];
101 classify(cx
, arg
.layout
, &mut cls
, Size
::ZERO
)?
;
103 if cls
[0] != Some(Class
::Sse
) {
106 if cls
[1..n
].iter().any(|&c
| c
!= Some(Class
::SseUp
)) {
112 if cls
[i
] == Some(Class
::SseUp
) {
113 cls
[i
] = Some(Class
::Sse
);
114 } else if cls
[i
] == Some(Class
::Sse
) {
116 while i
!= n
&& cls
[i
] == Some(Class
::SseUp
) {
128 fn reg_component(cls
: &[Option
<Class
>], i
: &mut usize, size
: Size
) -> Option
<Reg
> {
135 Some(Class
::Int
) => {
137 Some(if size
.bytes() < 8 { Reg { kind: RegKind::Integer, size }
} else { Reg::i64() }
)
139 Some(Class
::Sse
) => {
141 1 + cls
[*i
+ 1..].iter().take_while(|&&c
| c
== Some(Class
::SseUp
)).count();
143 Some(if vec_len
== 1 {
149 Reg { kind: RegKind::Vector, size: Size::from_bytes(8) * (vec_len as u64) }
152 Some(c
) => unreachable
!("reg_component: unhandled class {:?}", c
),
156 fn cast_target(cls
: &[Option
<Class
>], size
: Size
) -> CastTarget
{
158 let lo
= reg_component(cls
, &mut i
, size
).unwrap();
159 let offset
= Size
::from_bytes(8) * (i
as u64);
160 let mut target
= CastTarget
::from(lo
);
162 if let Some(hi
) = reg_component(cls
, &mut i
, size
- offset
) {
163 target
= CastTarget
::pair(lo
, hi
);
166 assert_eq
!(reg_component(cls
, &mut i
, Size
::ZERO
), None
);
/// Number of general-purpose argument registers in the System V AMD64 ABI.
const MAX_INT_REGS: usize = 6; // RDI, RSI, RDX, RCX, R8, R9
/// Number of SSE argument registers.
const MAX_SSE_REGS: usize = 8; // XMM0-7
173 pub fn compute_abi_info
<'a
, Ty
, C
>(cx
: &C
, fn_abi
: &mut FnAbi
<'a
, Ty
>)
175 Ty
: TyAndLayoutMethods
<'a
, C
> + Copy
,
176 C
: LayoutOf
<Ty
= Ty
, TyAndLayout
= TyAndLayout
<'a
, Ty
>> + HasDataLayout
,
178 let mut int_regs
= MAX_INT_REGS
;
179 let mut sse_regs
= MAX_SSE_REGS
;
181 let mut x86_64_arg_or_ret
= |arg
: &mut ArgAbi
<'a
, Ty
>, is_arg
: bool
| {
182 let mut cls_or_mem
= classify_arg(cx
, arg
);
185 if let Ok(cls
) = cls_or_mem
{
186 let mut needed_int
= 0;
187 let mut needed_sse
= 0;
190 Some(Class
::Int
) => needed_int
+= 1,
191 Some(Class
::Sse
) => needed_sse
+= 1,
195 match (int_regs
.checked_sub(needed_int
), sse_regs
.checked_sub(needed_sse
)) {
196 (Some(left_int
), Some(left_sse
)) => {
201 // Not enough registers for this argument, so it will be
202 // passed on the stack, but we only mark aggregates
203 // explicitly as indirect `byval` arguments, as LLVM will
204 // automatically put immediates on the stack itself.
205 if arg
.layout
.is_aggregate() {
206 cls_or_mem
= Err(Memory
);
216 arg
.make_indirect_byval();
218 // `sret` parameter thus one less integer register available
220 // NOTE(eddyb) return is handled first, so no registers
221 // should've been used yet.
222 assert_eq
!(int_regs
, MAX_INT_REGS
);
227 // split into sized chunks passed individually
228 if arg
.layout
.is_aggregate() {
229 let size
= arg
.layout
.size
;
230 arg
.cast_to(cast_target(cls
, size
))
232 arg
.extend_integer_width_to(32);
238 if !fn_abi
.ret
.is_ignore() {
239 x86_64_arg_or_ret(&mut fn_abi
.ret
, false);
242 for arg
in &mut fn_abi
.args
{
246 x86_64_arg_or_ret(arg
, true);