1 // Reference: RISC-V ELF psABI specification
2 // https://github.com/riscv/riscv-elf-psabi-doc
4 // Reference: Clang RISC-V ELF psABI lowering code
5 // https://github.com/llvm/llvm-project/blob/8e780252a7284be45cf1ba224cabd884847e8e92/clang/lib/CodeGen/TargetInfo.cpp#L9311-L9773
use crate::abi::call::{ArgAbi, ArgAttribute, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
use crate::abi::{
    self, Abi, FieldsShape, HasDataLayout, LayoutOf, Size, TyAndLayout, TyAndLayoutMethods,
};
use crate::spec::HasTargetSpec;
13 #[derive(Copy, Clone)]
20 #[derive(Copy, Clone)]
27 #[derive(Copy, Clone)]
28 struct CannotUseFpConv
;
30 fn is_riscv_aggregate
<'a
, Ty
>(arg
: &ArgAbi
<'a
, Ty
>) -> bool
{
31 match arg
.layout
.abi
{
32 Abi
::Vector { .. }
=> true,
33 _
=> arg
.layout
.is_aggregate(),
37 fn should_use_fp_conv_helper
<'a
, Ty
, C
>(
39 arg_layout
: &TyAndLayout
<'a
, Ty
>,
42 field1_kind
: &mut RegPassKind
,
43 field2_kind
: &mut RegPassKind
,
44 ) -> Result
<(), CannotUseFpConv
>
46 Ty
: TyAndLayoutMethods
<'a
, C
> + Copy
,
47 C
: LayoutOf
<Ty
= Ty
, TyAndLayout
= TyAndLayout
<'a
, Ty
>>,
49 match arg_layout
.abi
{
50 Abi
::Scalar(ref scalar
) => match scalar
.value
{
51 abi
::Int(..) | abi
::Pointer
=> {
52 if arg_layout
.size
.bits() > xlen
{
53 return Err(CannotUseFpConv
);
55 match (*field1_kind
, *field2_kind
) {
56 (RegPassKind
::Unknown
, _
) => {
57 *field1_kind
= RegPassKind
::Integer(Reg
{
58 kind
: RegKind
::Integer
,
59 size
: arg_layout
.size
,
62 (RegPassKind
::Float(_
), RegPassKind
::Unknown
) => {
63 *field2_kind
= RegPassKind
::Integer(Reg
{
64 kind
: RegKind
::Integer
,
65 size
: arg_layout
.size
,
68 _
=> return Err(CannotUseFpConv
),
71 abi
::F32
| abi
::F64
=> {
72 if arg_layout
.size
.bits() > flen
{
73 return Err(CannotUseFpConv
);
75 match (*field1_kind
, *field2_kind
) {
76 (RegPassKind
::Unknown
, _
) => {
78 RegPassKind
::Float(Reg { kind: RegKind::Float, size: arg_layout.size }
);
80 (_
, RegPassKind
::Unknown
) => {
82 RegPassKind
::Float(Reg { kind: RegKind::Float, size: arg_layout.size }
);
84 _
=> return Err(CannotUseFpConv
),
88 Abi
::Vector { .. }
| Abi
::Uninhabited
=> return Err(CannotUseFpConv
),
89 Abi
::ScalarPair(..) | Abi
::Aggregate { .. }
=> match arg_layout
.fields
{
90 FieldsShape
::Primitive
=> {
91 unreachable
!("aggregates can't have `FieldsShape::Primitive`")
93 FieldsShape
::Union(_
) => {
94 if !arg_layout
.is_zst() {
95 return Err(CannotUseFpConv
);
98 FieldsShape
::Array { count, .. }
=> {
100 let elem_layout
= arg_layout
.field(cx
, 0);
101 should_use_fp_conv_helper(
111 FieldsShape
::Arbitrary { .. }
=> {
112 match arg_layout
.variants
{
113 abi
::Variants
::Multiple { .. }
=> return Err(CannotUseFpConv
),
114 abi
::Variants
::Single { .. }
=> (),
116 for i
in arg_layout
.fields
.index_by_increasing_offset() {
117 let field
= arg_layout
.field(cx
, i
);
118 should_use_fp_conv_helper(cx
, &field
, xlen
, flen
, field1_kind
, field2_kind
)?
;
126 fn should_use_fp_conv
<'a
, Ty
, C
>(
128 arg
: &TyAndLayout
<'a
, Ty
>,
131 ) -> Option
<FloatConv
>
133 Ty
: TyAndLayoutMethods
<'a
, C
> + Copy
,
134 C
: LayoutOf
<Ty
= Ty
, TyAndLayout
= TyAndLayout
<'a
, Ty
>>,
136 let mut field1_kind
= RegPassKind
::Unknown
;
137 let mut field2_kind
= RegPassKind
::Unknown
;
138 if should_use_fp_conv_helper(cx
, arg
, xlen
, flen
, &mut field1_kind
, &mut field2_kind
).is_err() {
141 match (field1_kind
, field2_kind
) {
142 (RegPassKind
::Integer(l
), RegPassKind
::Float(r
)) => Some(FloatConv
::MixedPair(l
, r
)),
143 (RegPassKind
::Float(l
), RegPassKind
::Integer(r
)) => Some(FloatConv
::MixedPair(l
, r
)),
144 (RegPassKind
::Float(l
), RegPassKind
::Float(r
)) => Some(FloatConv
::FloatPair(l
, r
)),
145 (RegPassKind
::Float(f
), RegPassKind
::Unknown
) => Some(FloatConv
::Float(f
)),
150 fn classify_ret
<'a
, Ty
, C
>(cx
: &C
, arg
: &mut ArgAbi
<'a
, Ty
>, xlen
: u64, flen
: u64) -> bool
152 Ty
: TyAndLayoutMethods
<'a
, C
> + Copy
,
153 C
: LayoutOf
<Ty
= Ty
, TyAndLayout
= TyAndLayout
<'a
, Ty
>>,
155 if let Some(conv
) = should_use_fp_conv(cx
, &arg
.layout
, xlen
, flen
) {
157 FloatConv
::Float(f
) => {
160 FloatConv
::FloatPair(l
, r
) => {
161 arg
.cast_to(CastTarget
::pair(l
, r
));
163 FloatConv
::MixedPair(l
, r
) => {
164 arg
.cast_to(CastTarget
::pair(l
, r
));
170 let total
= arg
.layout
.size
;
172 // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
173 // the argument list with the address."
174 // "Aggregates larger than 2✕XLEN bits are passed by reference and are
175 // replaced in the argument list with the address, as are C++ aggregates
176 // with nontrivial copy constructors, destructors, or vtables."
177 if total
.bits() > 2 * xlen
{
178 // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
179 if is_riscv_aggregate(arg
) {
185 let xlen_reg
= match xlen
{
188 _
=> unreachable
!("Unsupported XLEN: {}", xlen
),
190 if is_riscv_aggregate(arg
) {
191 if total
.bits() <= xlen
{
192 arg
.cast_to(xlen_reg
);
194 arg
.cast_to(Uniform { unit: xlen_reg, total: Size::from_bits(xlen * 2) }
);
199 // "When passed in registers, scalars narrower than XLEN bits are widened
200 // according to the sign of their type up to 32 bits, then sign-extended to
202 extend_integer_width(arg
, xlen
);
206 fn classify_arg
<'a
, Ty
, C
>(
208 arg
: &mut ArgAbi
<'a
, Ty
>,
212 avail_gprs
: &mut u64,
213 avail_fprs
: &mut u64,
215 Ty
: TyAndLayoutMethods
<'a
, C
> + Copy
,
216 C
: LayoutOf
<Ty
= Ty
, TyAndLayout
= TyAndLayout
<'a
, Ty
>>,
219 match should_use_fp_conv(cx
, &arg
.layout
, xlen
, flen
) {
220 Some(FloatConv
::Float(f
)) if *avail_fprs
>= 1 => {
225 Some(FloatConv
::FloatPair(l
, r
)) if *avail_fprs
>= 2 => {
227 arg
.cast_to(CastTarget
::pair(l
, r
));
230 Some(FloatConv
::MixedPair(l
, r
)) if *avail_fprs
>= 1 && *avail_gprs
>= 1 => {
233 arg
.cast_to(CastTarget
::pair(l
, r
));
240 let total
= arg
.layout
.size
;
241 let align
= arg
.layout
.align
.abi
.bits();
243 // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
244 // the argument list with the address."
245 // "Aggregates larger than 2✕XLEN bits are passed by reference and are
246 // replaced in the argument list with the address, as are C++ aggregates
247 // with nontrivial copy constructors, destructors, or vtables."
248 if total
.bits() > 2 * xlen
{
249 // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
250 if is_riscv_aggregate(arg
) {
253 if *avail_gprs
>= 1 {
259 let double_xlen_reg
= match xlen
{
262 _
=> unreachable
!("Unsupported XLEN: {}", xlen
),
265 let xlen_reg
= match xlen
{
268 _
=> unreachable
!("Unsupported XLEN: {}", xlen
),
271 if total
.bits() > xlen
{
272 let align_regs
= align
> xlen
;
273 if is_riscv_aggregate(arg
) {
274 arg
.cast_to(Uniform
{
275 unit
: if align_regs { double_xlen_reg }
else { xlen_reg }
,
276 total
: Size
::from_bits(xlen
* 2),
279 if align_regs
&& is_vararg
{
280 *avail_gprs
-= *avail_gprs
% 2;
282 if *avail_gprs
>= 2 {
288 } else if is_riscv_aggregate(arg
) {
289 arg
.cast_to(xlen_reg
);
290 if *avail_gprs
>= 1 {
296 // "When passed in registers, scalars narrower than XLEN bits are widened
297 // according to the sign of their type up to 32 bits, then sign-extended to
299 if *avail_gprs
>= 1 {
300 extend_integer_width(arg
, xlen
);
305 fn extend_integer_width
<'a
, Ty
>(arg
: &mut ArgAbi
<'a
, Ty
>, xlen
: u64) {
306 if let Abi
::Scalar(ref scalar
) = arg
.layout
.abi
{
307 if let abi
::Int(i
, _
) = scalar
.value
{
308 // 32-bit integers are always sign-extended
309 if i
.size().bits() == 32 && xlen
> 32 {
310 if let PassMode
::Direct(ref mut attrs
) = arg
.mode
{
311 attrs
.set(ArgAttribute
::SExt
);
318 arg
.extend_integer_width_to(xlen
);
321 pub fn compute_abi_info
<'a
, Ty
, C
>(cx
: &C
, fn_abi
: &mut FnAbi
<'a
, Ty
>)
323 Ty
: TyAndLayoutMethods
<'a
, C
> + Copy
,
324 C
: LayoutOf
<Ty
= Ty
, TyAndLayout
= TyAndLayout
<'a
, Ty
>> + HasDataLayout
+ HasTargetSpec
,
326 let flen
= match &cx
.target_spec().options
.llvm_abiname
[..] {
327 "ilp32f" | "lp64f" => 32,
328 "ilp32d" | "lp64d" => 64,
331 let xlen
= cx
.data_layout().pointer_size
.bits();
333 let mut avail_gprs
= 8;
334 let mut avail_fprs
= 8;
336 if !fn_abi
.ret
.is_ignore() {
337 if classify_ret(cx
, &mut fn_abi
.ret
, xlen
, flen
) {
342 for (i
, arg
) in fn_abi
.args
.iter_mut().enumerate() {
351 i
>= fn_abi
.fixed_count
,