src/librustc_target/abi/call/riscv.rs (rustc 1.47.0)
// Reference: RISC-V ELF psABI specification
// https://github.com/riscv/riscv-elf-psabi-doc
//
// Reference: Clang RISC-V ELF psABI lowering code
// https://github.com/llvm/llvm-project/blob/8e780252a7284be45cf1ba224cabd884847e8e92/clang/lib/CodeGen/TargetInfo.cpp#L9311-L9773

use crate::abi::call::{ArgAbi, ArgAttribute, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
use crate::abi::{
    self, Abi, FieldsShape, HasDataLayout, LayoutOf, Size, TyAndLayout, TyAndLayoutMethods,
};
use crate::spec::HasTargetSpec;

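/// How one flattened field of a candidate value would be passed under the hardware
/// floating-point calling convention: in a float register, in an integer register,
/// or not yet determined.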
#[derive(Copy, Clone)]
enum RegPassKind {
    Float(Reg),
    Integer(Reg),
    Unknown,
}

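/// The register shape used when a value qualifies for the hardware floating-point
/// calling convention: two float registers, a single float register, or a mixed
/// integer/float register pair (in field order).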
#[derive(Copy, Clone)]
enum FloatConv {
    FloatPair(Reg, Reg),
    Float(Reg),
    MixedPair(Reg, Reg),
}

#[derive(Copy, Clone)]
struct CannotUseFpConv;

fn is_riscv_aggregate<'a, Ty>(arg: &ArgAbi<'a, Ty>) -> bool {
    match arg.layout.abi {
        Abi::Vector { .. } => true,
        _ => arg.layout.is_aggregate(),
    }
}

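/// Recursively flattens `arg_layout` into at most two scalar fields, recorded in
/// `field1_kind` and `field2_kind`. Classification fails unless the value consists
/// of at most two scalar fields, every float field fits in FLEN bits, and at most
/// one field is an integer or pointer no wider than XLEN bits; this mirrors the
/// psABI rule for passing small aggregates in floating-point registers.
///
/// For example, under the lp64d ABI (XLEN = 64, FLEN = 64):
/// - `(f32, f32)` qualifies (two float fields);
/// - `(i32, f64)` qualifies (one integer field plus one float field);
/// - `(i64, i64)` does not (two integer fields);
/// - `[f64; 3]` does not (more than two fields).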
fn should_use_fp_conv_helper<'a, Ty, C>(
    cx: &C,
    arg_layout: &TyAndLayout<'a, Ty>,
    xlen: u64,
    flen: u64,
    field1_kind: &mut RegPassKind,
    field2_kind: &mut RegPassKind,
) -> Result<(), CannotUseFpConv>
where
    Ty: TyAndLayoutMethods<'a, C> + Copy,
    C: LayoutOf<Ty = Ty, TyAndLayout = TyAndLayout<'a, Ty>>,
{
    match arg_layout.abi {
        Abi::Scalar(ref scalar) => match scalar.value {
            abi::Int(..) | abi::Pointer => {
                if arg_layout.size.bits() > xlen {
                    return Err(CannotUseFpConv);
                }
                match (*field1_kind, *field2_kind) {
                    (RegPassKind::Unknown, _) => {
                        *field1_kind = RegPassKind::Integer(Reg {
                            kind: RegKind::Integer,
                            size: arg_layout.size,
                        });
                    }
                    (RegPassKind::Float(_), RegPassKind::Unknown) => {
                        *field2_kind = RegPassKind::Integer(Reg {
                            kind: RegKind::Integer,
                            size: arg_layout.size,
                        });
                    }
                    _ => return Err(CannotUseFpConv),
                }
            }
            abi::F32 | abi::F64 => {
                if arg_layout.size.bits() > flen {
                    return Err(CannotUseFpConv);
                }
                match (*field1_kind, *field2_kind) {
                    (RegPassKind::Unknown, _) => {
                        *field1_kind =
                            RegPassKind::Float(Reg { kind: RegKind::Float, size: arg_layout.size });
                    }
                    (_, RegPassKind::Unknown) => {
                        *field2_kind =
                            RegPassKind::Float(Reg { kind: RegKind::Float, size: arg_layout.size });
                    }
                    _ => return Err(CannotUseFpConv),
                }
            }
        },
        Abi::Vector { .. } | Abi::Uninhabited => return Err(CannotUseFpConv),
        Abi::ScalarPair(..) | Abi::Aggregate { .. } => match arg_layout.fields {
            FieldsShape::Primitive => {
                unreachable!("aggregates can't have `FieldsShape::Primitive`")
            }
            FieldsShape::Union(_) => {
                if !arg_layout.is_zst() {
                    return Err(CannotUseFpConv);
                }
            }
            FieldsShape::Array { count, .. } => {
                for _ in 0..count {
                    let elem_layout = arg_layout.field(cx, 0);
                    should_use_fp_conv_helper(
                        cx,
                        &elem_layout,
                        xlen,
                        flen,
                        field1_kind,
                        field2_kind,
                    )?;
                }
            }
            FieldsShape::Arbitrary { .. } => {
                match arg_layout.variants {
                    abi::Variants::Multiple { .. } => return Err(CannotUseFpConv),
                    abi::Variants::Single { .. } => (),
                }
                for i in arg_layout.fields.index_by_increasing_offset() {
                    let field = arg_layout.field(cx, i);
                    should_use_fp_conv_helper(cx, &field, xlen, flen, field1_kind, field2_kind)?;
                }
            }
        },
    }
    Ok(())
}

fn should_use_fp_conv<'a, Ty, C>(
    cx: &C,
    arg: &TyAndLayout<'a, Ty>,
    xlen: u64,
    flen: u64,
) -> Option<FloatConv>
where
    Ty: TyAndLayoutMethods<'a, C> + Copy,
    C: LayoutOf<Ty = Ty, TyAndLayout = TyAndLayout<'a, Ty>>,
{
    let mut field1_kind = RegPassKind::Unknown;
    let mut field2_kind = RegPassKind::Unknown;
    if should_use_fp_conv_helper(cx, arg, xlen, flen, &mut field1_kind, &mut field2_kind).is_err() {
        return None;
    }
    match (field1_kind, field2_kind) {
        (RegPassKind::Integer(l), RegPassKind::Float(r)) => Some(FloatConv::MixedPair(l, r)),
        (RegPassKind::Float(l), RegPassKind::Integer(r)) => Some(FloatConv::MixedPair(l, r)),
        (RegPassKind::Float(l), RegPassKind::Float(r)) => Some(FloatConv::FloatPair(l, r)),
        (RegPassKind::Float(f), RegPassKind::Unknown) => Some(FloatConv::Float(f)),
        _ => None,
    }
}

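/// Classifies the return value. Returns `true` if the value is returned by reference
/// (the psABI rule for values wider than 2✕XLEN), in which case the caller reserves
/// one GPR for the hidden return pointer.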
fn classify_ret<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, xlen: u64, flen: u64) -> bool
where
    Ty: TyAndLayoutMethods<'a, C> + Copy,
    C: LayoutOf<Ty = Ty, TyAndLayout = TyAndLayout<'a, Ty>>,
{
    if let Some(conv) = should_use_fp_conv(cx, &arg.layout, xlen, flen) {
        match conv {
            FloatConv::Float(f) => {
                arg.cast_to(f);
            }
            FloatConv::FloatPair(l, r) => {
                arg.cast_to(CastTarget::pair(l, r));
            }
            FloatConv::MixedPair(l, r) => {
                arg.cast_to(CastTarget::pair(l, r));
            }
        }
        return false;
    }

    let total = arg.layout.size;

    // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
    // the argument list with the address."
    // "Aggregates larger than 2✕XLEN bits are passed by reference and are
    // replaced in the argument list with the address, as are C++ aggregates
    // with nontrivial copy constructors, destructors, or vtables."
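    // E.g. on RV64 (XLEN = 64), anything wider than 128 bits is returned by reference.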
    if total.bits() > 2 * xlen {
        // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
        if is_riscv_aggregate(arg) {
            arg.make_indirect();
        }
        return true;
    }

    let xlen_reg = match xlen {
        32 => Reg::i32(),
        64 => Reg::i64(),
        _ => unreachable!("Unsupported XLEN: {}", xlen),
    };
    if is_riscv_aggregate(arg) {
        if total.bits() <= xlen {
            arg.cast_to(xlen_reg);
        } else {
            arg.cast_to(Uniform { unit: xlen_reg, total: Size::from_bits(xlen * 2) });
        }
        return false;
    }

    // "When passed in registers, scalars narrower than XLEN bits are widened
    // according to the sign of their type up to 32 bits, then sign-extended to
    // XLEN bits."
    extend_integer_width(arg, xlen);
    false
}

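/// Classifies a single argument and decrements `avail_gprs`/`avail_fprs` to track
/// how many of the eight argument GPRs (a0-a7) and FPRs (fa0-fa7) remain.
/// Variadic arguments (`is_vararg`) never use the floating-point calling convention.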
fn classify_arg<'a, Ty, C>(
    cx: &C,
    arg: &mut ArgAbi<'a, Ty>,
    xlen: u64,
    flen: u64,
    is_vararg: bool,
    avail_gprs: &mut u64,
    avail_fprs: &mut u64,
) where
    Ty: TyAndLayoutMethods<'a, C> + Copy,
    C: LayoutOf<Ty = Ty, TyAndLayout = TyAndLayout<'a, Ty>>,
{
    if !is_vararg {
        match should_use_fp_conv(cx, &arg.layout, xlen, flen) {
            Some(FloatConv::Float(f)) if *avail_fprs >= 1 => {
                *avail_fprs -= 1;
                arg.cast_to(f);
                return;
            }
            Some(FloatConv::FloatPair(l, r)) if *avail_fprs >= 2 => {
                *avail_fprs -= 2;
                arg.cast_to(CastTarget::pair(l, r));
                return;
            }
            Some(FloatConv::MixedPair(l, r)) if *avail_fprs >= 1 && *avail_gprs >= 1 => {
                *avail_gprs -= 1;
                *avail_fprs -= 1;
                arg.cast_to(CastTarget::pair(l, r));
                return;
            }
            _ => (),
        }
    }

    let total = arg.layout.size;
    let align = arg.layout.align.abi.bits();

    // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
    // the argument list with the address."
    // "Aggregates larger than 2✕XLEN bits are passed by reference and are
    // replaced in the argument list with the address, as are C++ aggregates
    // with nontrivial copy constructors, destructors, or vtables."
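    // The pointer that replaces an indirectly-passed value still occupies one GPR if any remain.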
    if total.bits() > 2 * xlen {
        // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
        if is_riscv_aggregate(arg) {
            arg.make_indirect();
        }
        if *avail_gprs >= 1 {
            *avail_gprs -= 1;
        }
        return;
    }

    let double_xlen_reg = match xlen {
        32 => Reg::i64(),
        64 => Reg::i128(),
        _ => unreachable!("Unsupported XLEN: {}", xlen),
    };

    let xlen_reg = match xlen {
        32 => Reg::i32(),
        64 => Reg::i64(),
        _ => unreachable!("Unsupported XLEN: {}", xlen),
    };

    if total.bits() > xlen {
        let align_regs = align > xlen;
        if is_riscv_aggregate(arg) {
            arg.cast_to(Uniform {
                unit: if align_regs { double_xlen_reg } else { xlen_reg },
                total: Size::from_bits(xlen * 2),
            });
        }
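        // Per the psABI, variadic arguments with 2✕XLEN-bit alignment are passed in an
        // aligned (even-odd) register pair, so skip a leftover odd register here.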
        if align_regs && is_vararg {
            *avail_gprs -= *avail_gprs % 2;
        }
        if *avail_gprs >= 2 {
            *avail_gprs -= 2;
        } else {
            *avail_gprs = 0;
        }
        return;
    } else if is_riscv_aggregate(arg) {
        arg.cast_to(xlen_reg);
        if *avail_gprs >= 1 {
            *avail_gprs -= 1;
        }
        return;
    }

    // "When passed in registers, scalars narrower than XLEN bits are widened
    // according to the sign of their type up to 32 bits, then sign-extended to
    // XLEN bits."
    if *avail_gprs >= 1 {
        extend_integer_width(arg, xlen);
        *avail_gprs -= 1;
    }
}

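/// Widens a scalar integer argument narrower than XLEN. On RV64 the psABI requires
/// 32-bit integers (signed or unsigned) to be sign-extended; other widths are
/// extended according to the signedness of their type.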
fn extend_integer_width<'a, Ty>(arg: &mut ArgAbi<'a, Ty>, xlen: u64) {
    if let Abi::Scalar(ref scalar) = arg.layout.abi {
        if let abi::Int(i, _) = scalar.value {
            // 32-bit integers are always sign-extended
            if i.size().bits() == 32 && xlen > 32 {
                if let PassMode::Direct(ref mut attrs) = arg.mode {
                    attrs.set(ArgAttribute::SExt);
                    return;
                }
            }
        }
    }

    arg.extend_integer_width_to(xlen);
}

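/// Entry point: XLEN is taken from the target's pointer size and FLEN from the ABI
/// name (`ilp32f`/`lp64f` => 32, `ilp32d`/`lp64d` => 64, any other ABI => 0, which
/// disables the floating-point convention), then the return value and each argument
/// are classified against the eight available argument GPRs and FPRs.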
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
where
    Ty: TyAndLayoutMethods<'a, C> + Copy,
    C: LayoutOf<Ty = Ty, TyAndLayout = TyAndLayout<'a, Ty>> + HasDataLayout + HasTargetSpec,
{
    let flen = match &cx.target_spec().options.llvm_abiname[..] {
        "ilp32f" | "lp64f" => 32,
        "ilp32d" | "lp64d" => 64,
        _ => 0,
    };
    let xlen = cx.data_layout().pointer_size.bits();

    let mut avail_gprs = 8;
    let mut avail_fprs = 8;

    if !fn_abi.ret.is_ignore() {
        if classify_ret(cx, &mut fn_abi.ret, xlen, flen) {
            avail_gprs -= 1;
        }
    }

    for (i, arg) in fn_abi.args.iter_mut().enumerate() {
        if arg.is_ignore() {
            continue;
        }
        classify_arg(
            cx,
            arg,
            xlen,
            flen,
            i >= fn_abi.fixed_count,
            &mut avail_gprs,
            &mut avail_fprs,
        );
    }
}