compiler/rustc_target/src/abi/call/loongarch.rs (rustc 1.68.2+dfsg1)

use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
use crate::abi::{self, Abi, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
use crate::spec::HasTargetSpec;

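/// How one of the (at most two) scalar leaves of an argument would be passed
/// when attempting the hardware floating-point calling convention.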
#[derive(Copy, Clone)]
enum RegPassKind {
    Float(Reg),
    Integer(Reg),
    Unknown,
}

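/// Shape an argument flattens to under the floating-point calling convention:
/// a single float, a pair of floats, or a mixed integer/float pair.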
#[derive(Copy, Clone)]
enum FloatConv {
    FloatPair(Reg, Reg),
    Float(Reg),
    MixedPair(Reg, Reg),
}

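/// Marker error: the layout cannot use floating-point registers and falls
/// back to the integer calling convention.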
#[derive(Copy, Clone)]
struct CannotUseFpConv;

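/// Returns `true` for layouts the LoongArch ABI treats as aggregates,
/// including vectors.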
fn is_loongarch_aggregate<Ty>(arg: &ArgAbi<'_, Ty>) -> bool {
    match arg.layout.abi {
        Abi::Vector { .. } => true,
        _ => arg.layout.is_aggregate(),
    }
}

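/// Recursively walks a layout and records in `field1_kind`/`field2_kind` how
/// its first two scalar leaves would be passed. Fails with `CannotUseFpConv`
/// as soon as the layout cannot be flattened into at most two fields that fit
/// the integer (`xlen`) and float (`flen`) register widths.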
fn should_use_fp_conv_helper<'a, Ty, C>(
    cx: &C,
    arg_layout: &TyAndLayout<'a, Ty>,
    xlen: u64,
    flen: u64,
    field1_kind: &mut RegPassKind,
    field2_kind: &mut RegPassKind,
) -> Result<(), CannotUseFpConv>
where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    match arg_layout.abi {
        Abi::Scalar(scalar) => match scalar.primitive() {
            abi::Int(..) | abi::Pointer => {
                if arg_layout.size.bits() > xlen {
                    return Err(CannotUseFpConv);
                }
                match (*field1_kind, *field2_kind) {
                    (RegPassKind::Unknown, _) => {
                        *field1_kind = RegPassKind::Integer(Reg {
                            kind: RegKind::Integer,
                            size: arg_layout.size,
                        });
                    }
                    (RegPassKind::Float(_), RegPassKind::Unknown) => {
                        *field2_kind = RegPassKind::Integer(Reg {
                            kind: RegKind::Integer,
                            size: arg_layout.size,
                        });
                    }
                    _ => return Err(CannotUseFpConv),
                }
            }
            abi::F32 | abi::F64 => {
                if arg_layout.size.bits() > flen {
                    return Err(CannotUseFpConv);
                }
                match (*field1_kind, *field2_kind) {
                    (RegPassKind::Unknown, _) => {
                        *field1_kind =
                            RegPassKind::Float(Reg { kind: RegKind::Float, size: arg_layout.size });
                    }
                    (_, RegPassKind::Unknown) => {
                        *field2_kind =
                            RegPassKind::Float(Reg { kind: RegKind::Float, size: arg_layout.size });
                    }
                    _ => return Err(CannotUseFpConv),
                }
            }
        },
        Abi::Vector { .. } | Abi::Uninhabited => return Err(CannotUseFpConv),
        Abi::ScalarPair(..) | Abi::Aggregate { .. } => match arg_layout.fields {
            FieldsShape::Primitive => {
                unreachable!("aggregates can't have `FieldsShape::Primitive`")
            }
            FieldsShape::Union(_) => {
                if !arg_layout.is_zst() {
                    return Err(CannotUseFpConv);
                }
            }
            FieldsShape::Array { count, .. } => {
                for _ in 0..count {
                    let elem_layout = arg_layout.field(cx, 0);
                    should_use_fp_conv_helper(
                        cx,
                        &elem_layout,
                        xlen,
                        flen,
                        field1_kind,
                        field2_kind,
                    )?;
                }
            }
            FieldsShape::Arbitrary { .. } => {
                match arg_layout.variants {
                    abi::Variants::Multiple { .. } => return Err(CannotUseFpConv),
                    abi::Variants::Single { .. } => (),
                }
                for i in arg_layout.fields.index_by_increasing_offset() {
                    let field = arg_layout.field(cx, i);
                    should_use_fp_conv_helper(cx, &field, xlen, flen, field1_kind, field2_kind)?;
                }
            }
        },
    }
    Ok(())
}

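/// Decides whether `arg` can use the floating-point calling convention and,
/// if so, which register shape it flattens to.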
fn should_use_fp_conv<'a, Ty, C>(
    cx: &C,
    arg: &TyAndLayout<'a, Ty>,
    xlen: u64,
    flen: u64,
) -> Option<FloatConv>
where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    let mut field1_kind = RegPassKind::Unknown;
    let mut field2_kind = RegPassKind::Unknown;
    if should_use_fp_conv_helper(cx, arg, xlen, flen, &mut field1_kind, &mut field2_kind).is_err() {
        return None;
    }
    match (field1_kind, field2_kind) {
        (RegPassKind::Integer(l), RegPassKind::Float(r)) => Some(FloatConv::MixedPair(l, r)),
        (RegPassKind::Float(l), RegPassKind::Integer(r)) => Some(FloatConv::MixedPair(l, r)),
        (RegPassKind::Float(l), RegPassKind::Float(r)) => Some(FloatConv::FloatPair(l, r)),
        (RegPassKind::Float(f), RegPassKind::Unknown) => Some(FloatConv::Float(f)),
        _ => None,
    }
}

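/// Classifies the return value. Returns `true` when the return is passed by
/// reference (wider than 2✕XLEN), in which case `compute_abi_info` reserves
/// one argument GPR for the hidden return pointer.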
fn classify_ret<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, xlen: u64, flen: u64) -> bool
where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    if let Some(conv) = should_use_fp_conv(cx, &arg.layout, xlen, flen) {
        match conv {
            FloatConv::Float(f) => {
                arg.cast_to(f);
            }
            FloatConv::FloatPair(l, r) => {
                arg.cast_to(CastTarget::pair(l, r));
            }
            FloatConv::MixedPair(l, r) => {
                arg.cast_to(CastTarget::pair(l, r));
            }
        }
        return false;
    }

    let total = arg.layout.size;

    // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
    // the argument list with the address."
    // "Aggregates larger than 2✕XLEN bits are passed by reference and are
    // replaced in the argument list with the address, as are C++ aggregates
    // with nontrivial copy constructors, destructors, or vtables."
    if total.bits() > 2 * xlen {
        // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
        if is_loongarch_aggregate(arg) {
            arg.make_indirect();
        }
        return true;
    }

    let xlen_reg = match xlen {
        32 => Reg::i32(),
        64 => Reg::i64(),
        _ => unreachable!("Unsupported XLEN: {}", xlen),
    };
    if is_loongarch_aggregate(arg) {
        if total.bits() <= xlen {
            arg.cast_to(xlen_reg);
        } else {
            arg.cast_to(Uniform { unit: xlen_reg, total: Size::from_bits(xlen * 2) });
        }
        return false;
    }

    // "When passed in registers, scalars narrower than XLEN bits are widened
    // according to the sign of their type up to 32 bits, then sign-extended to
    // XLEN bits."
    extend_integer_width(arg, xlen);
    false
}

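/// Classifies a single argument, updating the remaining GPR/FPR budgets.
/// Named (non-vararg) arguments try the floating-point convention first;
/// everything else uses the integer convention, with values wider than
/// 2✕XLEN passed indirectly.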
fn classify_arg<'a, Ty, C>(
    cx: &C,
    arg: &mut ArgAbi<'a, Ty>,
    xlen: u64,
    flen: u64,
    is_vararg: bool,
    avail_gprs: &mut u64,
    avail_fprs: &mut u64,
) where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    if !is_vararg {
        match should_use_fp_conv(cx, &arg.layout, xlen, flen) {
            Some(FloatConv::Float(f)) if *avail_fprs >= 1 => {
                *avail_fprs -= 1;
                arg.cast_to(f);
                return;
            }
            Some(FloatConv::FloatPair(l, r)) if *avail_fprs >= 2 => {
                *avail_fprs -= 2;
                arg.cast_to(CastTarget::pair(l, r));
                return;
            }
            Some(FloatConv::MixedPair(l, r)) if *avail_fprs >= 1 && *avail_gprs >= 1 => {
                *avail_gprs -= 1;
                *avail_fprs -= 1;
                arg.cast_to(CastTarget::pair(l, r));
                return;
            }
            _ => (),
        }
    }

    let total = arg.layout.size;
    let align = arg.layout.align.abi.bits();

    // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
    // the argument list with the address."
    // "Aggregates larger than 2✕XLEN bits are passed by reference and are
    // replaced in the argument list with the address, as are C++ aggregates
    // with nontrivial copy constructors, destructors, or vtables."
    if total.bits() > 2 * xlen {
        // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
        if is_loongarch_aggregate(arg) {
            arg.make_indirect();
        }
        if *avail_gprs >= 1 {
            *avail_gprs -= 1;
        }
        return;
    }

    let double_xlen_reg = match xlen {
        32 => Reg::i64(),
        64 => Reg::i128(),
        _ => unreachable!("Unsupported XLEN: {}", xlen),
    };

    let xlen_reg = match xlen {
        32 => Reg::i32(),
        64 => Reg::i64(),
        _ => unreachable!("Unsupported XLEN: {}", xlen),
    };

    if total.bits() > xlen {
        let align_regs = align > xlen;
        if is_loongarch_aggregate(arg) {
            arg.cast_to(Uniform {
                unit: if align_regs { double_xlen_reg } else { xlen_reg },
                total: Size::from_bits(xlen * 2),
            });
        }
        if align_regs && is_vararg {
            *avail_gprs -= *avail_gprs % 2;
        }
        if *avail_gprs >= 2 {
            *avail_gprs -= 2;
        } else {
            *avail_gprs = 0;
        }
        return;
    } else if is_loongarch_aggregate(arg) {
        arg.cast_to(xlen_reg);
        if *avail_gprs >= 1 {
            *avail_gprs -= 1;
        }
        return;
    }

    // "When passed in registers, scalars narrower than XLEN bits are widened
    // according to the sign of their type up to 32 bits, then sign-extended to
    // XLEN bits."
    if *avail_gprs >= 1 {
        extend_integer_width(arg, xlen);
        *avail_gprs -= 1;
    }
}

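/// Like `ArgAbi::extend_integer_width_to`, except that 32-bit integers are
/// always sign-extended to XLEN, regardless of their signedness.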
fn extend_integer_width<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
    if let Abi::Scalar(scalar) = arg.layout.abi {
        if let abi::Int(i, _) = scalar.primitive() {
            // 32-bit integers are always sign-extended
            if i.size().bits() == 32 && xlen > 32 {
                if let PassMode::Direct(ref mut attrs) = arg.mode {
                    attrs.ext(ArgExtension::Sext);
                    return;
                }
            }
        }
    }

    arg.extend_integer_width_to(xlen);
}

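/// Entry point: classifies the return value and every argument of `fn_abi`.
/// XLEN comes from the target's pointer size, FLEN from `llvm_abiname` (0 when
/// the target has no hardware float ABI); 8 GPRs and 8 FPRs are available for
/// argument passing.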
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout + HasTargetSpec,
{
    let xlen = cx.data_layout().pointer_size.bits();
    let flen = match &cx.target_spec().llvm_abiname[..] {
        "ilp32f" | "lp64f" => 32,
        "ilp32d" | "lp64d" => 64,
        _ => 0,
    };

    let mut avail_gprs = 8;
    let mut avail_fprs = 8;

    if !fn_abi.ret.is_ignore() && classify_ret(cx, &mut fn_abi.ret, xlen, flen) {
        avail_gprs -= 1;
    }

    for (i, arg) in fn_abi.args.iter_mut().enumerate() {
        if arg.is_ignore() {
            continue;
        }
        classify_arg(
            cx,
            arg,
            xlen,
            flen,
            i >= fn_abi.fixed_count as usize,
            &mut avail_gprs,
            &mut avail_fprs,
        );
    }
}