// compiler/rustc_target/src/abi/call/loongarch.rs

use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
use crate::abi::{self, Abi, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
use crate::spec::HasTargetSpec;

#[derive(Copy, Clone)]
enum RegPassKind {
    Float(Reg),
    Integer(Reg),
    Unknown,
}

#[derive(Copy, Clone)]
enum FloatConv {
    FloatPair(Reg, Reg),
    Float(Reg),
    MixedPair(Reg, Reg),
}

#[derive(Copy, Clone)]
struct CannotUseFpConv;

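/// Returns `true` for types that this ABI lowering treats as aggregates,
/// which includes SIMD vectors in addition to ordinary aggregate layouts.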
fn is_loongarch_aggregate<Ty>(arg: &ArgAbi<'_, Ty>) -> bool {
    match arg.layout.abi {
        Abi::Vector { .. } => true,
        _ => arg.layout.is_aggregate(),
    }
}

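/// Recursively walks `arg_layout` and records in `field1_kind`/`field2_kind`
/// how its scalar leaves would map onto at most two registers. Returns
/// `Err(CannotUseFpConv)` when the type cannot use the floating-point calling
/// convention, e.g. more than two scalar leaves, an integer wider than `xlen`,
/// a float wider than `flen`, a vector, a non-transparent non-ZST union, or a
/// multi-variant enum.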
fn should_use_fp_conv_helper<'a, Ty, C>(
    cx: &C,
    arg_layout: &TyAndLayout<'a, Ty>,
    xlen: u64,
    flen: u64,
    field1_kind: &mut RegPassKind,
    field2_kind: &mut RegPassKind,
) -> Result<(), CannotUseFpConv>
where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    match arg_layout.abi {
        Abi::Scalar(scalar) => match scalar.primitive() {
            abi::Int(..) | abi::Pointer(_) => {
                if arg_layout.size.bits() > xlen {
                    return Err(CannotUseFpConv);
                }
                match (*field1_kind, *field2_kind) {
                    (RegPassKind::Unknown, _) => {
                        *field1_kind = RegPassKind::Integer(Reg {
                            kind: RegKind::Integer,
                            size: arg_layout.size,
                        });
                    }
                    (RegPassKind::Float(_), RegPassKind::Unknown) => {
                        *field2_kind = RegPassKind::Integer(Reg {
                            kind: RegKind::Integer,
                            size: arg_layout.size,
                        });
                    }
                    _ => return Err(CannotUseFpConv),
                }
            }
            abi::F32 | abi::F64 => {
                if arg_layout.size.bits() > flen {
                    return Err(CannotUseFpConv);
                }
                match (*field1_kind, *field2_kind) {
                    (RegPassKind::Unknown, _) => {
                        *field1_kind =
                            RegPassKind::Float(Reg { kind: RegKind::Float, size: arg_layout.size });
                    }
                    (_, RegPassKind::Unknown) => {
                        *field2_kind =
                            RegPassKind::Float(Reg { kind: RegKind::Float, size: arg_layout.size });
                    }
                    _ => return Err(CannotUseFpConv),
                }
            }
        },
        Abi::Vector { .. } | Abi::Uninhabited => return Err(CannotUseFpConv),
        Abi::ScalarPair(..) | Abi::Aggregate { .. } => match arg_layout.fields {
            FieldsShape::Primitive => {
                unreachable!("aggregates can't have `FieldsShape::Primitive`")
            }
            FieldsShape::Union(_) => {
                if !arg_layout.is_zst() {
                    if arg_layout.is_transparent() {
                        let non_1zst_elem = arg_layout.non_1zst_field(cx).expect("not exactly one non-1-ZST field in non-ZST repr(transparent) union").1;
                        return should_use_fp_conv_helper(
                            cx,
                            &non_1zst_elem,
                            xlen,
                            flen,
                            field1_kind,
                            field2_kind,
                        );
                    }
                    return Err(CannotUseFpConv);
                }
            }
            FieldsShape::Array { count, .. } => {
                for _ in 0..count {
                    let elem_layout = arg_layout.field(cx, 0);
                    should_use_fp_conv_helper(
                        cx,
                        &elem_layout,
                        xlen,
                        flen,
                        field1_kind,
                        field2_kind,
                    )?;
                }
            }
            FieldsShape::Arbitrary { .. } => {
                match arg_layout.variants {
                    abi::Variants::Multiple { .. } => return Err(CannotUseFpConv),
                    abi::Variants::Single { .. } => (),
                }
                for i in arg_layout.fields.index_by_increasing_offset() {
                    let field = arg_layout.field(cx, i);
                    should_use_fp_conv_helper(cx, &field, xlen, flen, field1_kind, field2_kind)?;
                }
            }
        },
    }
    Ok(())
}

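/// Determines whether `arg` is eligible for the floating-point calling
/// convention and, if so, which register shape (`FloatConv`) to use.
///
/// Illustrative sketch with a hypothetical type, assuming the `lp64d` ABI
/// (XLEN = FLEN = 64):
///
/// ```ignore (illustrative)
/// #[repr(C)]
/// struct P {
///     a: f64, // first scalar leaf: fits in an FPR
///     b: i32, // second scalar leaf: fits in a GPR
/// }
/// // `P` classifies as `FloatConv::MixedPair`, so it is passed in one FPR
/// // plus one GPR when both are available (see `classify_arg`).
/// ```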
fn should_use_fp_conv<'a, Ty, C>(
    cx: &C,
    arg: &TyAndLayout<'a, Ty>,
    xlen: u64,
    flen: u64,
) -> Option<FloatConv>
where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    let mut field1_kind = RegPassKind::Unknown;
    let mut field2_kind = RegPassKind::Unknown;
    if should_use_fp_conv_helper(cx, arg, xlen, flen, &mut field1_kind, &mut field2_kind).is_err() {
        return None;
    }
    match (field1_kind, field2_kind) {
        (RegPassKind::Integer(l), RegPassKind::Float(r)) => Some(FloatConv::MixedPair(l, r)),
        (RegPassKind::Float(l), RegPassKind::Integer(r)) => Some(FloatConv::MixedPair(l, r)),
        (RegPassKind::Float(l), RegPassKind::Float(r)) => Some(FloatConv::FloatPair(l, r)),
        (RegPassKind::Float(f), RegPassKind::Unknown) => Some(FloatConv::Float(f)),
        _ => None,
    }
}

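/// Classifies the return value. Returns `true` when the value is larger than
/// `2 * xlen` bits, in which case `compute_abi_info` reserves a GPR (for
/// aggregates this is the pointer through which the value is returned
/// indirectly).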
fn classify_ret<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, xlen: u64, flen: u64) -> bool
where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    if let Some(conv) = should_use_fp_conv(cx, &arg.layout, xlen, flen) {
        match conv {
            FloatConv::Float(f) => {
                arg.cast_to(f);
            }
            FloatConv::FloatPair(l, r) => {
                arg.cast_to(CastTarget::pair(l, r));
            }
            FloatConv::MixedPair(l, r) => {
                arg.cast_to(CastTarget::pair(l, r));
            }
        }
        return false;
    }

    let total = arg.layout.size;

    // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
    // the argument list with the address."
    // "Aggregates larger than 2✕XLEN bits are passed by reference and are
    // replaced in the argument list with the address, as are C++ aggregates
    // with nontrivial copy constructors, destructors, or vtables."
    if total.bits() > 2 * xlen {
        // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
        if is_loongarch_aggregate(arg) {
            arg.make_indirect();
        }
        return true;
    }

    let xlen_reg = match xlen {
        32 => Reg::i32(),
        64 => Reg::i64(),
        _ => unreachable!("Unsupported XLEN: {}", xlen),
    };
    if is_loongarch_aggregate(arg) {
        if total.bits() <= xlen {
            arg.cast_to(xlen_reg);
        } else {
            arg.cast_to(Uniform { unit: xlen_reg, total: Size::from_bits(xlen * 2) });
        }
        return false;
    }

    // "When passed in registers, scalars narrower than XLEN bits are widened
    // according to the sign of their type up to 32 bits, then sign-extended to
    // XLEN bits."
    extend_integer_width(arg, xlen);
    false
}

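/// Classifies a single argument and decrements the running counts of
/// available GPRs/FPRs as registers are assigned. Variadic arguments
/// (`is_vararg`) never use the floating-point calling convention.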
fn classify_arg<'a, Ty, C>(
    cx: &C,
    arg: &mut ArgAbi<'a, Ty>,
    xlen: u64,
    flen: u64,
    is_vararg: bool,
    avail_gprs: &mut u64,
    avail_fprs: &mut u64,
) where
    Ty: TyAbiInterface<'a, C> + Copy,
{
    if !is_vararg {
        match should_use_fp_conv(cx, &arg.layout, xlen, flen) {
            Some(FloatConv::Float(f)) if *avail_fprs >= 1 => {
                *avail_fprs -= 1;
                arg.cast_to(f);
                return;
            }
            Some(FloatConv::FloatPair(l, r)) if *avail_fprs >= 2 => {
                *avail_fprs -= 2;
                arg.cast_to(CastTarget::pair(l, r));
                return;
            }
            Some(FloatConv::MixedPair(l, r)) if *avail_fprs >= 1 && *avail_gprs >= 1 => {
                *avail_gprs -= 1;
                *avail_fprs -= 1;
                arg.cast_to(CastTarget::pair(l, r));
                return;
            }
            _ => (),
        }
    }

    let total = arg.layout.size;
    let align = arg.layout.align.abi.bits();

    // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
    // the argument list with the address."
    // "Aggregates larger than 2✕XLEN bits are passed by reference and are
    // replaced in the argument list with the address, as are C++ aggregates
    // with nontrivial copy constructors, destructors, or vtables."
    if total.bits() > 2 * xlen {
        // We rely on the LLVM backend lowering code to lower passing a scalar larger than 2*XLEN.
        if is_loongarch_aggregate(arg) {
            arg.make_indirect();
        }
        if *avail_gprs >= 1 {
            *avail_gprs -= 1;
        }
        return;
    }

    let double_xlen_reg = match xlen {
        32 => Reg::i64(),
        64 => Reg::i128(),
        _ => unreachable!("Unsupported XLEN: {}", xlen),
    };

    let xlen_reg = match xlen {
        32 => Reg::i32(),
        64 => Reg::i64(),
        _ => unreachable!("Unsupported XLEN: {}", xlen),
    };

    if total.bits() > xlen {
        let align_regs = align > xlen;
        if is_loongarch_aggregate(arg) {
            arg.cast_to(Uniform {
                unit: if align_regs { double_xlen_reg } else { xlen_reg },
                total: Size::from_bits(xlen * 2),
            });
        }
        if align_regs && is_vararg {
            *avail_gprs -= *avail_gprs % 2;
        }
        if *avail_gprs >= 2 {
            *avail_gprs -= 2;
        } else {
            *avail_gprs = 0;
        }
        return;
    } else if is_loongarch_aggregate(arg) {
        arg.cast_to(xlen_reg);
        if *avail_gprs >= 1 {
            *avail_gprs -= 1;
        }
        return;
    }

    // "When passed in registers, scalars narrower than XLEN bits are widened
    // according to the sign of their type up to 32 bits, then sign-extended to
    // XLEN bits."
    if *avail_gprs >= 1 {
        extend_integer_width(arg, xlen);
        *avail_gprs -= 1;
    }
}

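/// Widens integer scalars narrower than XLEN. 32-bit integers are always
/// sign-extended to XLEN bits regardless of their signedness; other widths
/// fall back to the generic `extend_integer_width_to` rule.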
fn extend_integer_width<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
    if let Abi::Scalar(scalar) = arg.layout.abi {
        if let abi::Int(i, _) = scalar.primitive() {
            // 32-bit integers are always sign-extended
            if i.size().bits() == 32 && xlen > 32 {
                if let PassMode::Direct(ref mut attrs) = arg.mode {
                    attrs.ext(ArgExtension::Sext);
                    return;
                }
            }
        }
    }

    arg.extend_integer_width_to(xlen);
}

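/// Entry point: computes the calling-convention lowering for an entire
/// function signature. XLEN is taken from the target's pointer size and FLEN
/// from `llvm_abiname` (`ilp32f`/`lp64f` = 32, `ilp32d`/`lp64d` = 64,
/// otherwise soft-float), with eight argument GPRs and eight FPRs available.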
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
where
    Ty: TyAbiInterface<'a, C> + Copy,
    C: HasDataLayout + HasTargetSpec,
{
    let xlen = cx.data_layout().pointer_size.bits();
    let flen = match &cx.target_spec().llvm_abiname[..] {
        "ilp32f" | "lp64f" => 32,
        "ilp32d" | "lp64d" => 64,
        _ => 0,
    };

    let mut avail_gprs = 8;
    let mut avail_fprs = 8;

    if !fn_abi.ret.is_ignore() && classify_ret(cx, &mut fn_abi.ret, xlen, flen) {
        avail_gprs -= 1;
    }

    for (i, arg) in fn_abi.args.iter_mut().enumerate() {
        if arg.is_ignore() {
            continue;
        }
        classify_arg(
            cx,
            arg,
            xlen,
            flen,
            i >= fn_abi.fixed_count as usize,
            &mut avail_gprs,
            &mut avail_fprs,
        );
    }
}