]> git.proxmox.com Git - rustc.git/blob - compiler/rustc_target/src/abi/call/mod.rs
57011aa8a14748b7d45af3641c2b7e2f7697400b
[rustc.git] / compiler / rustc_target / src / abi / call / mod.rs
1 use crate::abi::{self, Abi, Align, FieldsShape, Size};
2 use crate::abi::{HasDataLayout, TyAbiInterface, TyAndLayout};
3 use crate::spec::{self, HasTargetSpec};
4 use rustc_span::Symbol;
5 use std::fmt;
6 use std::str::FromStr;
7
// Per-architecture ABI lowering. Each module supplies a `compute_abi_info`
// entry point (plus variants for some targets) that is dispatched to by
// `FnAbi::adjust_for_foreign_abi` at the bottom of this file.
mod aarch64;
mod amdgpu;
mod arm;
mod avr;
mod bpf;
mod hexagon;
mod loongarch;
mod m68k;
mod mips;
mod mips64;
mod msp430;
mod nvptx64;
mod powerpc;
mod powerpc64;
mod riscv;
mod s390x;
mod sparc;
mod sparc64;
mod wasm;
mod x86;
mod x86_64;
mod x86_win64;
30
/// How a single argument (or return value) is passed at the ABI level.
/// Stored in `ArgAbi::mode`.
#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum PassMode {
    /// Ignore the argument.
    ///
    /// The argument is either uninhabited or a ZST.
    Ignore,
    /// Pass the argument directly.
    ///
    /// The argument has a layout abi of `Scalar`, `Vector` or in rare cases `Aggregate`.
    Direct(ArgAttributes),
    /// Pass a pair's elements directly in two arguments.
    ///
    /// The argument has a layout abi of `ScalarPair`.
    Pair(ArgAttributes, ArgAttributes),
    /// Pass the argument after casting it, to either a single uniform or a
    /// pair of registers. The bool indicates if a `Reg::i32()` dummy argument
    /// is emitted before the real argument.
    Cast(Box<CastTarget>, bool),
    /// Pass the argument indirectly via a hidden pointer.
    /// The `extra_attrs` value, if any, is for the extra data (vtable or length)
    /// which indicates that it refers to an unsized rvalue.
    /// `on_stack` defines that the value should be passed at a fixed
    /// stack offset in accordance to the ABI rather than passed using a
    /// pointer. This corresponds to the `byval` LLVM argument attribute.
    Indirect { attrs: ArgAttributes, extra_attrs: Option<ArgAttributes>, on_stack: bool },
}
57
// Hack to disable non_upper_case_globals only for the bitflags! and not for the rest
// of this module
pub use attr_impl::ArgAttribute;

#[allow(non_upper_case_globals)]
#[allow(unused)]
mod attr_impl {
    // The subset of llvm::Attribute needed for arguments, packed into a bitfield.
    // NB: bit 0 (`1 << 0`) is deliberately left unused here.
    bitflags::bitflags! {
        #[derive(Default, HashStable_Generic)]
        pub struct ArgAttribute: u16 {
            const NoAlias = 1 << 1;
            const NoCapture = 1 << 2;
            const NonNull = 1 << 3;
            const ReadOnly = 1 << 4;
            const InReg = 1 << 5;
            const NoUndef = 1 << 6;
        }
    }
}
78
/// Sometimes an ABI requires small integers to be extended to a full or partial register. This enum
/// defines if this extension should be zero-extension or sign-extension when necessary. When it is
/// not necessary to extend the argument, this enum is ignored.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum ArgExtension {
    /// No extension requested.
    None,
    /// Zero-extend (for unsigned integers).
    Zext,
    /// Sign-extend (for signed integers).
    Sext,
}
88
/// A compact representation of LLVM attributes (at least those relevant for this module)
/// that can be manipulated without interacting with LLVM's Attribute machinery.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct ArgAttributes {
    /// Bitset of boolean attributes (`NoAlias`, `NonNull`, ...); see `ArgAttribute`.
    pub regular: ArgAttribute,
    /// Whether (and how) a small integer argument should be extended; see `ArgExtension`.
    pub arg_ext: ArgExtension,
    /// The minimum size of the pointee, guaranteed to be valid for the duration of the whole call
    /// (corresponding to LLVM's dereferenceable and dereferenceable_or_null attributes).
    pub pointee_size: Size,
    /// Known alignment of the pointee, if any.
    /// NOTE(review): currently never set by `indirect_pass_mode` below (see its
    /// FIXME about i686-pc-windows-msvc stack offsets).
    pub pointee_align: Option<Align>,
}
100
101 impl ArgAttributes {
102 pub fn new() -> Self {
103 ArgAttributes {
104 regular: ArgAttribute::default(),
105 arg_ext: ArgExtension::None,
106 pointee_size: Size::ZERO,
107 pointee_align: None,
108 }
109 }
110
111 pub fn ext(&mut self, ext: ArgExtension) -> &mut Self {
112 assert!(
113 self.arg_ext == ArgExtension::None || self.arg_ext == ext,
114 "cannot set {:?} when {:?} is already set",
115 ext,
116 self.arg_ext
117 );
118 self.arg_ext = ext;
119 self
120 }
121
122 pub fn set(&mut self, attr: ArgAttribute) -> &mut Self {
123 self.regular |= attr;
124 self
125 }
126
127 pub fn contains(&self, attr: ArgAttribute) -> bool {
128 self.regular.contains(attr)
129 }
130 }
131
/// The class of register an argument occupies.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum RegKind {
    Integer,
    Float,
    Vector,
}
138
/// A single register (class + size) used when describing how a value is
/// passed, e.g. as the unit of a homogeneous aggregate or a cast target.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct Reg {
    pub kind: RegKind,
    pub size: Size,
}
144
// Generates a zero-argument constructor (e.g. `Reg::i32()`) for a register of
// the given kind and bit width.
macro_rules! reg_ctor {
    ($name:ident, $kind:ident, $bits:expr) => {
        pub fn $name() -> Reg {
            Reg { kind: RegKind::$kind, size: Size::from_bits($bits) }
        }
    };
}
152
impl Reg {
    // Common integer register shorthands.
    reg_ctor!(i8, Integer, 8);
    reg_ctor!(i16, Integer, 16);
    reg_ctor!(i32, Integer, 32);
    reg_ctor!(i64, Integer, 64);
    reg_ctor!(i128, Integer, 128);

    // Common float register shorthands.
    reg_ctor!(f32, Float, 32);
    reg_ctor!(f64, Float, 64);
}
163
164 impl Reg {
165 pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
166 let dl = cx.data_layout();
167 match self.kind {
168 RegKind::Integer => match self.size.bits() {
169 1 => dl.i1_align.abi,
170 2..=8 => dl.i8_align.abi,
171 9..=16 => dl.i16_align.abi,
172 17..=32 => dl.i32_align.abi,
173 33..=64 => dl.i64_align.abi,
174 65..=128 => dl.i128_align.abi,
175 _ => panic!("unsupported integer: {self:?}"),
176 },
177 RegKind::Float => match self.size.bits() {
178 32 => dl.f32_align.abi,
179 64 => dl.f64_align.abi,
180 _ => panic!("unsupported float: {self:?}"),
181 },
182 RegKind::Vector => dl.vector_align(self.size).abi,
183 }
184 }
185 }
186
/// An argument passed entirely in registers with the
/// same kind (e.g., HFA / HVA on PPC64 and AArch64).
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct Uniform {
    pub unit: Reg,

    /// The total size of the argument, which can be:
    /// * equal to `unit.size` (one scalar/vector),
    /// * a multiple of `unit.size` (an array of scalar/vectors),
    /// * if `unit.kind` is `Integer`, the last element
    ///   can be shorter, i.e., `{ i64, i64, i32 }` for
    ///   64-bit integers with a total size of 20 bytes.
    pub total: Size,
}
201
202 impl From<Reg> for Uniform {
203 fn from(unit: Reg) -> Uniform {
204 Uniform { unit, total: unit.size }
205 }
206 }
207
impl Uniform {
    /// Alignment of the uniform, i.e. the alignment of its unit register.
    pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
        self.unit.align(cx)
    }
}
213
/// Describes the type an argument is cast to when passed: up to eight
/// leading registers (`prefix`) followed by a `Uniform` tail (`rest`).
/// Used by `PassMode::Cast`.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct CastTarget {
    pub prefix: [Option<Reg>; 8],
    pub rest: Uniform,
    pub attrs: ArgAttributes,
}
220
221 impl From<Reg> for CastTarget {
222 fn from(unit: Reg) -> CastTarget {
223 CastTarget::from(Uniform::from(unit))
224 }
225 }
226
227 impl From<Uniform> for CastTarget {
228 fn from(uniform: Uniform) -> CastTarget {
229 CastTarget {
230 prefix: [None; 8],
231 rest: uniform,
232 attrs: ArgAttributes {
233 regular: ArgAttribute::default(),
234 arg_ext: ArgExtension::None,
235 pointee_size: Size::ZERO,
236 pointee_align: None,
237 },
238 }
239 }
240 }
241
242 impl CastTarget {
243 pub fn pair(a: Reg, b: Reg) -> CastTarget {
244 CastTarget {
245 prefix: [Some(a), None, None, None, None, None, None, None],
246 rest: Uniform::from(b),
247 attrs: ArgAttributes {
248 regular: ArgAttribute::default(),
249 arg_ext: ArgExtension::None,
250 pointee_size: Size::ZERO,
251 pointee_align: None,
252 },
253 }
254 }
255
256 pub fn size<C: HasDataLayout>(&self, _cx: &C) -> Size {
257 let mut size = self.rest.total;
258 for i in 0..self.prefix.iter().count() {
259 match self.prefix[i] {
260 Some(v) => size += v.size,
261 None => {}
262 }
263 }
264 return size;
265 }
266
267 pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
268 self.prefix
269 .iter()
270 .filter_map(|x| x.map(|reg| reg.align(cx)))
271 .fold(cx.data_layout().aggregate_align.abi.max(self.rest.align(cx)), |acc, align| {
272 acc.max(align)
273 })
274 }
275 }
276
/// Return value from the `homogeneous_aggregate` test function.
#[derive(Copy, Clone, Debug)]
pub enum HomogeneousAggregate {
    /// Yes, all the "leaf fields" of this struct are passed in the
    /// same way (specified in the `Reg` value).
    Homogeneous(Reg),

    /// There are no leaf fields at all.
    NoData,
}
287
/// Error from the `homogeneous_aggregate` test function, indicating
/// there are distinct leaf fields passed in different ways,
/// or this is uninhabited.
#[derive(Copy, Clone, Debug)]
pub struct Heterogeneous;
293
294 impl HomogeneousAggregate {
295 /// If this is a homogeneous aggregate, returns the homogeneous
296 /// unit, else `None`.
297 pub fn unit(self) -> Option<Reg> {
298 match self {
299 HomogeneousAggregate::Homogeneous(reg) => Some(reg),
300 HomogeneousAggregate::NoData => None,
301 }
302 }
303
304 /// Try to combine two `HomogeneousAggregate`s, e.g. from two fields in
305 /// the same `struct`. Only succeeds if only one of them has any data,
306 /// or both units are identical.
307 fn merge(self, other: HomogeneousAggregate) -> Result<HomogeneousAggregate, Heterogeneous> {
308 match (self, other) {
309 (x, HomogeneousAggregate::NoData) | (HomogeneousAggregate::NoData, x) => Ok(x),
310
311 (HomogeneousAggregate::Homogeneous(a), HomogeneousAggregate::Homogeneous(b)) => {
312 if a != b {
313 return Err(Heterogeneous);
314 }
315 Ok(self)
316 }
317 }
318 }
319 }
320
impl<'a, Ty> TyAndLayout<'a, Ty> {
    /// Whether this layout is passed as an aggregate, i.e. not as a single
    /// scalar or vector immediate.
    fn is_aggregate(&self) -> bool {
        match self.abi {
            Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } => false,
            Abi::ScalarPair(..) | Abi::Aggregate { .. } => true,
        }
    }

    /// Returns `Homogeneous` if this layout is an aggregate containing fields of
    /// only a single type (e.g., `(u32, u32)`). Such aggregates are often
    /// special-cased in ABIs.
    ///
    /// Note: We generally ignore fields of zero-sized type when computing
    /// this value (see #56877).
    ///
    /// This is public so that it can be used in unit tests, but
    /// should generally only be relevant to the ABI details of
    /// specific targets.
    pub fn homogeneous_aggregate<C>(&self, cx: &C) -> Result<HomogeneousAggregate, Heterogeneous>
    where
        Ty: TyAbiInterface<'a, C> + Copy,
    {
        match self.abi {
            // Uninhabited types count as heterogeneous by definition (see the
            // `Heterogeneous` docs).
            Abi::Uninhabited => Err(Heterogeneous),

            // The primitive for this algorithm.
            Abi::Scalar(scalar) => {
                let kind = match scalar.primitive() {
                    abi::Int(..) | abi::Pointer(_) => RegKind::Integer,
                    abi::F32 | abi::F64 => RegKind::Float,
                };
                Ok(HomogeneousAggregate::Homogeneous(Reg { kind, size: self.size }))
            }

            Abi::Vector { .. } => {
                assert!(!self.is_zst());
                Ok(HomogeneousAggregate::Homogeneous(Reg {
                    kind: RegKind::Vector,
                    size: self.size,
                }))
            }

            Abi::ScalarPair(..) | Abi::Aggregate { .. } => {
                // Helper for computing `homogeneous_aggregate`, allowing a custom
                // starting offset (used below for handling variants).
                // Returns the merged aggregate classification plus the total
                // size covered (without trailing padding).
                let from_fields_at =
                    |layout: Self,
                     start: Size|
                     -> Result<(HomogeneousAggregate, Size), Heterogeneous> {
                        let is_union = match layout.fields {
                            FieldsShape::Primitive => {
                                unreachable!("aggregates can't have `FieldsShape::Primitive`")
                            }
                            FieldsShape::Array { count, .. } => {
                                assert_eq!(start, Size::ZERO);

                                // All array elements share one layout, so
                                // classifying element 0 classifies them all.
                                let result = if count > 0 {
                                    layout.field(cx, 0).homogeneous_aggregate(cx)?
                                } else {
                                    HomogeneousAggregate::NoData
                                };
                                return Ok((result, layout.size));
                            }
                            FieldsShape::Union(_) => true,
                            FieldsShape::Arbitrary { .. } => false,
                        };

                        let mut result = HomogeneousAggregate::NoData;
                        let mut total = start;

                        for i in 0..layout.fields.count() {
                            // Non-union fields must be laid out back-to-back:
                            // any gap (padding) disqualifies the aggregate.
                            if !is_union && total != layout.fields.offset(i) {
                                return Err(Heterogeneous);
                            }

                            let field = layout.field(cx, i);

                            result = result.merge(field.homogeneous_aggregate(cx)?)?;

                            // Keep track of the offset (without padding).
                            let size = field.size;
                            if is_union {
                                total = total.max(size);
                            } else {
                                total += size;
                            }
                        }

                        Ok((result, total))
                    };

                let (mut result, mut total) = from_fields_at(*self, Size::ZERO)?;

                match &self.variants {
                    abi::Variants::Single { .. } => {}
                    abi::Variants::Multiple { variants, .. } => {
                        // Treat enum variants like union members.
                        // HACK(eddyb) pretend the `enum` field (discriminant)
                        // is at the start of every variant (otherwise the gap
                        // at the start of all variants would disqualify them).
                        //
                        // NB: for all tagged `enum`s (which include all non-C-like
                        // `enum`s with defined FFI representation), this will
                        // match the homogeneous computation on the equivalent
                        // `struct { tag; union { variant1; ... } }` and/or
                        // `union { struct { tag; variant1; } ... }`
                        // (the offsets of variant fields should be identical
                        // between the two for either to be a homogeneous aggregate).
                        let variant_start = total;
                        for variant_idx in variants.indices() {
                            let (variant_result, variant_total) =
                                from_fields_at(self.for_variant(cx, variant_idx), variant_start)?;

                            result = result.merge(variant_result)?;
                            total = total.max(variant_total);
                        }
                    }
                }

                // There needs to be no padding.
                if total != self.size {
                    Err(Heterogeneous)
                } else {
                    // Sanity-check the invariant that a data-carrying result
                    // covers a nonzero size and `NoData` covers zero.
                    match result {
                        HomogeneousAggregate::Homogeneous(_) => {
                            assert_ne!(total, Size::ZERO);
                        }
                        HomogeneousAggregate::NoData => {
                            assert_eq!(total, Size::ZERO);
                        }
                    }
                    Ok(result)
                }
            }
        }
    }
}
458
/// Information about how to pass an argument to,
/// or return a value from, a function, under some ABI.
#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct ArgAbi<'a, Ty> {
    /// The type and layout of the value being passed.
    pub layout: TyAndLayout<'a, Ty>,
    /// How the value is passed; see `PassMode`.
    pub mode: PassMode,
}
466
impl<'a, Ty> ArgAbi<'a, Ty> {
    /// Picks an initial `PassMode` purely from the layout's ABI
    /// (`Ignore`/`Direct`/`Pair`); target-specific code may adjust it
    /// afterwards (e.g. via `make_indirect` or `cast_to`).
    /// `scalar_attrs` computes the attributes for each scalar, given its
    /// offset within the value.
    pub fn new(
        cx: &impl HasDataLayout,
        layout: TyAndLayout<'a, Ty>,
        scalar_attrs: impl Fn(&TyAndLayout<'a, Ty>, abi::Scalar, Size) -> ArgAttributes,
    ) -> Self {
        let mode = match layout.abi {
            Abi::Uninhabited => PassMode::Ignore,
            Abi::Scalar(scalar) => PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO)),
            Abi::ScalarPair(a, b) => PassMode::Pair(
                scalar_attrs(&layout, a, Size::ZERO),
                // Offset of the second element: the first element's size
                // rounded up to the second element's ABI alignment.
                scalar_attrs(&layout, b, a.size(cx).align_to(b.align(cx).abi)),
            ),
            Abi::Vector { .. } => PassMode::Direct(ArgAttributes::new()),
            Abi::Aggregate { .. } => PassMode::Direct(ArgAttributes::new()),
        };
        ArgAbi { layout, mode }
    }

    /// Builds the `PassMode::Indirect` for `layout`: a hidden pointer with
    /// `NoAlias`/`NoCapture`/`NonNull`/`NoUndef`, plus extra attributes for
    /// the unsized-metadata argument when `layout` is unsized.
    fn indirect_pass_mode(layout: &TyAndLayout<'a, Ty>) -> PassMode {
        let mut attrs = ArgAttributes::new();

        // For non-immediate arguments the callee gets its own copy of
        // the value on the stack, so there are no aliases. It's also
        // program-invisible so can't possibly capture
        attrs
            .set(ArgAttribute::NoAlias)
            .set(ArgAttribute::NoCapture)
            .set(ArgAttribute::NonNull)
            .set(ArgAttribute::NoUndef);
        attrs.pointee_size = layout.size;
        // FIXME(eddyb) We should be doing this, but at least on
        // i686-pc-windows-msvc, it results in wrong stack offsets.
        // attrs.pointee_align = Some(layout.align.abi);

        let extra_attrs = layout.is_unsized().then_some(ArgAttributes::new());

        PassMode::Indirect { attrs, extra_attrs, on_stack: false }
    }

    /// Switches this argument to being passed indirectly (hidden pointer).
    /// No-op if it is already sized-indirect off the stack; panics for any
    /// mode other than `Direct`/`Pair`/that no-op case.
    pub fn make_indirect(&mut self) {
        match self.mode {
            PassMode::Direct(_) | PassMode::Pair(_, _) => {}
            PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: false } => return,
            _ => panic!("Tried to make {:?} indirect", self.mode),
        }

        self.mode = Self::indirect_pass_mode(&self.layout);
    }

    /// Like `make_indirect`, but additionally sets `on_stack`
    /// (i.e. the `byval` LLVM argument attribute — see `PassMode::Indirect`).
    pub fn make_indirect_byval(&mut self) {
        self.make_indirect();
        match self.mode {
            PassMode::Indirect { attrs: _, extra_attrs: _, ref mut on_stack } => {
                *on_stack = true;
            }
            _ => unreachable!(),
        }
    }

    /// For a directly-passed integer scalar narrower than `bits`, requests
    /// sign- or zero-extension according to the integer's signedness.
    /// Does nothing for non-integer scalars or non-`Direct` modes.
    pub fn extend_integer_width_to(&mut self, bits: u64) {
        // Only integers have signedness
        if let Abi::Scalar(scalar) = self.layout.abi {
            if let abi::Int(i, signed) = scalar.primitive() {
                if i.size().bits() < bits {
                    if let PassMode::Direct(ref mut attrs) = self.mode {
                        if signed {
                            attrs.ext(ArgExtension::Sext)
                        } else {
                            attrs.ext(ArgExtension::Zext)
                        };
                    }
                }
            }
        }
    }

    /// Passes the argument cast to `target`, with no padding argument.
    pub fn cast_to<T: Into<CastTarget>>(&mut self, target: T) {
        self.mode = PassMode::Cast(Box::new(target.into()), false);
    }

    /// Passes the argument cast to `target`; `pad_i32` requests a dummy
    /// `i32` argument before it (see `PassMode::Cast`).
    pub fn cast_to_and_pad_i32<T: Into<CastTarget>>(&mut self, target: T, pad_i32: bool) {
        self.mode = PassMode::Cast(Box::new(target.into()), pad_i32);
    }

    /// Whether the argument is passed indirectly (sized or unsized).
    pub fn is_indirect(&self) -> bool {
        matches!(self.mode, PassMode::Indirect { .. })
    }

    /// Whether the argument is passed indirectly without unsized metadata.
    pub fn is_sized_indirect(&self) -> bool {
        matches!(self.mode, PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ })
    }

    /// Whether the argument is passed indirectly with unsized metadata
    /// (vtable or length).
    pub fn is_unsized_indirect(&self) -> bool {
        matches!(self.mode, PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ })
    }

    /// Whether the argument is ignored entirely.
    pub fn is_ignore(&self) -> bool {
        matches!(self.mode, PassMode::Ignore)
    }
}
568
/// Calling convention used when lowering a function's ABI.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Conv {
    // General language calling conventions, for which every target
    // should have its own backend (e.g. LLVM) support.
    C,
    Rust,

    /// For things unlikely to be called, where smaller caller codegen is
    /// preferred over raw speed.
    /// Stronger than just `#[cold]` because `fn` pointers might be incompatible.
    RustCold,

    // Target-specific calling conventions.
    ArmAapcs,
    CCmseNonSecureCall,

    Msp430Intr,

    PtxKernel,

    X86Fastcall,
    X86Intr,
    X86Stdcall,
    X86ThisCall,
    X86VectorCall,

    X86_64SysV,
    X86_64Win64,

    AmdGpuKernel,
    AvrInterrupt,
    AvrNonBlockingInterrupt,
}
602
/// Metadata describing how the arguments to a native function
/// should be passed in order to respect the native ABI.
///
/// I will do my best to describe this structure, but these
/// comments are reverse-engineered and may be inaccurate. -NDM
#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct FnAbi<'a, Ty> {
    /// The LLVM types of each argument.
    pub args: Box<[ArgAbi<'a, Ty>]>,

    /// LLVM return type.
    pub ret: ArgAbi<'a, Ty>,

    /// Whether the function takes C-style variadic arguments (`...`).
    pub c_variadic: bool,

    /// The count of non-variadic arguments.
    ///
    /// Should only be different from args.len() when c_variadic is true.
    /// This can be used to know whether an argument is variadic or not.
    pub fixed_count: u32,

    /// Calling convention to use; see `Conv`.
    pub conv: Conv,

    /// Whether unwinding may propagate out of this function.
    pub can_unwind: bool,
}
628
/// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
#[derive(Copy, Clone, Debug, HashStable_Generic)]
pub enum AdjustForForeignAbiError {
    /// Target architecture doesn't support "foreign" (i.e. non-Rust) ABIs.
    Unsupported { arch: Symbol, abi: spec::abi::Abi },
}
635
636 impl fmt::Display for AdjustForForeignAbiError {
637 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
638 match self {
639 Self::Unsupported { arch, abi } => {
640 write!(f, "target architecture {arch:?} does not support `extern {abi}` ABI")
641 }
642 }
643 }
644 }
645
impl<'a, Ty> FnAbi<'a, Ty> {
    /// Adjusts this `FnAbi` in place for a non-Rust (`extern "..."`) ABI by
    /// dispatching to the per-architecture `compute_abi_info` implementations.
    /// Returns `Unsupported` for architectures with no such implementation.
    pub fn adjust_for_foreign_abi<C>(
        &mut self,
        cx: &C,
        abi: spec::abi::Abi,
    ) -> Result<(), AdjustForForeignAbiError>
    where
        Ty: TyAbiInterface<'a, C> + Copy,
        C: HasDataLayout + HasTargetSpec,
    {
        // `extern "x86-interrupt"` is handled uniformly: the first argument
        // (if any) is passed indirectly byval, and no per-arch lowering runs.
        if abi == spec::abi::Abi::X86Interrupt {
            if let Some(arg) = self.args.first_mut() {
                arg.make_indirect_byval();
            }
            return Ok(());
        }

        match &cx.target_spec().arch[..] {
            "x86" => {
                // fastcall/vectorcall get their own register-assignment flavor.
                let flavor = if let spec::abi::Abi::Fastcall { .. }
                | spec::abi::Abi::Vectorcall { .. } = abi
                {
                    x86::Flavor::FastcallOrVectorcall
                } else {
                    x86::Flavor::General
                };
                x86::compute_abi_info(cx, self, flavor);
            }
            "x86_64" => match abi {
                spec::abi::Abi::SysV64 { .. } => x86_64::compute_abi_info(cx, self),
                spec::abi::Abi::Win64 { .. } => x86_win64::compute_abi_info(self),
                // Other ABIs follow the platform default calling convention.
                _ => {
                    if cx.target_spec().is_like_windows {
                        x86_win64::compute_abi_info(self)
                    } else {
                        x86_64::compute_abi_info(cx, self)
                    }
                }
            },
            "aarch64" => {
                let param_policy = if cx.target_spec().is_like_osx {
                    aarch64::ParamExtension::ExtendTo32Bits
                } else {
                    aarch64::ParamExtension::NoExtension
                };
                aarch64::compute_abi_info(cx, self, param_policy)
            }
            "amdgpu" => amdgpu::compute_abi_info(cx, self),
            "arm" => arm::compute_abi_info(cx, self),
            "avr" => avr::compute_abi_info(self),
            "loongarch64" => loongarch::compute_abi_info(cx, self),
            "m68k" => m68k::compute_abi_info(self),
            "mips" => mips::compute_abi_info(cx, self),
            "mips64" => mips64::compute_abi_info(cx, self),
            "powerpc" => powerpc::compute_abi_info(self),
            "powerpc64" => powerpc64::compute_abi_info(cx, self),
            "s390x" => s390x::compute_abi_info(cx, self),
            "msp430" => msp430::compute_abi_info(self),
            "sparc" => sparc::compute_abi_info(cx, self),
            "sparc64" => sparc64::compute_abi_info(cx, self),
            "nvptx64" => {
                if cx.target_spec().adjust_abi(abi) == spec::abi::Abi::PtxKernel {
                    nvptx64::compute_ptx_kernel_abi_info(cx, self)
                } else {
                    nvptx64::compute_abi_info(self)
                }
            }
            "hexagon" => hexagon::compute_abi_info(self),
            "riscv32" | "riscv64" => riscv::compute_abi_info(cx, self),
            "wasm32" | "wasm64" => {
                if cx.target_spec().adjust_abi(abi) == spec::abi::Abi::Wasm {
                    wasm::compute_wasm_abi_info(self)
                } else {
                    wasm::compute_c_abi_info(cx, self)
                }
            }
            "asmjs" => wasm::compute_c_abi_info(cx, self),
            "bpf" => bpf::compute_abi_info(self),
            arch => {
                return Err(AdjustForForeignAbiError::Unsupported {
                    arch: Symbol::intern(arch),
                    abi,
                });
            }
        }

        Ok(())
    }
}
735
736 impl FromStr for Conv {
737 type Err = String;
738
739 fn from_str(s: &str) -> Result<Self, Self::Err> {
740 match s {
741 "C" => Ok(Conv::C),
742 "Rust" => Ok(Conv::Rust),
743 "RustCold" => Ok(Conv::Rust),
744 "ArmAapcs" => Ok(Conv::ArmAapcs),
745 "CCmseNonSecureCall" => Ok(Conv::CCmseNonSecureCall),
746 "Msp430Intr" => Ok(Conv::Msp430Intr),
747 "PtxKernel" => Ok(Conv::PtxKernel),
748 "X86Fastcall" => Ok(Conv::X86Fastcall),
749 "X86Intr" => Ok(Conv::X86Intr),
750 "X86Stdcall" => Ok(Conv::X86Stdcall),
751 "X86ThisCall" => Ok(Conv::X86ThisCall),
752 "X86VectorCall" => Ok(Conv::X86VectorCall),
753 "X86_64SysV" => Ok(Conv::X86_64SysV),
754 "X86_64Win64" => Ok(Conv::X86_64Win64),
755 "AmdGpuKernel" => Ok(Conv::AmdGpuKernel),
756 "AvrInterrupt" => Ok(Conv::AvrInterrupt),
757 "AvrNonBlockingInterrupt" => Ok(Conv::AvrNonBlockingInterrupt),
758 _ => Err(format!("'{s}' is not a valid value for entry function call convention.")),
759 }
760 }
761 }
762
// Some types are used a lot. Make sure they don't unintentionally get bigger.
// Only checked on 64-bit x86 hosts, where the expected sizes below apply.
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
mod size_asserts {
    use super::*;
    use rustc_data_structures::static_assert_size;
    // tidy-alphabetical-start
    static_assert_size!(ArgAbi<'_, usize>, 56);
    static_assert_size!(FnAbi<'_, usize>, 80);
    // tidy-alphabetical-end
}