// src/librustc_target/abi/call/mod.rs
// Upstream version: rustc 1.46.0~beta.2 (via the git.proxmox.com rustc.git mirror)
1 use crate::abi::{self, Abi, Align, FieldsShape, Size};
2 use crate::abi::{HasDataLayout, LayoutOf, TyAndLayout, TyAndLayoutMethods};
3 use crate::spec::{self, HasTargetSpec};
4
5 mod aarch64;
6 mod amdgpu;
7 mod arm;
8 mod avr;
9 mod hexagon;
10 mod mips;
11 mod mips64;
12 mod msp430;
13 mod nvptx;
14 mod nvptx64;
15 mod powerpc;
16 mod powerpc64;
17 mod riscv;
18 mod s390x;
19 mod sparc;
20 mod sparc64;
21 mod wasm32;
22 mod wasm32_bindgen_compat;
23 mod x86;
24 mod x86_64;
25 mod x86_win64;
26
/// How a value is passed to, or returned from, a function at the ABI level.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum PassMode {
    /// Ignore the argument.
    Ignore,
    /// Pass the argument directly.
    Direct(ArgAttributes),
    /// Pass a pair's elements directly in two arguments.
    Pair(ArgAttributes, ArgAttributes),
    /// Pass the argument after casting it, to either
    /// a single uniform or a pair of registers.
    Cast(CastTarget),
    /// Pass the argument indirectly via a hidden pointer.
    /// The second value, if any, is for the extra data (vtable or length)
    /// which indicates that it refers to an unsized rvalue.
    Indirect(ArgAttributes, Option<ArgAttributes>),
}
43
44 // Hack to disable non_upper_case_globals only for the bitflags! and not for the rest
45 // of this module
46 pub use attr_impl::ArgAttribute;
47
#[allow(non_upper_case_globals)]
#[allow(unused)]
mod attr_impl {
    // The subset of llvm::Attribute needed for arguments, packed into a bitfield.
    // The flag names are kept in LLVM's mixed case, hence the
    // `non_upper_case_globals` allowance on this module.
    bitflags::bitflags! {
        #[derive(Default)]
        pub struct ArgAttribute: u16 {
            const ByVal = 1 << 0;
            const NoAlias = 1 << 1;
            const NoCapture = 1 << 2;
            const NonNull = 1 << 3;
            const ReadOnly = 1 << 4;
            const SExt = 1 << 5;
            const StructRet = 1 << 6;
            const ZExt = 1 << 7;
            const InReg = 1 << 8;
        }
    }
}
67
/// A compact representation of LLVM attributes (at least those relevant for this module)
/// that can be manipulated without interacting with LLVM's Attribute machinery.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct ArgAttributes {
    /// The flag-like attributes (see `ArgAttribute`).
    pub regular: ArgAttribute,
    /// The minimum size of the pointee, guaranteed to be valid for the duration of the whole call
    /// (corresponding to LLVM's dereferenceable and dereferenceable_or_null attributes).
    pub pointee_size: Size,
    /// The alignment the pointee is known to have, if any.
    pub pointee_align: Option<Align>,
}
78
79 impl ArgAttributes {
80 pub fn new() -> Self {
81 ArgAttributes {
82 regular: ArgAttribute::default(),
83 pointee_size: Size::ZERO,
84 pointee_align: None,
85 }
86 }
87
88 pub fn set(&mut self, attr: ArgAttribute) -> &mut Self {
89 self.regular |= attr;
90 self
91 }
92
93 pub fn contains(&self, attr: ArgAttribute) -> bool {
94 self.regular.contains(attr)
95 }
96 }
97
/// The class of register a value is passed in.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum RegKind {
    /// General-purpose/integer registers.
    Integer,
    /// Floating-point registers.
    Float,
    /// Vector registers.
    Vector,
}
104
/// A single register: its class together with the width of the value it holds.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct Reg {
    /// The register class.
    pub kind: RegKind,
    /// The width of the value passed in this register.
    pub size: Size,
}
110
/// Generates a shorthand constructor (e.g. `Reg::i32()`) for a `Reg`
/// of the given kind and bit width.
macro_rules! reg_ctor {
    ($name:ident, $kind:ident, $bits:expr) => {
        pub fn $name() -> Reg {
            Reg { kind: RegKind::$kind, size: Size::from_bits($bits) }
        }
    };
}
118
impl Reg {
    // Shorthand constructors for the common integer register widths.
    reg_ctor!(i8, Integer, 8);
    reg_ctor!(i16, Integer, 16);
    reg_ctor!(i32, Integer, 32);
    reg_ctor!(i64, Integer, 64);
    reg_ctor!(i128, Integer, 128);

    // Shorthand constructors for the float register widths.
    reg_ctor!(f32, Float, 32);
    reg_ctor!(f64, Float, 64);
}
129
130 impl Reg {
131 pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
132 let dl = cx.data_layout();
133 match self.kind {
134 RegKind::Integer => match self.size.bits() {
135 1 => dl.i1_align.abi,
136 2..=8 => dl.i8_align.abi,
137 9..=16 => dl.i16_align.abi,
138 17..=32 => dl.i32_align.abi,
139 33..=64 => dl.i64_align.abi,
140 65..=128 => dl.i128_align.abi,
141 _ => panic!("unsupported integer: {:?}", self),
142 },
143 RegKind::Float => match self.size.bits() {
144 32 => dl.f32_align.abi,
145 64 => dl.f64_align.abi,
146 _ => panic!("unsupported float: {:?}", self),
147 },
148 RegKind::Vector => dl.vector_align(self.size).abi,
149 }
150 }
151 }
152
/// An argument passed entirely in registers with the
/// same kind (e.g., HFA / HVA on PPC64 and AArch64).
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Uniform {
    /// The single register (unit) this argument is repeated in.
    pub unit: Reg,

    /// The total size of the argument, which can be:
    /// * equal to `unit.size` (one scalar/vector),
    /// * a multiple of `unit.size` (an array of scalar/vectors),
    /// * if `unit.kind` is `Integer`, the last element
    ///   can be shorter, i.e., `{ i64, i64, i32 }` for
    ///   64-bit integers with a total size of 20 bytes.
    pub total: Size,
}
167
168 impl From<Reg> for Uniform {
169 fn from(unit: Reg) -> Uniform {
170 Uniform { unit, total: unit.size }
171 }
172 }
173
impl Uniform {
    /// The alignment of a uniform is that of its unit register.
    pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
        self.unit.align(cx)
    }
}
179
/// The type an argument is cast to for passing: up to eight leading
/// register "chunks" of uniform size, followed by a uniform remainder.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct CastTarget {
    /// Register kinds of the leading chunks; `None` entries are unused slots.
    pub prefix: [Option<RegKind>; 8],
    /// The size of each occupied prefix chunk.
    pub prefix_chunk: Size,
    /// The remainder of the cast target.
    pub rest: Uniform,
}
186
187 impl From<Reg> for CastTarget {
188 fn from(unit: Reg) -> CastTarget {
189 CastTarget::from(Uniform::from(unit))
190 }
191 }
192
193 impl From<Uniform> for CastTarget {
194 fn from(uniform: Uniform) -> CastTarget {
195 CastTarget { prefix: [None; 8], prefix_chunk: Size::ZERO, rest: uniform }
196 }
197 }
198
199 impl CastTarget {
200 pub fn pair(a: Reg, b: Reg) -> CastTarget {
201 CastTarget {
202 prefix: [Some(a.kind), None, None, None, None, None, None, None],
203 prefix_chunk: a.size,
204 rest: Uniform::from(b),
205 }
206 }
207
208 pub fn size<C: HasDataLayout>(&self, cx: &C) -> Size {
209 (self.prefix_chunk * self.prefix.iter().filter(|x| x.is_some()).count() as u64)
210 .align_to(self.rest.align(cx))
211 + self.rest.total
212 }
213
214 pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
215 self.prefix
216 .iter()
217 .filter_map(|x| x.map(|kind| Reg { kind, size: self.prefix_chunk }.align(cx)))
218 .fold(cx.data_layout().aggregate_align.abi.max(self.rest.align(cx)), |acc, align| {
219 acc.max(align)
220 })
221 }
222 }
223
/// Return value from the `homogeneous_aggregate` test function.
#[derive(Copy, Clone, Debug)]
pub enum HomogeneousAggregate {
    /// Yes, all the "leaf fields" of this struct are passed in the
    /// same way (specified in the `Reg` value).
    Homogeneous(Reg),

    /// There are no leaf fields at all.
    NoData,
}
234
/// Error from the `homogeneous_aggregate` test function, indicating
/// there are distinct leaf fields passed in different ways,
/// or this is uninhabited.
#[derive(Copy, Clone, Debug)]
pub struct Heterogeneous;
240
241 impl HomogeneousAggregate {
242 /// If this is a homogeneous aggregate, returns the homogeneous
243 /// unit, else `None`.
244 pub fn unit(self) -> Option<Reg> {
245 match self {
246 HomogeneousAggregate::Homogeneous(reg) => Some(reg),
247 HomogeneousAggregate::NoData => None,
248 }
249 }
250
251 /// Try to combine two `HomogeneousAggregate`s, e.g. from two fields in
252 /// the same `struct`. Only succeeds if only one of them has any data,
253 /// or both units are identical.
254 fn merge(self, other: HomogeneousAggregate) -> Result<HomogeneousAggregate, Heterogeneous> {
255 match (self, other) {
256 (x, HomogeneousAggregate::NoData) | (HomogeneousAggregate::NoData, x) => Ok(x),
257
258 (HomogeneousAggregate::Homogeneous(a), HomogeneousAggregate::Homogeneous(b)) => {
259 if a != b {
260 return Err(Heterogeneous);
261 }
262 Ok(self)
263 }
264 }
265 }
266 }
267
impl<'a, Ty> TyAndLayout<'a, Ty> {
    /// Whether this layout is passed as an aggregate at the ABI level
    /// (`ScalarPair`/`Aggregate`), as opposed to a single scalar, a
    /// vector, or an uninhabited type.
    fn is_aggregate(&self) -> bool {
        match self.abi {
            Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } => false,
            Abi::ScalarPair(..) | Abi::Aggregate { .. } => true,
        }
    }

    /// Returns `Homogeneous` if this layout is an aggregate containing fields of
    /// only a single type (e.g., `(u32, u32)`). Such aggregates are often
    /// special-cased in ABIs.
    ///
    /// Note: We generally ignore fields of zero-sized type when computing
    /// this value (see #56877).
    ///
    /// This is public so that it can be used in unit tests, but
    /// should generally only be relevant to the ABI details of
    /// specific targets.
    pub fn homogeneous_aggregate<C>(&self, cx: &C) -> Result<HomogeneousAggregate, Heterogeneous>
    where
        Ty: TyAndLayoutMethods<'a, C> + Copy,
        C: LayoutOf<Ty = Ty, TyAndLayout = Self>,
    {
        match self.abi {
            Abi::Uninhabited => Err(Heterogeneous),

            // The primitive for this algorithm.
            Abi::Scalar(ref scalar) => {
                // Integers and pointers both count as the `Integer` register class.
                let kind = match scalar.value {
                    abi::Int(..) | abi::Pointer => RegKind::Integer,
                    abi::F32 | abi::F64 => RegKind::Float,
                };
                Ok(HomogeneousAggregate::Homogeneous(Reg { kind, size: self.size }))
            }

            Abi::Vector { .. } => {
                assert!(!self.is_zst());
                Ok(HomogeneousAggregate::Homogeneous(Reg {
                    kind: RegKind::Vector,
                    size: self.size,
                }))
            }

            Abi::ScalarPair(..) | Abi::Aggregate { .. } => {
                // Helper for computing `homogeneous_aggregate`, allowing a custom
                // starting offset (used below for handling variants).
                let from_fields_at =
                    |layout: Self,
                     start: Size|
                     -> Result<(HomogeneousAggregate, Size), Heterogeneous> {
                        let is_union = match layout.fields {
                            FieldsShape::Primitive => {
                                unreachable!("aggregates can't have `FieldsShape::Primitive`")
                            }
                            FieldsShape::Array { count, .. } => {
                                // An array is homogeneous iff its element is, so a
                                // single recursive check of element 0 suffices.
                                assert_eq!(start, Size::ZERO);

                                let result = if count > 0 {
                                    layout.field(cx, 0).homogeneous_aggregate(cx)?
                                } else {
                                    HomogeneousAggregate::NoData
                                };
                                return Ok((result, layout.size));
                            }
                            FieldsShape::Union(_) => true,
                            FieldsShape::Arbitrary { .. } => false,
                        };

                        let mut result = HomogeneousAggregate::NoData;
                        let mut total = start;

                        for i in 0..layout.fields.count() {
                            // In a non-union, any gap between the running offset and
                            // this field's offset is padding, which disqualifies.
                            if !is_union && total != layout.fields.offset(i) {
                                return Err(Heterogeneous);
                            }

                            let field = layout.field(cx, i);

                            result = result.merge(field.homogeneous_aggregate(cx)?)?;

                            // Keep track of the offset (without padding).
                            let size = field.size;
                            if is_union {
                                // Union members all start at the same offset;
                                // the union covers the largest of them.
                                total = total.max(size);
                            } else {
                                total += size;
                            }
                        }

                        Ok((result, total))
                    };

                let (mut result, mut total) = from_fields_at(*self, Size::ZERO)?;

                match &self.variants {
                    abi::Variants::Single { .. } => {}
                    abi::Variants::Multiple { variants, .. } => {
                        // Treat enum variants like union members.
                        // HACK(eddyb) pretend the `enum` field (discriminant)
                        // is at the start of every variant (otherwise the gap
                        // at the start of all variants would disqualify them).
                        //
                        // NB: for all tagged `enum`s (which include all non-C-like
                        // `enum`s with defined FFI representation), this will
                        // match the homogeneous computation on the equivalent
                        // `struct { tag; union { variant1; ... } }` and/or
                        // `union { struct { tag; variant1; } ... }`
                        // (the offsets of variant fields should be identical
                        // between the two for either to be a homogeneous aggregate).
                        let variant_start = total;
                        for variant_idx in variants.indices() {
                            let (variant_result, variant_total) =
                                from_fields_at(self.for_variant(cx, variant_idx), variant_start)?;

                            result = result.merge(variant_result)?;
                            total = total.max(variant_total);
                        }
                    }
                }

                // There needs to be no padding.
                if total != self.size {
                    Err(Heterogeneous)
                } else {
                    // Sanity-check the result against the computed size: data
                    // implies a nonzero size and vice versa.
                    match result {
                        HomogeneousAggregate::Homogeneous(_) => {
                            assert_ne!(total, Size::ZERO);
                        }
                        HomogeneousAggregate::NoData => {
                            assert_eq!(total, Size::ZERO);
                        }
                    }
                    Ok(result)
                }
            }
        }
    }
}
406
/// Information about how to pass an argument to,
/// or return a value from, a function, under some ABI.
#[derive(Debug)]
pub struct ArgAbi<'a, Ty> {
    /// The type and computed layout of the argument.
    pub layout: TyAndLayout<'a, Ty>,

    /// Dummy argument, which is emitted before the real argument.
    pub pad: Option<Reg>,

    /// How the argument is actually passed (see `PassMode`).
    pub mode: PassMode,
}
418
419 impl<'a, Ty> ArgAbi<'a, Ty> {
420 pub fn new(layout: TyAndLayout<'a, Ty>) -> Self {
421 ArgAbi { layout, pad: None, mode: PassMode::Direct(ArgAttributes::new()) }
422 }
423
424 pub fn make_indirect(&mut self) {
425 assert_eq!(self.mode, PassMode::Direct(ArgAttributes::new()));
426
427 // Start with fresh attributes for the pointer.
428 let mut attrs = ArgAttributes::new();
429
430 // For non-immediate arguments the callee gets its own copy of
431 // the value on the stack, so there are no aliases. It's also
432 // program-invisible so can't possibly capture
433 attrs.set(ArgAttribute::NoAlias).set(ArgAttribute::NoCapture).set(ArgAttribute::NonNull);
434 attrs.pointee_size = self.layout.size;
435 // FIXME(eddyb) We should be doing this, but at least on
436 // i686-pc-windows-msvc, it results in wrong stack offsets.
437 // attrs.pointee_align = Some(self.layout.align.abi);
438
439 let extra_attrs = self.layout.is_unsized().then_some(ArgAttributes::new());
440
441 self.mode = PassMode::Indirect(attrs, extra_attrs);
442 }
443
444 pub fn make_indirect_byval(&mut self) {
445 self.make_indirect();
446 match self.mode {
447 PassMode::Indirect(ref mut attrs, _) => {
448 attrs.set(ArgAttribute::ByVal);
449 }
450 _ => unreachable!(),
451 }
452 }
453
454 pub fn extend_integer_width_to(&mut self, bits: u64) {
455 // Only integers have signedness
456 if let Abi::Scalar(ref scalar) = self.layout.abi {
457 if let abi::Int(i, signed) = scalar.value {
458 if i.size().bits() < bits {
459 if let PassMode::Direct(ref mut attrs) = self.mode {
460 attrs.set(if signed { ArgAttribute::SExt } else { ArgAttribute::ZExt });
461 }
462 }
463 }
464 }
465 }
466
467 pub fn cast_to<T: Into<CastTarget>>(&mut self, target: T) {
468 assert_eq!(self.mode, PassMode::Direct(ArgAttributes::new()));
469 self.mode = PassMode::Cast(target.into());
470 }
471
472 pub fn pad_with(&mut self, reg: Reg) {
473 self.pad = Some(reg);
474 }
475
476 pub fn is_indirect(&self) -> bool {
477 match self.mode {
478 PassMode::Indirect(..) => true,
479 _ => false,
480 }
481 }
482
483 pub fn is_sized_indirect(&self) -> bool {
484 match self.mode {
485 PassMode::Indirect(_, None) => true,
486 _ => false,
487 }
488 }
489
490 pub fn is_unsized_indirect(&self) -> bool {
491 match self.mode {
492 PassMode::Indirect(_, Some(_)) => true,
493 _ => false,
494 }
495 }
496
497 pub fn is_ignore(&self) -> bool {
498 match self.mode {
499 PassMode::Ignore => true,
500 _ => false,
501 }
502 }
503 }
504
/// The calling convention used for a function.
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum Conv {
    // General language calling conventions, for which every target
    // should have its own backend (e.g. LLVM) support.
    C,
    Rust,

    // Target-specific calling conventions.
    ArmAapcs,

    Msp430Intr,

    PtxKernel,

    X86Fastcall,
    X86Intr,
    X86Stdcall,
    X86ThisCall,
    X86VectorCall,

    X86_64SysV,
    X86_64Win64,

    AmdGpuKernel,
    AvrInterrupt,
    AvrNonBlockingInterrupt,
}
532
/// Metadata describing how the arguments to a native function
/// should be passed in order to respect the native ABI.
///
/// I will do my best to describe this structure, but these
/// comments are reverse-engineered and may be inaccurate. -NDM
#[derive(Debug)]
pub struct FnAbi<'a, Ty> {
    /// The LLVM types of each argument.
    pub args: Vec<ArgAbi<'a, Ty>>,

    /// LLVM return type.
    pub ret: ArgAbi<'a, Ty>,

    /// Whether the function takes C-style variadic arguments.
    pub c_variadic: bool,

    /// The count of non-variadic arguments.
    ///
    /// Should only be different from args.len() when c_variadic is true.
    /// This can be used to know whether an argument is variadic or not.
    pub fixed_count: usize,

    /// The calling convention to use (see `Conv`).
    pub conv: Conv,

    // NOTE(review): presumably whether unwinding may propagate out of this
    // function; semantics inferred from the name — confirm at use sites.
    pub can_unwind: bool,
}
558
impl<'a, Ty> FnAbi<'a, Ty> {
    /// Adjusts this `FnAbi` in place to match the C ABI of the target
    /// architecture named in `cx.target_spec().arch`, by dispatching to the
    /// per-target `compute_abi_info` implementation, then marks an indirect
    /// return slot with `StructRet`.
    ///
    /// Returns `Err` with a message if the architecture string is unrecognized.
    pub fn adjust_for_cabi<C>(&mut self, cx: &C, abi: spec::abi::Abi) -> Result<(), String>
    where
        Ty: TyAndLayoutMethods<'a, C> + Copy,
        C: LayoutOf<Ty = Ty, TyAndLayout = TyAndLayout<'a, Ty>> + HasDataLayout + HasTargetSpec,
    {
        match &cx.target_spec().arch[..] {
            "x86" => {
                // `extern "fastcall"` gets its own register-passing flavor;
                // everything else uses the general x86 C convention.
                let flavor = if abi == spec::abi::Abi::Fastcall {
                    x86::Flavor::Fastcall
                } else {
                    x86::Flavor::General
                };
                x86::compute_abi_info(cx, self, flavor);
            }
            "x86_64" => {
                // Windows-like targets get the Win64 convention even for plain
                // C calls; everything else defaults to System V.
                if abi == spec::abi::Abi::SysV64 {
                    x86_64::compute_abi_info(cx, self);
                } else if abi == spec::abi::Abi::Win64 || cx.target_spec().options.is_like_windows {
                    x86_win64::compute_abi_info(self);
                } else {
                    x86_64::compute_abi_info(cx, self);
                }
            }
            "aarch64" => aarch64::compute_abi_info(cx, self),
            "amdgpu" => amdgpu::compute_abi_info(cx, self),
            "arm" => arm::compute_abi_info(cx, self),
            "avr" => avr::compute_abi_info(self),
            "mips" => mips::compute_abi_info(cx, self),
            "mips64" => mips64::compute_abi_info(cx, self),
            "powerpc" => powerpc::compute_abi_info(self),
            "powerpc64" => powerpc64::compute_abi_info(cx, self),
            "s390x" => s390x::compute_abi_info(cx, self),
            "msp430" => msp430::compute_abi_info(self),
            "sparc" => sparc::compute_abi_info(cx, self),
            "sparc64" => sparc64::compute_abi_info(cx, self),
            "nvptx" => nvptx::compute_abi_info(self),
            "nvptx64" => nvptx64::compute_abi_info(self),
            "hexagon" => hexagon::compute_abi_info(self),
            "riscv32" | "riscv64" => riscv::compute_abi_info(cx, self),
            // Non-emscripten wasm32 uses the bindgen-compatible scheme.
            "wasm32" if cx.target_spec().target_os != "emscripten" => {
                wasm32_bindgen_compat::compute_abi_info(self)
            }
            "wasm32" | "asmjs" => wasm32::compute_abi_info(cx, self),
            a => return Err(format!("unrecognized arch \"{}\" in target specification", a)),
        }

        // Indirect returns go through a hidden pointer marked `StructRet`.
        if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode {
            attrs.set(ArgAttribute::StructRet);
        }

        Ok(())
    }
}