// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use llvm::{self, ValueRef, AttributePlace};
use base;
use builder::Builder;
use common::{type_is_fat_ptr, C_uint};
use context::CrateContext;
use cabi_x86;
use cabi_x86_64;
use cabi_x86_win64;
use cabi_arm;
use cabi_aarch64;
use cabi_powerpc;
use cabi_powerpc64;
use cabi_s390x;
use cabi_mips;
use cabi_mips64;
use cabi_asmjs;
use cabi_msp430;
use cabi_sparc;
use cabi_sparc64;
use cabi_nvptx;
use cabi_nvptx64;
use machine::llalign_of_min;
use type_::Type;
use type_of;

use rustc::hir;
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Layout, LayoutTyper, TyLayout, Size};

use libc::c_uint;
use std::cmp;
use std::iter;

pub use syntax::abi::Abi;
pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};

#[derive(Clone, Copy, PartialEq, Debug)]
enum ArgKind {
    /// Pass the argument directly using the normal converted
    /// LLVM type, or by coercing to another specified type.
    Direct,
    /// Pass the argument indirectly via a hidden pointer.
    Indirect,
    /// Ignore the argument (useful for empty structs).
    Ignore,
}

// Hack to disable non_upper_case_globals only for the bitflags! and not for the rest
// of this module
pub use self::attr_impl::ArgAttribute;

#[allow(non_upper_case_globals)]
#[allow(unused)]
mod attr_impl {
    // The subset of llvm::Attribute needed for arguments, packed into a bitfield.
    bitflags! {
        #[derive(Default, Debug)]
        flags ArgAttribute : u16 {
            const ByVal = 1 << 0,
            const NoAlias = 1 << 1,
            const NoCapture = 1 << 2,
            const NonNull = 1 << 3,
            const ReadOnly = 1 << 4,
            const SExt = 1 << 5,
            const StructRet = 1 << 6,
            const ZExt = 1 << 7,
            const InReg = 1 << 8,
        }
    }
}

macro_rules! for_each_kind {
    ($flags: ident, $f: ident, $($kind: ident),+) => ({
        $(if $flags.contains(ArgAttribute::$kind) { $f(llvm::Attribute::$kind) })+
    })
}

impl ArgAttribute {
    fn for_each_kind<F>(&self, mut f: F) where F: FnMut(llvm::Attribute) {
        for_each_kind!(self, f,
                       ByVal, NoAlias, NoCapture, NonNull, ReadOnly, SExt, StructRet, ZExt, InReg)
    }
}

/// A compact representation of LLVM attributes (at least those relevant for this module)
/// that can be manipulated without interacting with LLVM's Attribute machinery.
#[derive(Copy, Clone, Debug, Default)]
pub struct ArgAttributes {
    regular: ArgAttribute,
    dereferenceable_bytes: u64,
}

impl ArgAttributes {
    pub fn set(&mut self, attr: ArgAttribute) -> &mut Self {
        self.regular = self.regular | attr;
        self
    }

    pub fn set_dereferenceable(&mut self, bytes: u64) -> &mut Self {
        self.dereferenceable_bytes = bytes;
        self
    }

    pub fn apply_llfn(&self, idx: AttributePlace, llfn: ValueRef) {
        unsafe {
            self.regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
            if self.dereferenceable_bytes != 0 {
                llvm::LLVMRustAddDereferenceableAttr(llfn,
                                                     idx.as_uint(),
                                                     self.dereferenceable_bytes);
            }
        }
    }

    pub fn apply_callsite(&self, idx: AttributePlace, callsite: ValueRef) {
        unsafe {
            self.regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
            if self.dereferenceable_bytes != 0 {
                llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite,
                                                             idx.as_uint(),
                                                             self.dereferenceable_bytes);
            }
        }
    }
}
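
// A sketch (illustrative only, not part of this module) of typical usage:
// `set` and `set_dereferenceable` return `&mut Self` so calls chain, and
// `apply_llfn` later attaches the accumulated attributes to one argument
// position of an LLVM function value (`llfn` is a hypothetical name here):
//
//     let mut attrs = ArgAttributes::default();
//     attrs.set(ArgAttribute::NoAlias)
//          .set(ArgAttribute::NoCapture)
//          .set_dereferenceable(8);
//     attrs.apply_llfn(AttributePlace::Argument(0), llfn);
//     // adds `noalias`, `nocapture` and `dereferenceable(8)` to argument 0
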
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum RegKind {
    Integer,
    Float,
    Vector
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct Reg {
    pub kind: RegKind,
    pub size: Size,
}

macro_rules! reg_ctor {
    ($name:ident, $kind:ident, $bits:expr) => {
        pub fn $name() -> Reg {
            Reg {
                kind: RegKind::$kind,
                size: Size::from_bits($bits)
            }
        }
    }
}

impl Reg {
    reg_ctor!(i8, Integer, 8);
    reg_ctor!(i16, Integer, 16);
    reg_ctor!(i32, Integer, 32);
    reg_ctor!(i64, Integer, 64);

    reg_ctor!(f32, Float, 32);
    reg_ctor!(f64, Float, 64);
}
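
// For illustration, `reg_ctor!(i32, Integer, 32)` above expands to:
//
//     pub fn i32() -> Reg {
//         Reg {
//             kind: RegKind::Integer,
//             size: Size::from_bits(32)
//         }
//     }
//
// so `Reg::i32()` describes a 32-bit integer register class.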

impl Reg {
    fn llvm_type(&self, ccx: &CrateContext) -> Type {
        match self.kind {
            RegKind::Integer => Type::ix(ccx, self.size.bits()),
            RegKind::Float => {
                match self.size.bits() {
                    32 => Type::f32(ccx),
                    64 => Type::f64(ccx),
                    _ => bug!("unsupported float: {:?}", self)
                }
            }
            RegKind::Vector => {
                Type::vector(&Type::i8(ccx), self.size.bytes())
            }
        }
    }
}

/// An argument passed entirely in registers of the
/// same kind (e.g. HFA / HVA on PPC64 and AArch64).
#[derive(Copy, Clone)]
pub struct Uniform {
    pub unit: Reg,

    /// The total size of the argument, which can be:
    /// * equal to `unit.size` (one scalar/vector),
    /// * a multiple of `unit.size` (an array of scalars/vectors),
    /// * if `unit.kind` is `Integer`, the last element
    ///   can be shorter, i.e. `{ i64, i64, i32 }` for
    ///   64-bit integers with a total size of 20 bytes.
    pub total: Size,
}

impl From<Reg> for Uniform {
    fn from(unit: Reg) -> Uniform {
        Uniform {
            unit,
            total: unit.size
        }
    }
}

impl Uniform {
    fn llvm_type(&self, ccx: &CrateContext) -> Type {
        let llunit = self.unit.llvm_type(ccx);

        if self.total <= self.unit.size {
            return llunit;
        }

        let count = self.total.bytes() / self.unit.size.bytes();
        let rem_bytes = self.total.bytes() % self.unit.size.bytes();

        if rem_bytes == 0 {
            return Type::array(&llunit, count);
        }

        // Only integers can really be split further.
        assert_eq!(self.unit.kind, RegKind::Integer);

        let args: Vec<_> = (0..count).map(|_| llunit)
            .chain(iter::once(Type::ix(ccx, rem_bytes * 8)))
            .collect();

        Type::struct_(ccx, &args, false)
    }
}
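
// Worked example (matching the `Uniform` doc comment above): a 20-byte
// argument made of 64-bit integer units,
//
//     Uniform { unit: Reg::i64(), total: Size::from_bytes(20) }
//
// gives count = 20 / 8 = 2 and rem_bytes = 4, so `llvm_type` returns the
// LLVM struct `{ i64, i64, i32 }` rather than a plain array.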

pub trait LayoutExt<'tcx> {
    fn is_aggregate(&self) -> bool;
    fn homogenous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<Reg>;
}

impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> {
    fn is_aggregate(&self) -> bool {
        match *self.layout {
            Layout::Scalar { .. } |
            Layout::RawNullablePointer { .. } |
            Layout::CEnum { .. } |
            Layout::Vector { .. } => false,

            Layout::Array { .. } |
            Layout::FatPointer { .. } |
            Layout::Univariant { .. } |
            Layout::UntaggedUnion { .. } |
            Layout::General { .. } |
            Layout::StructWrappedNullablePointer { .. } => true
        }
    }

    fn homogenous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<Reg> {
        match *self.layout {
            // The primitives for this algorithm.
            Layout::Scalar { value, .. } |
            Layout::RawNullablePointer { value, .. } => {
                let kind = match value {
                    layout::Int(_) |
                    layout::Pointer => RegKind::Integer,
                    layout::F32 |
                    layout::F64 => RegKind::Float
                };
                Some(Reg {
                    kind,
                    size: self.size(ccx)
                })
            }

            Layout::CEnum { .. } => {
                Some(Reg {
                    kind: RegKind::Integer,
                    size: self.size(ccx)
                })
            }

            Layout::Vector { .. } => {
                Some(Reg {
                    kind: RegKind::Vector,
                    size: self.size(ccx)
                })
            }

            Layout::Array { count, .. } => {
                if count > 0 {
                    self.field(ccx, 0).homogenous_aggregate(ccx)
                } else {
                    None
                }
            }

            Layout::Univariant { ref variant, .. } => {
                let mut unaligned_offset = Size::from_bytes(0);
                let mut result = None;

                for i in 0..self.field_count() {
                    if unaligned_offset != variant.offsets[i] {
                        return None;
                    }

                    let field = self.field(ccx, i);
                    match (result, field.homogenous_aggregate(ccx)) {
                        // The field itself must be a homogenous aggregate.
                        (_, None) => return None,
                        // If this is the first field, record the unit.
                        (None, Some(unit)) => {
                            result = Some(unit);
                        }
                        // For all following fields, the unit must be the same.
                        (Some(prev_unit), Some(unit)) => {
                            if prev_unit != unit {
                                return None;
                            }
                        }
                    }

                    // Keep track of the offset (without padding).
                    let size = field.size(ccx);
                    match unaligned_offset.checked_add(size, ccx) {
                        Some(offset) => unaligned_offset = offset,
                        None => return None
                    }
                }

                // There must be no trailing padding.
                if unaligned_offset != self.size(ccx) {
                    None
                } else {
                    result
                }
            }

            Layout::UntaggedUnion { .. } => {
                let mut max = Size::from_bytes(0);
                let mut result = None;

                for i in 0..self.field_count() {
                    let field = self.field(ccx, i);
                    match (result, field.homogenous_aggregate(ccx)) {
                        // The field itself must be a homogenous aggregate.
                        (_, None) => return None,
                        // If this is the first field, record the unit.
                        (None, Some(unit)) => {
                            result = Some(unit);
                        }
                        // For all following fields, the unit must be the same.
                        (Some(prev_unit), Some(unit)) => {
                            if prev_unit != unit {
                                return None;
                            }
                        }
                    }

                    // Keep track of the largest field size (all union fields
                    // start at offset 0).
                    let size = field.size(ccx);
                    if size > max {
                        max = size;
                    }
                }

                // There must be no trailing padding.
                if max != self.size(ccx) {
                    None
                } else {
                    result
                }
            }

            // Rust-specific types, which we can ignore for C ABIs.
            Layout::FatPointer { .. } |
            Layout::General { .. } |
            Layout::StructWrappedNullablePointer { .. } => None
        }
    }
}
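
// Example classification (illustrative): for a hypothetical
// `struct P { x: f32, y: f32 }`, both fields yield
// `Some(Reg { kind: Float, size: 4 bytes })`, the field offsets (0 and 4)
// match the running `unaligned_offset`, and the total size (8 bytes) leaves
// no trailing padding, so `homogenous_aggregate` returns the `f32` unit.
// Mixing `f32` with `f64`, or introducing padding, makes the units or
// offsets disagree and the result is `None`.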

pub enum CastTarget {
    Uniform(Uniform),
    Pair(Reg, Reg)
}

impl From<Reg> for CastTarget {
    fn from(unit: Reg) -> CastTarget {
        CastTarget::Uniform(Uniform::from(unit))
    }
}

impl From<Uniform> for CastTarget {
    fn from(uniform: Uniform) -> CastTarget {
        CastTarget::Uniform(uniform)
    }
}

impl CastTarget {
    fn llvm_type(&self, ccx: &CrateContext) -> Type {
        match *self {
            CastTarget::Uniform(u) => u.llvm_type(ccx),
            CastTarget::Pair(a, b) => {
                Type::struct_(ccx, &[
                    a.llvm_type(ccx),
                    b.llvm_type(ccx)
                ], false)
            }
        }
    }
}
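
// Illustrative mapping from cast targets to LLVM types:
//
//     CastTarget::from(Reg::i64())              // -> i64
//     CastTarget::from(Uniform {
//         unit: Reg::i32(),
//         total: Size::from_bytes(8)
//     })                                        // -> [2 x i32]
//     CastTarget::Pair(Reg::f64(), Reg::f64())  // -> { double, double }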

/// Information about how a specific C type
/// should be passed to or returned from a function.
///
/// This is borrowed from clang's ABIInfo.h.
#[derive(Clone, Copy, Debug)]
pub struct ArgType<'tcx> {
    kind: ArgKind,
    pub layout: TyLayout<'tcx>,
    /// Coerced LLVM type.
    pub cast: Option<Type>,
    /// Dummy argument, which is emitted before the real argument.
    pub pad: Option<Type>,
    /// LLVM attributes of the argument.
    pub attrs: ArgAttributes
}

impl<'a, 'tcx> ArgType<'tcx> {
    fn new(layout: TyLayout<'tcx>) -> ArgType<'tcx> {
        ArgType {
            kind: ArgKind::Direct,
            layout,
            cast: None,
            pad: None,
            attrs: ArgAttributes::default()
        }
    }

    pub fn make_indirect(&mut self, ccx: &CrateContext<'a, 'tcx>) {
        assert_eq!(self.kind, ArgKind::Direct);

        // Wipe old attributes, likely not valid through indirection.
        self.attrs = ArgAttributes::default();

        let llarg_sz = self.layout.size(ccx).bytes();

        // For non-immediate arguments the callee gets its own copy of
        // the value on the stack, so there are no aliases. It's also
        // program-invisible, so it can't possibly be captured.
        self.attrs.set(ArgAttribute::NoAlias)
                  .set(ArgAttribute::NoCapture)
                  .set_dereferenceable(llarg_sz);

        self.kind = ArgKind::Indirect;
    }

    pub fn ignore(&mut self) {
        assert_eq!(self.kind, ArgKind::Direct);
        self.kind = ArgKind::Ignore;
    }

    pub fn extend_integer_width_to(&mut self, bits: u64) {
        // Only integers have signedness.
        let (i, signed) = match *self.layout {
            Layout::Scalar { value, .. } => {
                match value {
                    layout::Int(i) => {
                        if self.layout.ty.is_integral() {
                            (i, self.layout.ty.is_signed())
                        } else {
                            return;
                        }
                    }
                    _ => return
                }
            }

            // Rust enum types that map onto C enums also need to follow
            // the target ABI zero-/sign-extension rules.
            Layout::CEnum { discr, signed, .. } => (discr, signed),

            _ => return
        };

        if i.size().bits() < bits {
            self.attrs.set(if signed {
                ArgAttribute::SExt
            } else {
                ArgAttribute::ZExt
            });
        }
    }
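
    // Example (illustrative): for an `i8` argument on a target whose ABI
    // extends small integers to 32 bits, a `cabi_*` backend would call
    // `arg.extend_integer_width_to(32)`; since 8 < 32 and `i8` is signed,
    // this sets `SExt`. A `u8` would get `ZExt` instead, and an integer
    // already 32 bits or wider is left untouched.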

    pub fn cast_to<T: Into<CastTarget>>(&mut self, ccx: &CrateContext, target: T) {
        self.cast = Some(target.into().llvm_type(ccx));
    }

    pub fn pad_with(&mut self, ccx: &CrateContext, reg: Reg) {
        self.pad = Some(reg.llvm_type(ccx));
    }

    pub fn is_indirect(&self) -> bool {
        self.kind == ArgKind::Indirect
    }

    pub fn is_ignore(&self) -> bool {
        self.kind == ArgKind::Ignore
    }

    /// Get the LLVM type for an lvalue of the original Rust type of
    /// this argument/return, i.e. the result of `type_of::type_of`.
    pub fn memory_ty(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
        type_of::type_of(ccx, self.layout.ty)
    }

    /// Store a direct/indirect value described by this ArgType into an
    /// lvalue for the original Rust type of this argument/return.
    /// Can be used both for storing formal arguments into Rust variables
    /// and for storing the results of call/invoke instructions into their
    /// destinations.
    pub fn store(&self, bcx: &Builder<'a, 'tcx>, mut val: ValueRef, dst: ValueRef) {
        if self.is_ignore() {
            return;
        }
        let ccx = bcx.ccx;
        if self.is_indirect() {
            let llsz = C_uint(ccx, self.layout.size(ccx).bytes());
            let llalign = self.layout.align(ccx).abi();
            base::call_memcpy(bcx, dst, val, llsz, llalign as u32);
        } else if let Some(ty) = self.cast {
            // FIXME(eddyb): Figure out when the simpler Store is safe, clang
            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
            let can_store_through_cast_ptr = false;
            if can_store_through_cast_ptr {
                let cast_dst = bcx.pointercast(dst, ty.ptr_to());
                let llalign = self.layout.align(ccx).abi();
                bcx.store(val, cast_dst, Some(llalign as u32));
            } else {
                // The actual return type is a struct, but the ABI
                // adaptation code has cast it into some scalar type. The
                // code that follows is the only reliable way I have
                // found to do a transform like i64 -> {i32,i32}.
                // Basically we dump the data onto the stack then memcpy it.
                //
                // Other approaches I tried:
                // - Casting the Rust ret pointer to the foreign type and using Store
                //   is (a) unsafe if size of foreign type > size of Rust type and
                //   (b) runs afoul of strict aliasing rules, yielding invalid
                //   assembly under -O (specifically, the store gets removed).
                // - Truncating the foreign type to the correct integral type and then
                //   bitcasting to the struct type yields invalid cast errors.

                // We instead thus allocate some scratch space...
                let llscratch = bcx.alloca(ty, "abi_cast", None);
                base::Lifetime::Start.call(bcx, llscratch);

                // ...where we first store the value...
                bcx.store(val, llscratch, None);

                // ...and then memcpy it to the intended destination.
                base::call_memcpy(bcx,
                                  bcx.pointercast(dst, Type::i8p(ccx)),
                                  bcx.pointercast(llscratch, Type::i8p(ccx)),
                                  C_uint(ccx, self.layout.size(ccx).bytes()),
                                  cmp::min(self.layout.align(ccx).abi() as u32,
                                           llalign_of_min(ccx, ty)));

                base::Lifetime::End.call(bcx, llscratch);
            }
        } else {
            if self.layout.ty == ccx.tcx().types.bool {
                val = bcx.zext(val, Type::i8(ccx));
            }
            bcx.store(val, dst, None);
        }
    }

    pub fn store_fn_arg(&self, bcx: &Builder<'a, 'tcx>, idx: &mut usize, dst: ValueRef) {
        if self.pad.is_some() {
            *idx += 1;
        }
        if self.is_ignore() {
            return;
        }
        let val = llvm::get_param(bcx.llfn(), *idx as c_uint);
        *idx += 1;
        self.store(bcx, val, dst);
    }
}
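
// A sketch (illustrative; `fn_ty`, `bcx`, `dst0` and `dst1` are hypothetical
// names) of how `store_fn_arg` advances the LLVM parameter index: `idx`
// counts LLVM parameters, which need not line up with Rust argument positions
// because of padding arguments. Assuming the first argument has a pad type
// and the second is ignored without padding:
//
//     let mut idx = 0;
//     fn_ty.args[0].store_fn_arg(bcx, &mut idx, dst0);
//     // skips the pad at parameter 0, stores parameter 1; idx is now 2
//     fn_ty.args[1].store_fn_arg(bcx, &mut idx, dst1);
//     // ignored and padless: nothing is stored and idx stays 2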

/// Metadata describing how the arguments to a native function
/// should be passed in order to respect the native ABI.
///
/// I will do my best to describe this structure, but these
/// comments are reverse-engineered and may be inaccurate. -NDM
#[derive(Clone, Debug)]
pub struct FnType<'tcx> {
    /// The LLVM types of each argument.
    pub args: Vec<ArgType<'tcx>>,

    /// LLVM return type.
    pub ret: ArgType<'tcx>,

    pub variadic: bool,

    pub cconv: llvm::CallConv
}

impl<'a, 'tcx> FnType<'tcx> {
    pub fn new(ccx: &CrateContext<'a, 'tcx>,
               sig: ty::FnSig<'tcx>,
               extra_args: &[Ty<'tcx>]) -> FnType<'tcx> {
        let mut fn_ty = FnType::unadjusted(ccx, sig, extra_args);
        fn_ty.adjust_for_abi(ccx, sig);
        fn_ty
    }

    pub fn new_vtable(ccx: &CrateContext<'a, 'tcx>,
                      sig: ty::FnSig<'tcx>,
                      extra_args: &[Ty<'tcx>]) -> FnType<'tcx> {
        let mut fn_ty = FnType::unadjusted(ccx, sig, extra_args);
        // Don't pass the vtable, it's not an argument of the virtual fn.
        fn_ty.args[1].ignore();
        fn_ty.adjust_for_abi(ccx, sig);
        fn_ty
    }

    pub fn unadjusted(ccx: &CrateContext<'a, 'tcx>,
                      sig: ty::FnSig<'tcx>,
                      extra_args: &[Ty<'tcx>]) -> FnType<'tcx> {
        use self::Abi::*;
        let cconv = match ccx.sess().target.target.adjust_abi(sig.abi) {
            RustIntrinsic | PlatformIntrinsic |
            Rust | RustCall => llvm::CCallConv,

            // It's the ABI's job to select this, not us.
            System => bug!("system abi should be selected elsewhere"),

            Stdcall => llvm::X86StdcallCallConv,
            Fastcall => llvm::X86FastcallCallConv,
            Vectorcall => llvm::X86_VectorCall,
            C => llvm::CCallConv,
            Unadjusted => llvm::CCallConv,
            Win64 => llvm::X86_64_Win64,
            SysV64 => llvm::X86_64_SysV,
            Aapcs => llvm::ArmAapcsCallConv,
            PtxKernel => llvm::PtxKernel,
            Msp430Interrupt => llvm::Msp430Intr,
            X86Interrupt => llvm::X86_Intr,

            // These API constants ought to be more specific...
            Cdecl => llvm::CCallConv,
        };

        let mut inputs = sig.inputs();
        let extra_args = if sig.abi == RustCall {
            assert!(!sig.variadic && extra_args.is_empty());

            match sig.inputs().last().unwrap().sty {
                ty::TyTuple(ref tupled_arguments, _) => {
                    inputs = &sig.inputs()[0..sig.inputs().len() - 1];
                    tupled_arguments
                }
                _ => {
                    bug!("argument to function with \"rust-call\" ABI \
                          is not a tuple");
                }
            }
        } else {
            assert!(sig.variadic || extra_args.is_empty());
            extra_args
        };

        let target = &ccx.sess().target.target;
        let win_x64_gnu = target.target_os == "windows"
                          && target.arch == "x86_64"
                          && target.target_env == "gnu";
        let linux_s390x = target.target_os == "linux"
                          && target.arch == "s390x"
                          && target.target_env == "gnu";
        let rust_abi = match sig.abi {
            RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
            _ => false
        };

        let arg_of = |ty: Ty<'tcx>, is_return: bool| {
            let mut arg = ArgType::new(ccx.layout_of(ty));
            if ty.is_bool() {
                arg.attrs.set(ArgAttribute::ZExt);
            } else {
                if arg.layout.size(ccx).bytes() == 0 {
                    // For some forsaken reason, x86_64-pc-windows-gnu
                    // doesn't ignore zero-sized struct arguments.
                    // The same is true for s390x-unknown-linux-gnu.
                    if is_return || rust_abi ||
                       (!win_x64_gnu && !linux_s390x) {
                        arg.ignore();
                    }
                }
            }
            arg
        };

        let ret_ty = sig.output();
        let mut ret = arg_of(ret_ty, true);

        if !type_is_fat_ptr(ccx, ret_ty) {
            // The `noalias` attribute on the return value is useful to a
            // function ptr caller.
            if ret_ty.is_box() {
                // `Box` pointer return values never alias because ownership
                // is transferred.
                ret.attrs.set(ArgAttribute::NoAlias);
            }

            // We can also mark the return value as `dereferenceable` in certain cases.
            match ret_ty.sty {
                // These are not really pointers but pairs, (pointer, len).
                ty::TyRef(_, ty::TypeAndMut { ty, .. }) => {
                    ret.attrs.set_dereferenceable(ccx.size_of(ty));
                }
                ty::TyAdt(def, _) if def.is_box() => {
                    ret.attrs.set_dereferenceable(ccx.size_of(ret_ty.boxed_ty()));
                }
                _ => {}
            }
        }

        let mut args = Vec::with_capacity(inputs.len() + extra_args.len());

        // Handle safe Rust thin and fat pointers.
        let rust_ptr_attrs = |ty: Ty<'tcx>, arg: &mut ArgType| match ty.sty {
            // `Box` pointer parameters never alias because ownership is transferred.
            ty::TyAdt(def, _) if def.is_box() => {
                arg.attrs.set(ArgAttribute::NoAlias);
                Some(ty.boxed_ty())
            }

            ty::TyRef(b, mt) => {
                use rustc::ty::{BrAnon, ReLateBound};

                // `&mut` pointer parameters never alias other parameters, or mutable global data.
                //
                // `&T` where `T` contains no `UnsafeCell<U>` is immutable, and can be marked as
                // both `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely
                // on memory dependencies rather than pointer equality.
                let is_freeze = ccx.shared().type_is_freeze(mt.ty);

                if mt.mutbl != hir::MutMutable && is_freeze {
                    arg.attrs.set(ArgAttribute::NoAlias);
                }

                if mt.mutbl == hir::MutImmutable && is_freeze {
                    arg.attrs.set(ArgAttribute::ReadOnly);
                }

                // When a reference in an argument has no named lifetime, it's
                // impossible for that reference to escape this function
                // (returned or stored beyond the call by a closure).
                if let ReLateBound(_, BrAnon(_)) = *b {
                    arg.attrs.set(ArgAttribute::NoCapture);
                }

                Some(mt.ty)
            }
            _ => None
        };

        for ty in inputs.iter().chain(extra_args.iter()) {
            let mut arg = arg_of(ty, false);

            if let ty::layout::FatPointer { .. } = *arg.layout {
                let mut data = ArgType::new(arg.layout.field(ccx, 0));
                let mut info = ArgType::new(arg.layout.field(ccx, 1));

                if let Some(inner) = rust_ptr_attrs(ty, &mut data) {
                    data.attrs.set(ArgAttribute::NonNull);
                    if ccx.tcx().struct_tail(inner).is_trait() {
                        // vtables can be safely marked non-null, readonly
                        // and noalias.
                        info.attrs.set(ArgAttribute::NonNull);
                        info.attrs.set(ArgAttribute::ReadOnly);
                        info.attrs.set(ArgAttribute::NoAlias);
                    }
                }
                args.push(data);
                args.push(info);
            } else {
                if let Some(inner) = rust_ptr_attrs(ty, &mut arg) {
                    arg.attrs.set_dereferenceable(ccx.size_of(inner));
                }
                args.push(arg);
            }
        }

        FnType {
            args,
            ret,
            variadic: sig.variadic,
            cconv
        }
    }
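
    // Example (illustrative) of the "rust-call" untupling above: a signature
    // along the lines of
    //
    //     extern "rust-call" fn call(self, args: (i32, f64)) -> i32
    //
    // reaches `unadjusted` with inputs `[Self, (i32, f64)]`; the trailing
    // tuple is untupled, so the resulting `FnType` describes the argument
    // list `[Self, i32, f64]`.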

    fn adjust_for_abi(&mut self,
                      ccx: &CrateContext<'a, 'tcx>,
                      sig: ty::FnSig<'tcx>) {
        let abi = sig.abi;
        if abi == Abi::Unadjusted { return }

        if abi == Abi::Rust || abi == Abi::RustCall ||
           abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic {
            let fixup = |arg: &mut ArgType<'tcx>| {
                if !arg.layout.is_aggregate() {
                    return;
                }

                let size = arg.layout.size(ccx);

                if let Some(unit) = arg.layout.homogenous_aggregate(ccx) {
                    // Replace newtypes with their innermost type.
                    if unit.size == size {
                        // Needs a cast as we've unpacked a newtype.
                        arg.cast_to(ccx, unit);
                        return;
                    }

                    // Pairs of floats.
                    if unit.kind == RegKind::Float {
                        if unit.size.checked_mul(2, ccx) == Some(size) {
                            // FIXME(eddyb) This should be using Uniform instead of a pair,
                            // but the resulting [2 x float/double] breaks emscripten.
                            // See https://github.com/kripken/emscripten-fastcomp/issues/178.
                            arg.cast_to(ccx, CastTarget::Pair(unit, unit));
                            return;
                        }
                    }
                }

                if size > layout::Pointer.size(ccx) {
                    arg.make_indirect(ccx);
                } else {
                    // We want to pass small aggregates as immediates, but using
                    // an LLVM aggregate type for this leads to bad optimizations,
                    // so we pick an appropriately sized integer type instead.
                    arg.cast_to(ccx, Reg {
                        kind: RegKind::Integer,
                        size
                    });
                }
            };
            // Fat pointers are returned by-value.
            if !self.ret.is_ignore() {
                if !type_is_fat_ptr(ccx, sig.output()) {
                    fixup(&mut self.ret);
                }
            }
            for arg in &mut self.args {
                if arg.is_ignore() { continue; }
                fixup(arg);
            }
            if self.ret.is_indirect() {
                self.ret.attrs.set(ArgAttribute::StructRet);
            }
            return;
        }

        match &ccx.sess().target.target.arch[..] {
            "x86" => {
                let flavor = if abi == Abi::Fastcall {
                    cabi_x86::Flavor::Fastcall
                } else {
                    cabi_x86::Flavor::General
                };
                cabi_x86::compute_abi_info(ccx, self, flavor);
            },
            "x86_64" => if abi == Abi::SysV64 {
                cabi_x86_64::compute_abi_info(ccx, self);
            } else if abi == Abi::Win64 || ccx.sess().target.target.options.is_like_windows {
                cabi_x86_win64::compute_abi_info(ccx, self);
            } else {
                cabi_x86_64::compute_abi_info(ccx, self);
            },
            "aarch64" => cabi_aarch64::compute_abi_info(ccx, self),
            "arm" => cabi_arm::compute_abi_info(ccx, self),
            "mips" => cabi_mips::compute_abi_info(ccx, self),
            "mips64" => cabi_mips64::compute_abi_info(ccx, self),
            "powerpc" => cabi_powerpc::compute_abi_info(ccx, self),
            "powerpc64" => cabi_powerpc64::compute_abi_info(ccx, self),
            "s390x" => cabi_s390x::compute_abi_info(ccx, self),
            "asmjs" => cabi_asmjs::compute_abi_info(ccx, self),
            "wasm32" => cabi_asmjs::compute_abi_info(ccx, self),
            "msp430" => cabi_msp430::compute_abi_info(ccx, self),
            "sparc" => cabi_sparc::compute_abi_info(ccx, self),
            "sparc64" => cabi_sparc64::compute_abi_info(ccx, self),
            "nvptx" => cabi_nvptx::compute_abi_info(ccx, self),
            "nvptx64" => cabi_nvptx64::compute_abi_info(ccx, self),
            a => ccx.sess().fatal(&format!("unrecognized arch \"{}\" in target specification", a))
        }

        if self.ret.is_indirect() {
            self.ret.attrs.set(ArgAttribute::StructRet);
        }
    }
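
    // Example (illustrative) of the Rust-ABI `fixup` above on x86_64: a
    // hypothetical `struct S(u8, u8)` is a 2-byte aggregate whose homogeneous
    // unit is a 1-byte integer, so neither the newtype nor the float-pair
    // case applies; 2 bytes is no larger than a pointer, so it is cast to
    // `Reg { kind: RegKind::Integer, size: 2 bytes }` and passed as an LLVM
    // `i16`. A 24-byte struct would instead be passed indirectly via
    // `make_indirect`.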

    pub fn llvm_type(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
        let mut llargument_tys = Vec::new();

        let llreturn_ty = if self.ret.is_ignore() {
            Type::void(ccx)
        } else if self.ret.is_indirect() {
            llargument_tys.push(self.ret.memory_ty(ccx).ptr_to());
            Type::void(ccx)
        } else {
            self.ret.cast.unwrap_or_else(|| {
                type_of::immediate_type_of(ccx, self.ret.layout.ty)
            })
        };

        for arg in &self.args {
            if arg.is_ignore() {
                continue;
            }
            // Add the padding argument, if any.
            if let Some(ty) = arg.pad {
                llargument_tys.push(ty);
            }

            let llarg_ty = if arg.is_indirect() {
                arg.memory_ty(ccx).ptr_to()
            } else {
                arg.cast.unwrap_or_else(|| {
                    type_of::immediate_type_of(ccx, arg.layout.ty)
                })
            };

            llargument_tys.push(llarg_ty);
        }

        if self.variadic {
            Type::variadic_func(&llargument_tys, &llreturn_ty)
        } else {
            Type::func(&llargument_tys, &llreturn_ty)
        }
    }

    pub fn apply_attrs_llfn(&self, llfn: ValueRef) {
        let mut i = if self.ret.is_indirect() { 1 } else { 0 };
        if !self.ret.is_ignore() {
            self.ret.attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn);
        }
        i += 1;
        for arg in &self.args {
            if !arg.is_ignore() {
                if arg.pad.is_some() { i += 1; }
                arg.attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn);
                i += 1;
            }
        }
    }

    pub fn apply_attrs_callsite(&self, callsite: ValueRef) {
        let mut i = if self.ret.is_indirect() { 1 } else { 0 };
        if !self.ret.is_ignore() {
            self.ret.attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite);
        }
        i += 1;
        for arg in &self.args {
            if !arg.is_ignore() {
                if arg.pad.is_some() { i += 1; }
                arg.attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite);
                i += 1;
            }
        }

        if self.cconv != llvm::CCallConv {
            llvm::SetInstructionCallConv(callsite, self.cconv);
        }
    }
}

/// Rounds `off` up to the next multiple of `a`.
pub fn align_up_to(off: u64, a: u64) -> u64 {
    (off + a - 1) / a * a
}
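
// Worked example (illustrative): `align_up_to(13, 8)` computes
// (13 + 8 - 1) / 8 * 8 = 20 / 8 * 8 = 16, rounding 13 up to the next
// multiple of 8; already-aligned offsets are unchanged, e.g.
// `align_up_to(16, 8) == 16`. The truncating integer division is what makes
// the formula work; `a` is assumed nonzero (in practice an ABI alignment,
// a power of two).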