// librustc_trans/abi.rs — git-blame export header removed; line content below is the source itself.
1 | // Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT |
2 | // file at the top-level directory of this distribution and at | |
3 | // http://rust-lang.org/COPYRIGHT. | |
4 | // | |
5 | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or | |
6 | // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license | |
7 | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your | |
8 | // option. This file may not be copied, modified, or distributed | |
9 | // except according to those terms. | |
10 | ||
11 | use llvm::{self, ValueRef}; | |
12 | use base; | |
a7813a04 XL |
13 | use build::AllocaFcx; |
14 | use common::{type_is_fat_ptr, BlockAndBuilder, C_uint}; | |
54a0048b SL |
15 | use context::CrateContext; |
16 | use cabi_x86; | |
17 | use cabi_x86_64; | |
18 | use cabi_x86_win64; | |
19 | use cabi_arm; | |
20 | use cabi_aarch64; | |
21 | use cabi_powerpc; | |
22 | use cabi_powerpc64; | |
23 | use cabi_mips; | |
24 | use cabi_asmjs; | |
a7813a04 | 25 | use machine::{llalign_of_min, llsize_of, llsize_of_real, llsize_of_store}; |
54a0048b SL |
26 | use type_::Type; |
27 | use type_of; | |
28 | ||
29 | use rustc::hir; | |
30 | use rustc::ty::{self, Ty}; | |
31 | ||
32 | use libc::c_uint; | |
a7813a04 | 33 | use std::cmp; |
54a0048b SL |
34 | |
35 | pub use syntax::abi::Abi; | |
36 | pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA}; | |
37 | ||
#[derive(Clone, Copy, PartialEq, Debug)]
enum ArgKind {
    /// The argument is passed as-is, either with its normal converted
    /// LLVM type or coerced into some other specified type.
    Direct,
    /// The argument is passed behind a hidden pointer.
    Indirect,
    /// The argument is skipped entirely (useful for zero-sized structs).
    Ignore,
}
48 | ||
49 | /// Information about how a specific C type | |
50 | /// should be passed to or returned from a function | |
51 | /// | |
52 | /// This is borrowed from clang's ABIInfo.h | |
53 | #[derive(Clone, Copy, Debug)] | |
54 | pub struct ArgType { | |
55 | kind: ArgKind, | |
56 | /// Original LLVM type | |
57 | pub original_ty: Type, | |
58 | /// Sizing LLVM type (pointers are opaque). | |
59 | /// Unlike original_ty, this is guaranteed to be complete. | |
60 | /// | |
61 | /// For example, while we're computing the function pointer type in | |
62 | /// `struct Foo(fn(Foo));`, `original_ty` is still LLVM's `%Foo = {}`. | |
63 | /// The field type will likely end up being `void(%Foo)*`, but we cannot | |
64 | /// use `%Foo` to compute properties (e.g. size and alignment) of `Foo`, | |
65 | /// until `%Foo` is completed by having all of its field types inserted, | |
66 | /// so `ty` holds the "sizing type" of `Foo`, which replaces all pointers | |
67 | /// with opaque ones, resulting in `{i8*}` for `Foo`. | |
68 | /// ABI-specific logic can then look at the size, alignment and fields of | |
69 | /// `{i8*}` in order to determine how the argument will be passed. | |
70 | /// Only later will `original_ty` aka `%Foo` be used in the LLVM function | |
71 | /// pointer type, without ever having introspected it. | |
72 | pub ty: Type, | |
73 | /// Signedness for integer types, None for other types | |
74 | pub signedness: Option<bool>, | |
75 | /// Coerced LLVM Type | |
76 | pub cast: Option<Type>, | |
77 | /// Dummy argument, which is emitted before the real argument | |
78 | pub pad: Option<Type>, | |
79 | /// LLVM attributes of argument | |
80 | pub attrs: llvm::Attributes | |
81 | } | |
82 | ||
83 | impl ArgType { | |
84 | fn new(original_ty: Type, ty: Type) -> ArgType { | |
85 | ArgType { | |
86 | kind: ArgKind::Direct, | |
87 | original_ty: original_ty, | |
88 | ty: ty, | |
89 | signedness: None, | |
90 | cast: None, | |
91 | pad: None, | |
92 | attrs: llvm::Attributes::default() | |
93 | } | |
94 | } | |
95 | ||
96 | pub fn make_indirect(&mut self, ccx: &CrateContext) { | |
97 | assert_eq!(self.kind, ArgKind::Direct); | |
98 | ||
99 | // Wipe old attributes, likely not valid through indirection. | |
100 | self.attrs = llvm::Attributes::default(); | |
101 | ||
102 | let llarg_sz = llsize_of_real(ccx, self.ty); | |
103 | ||
104 | // For non-immediate arguments the callee gets its own copy of | |
105 | // the value on the stack, so there are no aliases. It's also | |
106 | // program-invisible so can't possibly capture | |
107 | self.attrs.set(llvm::Attribute::NoAlias) | |
108 | .set(llvm::Attribute::NoCapture) | |
109 | .set_dereferenceable(llarg_sz); | |
110 | ||
111 | self.kind = ArgKind::Indirect; | |
112 | } | |
113 | ||
114 | pub fn ignore(&mut self) { | |
115 | assert_eq!(self.kind, ArgKind::Direct); | |
116 | self.kind = ArgKind::Ignore; | |
117 | } | |
118 | ||
119 | pub fn extend_integer_width_to(&mut self, bits: u64) { | |
120 | // Only integers have signedness | |
121 | if let Some(signed) = self.signedness { | |
122 | if self.ty.int_width() < bits { | |
123 | self.attrs.set(if signed { | |
124 | llvm::Attribute::SExt | |
125 | } else { | |
126 | llvm::Attribute::ZExt | |
127 | }); | |
128 | } | |
129 | } | |
130 | } | |
131 | ||
132 | pub fn is_indirect(&self) -> bool { | |
133 | self.kind == ArgKind::Indirect | |
134 | } | |
135 | ||
136 | pub fn is_ignore(&self) -> bool { | |
137 | self.kind == ArgKind::Ignore | |
138 | } | |
139 | ||
140 | /// Get the LLVM type for an lvalue of the original Rust type of | |
141 | /// this argument/return, i.e. the result of `type_of::type_of`. | |
142 | pub fn memory_ty(&self, ccx: &CrateContext) -> Type { | |
143 | if self.original_ty == Type::i1(ccx) { | |
144 | Type::i8(ccx) | |
145 | } else { | |
146 | self.original_ty | |
147 | } | |
148 | } | |
149 | ||
150 | /// Store a direct/indirect value described by this ArgType into a | |
151 | /// lvalue for the original Rust type of this argument/return. | |
152 | /// Can be used for both storing formal arguments into Rust variables | |
153 | /// or results of call/invoke instructions into their destinations. | |
a7813a04 | 154 | pub fn store(&self, bcx: &BlockAndBuilder, mut val: ValueRef, dst: ValueRef) { |
54a0048b SL |
155 | if self.is_ignore() { |
156 | return; | |
157 | } | |
a7813a04 | 158 | let ccx = bcx.ccx(); |
54a0048b | 159 | if self.is_indirect() { |
a7813a04 XL |
160 | let llsz = llsize_of(ccx, self.ty); |
161 | let llalign = llalign_of_min(ccx, self.ty); | |
162 | base::call_memcpy(bcx, dst, val, llsz, llalign as u32); | |
54a0048b | 163 | } else if let Some(ty) = self.cast { |
a7813a04 XL |
164 | // FIXME(eddyb): Figure out when the simpler Store is safe, clang |
165 | // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}. | |
166 | let can_store_through_cast_ptr = false; | |
167 | if can_store_through_cast_ptr { | |
168 | let cast_dst = bcx.pointercast(dst, ty.ptr_to()); | |
169 | let store = bcx.store(val, cast_dst); | |
170 | let llalign = llalign_of_min(ccx, self.ty); | |
171 | unsafe { | |
172 | llvm::LLVMSetAlignment(store, llalign); | |
173 | } | |
174 | } else { | |
175 | // The actual return type is a struct, but the ABI | |
176 | // adaptation code has cast it into some scalar type. The | |
177 | // code that follows is the only reliable way I have | |
178 | // found to do a transform like i64 -> {i32,i32}. | |
179 | // Basically we dump the data onto the stack then memcpy it. | |
180 | // | |
181 | // Other approaches I tried: | |
182 | // - Casting rust ret pointer to the foreign type and using Store | |
183 | // is (a) unsafe if size of foreign type > size of rust type and | |
184 | // (b) runs afoul of strict aliasing rules, yielding invalid | |
185 | // assembly under -O (specifically, the store gets removed). | |
186 | // - Truncating foreign type to correct integral type and then | |
187 | // bitcasting to the struct type yields invalid cast errors. | |
188 | ||
189 | // We instead thus allocate some scratch space... | |
190 | let llscratch = AllocaFcx(bcx.fcx(), ty, "abi_cast"); | |
191 | base::Lifetime::Start.call(bcx, llscratch); | |
192 | ||
193 | // ...where we first store the value... | |
194 | bcx.store(val, llscratch); | |
195 | ||
196 | // ...and then memcpy it to the intended destination. | |
197 | base::call_memcpy(bcx, | |
198 | bcx.pointercast(dst, Type::i8p(ccx)), | |
199 | bcx.pointercast(llscratch, Type::i8p(ccx)), | |
200 | C_uint(ccx, llsize_of_store(ccx, self.ty)), | |
201 | cmp::min(llalign_of_min(ccx, self.ty), | |
202 | llalign_of_min(ccx, ty)) as u32); | |
203 | ||
204 | base::Lifetime::End.call(bcx, llscratch); | |
54a0048b SL |
205 | } |
206 | } else { | |
a7813a04 XL |
207 | if self.original_ty == Type::i1(ccx) { |
208 | val = bcx.zext(val, Type::i8(ccx)); | |
54a0048b | 209 | } |
a7813a04 | 210 | bcx.store(val, dst); |
54a0048b SL |
211 | } |
212 | } | |
213 | ||
214 | pub fn store_fn_arg(&self, bcx: &BlockAndBuilder, idx: &mut usize, dst: ValueRef) { | |
215 | if self.pad.is_some() { | |
216 | *idx += 1; | |
217 | } | |
218 | if self.is_ignore() { | |
219 | return; | |
220 | } | |
221 | let val = llvm::get_param(bcx.fcx().llfn, *idx as c_uint); | |
222 | *idx += 1; | |
223 | self.store(bcx, val, dst); | |
224 | } | |
225 | } | |
226 | ||
227 | /// Metadata describing how the arguments to a native function | |
228 | /// should be passed in order to respect the native ABI. | |
229 | /// | |
230 | /// I will do my best to describe this structure, but these | |
231 | /// comments are reverse-engineered and may be inaccurate. -NDM | |
5bcae85e | 232 | #[derive(Clone)] |
54a0048b SL |
233 | pub struct FnType { |
234 | /// The LLVM types of each argument. | |
235 | pub args: Vec<ArgType>, | |
236 | ||
237 | /// LLVM return type. | |
238 | pub ret: ArgType, | |
239 | ||
240 | pub variadic: bool, | |
241 | ||
242 | pub cconv: llvm::CallConv | |
243 | } | |
244 | ||
245 | impl FnType { | |
246 | pub fn new<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, | |
247 | abi: Abi, | |
248 | sig: &ty::FnSig<'tcx>, | |
249 | extra_args: &[Ty<'tcx>]) -> FnType { | |
250 | let mut fn_ty = FnType::unadjusted(ccx, abi, sig, extra_args); | |
251 | fn_ty.adjust_for_abi(ccx, abi, sig); | |
252 | fn_ty | |
253 | } | |
254 | ||
255 | pub fn unadjusted<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, | |
256 | abi: Abi, | |
257 | sig: &ty::FnSig<'tcx>, | |
258 | extra_args: &[Ty<'tcx>]) -> FnType { | |
259 | use self::Abi::*; | |
260 | let cconv = match ccx.sess().target.target.adjust_abi(abi) { | |
261 | RustIntrinsic | PlatformIntrinsic | | |
262 | Rust | RustCall => llvm::CCallConv, | |
263 | ||
264 | // It's the ABI's job to select this, not us. | |
265 | System => bug!("system abi should be selected elsewhere"), | |
266 | ||
267 | Stdcall => llvm::X86StdcallCallConv, | |
268 | Fastcall => llvm::X86FastcallCallConv, | |
269 | Vectorcall => llvm::X86_VectorCall, | |
270 | C => llvm::CCallConv, | |
271 | Win64 => llvm::X86_64_Win64, | |
272 | ||
273 | // These API constants ought to be more specific... | |
274 | Cdecl => llvm::CCallConv, | |
275 | Aapcs => llvm::CCallConv, | |
276 | }; | |
277 | ||
278 | let mut inputs = &sig.inputs[..]; | |
279 | let extra_args = if abi == RustCall { | |
280 | assert!(!sig.variadic && extra_args.is_empty()); | |
281 | ||
282 | match inputs[inputs.len() - 1].sty { | |
283 | ty::TyTuple(ref tupled_arguments) => { | |
284 | inputs = &inputs[..inputs.len() - 1]; | |
285 | &tupled_arguments[..] | |
286 | } | |
287 | _ => { | |
288 | bug!("argument to function with \"rust-call\" ABI \ | |
289 | is not a tuple"); | |
290 | } | |
291 | } | |
292 | } else { | |
293 | assert!(sig.variadic || extra_args.is_empty()); | |
294 | extra_args | |
295 | }; | |
296 | ||
297 | let target = &ccx.sess().target.target; | |
298 | let win_x64_gnu = target.target_os == "windows" | |
299 | && target.arch == "x86_64" | |
300 | && target.target_env == "gnu"; | |
301 | let rust_abi = match abi { | |
302 | RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true, | |
303 | _ => false | |
304 | }; | |
305 | ||
306 | let arg_of = |ty: Ty<'tcx>, is_return: bool| { | |
307 | if ty.is_bool() { | |
308 | let llty = Type::i1(ccx); | |
309 | let mut arg = ArgType::new(llty, llty); | |
310 | arg.attrs.set(llvm::Attribute::ZExt); | |
311 | arg | |
312 | } else { | |
313 | let mut arg = ArgType::new(type_of::type_of(ccx, ty), | |
314 | type_of::sizing_type_of(ccx, ty)); | |
315 | if ty.is_integral() { | |
316 | arg.signedness = Some(ty.is_signed()); | |
317 | } | |
318 | if llsize_of_real(ccx, arg.ty) == 0 { | |
319 | // For some forsaken reason, x86_64-pc-windows-gnu | |
320 | // doesn't ignore zero-sized struct arguments. | |
321 | if is_return || rust_abi || !win_x64_gnu { | |
322 | arg.ignore(); | |
323 | } | |
324 | } | |
325 | arg | |
326 | } | |
327 | }; | |
328 | ||
5bcae85e | 329 | let ret_ty = sig.output; |
54a0048b SL |
330 | let mut ret = arg_of(ret_ty, true); |
331 | ||
332 | if !type_is_fat_ptr(ccx.tcx(), ret_ty) { | |
333 | // The `noalias` attribute on the return value is useful to a | |
334 | // function ptr caller. | |
335 | if let ty::TyBox(_) = ret_ty.sty { | |
336 | // `Box` pointer return values never alias because ownership | |
337 | // is transferred | |
338 | ret.attrs.set(llvm::Attribute::NoAlias); | |
339 | } | |
340 | ||
341 | // We can also mark the return value as `dereferenceable` in certain cases | |
342 | match ret_ty.sty { | |
343 | // These are not really pointers but pairs, (pointer, len) | |
344 | ty::TyRef(_, ty::TypeAndMut { ty, .. }) | | |
345 | ty::TyBox(ty) => { | |
346 | let llty = type_of::sizing_type_of(ccx, ty); | |
347 | let llsz = llsize_of_real(ccx, llty); | |
348 | ret.attrs.set_dereferenceable(llsz); | |
349 | } | |
350 | _ => {} | |
351 | } | |
352 | } | |
353 | ||
354 | let mut args = Vec::with_capacity(inputs.len() + extra_args.len()); | |
355 | ||
356 | // Handle safe Rust thin and fat pointers. | |
357 | let rust_ptr_attrs = |ty: Ty<'tcx>, arg: &mut ArgType| match ty.sty { | |
358 | // `Box` pointer parameters never alias because ownership is transferred | |
359 | ty::TyBox(inner) => { | |
360 | arg.attrs.set(llvm::Attribute::NoAlias); | |
361 | Some(inner) | |
362 | } | |
363 | ||
364 | ty::TyRef(b, mt) => { | |
365 | use rustc::ty::{BrAnon, ReLateBound}; | |
366 | ||
367 | // `&mut` pointer parameters never alias other parameters, or mutable global data | |
368 | // | |
369 | // `&T` where `T` contains no `UnsafeCell<U>` is immutable, and can be marked as | |
370 | // both `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely | |
371 | // on memory dependencies rather than pointer equality | |
372 | let interior_unsafe = mt.ty.type_contents(ccx.tcx()).interior_unsafe(); | |
373 | ||
374 | if mt.mutbl != hir::MutMutable && !interior_unsafe { | |
375 | arg.attrs.set(llvm::Attribute::NoAlias); | |
376 | } | |
377 | ||
378 | if mt.mutbl == hir::MutImmutable && !interior_unsafe { | |
379 | arg.attrs.set(llvm::Attribute::ReadOnly); | |
380 | } | |
381 | ||
382 | // When a reference in an argument has no named lifetime, it's | |
383 | // impossible for that reference to escape this function | |
384 | // (returned or stored beyond the call by a closure). | |
385 | if let ReLateBound(_, BrAnon(_)) = *b { | |
386 | arg.attrs.set(llvm::Attribute::NoCapture); | |
387 | } | |
388 | ||
389 | Some(mt.ty) | |
390 | } | |
391 | _ => None | |
392 | }; | |
393 | ||
394 | for ty in inputs.iter().chain(extra_args.iter()) { | |
395 | let mut arg = arg_of(ty, false); | |
396 | ||
397 | if type_is_fat_ptr(ccx.tcx(), ty) { | |
398 | let original_tys = arg.original_ty.field_types(); | |
399 | let sizing_tys = arg.ty.field_types(); | |
400 | assert_eq!((original_tys.len(), sizing_tys.len()), (2, 2)); | |
401 | ||
402 | let mut data = ArgType::new(original_tys[0], sizing_tys[0]); | |
403 | let mut info = ArgType::new(original_tys[1], sizing_tys[1]); | |
404 | ||
405 | if let Some(inner) = rust_ptr_attrs(ty, &mut data) { | |
406 | data.attrs.set(llvm::Attribute::NonNull); | |
407 | if ccx.tcx().struct_tail(inner).is_trait() { | |
408 | info.attrs.set(llvm::Attribute::NonNull); | |
409 | } | |
410 | } | |
411 | args.push(data); | |
412 | args.push(info); | |
413 | } else { | |
414 | if let Some(inner) = rust_ptr_attrs(ty, &mut arg) { | |
415 | let llty = type_of::sizing_type_of(ccx, inner); | |
416 | let llsz = llsize_of_real(ccx, llty); | |
417 | arg.attrs.set_dereferenceable(llsz); | |
418 | } | |
419 | args.push(arg); | |
420 | } | |
421 | } | |
422 | ||
423 | FnType { | |
424 | args: args, | |
425 | ret: ret, | |
426 | variadic: sig.variadic, | |
427 | cconv: cconv | |
428 | } | |
429 | } | |
430 | ||
431 | pub fn adjust_for_abi<'a, 'tcx>(&mut self, | |
432 | ccx: &CrateContext<'a, 'tcx>, | |
433 | abi: Abi, | |
434 | sig: &ty::FnSig<'tcx>) { | |
435 | if abi == Abi::Rust || abi == Abi::RustCall || | |
436 | abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic { | |
437 | let fixup = |arg: &mut ArgType| { | |
438 | let mut llty = arg.ty; | |
439 | ||
440 | // Replace newtypes with their inner-most type. | |
441 | while llty.kind() == llvm::TypeKind::Struct { | |
442 | let inner = llty.field_types(); | |
443 | if inner.len() != 1 { | |
444 | break; | |
445 | } | |
446 | llty = inner[0]; | |
447 | } | |
448 | ||
449 | if !llty.is_aggregate() { | |
450 | // Scalars and vectors, always immediate. | |
451 | if llty != arg.ty { | |
452 | // Needs a cast as we've unpacked a newtype. | |
453 | arg.cast = Some(llty); | |
454 | } | |
455 | return; | |
456 | } | |
457 | ||
458 | let size = llsize_of_real(ccx, llty); | |
459 | if size > llsize_of_real(ccx, ccx.int_type()) { | |
460 | arg.make_indirect(ccx); | |
461 | } else if size > 0 { | |
462 | // We want to pass small aggregates as immediates, but using | |
463 | // a LLVM aggregate type for this leads to bad optimizations, | |
464 | // so we pick an appropriately sized integer type instead. | |
465 | arg.cast = Some(Type::ix(ccx, size * 8)); | |
466 | } | |
467 | }; | |
468 | // Fat pointers are returned by-value. | |
469 | if !self.ret.is_ignore() { | |
5bcae85e | 470 | if !type_is_fat_ptr(ccx.tcx(), sig.output) { |
54a0048b SL |
471 | fixup(&mut self.ret); |
472 | } | |
473 | } | |
474 | for arg in &mut self.args { | |
475 | if arg.is_ignore() { continue; } | |
476 | fixup(arg); | |
477 | } | |
478 | if self.ret.is_indirect() { | |
479 | self.ret.attrs.set(llvm::Attribute::StructRet); | |
480 | } | |
481 | return; | |
482 | } | |
483 | ||
484 | match &ccx.sess().target.target.arch[..] { | |
485 | "x86" => cabi_x86::compute_abi_info(ccx, self), | |
486 | "x86_64" => if ccx.sess().target.target.options.is_like_windows { | |
487 | cabi_x86_win64::compute_abi_info(ccx, self); | |
488 | } else { | |
489 | cabi_x86_64::compute_abi_info(ccx, self); | |
490 | }, | |
491 | "aarch64" => cabi_aarch64::compute_abi_info(ccx, self), | |
492 | "arm" => { | |
493 | let flavor = if ccx.sess().target.target.target_os == "ios" { | |
494 | cabi_arm::Flavor::Ios | |
495 | } else { | |
496 | cabi_arm::Flavor::General | |
497 | }; | |
498 | cabi_arm::compute_abi_info(ccx, self, flavor); | |
499 | }, | |
500 | "mips" => cabi_mips::compute_abi_info(ccx, self), | |
501 | "powerpc" => cabi_powerpc::compute_abi_info(ccx, self), | |
502 | "powerpc64" => cabi_powerpc64::compute_abi_info(ccx, self), | |
503 | "asmjs" => cabi_asmjs::compute_abi_info(ccx, self), | |
504 | a => ccx.sess().fatal(&format!("unrecognized arch \"{}\" in target specification", a)) | |
505 | } | |
506 | ||
507 | if self.ret.is_indirect() { | |
508 | self.ret.attrs.set(llvm::Attribute::StructRet); | |
509 | } | |
510 | } | |
511 | ||
512 | pub fn llvm_type(&self, ccx: &CrateContext) -> Type { | |
513 | let mut llargument_tys = Vec::new(); | |
514 | ||
515 | let llreturn_ty = if self.ret.is_ignore() { | |
516 | Type::void(ccx) | |
517 | } else if self.ret.is_indirect() { | |
518 | llargument_tys.push(self.ret.original_ty.ptr_to()); | |
519 | Type::void(ccx) | |
520 | } else { | |
521 | self.ret.cast.unwrap_or(self.ret.original_ty) | |
522 | }; | |
523 | ||
524 | for arg in &self.args { | |
525 | if arg.is_ignore() { | |
526 | continue; | |
527 | } | |
528 | // add padding | |
529 | if let Some(ty) = arg.pad { | |
530 | llargument_tys.push(ty); | |
531 | } | |
532 | ||
533 | let llarg_ty = if arg.is_indirect() { | |
534 | arg.original_ty.ptr_to() | |
535 | } else { | |
536 | arg.cast.unwrap_or(arg.original_ty) | |
537 | }; | |
538 | ||
539 | llargument_tys.push(llarg_ty); | |
540 | } | |
541 | ||
542 | if self.variadic { | |
543 | Type::variadic_func(&llargument_tys, &llreturn_ty) | |
544 | } else { | |
545 | Type::func(&llargument_tys, &llreturn_ty) | |
546 | } | |
547 | } | |
548 | ||
549 | pub fn apply_attrs_llfn(&self, llfn: ValueRef) { | |
550 | let mut i = if self.ret.is_indirect() { 1 } else { 0 }; | |
551 | if !self.ret.is_ignore() { | |
5bcae85e | 552 | self.ret.attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn); |
54a0048b SL |
553 | } |
554 | i += 1; | |
555 | for arg in &self.args { | |
556 | if !arg.is_ignore() { | |
557 | if arg.pad.is_some() { i += 1; } | |
5bcae85e | 558 | arg.attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn); |
54a0048b SL |
559 | i += 1; |
560 | } | |
561 | } | |
562 | } | |
563 | ||
564 | pub fn apply_attrs_callsite(&self, callsite: ValueRef) { | |
565 | let mut i = if self.ret.is_indirect() { 1 } else { 0 }; | |
566 | if !self.ret.is_ignore() { | |
5bcae85e | 567 | self.ret.attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite); |
54a0048b SL |
568 | } |
569 | i += 1; | |
570 | for arg in &self.args { | |
571 | if !arg.is_ignore() { | |
572 | if arg.pad.is_some() { i += 1; } | |
5bcae85e | 573 | arg.attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite); |
54a0048b SL |
574 | i += 1; |
575 | } | |
576 | } | |
577 | ||
578 | if self.cconv != llvm::CCallConv { | |
579 | llvm::SetInstructionCallConv(callsite, self.cconv); | |
580 | } | |
581 | } | |
582 | } |