// src/librustc_codegen_llvm/abi.rs (rustc 1.40.0)
use crate::llvm::{self, AttributePlace};
use crate::builder::Builder;
use crate::context::CodegenCx;
use crate::type_::Type;
use crate::value::Value;
use crate::type_of::LayoutLlvmExt;
use rustc_codegen_ssa::MemFlags;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::mir::operand::OperandValue;
use rustc_target::abi::call::ArgType;

use rustc_codegen_ssa::traits::*;

use rustc_target::abi::{HasDataLayout, LayoutOf};
use rustc::ty::Ty;
use rustc::ty::layout;

use libc::c_uint;

pub use rustc_target::spec::abi::Abi;
pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
pub use rustc_target::abi::call::*;

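// Expands to one `contains` check per listed attribute kind, invoking `$f`
// with the matching `llvm::Attribute` for every flag that is set.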
macro_rules! for_each_kind {
    ($flags: ident, $f: ident, $($kind: ident),+) => ({
        $(if $flags.contains(ArgAttribute::$kind) { $f(llvm::Attribute::$kind) })+
    })
}

trait ArgAttributeExt {
    fn for_each_kind<F>(&self, f: F) where F: FnMut(llvm::Attribute);
}

impl ArgAttributeExt for ArgAttribute {
    fn for_each_kind<F>(&self, mut f: F) where F: FnMut(llvm::Attribute) {
        for_each_kind!(self, f,
                       NoAlias, NoCapture, NonNull, ReadOnly, SExt, StructRet, ZExt, InReg)
    }
}

pub trait ArgAttributesExt {
    fn apply_llfn(&self, idx: AttributePlace, llfn: &Value, ty: Option<&Type>);
    fn apply_callsite(&self, idx: AttributePlace, callsite: &Value, ty: Option<&Type>);
}

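// A nonzero `pointee_size` encodes a known-dereferenceable span: combined
// with `NonNull` it becomes LLVM's `dereferenceable(N)`, otherwise the weaker
// `dereferenceable_or_null(N)`. `NonNull` is then removed from the regular
// set because it has been folded into that choice.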
impl ArgAttributesExt for ArgAttributes {
    fn apply_llfn(&self, idx: AttributePlace, llfn: &Value, ty: Option<&Type>) {
        let mut regular = self.regular;
        unsafe {
            let deref = self.pointee_size.bytes();
            if deref != 0 {
                if regular.contains(ArgAttribute::NonNull) {
                    llvm::LLVMRustAddDereferenceableAttr(llfn,
                                                         idx.as_uint(),
                                                         deref);
                } else {
                    llvm::LLVMRustAddDereferenceableOrNullAttr(llfn,
                                                               idx.as_uint(),
                                                               deref);
                }
                regular -= ArgAttribute::NonNull;
            }
            if let Some(align) = self.pointee_align {
                llvm::LLVMRustAddAlignmentAttr(llfn,
                                               idx.as_uint(),
                                               align.bytes() as u32);
            }
            if regular.contains(ArgAttribute::ByVal) {
                llvm::LLVMRustAddByValAttr(llfn, idx.as_uint(), ty.unwrap());
            }
            regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
        }
    }

    fn apply_callsite(&self, idx: AttributePlace, callsite: &Value, ty: Option<&Type>) {
        let mut regular = self.regular;
        unsafe {
            let deref = self.pointee_size.bytes();
            if deref != 0 {
                if regular.contains(ArgAttribute::NonNull) {
                    llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite,
                                                                 idx.as_uint(),
                                                                 deref);
                } else {
                    llvm::LLVMRustAddDereferenceableOrNullCallSiteAttr(callsite,
                                                                       idx.as_uint(),
                                                                       deref);
                }
                regular -= ArgAttribute::NonNull;
            }
            if let Some(align) = self.pointee_align {
                llvm::LLVMRustAddAlignmentCallSiteAttr(callsite,
                                                       idx.as_uint(),
                                                       align.bytes() as u32);
            }
            if regular.contains(ArgAttribute::ByVal) {
                llvm::LLVMRustAddByValCallSiteAttr(callsite, idx.as_uint(), ty.unwrap());
            }
            regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
        }
    }
}

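// Lowering of ABI register classes to LLVM types: integer registers become
// `iN`, float registers become `float`/`double`, and vector registers are
// modeled as `<size x i8>` byte vectors.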
pub trait LlvmType {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type;
}

impl LlvmType for Reg {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        match self.kind {
            RegKind::Integer => cx.type_ix(self.size.bits()),
            RegKind::Float => {
                match self.size.bits() {
                    32 => cx.type_f32(),
                    64 => cx.type_f64(),
                    _ => bug!("unsupported float: {:?}", self)
                }
            }
            RegKind::Vector => {
                cx.type_vector(cx.type_i8(), self.size.bytes())
            }
        }
    }
}

impl LlvmType for CastTarget {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        let rest_ll_unit = self.rest.unit.llvm_type(cx);
        let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 {
            (0, 0)
        } else {
            (self.rest.total.bytes() / self.rest.unit.size.bytes(),
             self.rest.total.bytes() % self.rest.unit.size.bytes())
        };

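        // Worked example (illustrative, not from the upstream source): with an
        // 8-byte integer unit and a `rest.total` of 20 bytes, rest_count is
        // 20 / 8 = 2 and rem_bytes is 20 % 8 = 4, so with an empty prefix the
        // code below produces the struct `{ i64, i64, i32 }`.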
        if self.prefix.iter().all(|x| x.is_none()) {
            // Simplify to a single unit when there is no prefix and size <= unit size
            if self.rest.total <= self.rest.unit.size {
                return rest_ll_unit;
            }

            // Simplify to array when all chunks are the same size and type
            if rem_bytes == 0 {
                return cx.type_array(rest_ll_unit, rest_count);
            }
        }

        // Create list of fields in the main structure
        let mut args: Vec<_> =
            self.prefix.iter().flat_map(|option_kind| option_kind.map(
                |kind| Reg { kind, size: self.prefix_chunk }.llvm_type(cx)))
            .chain((0..rest_count).map(|_| rest_ll_unit))
            .collect();

        // Append final integer
        if rem_bytes != 0 {
            // Only integers can really be split further.
            assert_eq!(self.rest.unit.kind, RegKind::Integer);
            args.push(cx.type_ix(rem_bytes * 8));
        }

        cx.type_struct(&args, false)
    }
}

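// Store helpers for a single argument/return value: `store` writes an
// already-lowered LLVM value into the Rust-typed destination place, and
// `store_fn_arg` pulls the value out of the function's parameter list first.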
pub trait ArgTypeExt<'ll, 'tcx> {
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn store(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
    fn store_fn_arg(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
}

impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
    /// Gets the LLVM type for a place of the original Rust type of
    /// this argument/return, i.e., the result of `type_of::type_of`.
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        self.layout.llvm_type(cx)
    }

    /// Stores a direct/indirect value described by this ArgType into a
    /// place for the original Rust type of this argument/return.
    /// Can be used both for storing formal arguments into Rust variables
    /// and for storing the results of call/invoke instructions into their
    /// destinations.
    fn store(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        if self.is_ignore() {
            return;
        }
        if self.is_sized_indirect() {
            OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
        } else if self.is_unsized_indirect() {
            bug!("unsized ArgType must be handled through store_fn_arg");
        } else if let PassMode::Cast(cast) = self.mode {
            // FIXME(eddyb): Figure out when the simpler Store is safe, clang
            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
            let can_store_through_cast_ptr = false;
            if can_store_through_cast_ptr {
                let cast_ptr_llty = bx.type_ptr_to(cast.llvm_type(bx));
                let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
                bx.store(val, cast_dst, self.layout.align.abi);
            } else {
                // The actual return type is a struct, but the ABI
                // adaptation code has cast it into some scalar type. The
                // code that follows is the only reliable way I have
                // found to do a transform like i64 -> {i32,i32}.
                // Basically we dump the data onto the stack then memcpy it.
                //
                // Other approaches I tried:
                // - Casting the Rust ret pointer to the foreign type and using Store
                //   is (a) unsafe if the size of the foreign type > the size of the
                //   Rust type and (b) runs afoul of strict aliasing rules, yielding
                //   invalid assembly under -O (specifically, the store gets removed).
                // - Truncating the foreign type to the correct integral type and then
                //   bitcasting to the struct type yields invalid cast errors.

                // We instead thus allocate some scratch space...
                let scratch_size = cast.size(bx);
                let scratch_align = cast.align(bx);
                let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align);
                bx.lifetime_start(llscratch, scratch_size);

                // ...where we first store the value...
                bx.store(val, llscratch, scratch_align);

                // ...and then memcpy it to the intended destination.
                bx.memcpy(
                    dst.llval,
                    self.layout.align.abi,
                    llscratch,
                    scratch_align,
                    bx.const_usize(self.layout.size.bytes()),
                    MemFlags::empty()
                );

                bx.lifetime_end(llscratch, scratch_size);
            }
        } else {
            OperandValue::Immediate(val).store(bx, dst);
        }
    }

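    // One logical Rust argument can occupy zero, one, or two LLVM parameters
    // depending on its PassMode; `idx` is advanced by however many parameters
    // were actually consumed.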
    fn store_fn_arg(
        &self,
        bx: &mut Builder<'a, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        let mut next = || {
            let val = llvm::get_param(bx.llfn(), *idx as c_uint);
            *idx += 1;
            val
        };
        match self.mode {
            PassMode::Ignore => {}
            PassMode::Pair(..) => {
                OperandValue::Pair(next(), next()).store(bx, dst);
            }
            PassMode::Indirect(_, Some(_)) => {
                OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
            }
            PassMode::Direct(_) | PassMode::Indirect(_, None) | PassMode::Cast(_) => {
                let next_arg = next();
                self.store(bx, next_arg, dst);
            }
        }
    }
}

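// Thin forwarding impl so the target-independent rustc_codegen_ssa code can
// drive the helpers above through its trait interface.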
impl ArgTypeMethods<'tcx> for Builder<'a, 'll, 'tcx> {
    fn store_fn_arg(
        &mut self,
        ty: &ArgType<'tcx, Ty<'tcx>>,
        idx: &mut usize, dst: PlaceRef<'tcx, Self::Value>
    ) {
        ty.store_fn_arg(self, idx, dst)
    }
    fn store_arg_ty(
        &mut self,
        ty: &ArgType<'tcx, Ty<'tcx>>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>
    ) {
        ty.store(self, val, dst)
    }
    fn memory_ty(&self, ty: &ArgType<'tcx, Ty<'tcx>>) -> &'ll Type {
        ty.memory_ty(self)
    }
}

pub trait FnTypeLlvmExt<'tcx> {
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn llvm_cconv(&self) -> llvm::CallConv;
    fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value);
    fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value);
}

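// `llvm_type` builds the LLVM function type for a whole signature. A rough
// sketch of the mapping (assuming a typical 64-bit target): an ignored return
// becomes `void`, an indirect return turns into a leading out-pointer
// parameter, and a scalar-pair argument such as `&[u8]` expands into two
// parameters (data pointer and length), so e.g. `fn(&[u8]) -> bool` lowers to
// roughly `i1 (i8*, i64)`.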
impl<'tcx> FnTypeLlvmExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        let args_capacity: usize = self.args.iter().map(|arg|
            if arg.pad.is_some() { 1 } else { 0 } +
            if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 }
        ).sum();
        let mut llargument_tys = Vec::with_capacity(
            if let PassMode::Indirect(..) = self.ret.mode { 1 } else { 0 } + args_capacity
        );

        let llreturn_ty = match self.ret.mode {
            PassMode::Ignore => cx.type_void(),
            PassMode::Direct(_) | PassMode::Pair(..) => {
                self.ret.layout.immediate_llvm_type(cx)
            }
            PassMode::Cast(cast) => cast.llvm_type(cx),
            PassMode::Indirect(..) => {
                llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
                cx.type_void()
            }
        };

        for arg in &self.args {
            // add padding
            if let Some(ty) = arg.pad {
                llargument_tys.push(ty.llvm_type(cx));
            }

            let llarg_ty = match arg.mode {
                PassMode::Ignore => continue,
                PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
                PassMode::Pair(..) => {
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Indirect(_, Some(_)) => {
                    let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty);
                    let ptr_layout = cx.layout_of(ptr_ty);
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Cast(cast) => cast.llvm_type(cx),
                PassMode::Indirect(_, None) => cx.type_ptr_to(arg.memory_ty(cx)),
            };
            llargument_tys.push(llarg_ty);
        }

        if self.c_variadic {
            cx.type_variadic_func(&llargument_tys, llreturn_ty)
        } else {
            cx.type_func(&llargument_tys, llreturn_ty)
        }
    }

    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        unsafe {
            llvm::LLVMPointerType(self.llvm_type(cx),
                                  cx.data_layout().instruction_address_space as c_uint)
        }
    }

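    // Maps rustc's target-independent calling-convention enum to the LLVM
    // calling-convention constants; plain `Conv::C` stays the default C ABI.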
    fn llvm_cconv(&self) -> llvm::CallConv {
        match self.conv {
            Conv::C => llvm::CCallConv,
            Conv::AmdGpuKernel => llvm::AmdGpuKernel,
            Conv::ArmAapcs => llvm::ArmAapcsCallConv,
            Conv::Msp430Intr => llvm::Msp430Intr,
            Conv::PtxKernel => llvm::PtxKernel,
            Conv::X86Fastcall => llvm::X86FastcallCallConv,
            Conv::X86Intr => llvm::X86_Intr,
            Conv::X86Stdcall => llvm::X86StdcallCallConv,
            Conv::X86ThisCall => llvm::X86_ThisCall,
            Conv::X86VectorCall => llvm::X86_VectorCall,
            Conv::X86_64SysV => llvm::X86_64_SysV,
            Conv::X86_64Win64 => llvm::X86_64_Win64,
        }
    }

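    // Attaches per-argument attributes to a function *declaration*. The
    // counter `i` tracks the LLVM parameter index, which can run ahead of the
    // Rust argument index because of padding and pair arguments.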
    fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) {
        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes, ty: Option<&Type>| {
            attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn, ty);
            i += 1;
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_llfn(llvm::AttributePlace::ReturnValue, llfn, None);
            }
            PassMode::Indirect(ref attrs, _) => apply(attrs, Some(self.ret.layout.llvm_type(cx))),
            _ => {}
        }
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(&ArgAttributes::new(), None);
            }
            match arg.mode {
                PassMode::Ignore => {}
                PassMode::Direct(ref attrs) |
                PassMode::Indirect(ref attrs, None) => apply(attrs, Some(arg.layout.llvm_type(cx))),
                PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
                    apply(attrs, None);
                    apply(extra_attrs, None);
                }
                PassMode::Pair(ref a, ref b) => {
                    apply(a, None);
                    apply(b, None);
                }
                PassMode::Cast(_) => apply(&ArgAttributes::new(), None),
            }
        }
    }

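    // Same attribute walk as `apply_attrs_llfn`, but applied to a specific
    // call or invoke instruction; it additionally attaches scalar range
    // metadata to the returned value and overrides the call-site calling
    // convention when it differs from the default.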
    fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value) {
        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes, ty: Option<&Type>| {
            attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite, ty);
            i += 1;
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_callsite(llvm::AttributePlace::ReturnValue, callsite, None);
            }
            PassMode::Indirect(ref attrs, _) => apply(attrs, Some(self.ret.layout.llvm_type(bx))),
            _ => {}
        }
        if let layout::Abi::Scalar(ref scalar) = self.ret.layout.abi {
            // If the value is a boolean, the range is 0..2 and that ultimately
            // becomes 0..0 when the type becomes i1, which would be rejected
            // by the LLVM verifier.
            if let layout::Int(..) = scalar.value {
                if !scalar.is_bool() {
                    let range = scalar.valid_range_exclusive(bx);
                    if range.start != range.end {
                        bx.range_metadata(callsite, range);
                    }
                }
            }
        }
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(&ArgAttributes::new(), None);
            }
            match arg.mode {
                PassMode::Ignore => {}
                PassMode::Direct(ref attrs) |
                PassMode::Indirect(ref attrs, None) => apply(attrs, Some(arg.layout.llvm_type(bx))),
                PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
                    apply(attrs, None);
                    apply(extra_attrs, None);
                }
                PassMode::Pair(ref a, ref b) => {
                    apply(a, None);
                    apply(b, None);
                }
                PassMode::Cast(_) => apply(&ArgAttributes::new(), None),
            }
        }

        let cconv = self.llvm_cconv();
        if cconv != llvm::CCallConv {
            llvm::SetInstructionCallConv(callsite, cconv);
        }
    }
}

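// Entry points used by rustc_codegen_ssa's builder abstraction.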
impl AbiBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
    fn apply_attrs_callsite(
        &mut self,
        ty: &FnType<'tcx, Ty<'tcx>>,
        callsite: Self::Value
    ) {
        ty.apply_attrs_callsite(self, callsite)
    }

    fn get_param(&self, index: usize) -> Self::Value {
        llvm::get_param(self.llfn(), index as c_uint)
    }
}