// compiler/rustc_codegen_llvm/src/abi.rs
use crate::builder::Builder;
use crate::context::CodegenCx;
use crate::llvm::{self, AttributePlace};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;

use rustc_codegen_ssa::mir::operand::OperandValue;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::MemFlags;
use rustc_middle::bug;
pub use rustc_middle::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
use rustc_middle::ty::Ty;
use rustc_target::abi::call::ArgAbi;
pub use rustc_target::abi::call::*;
use rustc_target::abi::{self, HasDataLayout, Int, LayoutOf};
pub use rustc_target::spec::abi::Abi;

use libc::c_uint;

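// Helper macro: expands the listed `ArgAttribute` flags into membership checks,
// calling `$f` with the matching `llvm::Attribute` for every flag that is set.
// For example, `for_each_kind!(flags, f, NoAlias, NonNull)` expands roughly to:
//     if flags.contains(ArgAttribute::NoAlias) { f(llvm::Attribute::NoAlias) }
//     if flags.contains(ArgAttribute::NonNull) { f(llvm::Attribute::NonNull) }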
macro_rules! for_each_kind {
    ($flags: ident, $f: ident, $($kind: ident),+) => ({
        $(if $flags.contains(ArgAttribute::$kind) { $f(llvm::Attribute::$kind) })+
    })
}

trait ArgAttributeExt {
    fn for_each_kind<F>(&self, f: F)
    where
        F: FnMut(llvm::Attribute);
}

impl ArgAttributeExt for ArgAttribute {
    fn for_each_kind<F>(&self, mut f: F)
    where
        F: FnMut(llvm::Attribute),
    {
        for_each_kind!(self, f, NoAlias, NoCapture, NonNull, ReadOnly, InReg)
    }
}

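/// Applies a set of Rust-level `ArgAttributes` to LLVM: either onto a function
/// declaration (`apply_attrs_to_llfn`) or onto an individual call/invoke
/// instruction (`apply_attrs_to_callsite`), at the argument/return position
/// identified by `idx`.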
pub trait ArgAttributesExt {
    fn apply_attrs_to_llfn(&self, idx: AttributePlace, llfn: &Value);
    fn apply_attrs_to_callsite(&self, idx: AttributePlace, callsite: &Value);
}

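// Both methods lower the same information: a known pointee size becomes a
// `dereferenceable`/`dereferenceable_or_null` attribute (the `NonNull` flag only
// selects between the two and is not emitted separately), pointee alignment
// becomes an `align` attribute, the remaining flags are forwarded one by one,
// and any sign/zero extension is applied last.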
impl ArgAttributesExt for ArgAttributes {
    fn apply_attrs_to_llfn(&self, idx: AttributePlace, llfn: &Value) {
        let mut regular = self.regular;
        unsafe {
            let deref = self.pointee_size.bytes();
            if deref != 0 {
                if regular.contains(ArgAttribute::NonNull) {
                    llvm::LLVMRustAddDereferenceableAttr(llfn, idx.as_uint(), deref);
                } else {
                    llvm::LLVMRustAddDereferenceableOrNullAttr(llfn, idx.as_uint(), deref);
                }
                regular -= ArgAttribute::NonNull;
            }
            if let Some(align) = self.pointee_align {
                llvm::LLVMRustAddAlignmentAttr(llfn, idx.as_uint(), align.bytes() as u32);
            }
            regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
            match self.arg_ext {
                ArgExtension::None => {}
                ArgExtension::Zext => {
                    llvm::Attribute::ZExt.apply_llfn(idx, llfn);
                }
                ArgExtension::Sext => {
                    llvm::Attribute::SExt.apply_llfn(idx, llfn);
                }
            }
        }
    }

    fn apply_attrs_to_callsite(&self, idx: AttributePlace, callsite: &Value) {
        let mut regular = self.regular;
        unsafe {
            let deref = self.pointee_size.bytes();
            if deref != 0 {
                if regular.contains(ArgAttribute::NonNull) {
                    llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite, idx.as_uint(), deref);
                } else {
                    llvm::LLVMRustAddDereferenceableOrNullCallSiteAttr(
                        callsite,
                        idx.as_uint(),
                        deref,
                    );
                }
                regular -= ArgAttribute::NonNull;
            }
            if let Some(align) = self.pointee_align {
                llvm::LLVMRustAddAlignmentCallSiteAttr(
                    callsite,
                    idx.as_uint(),
                    align.bytes() as u32,
                );
            }
            regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
            match self.arg_ext {
                ArgExtension::None => {}
                ArgExtension::Zext => {
                    llvm::Attribute::ZExt.apply_callsite(idx, callsite);
                }
                ArgExtension::Sext => {
                    llvm::Attribute::SExt.apply_callsite(idx, callsite);
                }
            }
        }
    }
}

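/// Conversion from an ABI-level register description (`Reg`/`CastTarget`) to the
/// LLVM type used to actually pass the value.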
pub trait LlvmType {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type;
}

impl LlvmType for Reg {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        match self.kind {
            RegKind::Integer => cx.type_ix(self.size.bits()),
            RegKind::Float => match self.size.bits() {
                32 => cx.type_f32(),
                64 => cx.type_f64(),
                _ => bug!("unsupported float: {:?}", self),
            },
            RegKind::Vector => cx.type_vector(cx.type_i8(), self.size.bytes()),
        }
    }
}

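// A `CastTarget` is lowered as an optional prefix of registers followed by the
// "rest" area, which is split into as many whole units as fit plus one trailing
// integer for any remainder. For example (assuming no prefix), a 12-byte rest
// with an 8-byte integer unit becomes the LLVM struct `{ i64, i32 }`; when there
// is no prefix and no remainder it is simplified to a single unit or an array.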
impl LlvmType for CastTarget {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        let rest_ll_unit = self.rest.unit.llvm_type(cx);
        let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 {
            (0, 0)
        } else {
            (
                self.rest.total.bytes() / self.rest.unit.size.bytes(),
                self.rest.total.bytes() % self.rest.unit.size.bytes(),
            )
        };

        if self.prefix.iter().all(|x| x.is_none()) {
            // Simplify to a single unit when there is no prefix and size <= unit size
            if self.rest.total <= self.rest.unit.size {
                return rest_ll_unit;
            }

            // Simplify to array when all chunks are the same size and type
            if rem_bytes == 0 {
                return cx.type_array(rest_ll_unit, rest_count);
            }
        }

        // Create list of fields in the main structure
        let mut args: Vec<_> = self
            .prefix
            .iter()
            .flat_map(|option_kind| {
                option_kind.map(|kind| Reg { kind, size: self.prefix_chunk_size }.llvm_type(cx))
            })
            .chain((0..rest_count).map(|_| rest_ll_unit))
            .collect();

        // Append final integer
        if rem_bytes != 0 {
            // Only integers can be really split further.
            assert_eq!(self.rest.unit.kind, RegKind::Integer);
            args.push(cx.type_ix(rem_bytes * 8));
        }

        cx.type_struct(&args, false)
    }
}

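/// LLVM-specific helpers on `ArgAbi`: computing the in-memory LLVM type of an
/// argument/return value and storing incoming values (either an explicit value
/// or the next formal parameter(s) of the current function) into a place.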
pub trait ArgAbiExt<'ll, 'tcx> {
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn store(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
    fn store_fn_arg(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
}

impl ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
    /// Gets the LLVM type for a place of the original Rust type of
    /// this argument/return, i.e., the result of `type_of::type_of`.
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        self.layout.llvm_type(cx)
    }

    /// Stores a direct/indirect value described by this ArgAbi into a
    /// place for the original Rust type of this argument/return.
    /// Can be used both for storing formal arguments into Rust variables
    /// and for storing the results of call/invoke instructions into their destinations.
    fn store(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        if self.is_ignore() {
            return;
        }
        if self.is_sized_indirect() {
            OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
        } else if self.is_unsized_indirect() {
            bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
        } else if let PassMode::Cast(cast) = self.mode {
            // FIXME(eddyb): Figure out when the simpler Store is safe, clang
            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
            let can_store_through_cast_ptr = false;
            if can_store_through_cast_ptr {
                let cast_ptr_llty = bx.type_ptr_to(cast.llvm_type(bx));
                let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
                bx.store(val, cast_dst, self.layout.align.abi);
            } else {
                // The actual return type is a struct, but the ABI
                // adaptation code has cast it into some scalar type. The
                // code that follows is the only reliable way I have
                // found to do a transform like i64 -> {i32,i32}.
                // Basically we dump the data onto the stack then memcpy it.
                //
                // Other approaches I tried:
                // - Casting rust ret pointer to the foreign type and using Store
                //   is (a) unsafe if size of foreign type > size of rust type and
                //   (b) runs afoul of strict aliasing rules, yielding invalid
                //   assembly under -O (specifically, the store gets removed).
                // - Truncating foreign type to correct integral type and then
                //   bitcasting to the struct type yields invalid cast errors.

                // We instead thus allocate some scratch space...
                let scratch_size = cast.size(bx);
                let scratch_align = cast.align(bx);
                let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align);
                bx.lifetime_start(llscratch, scratch_size);

                // ... where we first store the value...
                bx.store(val, llscratch, scratch_align);

                // ... and then memcpy it to the intended destination.
                bx.memcpy(
                    dst.llval,
                    self.layout.align.abi,
                    llscratch,
                    scratch_align,
                    bx.const_usize(self.layout.size.bytes()),
                    MemFlags::empty(),
                );

                bx.lifetime_end(llscratch, scratch_size);
            }
        } else {
            OperandValue::Immediate(val).store(bx, dst);
        }
    }

    fn store_fn_arg(
        &self,
        bx: &mut Builder<'a, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        let mut next = || {
            let val = llvm::get_param(bx.llfn(), *idx as c_uint);
            *idx += 1;
            val
        };
        match self.mode {
            PassMode::Ignore => {}
            PassMode::Pair(..) => {
                OperandValue::Pair(next(), next()).store(bx, dst);
            }
            PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
                OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
            }
            PassMode::Direct(_)
            | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ }
            | PassMode::Cast(_) => {
                let next_arg = next();
                self.store(bx, next_arg, dst);
            }
        }
    }
}

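// Plumbing: expose the `ArgAbiExt` helpers above through the backend-agnostic
// `ArgAbiMethods` interface that `rustc_codegen_ssa` drives.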
impl ArgAbiMethods<'tcx> for Builder<'a, 'll, 'tcx> {
    fn store_fn_arg(
        &mut self,
        arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, Self::Value>,
    ) {
        arg_abi.store_fn_arg(self, idx, dst)
    }
    fn store_arg(
        &mut self,
        arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        arg_abi.store(self, val, dst)
    }
    fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> &'ll Type {
        arg_abi.memory_ty(self)
    }
}

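/// LLVM-specific lowering of a whole `FnAbi`: the LLVM function type, its
/// pointer type, the calling convention, and the attributes to attach to a
/// declaration (`apply_attrs_llfn`) or to a call site (`apply_attrs_callsite`).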
pub trait FnAbiLlvmExt<'tcx> {
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn llvm_cconv(&self) -> llvm::CallConv;
    fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value);
    fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value);
}

impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> {
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        let args_capacity: usize = self.args.iter().map(|arg|
            if arg.pad.is_some() { 1 } else { 0 } +
            if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 }
        ).sum();
        let mut llargument_tys = Vec::with_capacity(
            if let PassMode::Indirect { .. } = self.ret.mode { 1 } else { 0 } + args_capacity,
        );

        let llreturn_ty = match self.ret.mode {
            PassMode::Ignore => cx.type_void(),
            PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx),
            PassMode::Cast(cast) => cast.llvm_type(cx),
            PassMode::Indirect { .. } => {
                llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
                cx.type_void()
            }
        };

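        // Build the parameter list. Note the mapping is not one-to-one: ignored
        // arguments contribute no parameter, while scalar pairs and unsized
        // indirect arguments (pointer + extra data) contribute two each.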
        for arg in &self.args {
            // add padding
            if let Some(ty) = arg.pad {
                llargument_tys.push(ty.llvm_type(cx));
            }

            let llarg_ty = match arg.mode {
                PassMode::Ignore => continue,
                PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
                PassMode::Pair(..) => {
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
                    let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty);
                    let ptr_layout = cx.layout_of(ptr_ty);
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Cast(cast) => cast.llvm_type(cx),
                PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
                    cx.type_ptr_to(arg.memory_ty(cx))
                }
            };
            llargument_tys.push(llarg_ty);
        }

        if self.c_variadic {
            cx.type_variadic_func(&llargument_tys, llreturn_ty)
        } else {
            cx.type_func(&llargument_tys, llreturn_ty)
        }
    }

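    // Function pointers live in the data layout's instruction (program) address
    // space, which is non-zero on some Harvard-architecture targets (AVR, for
    // example), hence the explicit address space argument below.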
    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        unsafe {
            llvm::LLVMPointerType(
                self.llvm_type(cx),
                cx.data_layout().instruction_address_space.0 as c_uint,
            )
        }
    }

    fn llvm_cconv(&self) -> llvm::CallConv {
        match self.conv {
            Conv::C | Conv::Rust | Conv::CCmseNonSecureCall => llvm::CCallConv,
            Conv::AmdGpuKernel => llvm::AmdGpuKernel,
            Conv::AvrInterrupt => llvm::AvrInterrupt,
            Conv::AvrNonBlockingInterrupt => llvm::AvrNonBlockingInterrupt,
            Conv::ArmAapcs => llvm::ArmAapcsCallConv,
            Conv::Msp430Intr => llvm::Msp430Intr,
            Conv::PtxKernel => llvm::PtxKernel,
            Conv::X86Fastcall => llvm::X86FastcallCallConv,
            Conv::X86Intr => llvm::X86_Intr,
            Conv::X86Stdcall => llvm::X86StdcallCallConv,
            Conv::X86ThisCall => llvm::X86_ThisCall,
            Conv::X86VectorCall => llvm::X86_VectorCall,
            Conv::X86_64SysV => llvm::X86_64_SysV,
            Conv::X86_64Win64 => llvm::X86_64_Win64,
        }
    }

    fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) {
        // FIXME(eddyb) can this also be applied to callsites?
        if self.ret.layout.abi.is_uninhabited() {
            llvm::Attribute::NoReturn.apply_llfn(llvm::AttributePlace::Function, llfn);
        }

        // FIXME(eddyb, wesleywiser): apply this to callsites as well?
        if !self.can_unwind {
            llvm::Attribute::NoUnwind.apply_llfn(llvm::AttributePlace::Function, llfn);
        }

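        // `apply` assigns the next LLVM argument index to an `ArgAttributes` set
        // and returns that index, which the `sret`/`byval` cases below need when
        // calling into the raw LLVM FFI.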
        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes| {
            attrs.apply_attrs_to_llfn(llvm::AttributePlace::Argument(i), llfn);
            i += 1;
            i - 1
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, llfn);
            }
            PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => {
                assert!(!on_stack);
                let i = apply(attrs);
                unsafe {
                    llvm::LLVMRustAddStructRetAttr(
                        llfn,
                        llvm::AttributePlace::Argument(i).as_uint(),
                        self.ret.layout.llvm_type(cx),
                    );
                }
            }
            _ => {}
        }
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(&ArgAttributes::new());
            }
            match arg.mode {
                PassMode::Ignore => {}
                PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: true } => {
                    let i = apply(attrs);
                    unsafe {
                        llvm::LLVMRustAddByValAttr(
                            llfn,
                            llvm::AttributePlace::Argument(i).as_uint(),
                            arg.layout.llvm_type(cx),
                        );
                    }
                }
                PassMode::Direct(ref attrs)
                | PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: false } => {
                    apply(attrs);
                }
                PassMode::Indirect { ref attrs, extra_attrs: Some(ref extra_attrs), on_stack } => {
                    assert!(!on_stack);
                    apply(attrs);
                    apply(extra_attrs);
                }
                PassMode::Pair(ref a, ref b) => {
                    apply(a);
                    apply(b);
                }
                PassMode::Cast(_) => {
                    apply(&ArgAttributes::new());
                }
            }
        }
    }

    fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value) {
        // FIXME(wesleywiser, eddyb): We should apply `nounwind` and `noreturn` as appropriate to this callsite.

        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes| {
            attrs.apply_attrs_to_callsite(llvm::AttributePlace::Argument(i), callsite);
            i += 1;
            i - 1
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_attrs_to_callsite(llvm::AttributePlace::ReturnValue, callsite);
            }
            PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => {
                assert!(!on_stack);
                let i = apply(attrs);
                unsafe {
                    llvm::LLVMRustAddStructRetCallSiteAttr(
                        callsite,
                        llvm::AttributePlace::Argument(i).as_uint(),
                        self.ret.layout.llvm_type(bx),
                    );
                }
            }
            _ => {}
        }
        if let abi::Abi::Scalar(ref scalar) = self.ret.layout.abi {
            // If the value is a boolean, the range is 0..2 and that ultimately
            // becomes 0..0 when the type becomes i1, which would be rejected
            // by the LLVM verifier.
            if let Int(..) = scalar.value {
                if !scalar.is_bool() {
                    let range = scalar.valid_range_exclusive(bx);
                    if range.start != range.end {
                        bx.range_metadata(callsite, range);
                    }
                }
            }
        }
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(&ArgAttributes::new());
            }
            match arg.mode {
                PassMode::Ignore => {}
                PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: true } => {
                    let i = apply(attrs);
                    unsafe {
                        llvm::LLVMRustAddByValCallSiteAttr(
                            callsite,
                            llvm::AttributePlace::Argument(i).as_uint(),
                            arg.layout.llvm_type(bx),
                        );
                    }
                }
                PassMode::Direct(ref attrs)
                | PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: false } => {
                    apply(attrs);
                }
                PassMode::Indirect {
                    ref attrs,
                    extra_attrs: Some(ref extra_attrs),
                    on_stack: _,
                } => {
                    apply(attrs);
                    apply(extra_attrs);
                }
                PassMode::Pair(ref a, ref b) => {
                    apply(a);
                    apply(b);
                }
                PassMode::Cast(_) => {
                    apply(&ArgAttributes::new());
                }
            }
        }

        let cconv = self.llvm_cconv();
        if cconv != llvm::CCallConv {
            llvm::SetInstructionCallConv(callsite, cconv);
        }

        if self.conv == Conv::CCmseNonSecureCall {
            // This will probably get ignored on all targets but those supporting the TrustZone-M
            // extension (thumbv8m targets).
            unsafe {
                llvm::AddCallSiteAttrString(
                    callsite,
                    llvm::AttributePlace::Function,
                    cstr::cstr!("cmse_nonsecure_call"),
                );
            }
        }
    }
}

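// Hook the `FnAbi` lowering above into the generic builder interface used by
// `rustc_codegen_ssa` when emitting calls and reading formal parameters.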
impl AbiBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
    fn apply_attrs_callsite(&mut self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, callsite: Self::Value) {
        fn_abi.apply_attrs_callsite(self, callsite)
    }

    fn get_param(&self, index: usize) -> Self::Value {
        llvm::get_param(self.llfn(), index as c_uint)
    }
}