]>
Commit | Line | Data |
---|---|---|
1 | use crate::attributes; | |
2 | use crate::builder::Builder; | |
3 | use crate::context::CodegenCx; | |
4 | use crate::llvm::{self, Attribute, AttributePlace}; | |
5 | use crate::type_::Type; | |
6 | use crate::type_of::LayoutLlvmExt; | |
7 | use crate::value::Value; | |
8 | ||
9 | use rustc_codegen_ssa::mir::operand::OperandValue; | |
10 | use rustc_codegen_ssa::mir::place::PlaceRef; | |
11 | use rustc_codegen_ssa::traits::*; | |
12 | use rustc_codegen_ssa::MemFlags; | |
13 | use rustc_middle::bug; | |
14 | use rustc_middle::ty::layout::LayoutOf; | |
15 | pub use rustc_middle::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA}; | |
16 | use rustc_middle::ty::Ty; | |
17 | use rustc_session::config; | |
18 | use rustc_target::abi::call::ArgAbi; | |
19 | pub use rustc_target::abi::call::*; | |
20 | use rustc_target::abi::{self, HasDataLayout, Int}; | |
21 | pub use rustc_target::spec::abi::Abi; | |
22 | use rustc_target::spec::SanitizerSet; | |
23 | ||
24 | use libc::c_uint; | |
25 | use smallvec::SmallVec; | |
26 | ||
/// Extension trait for lowering a set of target-independent `ArgAttributes`
/// to LLVM attributes on either a function declaration or a call site.
pub trait ArgAttributesExt {
    /// Applies these attributes to the argument/return slot `idx` of the
    /// LLVM function declaration `llfn`.
    fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value);
    /// Applies these attributes to the argument/return slot `idx` of the
    /// call/invoke instruction `callsite`.
    fn apply_attrs_to_callsite(
        &self,
        idx: AttributePlace,
        cx: &CodegenCx<'_, '_>,
        callsite: &Value,
    );
}
36 | ||
/// Attributes that must always be emitted because they change how the
/// argument is passed (i.e. they affect the ABI), not just what the
/// optimizer is allowed to assume. See `get_attrs`.
const ABI_AFFECTING_ATTRIBUTES: [(ArgAttribute, llvm::AttributeKind); 1] =
    [(ArgAttribute::InReg, llvm::AttributeKind::InReg)];

/// Attributes that only license optimizations; `get_attrs` skips these at
/// `OptLevel::No` (except `NoUndef` under MemorySanitizer).
const OPTIMIZATION_ATTRIBUTES: [(ArgAttribute, llvm::AttributeKind); 5] = [
    (ArgAttribute::NoAlias, llvm::AttributeKind::NoAlias),
    (ArgAttribute::NoCapture, llvm::AttributeKind::NoCapture),
    (ArgAttribute::NonNull, llvm::AttributeKind::NonNull),
    (ArgAttribute::ReadOnly, llvm::AttributeKind::ReadOnly),
    (ArgAttribute::NoUndef, llvm::AttributeKind::NoUndef),
];
47 | ||
/// Lowers the target-independent `ArgAttributes` in `this` to a list of LLVM
/// attributes.
///
/// ABI-affecting attributes, pointee alignment, and integer extension are
/// always emitted. Attributes that merely enable optimizations are emitted
/// only when optimizing — with one exception: `noundef` is still emitted
/// under MemorySanitizer, because it changes the sanitizer's behavior.
fn get_attrs<'ll>(this: &ArgAttributes, cx: &CodegenCx<'ll, '_>) -> SmallVec<[&'ll Attribute; 8]> {
    let mut regular = this.regular;

    let mut attrs = SmallVec::new();

    // ABI-affecting attributes must always be applied
    for (attr, llattr) in ABI_AFFECTING_ATTRIBUTES {
        if regular.contains(attr) {
            attrs.push(llattr.create_attr(cx.llcx));
        }
    }
    // Known pointee alignment (e.g. for reference arguments).
    if let Some(align) = this.pointee_align {
        attrs.push(llvm::CreateAlignmentAttr(cx.llcx, align.bytes()));
    }
    // Sign/zero extension is ABI-relevant: some ABIs require small integers
    // to be extended to a full register, so these are emitted unconditionally.
    match this.arg_ext {
        ArgExtension::None => {}
        ArgExtension::Zext => attrs.push(llvm::AttributeKind::ZExt.create_attr(cx.llcx)),
        ArgExtension::Sext => attrs.push(llvm::AttributeKind::SExt.create_attr(cx.llcx)),
    }

    // Only apply remaining attributes when optimizing
    if cx.sess().opts.optimize != config::OptLevel::No {
        let deref = this.pointee_size.bytes();
        if deref != 0 {
            // The dereferenceable attribute emitted here carries the
            // non-null information when `NonNull` is set, so `NonNull` is
            // removed from the remaining set below to avoid a duplicate.
            if regular.contains(ArgAttribute::NonNull) {
                attrs.push(llvm::CreateDereferenceableAttr(cx.llcx, deref));
            } else {
                attrs.push(llvm::CreateDereferenceableOrNullAttr(cx.llcx, deref));
            }
            regular -= ArgAttribute::NonNull;
        }
        for (attr, llattr) in OPTIMIZATION_ATTRIBUTES {
            if regular.contains(attr) {
                attrs.push(llattr.create_attr(cx.llcx));
            }
        }
    } else if cx.tcx.sess.opts.unstable_opts.sanitizer.contains(SanitizerSet::MEMORY) {
        // If we're not optimising, *but* memory sanitizer is on, emit noundef, since it affects
        // memory sanitizer's behavior.

        if regular.contains(ArgAttribute::NoUndef) {
            attrs.push(llvm::AttributeKind::NoUndef.create_attr(cx.llcx));
        }
    }

    attrs
}
95 | ||
96 | impl ArgAttributesExt for ArgAttributes { | |
97 | fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value) { | |
98 | let attrs = get_attrs(self, cx); | |
99 | attributes::apply_to_llfn(llfn, idx, &attrs); | |
100 | } | |
101 | ||
102 | fn apply_attrs_to_callsite( | |
103 | &self, | |
104 | idx: AttributePlace, | |
105 | cx: &CodegenCx<'_, '_>, | |
106 | callsite: &Value, | |
107 | ) { | |
108 | let attrs = get_attrs(self, cx); | |
109 | attributes::apply_to_callsite(callsite, idx, &attrs); | |
110 | } | |
111 | } | |
112 | ||
/// Conversion from an ABI register / cast-target description to the concrete
/// LLVM type used in a lowered function signature.
pub trait LlvmType {
    fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type;
}
116 | ||
117 | impl LlvmType for Reg { | |
118 | fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type { | |
119 | match self.kind { | |
120 | RegKind::Integer => cx.type_ix(self.size.bits()), | |
121 | RegKind::Float => match self.size.bits() { | |
122 | 32 => cx.type_f32(), | |
123 | 64 => cx.type_f64(), | |
124 | _ => bug!("unsupported float: {:?}", self), | |
125 | }, | |
126 | RegKind::Vector => cx.type_vector(cx.type_i8(), self.size.bytes()), | |
127 | } | |
128 | } | |
129 | } | |
130 | ||
impl LlvmType for CastTarget {
    /// Builds the LLVM type for a `CastTarget`: an optional prefix of
    /// registers followed by the `rest` portion, which is some number of
    /// copies of a unit register plus (possibly) a trailing partial integer.
    fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        let rest_ll_unit = self.rest.unit.llvm_type(cx);
        // How many whole units fit into `rest`, and how many bytes remain
        // after them (guarding against a zero-sized unit to avoid div-by-0).
        let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 {
            (0, 0)
        } else {
            (
                self.rest.total.bytes() / self.rest.unit.size.bytes(),
                self.rest.total.bytes() % self.rest.unit.size.bytes(),
            )
        };

        if self.prefix.iter().all(|x| x.is_none()) {
            // Simplify to a single unit when there is no prefix and size <= unit size
            if self.rest.total <= self.rest.unit.size {
                return rest_ll_unit;
            }

            // Simplify to array when all chunks are the same size and type
            if rem_bytes == 0 {
                return cx.type_array(rest_ll_unit, rest_count);
            }
        }

        // Create list of fields in the main structure
        let mut args: Vec<_> = self
            .prefix
            .iter()
            .flat_map(|option_reg| option_reg.map(|reg| reg.llvm_type(cx)))
            .chain((0..rest_count).map(|_| rest_ll_unit))
            .collect();

        // Append final integer
        if rem_bytes != 0 {
            // Only integers can be really split further.
            assert_eq!(self.rest.unit.kind, RegKind::Integer);
            args.push(cx.type_ix(rem_bytes * 8));
        }

        cx.type_struct(&args, false)
    }
}
173 | ||
/// Helpers for storing a value that was passed or returned according to an
/// `ArgAbi` into its Rust-typed destination place.
pub trait ArgAbiExt<'ll, 'tcx> {
    /// LLVM type of the in-memory (place) representation of the Rust type.
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    /// Stores `val` (a value in ABI form) into the Rust-typed place `dst`.
    fn store(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
    /// Like `store`, but reads the value from the current function's formal
    /// parameter(s) starting at `*idx`, advancing `idx` past the ones used.
    fn store_fn_arg(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
}
189 | ||
impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
    /// Gets the LLVM type for a place of the original Rust type of
    /// this argument/return, i.e., the result of `type_of::type_of`.
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        self.layout.llvm_type(cx)
    }

    /// Stores a direct/indirect value described by this ArgAbi into a
    /// place for the original Rust type of this argument/return.
    /// Can be used for both storing formal arguments into Rust variables
    /// or results of call/invoke instructions into their destinations.
    fn store(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        if self.is_ignore() {
            return;
        }
        if self.is_sized_indirect() {
            // `val` is a pointer to the data; copy it by reference into `dst`.
            OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
        } else if self.is_unsized_indirect() {
            bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
        } else if let PassMode::Cast { cast, pad_i32: _ } = &self.mode {
            // FIXME(eddyb): Figure out when the simpler Store is safe, clang
            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
            let can_store_through_cast_ptr = false;
            if can_store_through_cast_ptr {
                bx.store(val, dst.llval, self.layout.align.abi);
            } else {
                // The actual return type is a struct, but the ABI
                // adaptation code has cast it into some scalar type. The
                // code that follows is the only reliable way I have
                // found to do a transform like i64 -> {i32,i32}.
                // Basically we dump the data onto the stack then memcpy it.
                //
                // Other approaches I tried:
                // - Casting rust ret pointer to the foreign type and using Store
                //   is (a) unsafe if size of foreign type > size of rust type and
                //   (b) runs afoul of strict aliasing rules, yielding invalid
                //   assembly under -O (specifically, the store gets removed).
                // - Truncating foreign type to correct integral type and then
                //   bitcasting to the struct type yields invalid cast errors.

                // We instead thus allocate some scratch space...
                let scratch_size = cast.size(bx);
                let scratch_align = cast.align(bx);
                let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align);
                bx.lifetime_start(llscratch, scratch_size);

                // ... where we first store the value...
                bx.store(val, llscratch, scratch_align);

                // ... and then memcpy it to the intended destination.
                // NOTE: copies `self.layout.size` bytes, which may be smaller
                // than the scratch allocation sized for the cast type.
                bx.memcpy(
                    dst.llval,
                    self.layout.align.abi,
                    llscratch,
                    scratch_align,
                    bx.const_usize(self.layout.size.bytes()),
                    MemFlags::empty(),
                );

                bx.lifetime_end(llscratch, scratch_size);
            }
        } else {
            // Direct scalar value: a plain immediate store.
            OperandValue::Immediate(val).store(bx, dst);
        }
    }

    /// Stores this argument from the current function's LLVM parameters into
    /// `dst`, consuming as many parameters as the `PassMode` requires and
    /// advancing `*idx` accordingly.
    fn store_fn_arg(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        // Fetches the next LLVM parameter and bumps the index.
        let mut next = || {
            let val = llvm::get_param(bx.llfn(), *idx as c_uint);
            *idx += 1;
            val
        };
        match self.mode {
            PassMode::Ignore => {}
            // Scalar pair: two consecutive LLVM parameters.
            PassMode::Pair(..) => {
                OperandValue::Pair(next(), next()).store(bx, dst);
            }
            // Unsized indirect: data pointer plus metadata (e.g. length/vtable).
            PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
                OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
            }
            // All remaining modes consume exactly one LLVM parameter and
            // defer to `store` for the actual write.
            PassMode::Direct(_)
            | PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ }
            | PassMode::Cast { .. } => {
                let next_arg = next();
                self.store(bx, next_arg, dst);
            }
        }
    }
}
289 | ||
290 | impl<'ll, 'tcx> ArgAbiMethods<'tcx> for Builder<'_, 'll, 'tcx> { | |
291 | fn store_fn_arg( | |
292 | &mut self, | |
293 | arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, | |
294 | idx: &mut usize, | |
295 | dst: PlaceRef<'tcx, Self::Value>, | |
296 | ) { | |
297 | arg_abi.store_fn_arg(self, idx, dst) | |
298 | } | |
299 | fn store_arg( | |
300 | &mut self, | |
301 | arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, | |
302 | val: &'ll Value, | |
303 | dst: PlaceRef<'tcx, &'ll Value>, | |
304 | ) { | |
305 | arg_abi.store(self, val, dst) | |
306 | } | |
307 | fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> &'ll Type { | |
308 | arg_abi.memory_ty(self) | |
309 | } | |
310 | } | |
311 | ||
/// `FnAbi`-level LLVM lowering: constructing the signature type and applying
/// ABI-implied attributes to function declarations and call sites.
pub trait FnAbiLlvmExt<'ll, 'tcx> {
    /// The LLVM function type for this signature.
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    /// An LLVM pointer type suitable for pointing at this function.
    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    /// The LLVM calling convention corresponding to `self.conv`.
    fn llvm_cconv(&self) -> llvm::CallConv;
    /// Applies attributes implied by this ABI to a function declaration.
    fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value);
    /// Applies attributes implied by this ABI to a call/invoke instruction.
    fn apply_attrs_callsite(&self, bx: &mut Builder<'_, 'll, 'tcx>, callsite: &'ll Value);
}
319 | ||
impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
    /// Builds the LLVM function type for this ABI. The number of LLVM
    /// parameters emitted per Rust argument depends on its `PassMode` and
    /// must stay in sync with the attribute-application methods below.
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        // Ignore "extra" args from the call site for C variadic functions.
        // Only the "fixed" args are part of the LLVM function signature.
        let args =
            if self.c_variadic { &self.args[..self.fixed_count as usize] } else { &self.args };

        // This capacity calculation is approximate.
        let mut llargument_tys = Vec::with_capacity(
            self.args.len() + if let PassMode::Indirect { .. } = self.ret.mode { 1 } else { 0 },
        );

        let llreturn_ty = match &self.ret.mode {
            PassMode::Ignore => cx.type_void(),
            PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx),
            PassMode::Cast { cast, pad_i32: _ } => cast.llvm_type(cx),
            // Indirect return: lowered as a leading (sret) pointer parameter
            // with a `void` LLVM return type.
            PassMode::Indirect { .. } => {
                llargument_tys.push(cx.type_ptr());
                cx.type_void()
            }
        };

        for arg in args {
            // Note that the exact number of arguments pushed here is carefully synchronized with
            // code all over the place, both in the codegen_llvm and codegen_ssa crates. That's how
            // other code then knows which LLVM argument(s) correspond to the n-th Rust argument.
            let llarg_ty = match &arg.mode {
                PassMode::Ignore => continue,
                PassMode::Direct(_) => {
                    // ABI-compatible Rust types have the same `layout.abi` (up to validity ranges),
                    // and for Scalar ABIs the LLVM type is fully determined by `layout.abi`,
                    // guaranteeing that we generate ABI-compatible LLVM IR. Things get tricky for
                    // aggregates...
                    if matches!(arg.layout.abi, abi::Abi::Aggregate { .. }) {
                        assert!(
                            arg.layout.is_sized(),
                            "`PassMode::Direct` for unsized type: {}",
                            arg.layout.ty
                        );
                        // This really shouldn't happen, since `immediate_llvm_type` will use
                        // `layout.fields` to turn this Rust type into an LLVM type. This means all
                        // sorts of Rust type details leak into the ABI. However wasm sadly *does*
                        // currently use this mode so we have to allow it -- but we absolutely
                        // shouldn't let any more targets do that.
                        // (Also see <https://github.com/rust-lang/rust/issues/115666>.)
                        assert!(
                            matches!(&*cx.tcx.sess.target.arch, "wasm32" | "wasm64"),
                            "`PassMode::Direct` for aggregates only allowed on wasm targets\nProblematic type: {:#?}",
                            arg.layout,
                        );
                    }
                    arg.layout.immediate_llvm_type(cx)
                }
                PassMode::Pair(..) => {
                    // ABI-compatible Rust types have the same `layout.abi` (up to validity ranges),
                    // so for ScalarPair we can easily be sure that we are generating ABI-compatible
                    // LLVM IR.
                    assert!(
                        matches!(arg.layout.abi, abi::Abi::ScalarPair(..)),
                        "PassMode::Pair for type {}",
                        arg.layout.ty
                    );
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack } => {
                    // `Indirect` with metadata is only for unsized types, and doesn't work with
                    // on-stack passing.
                    assert!(arg.layout.is_unsized() && !on_stack);
                    // Construct the type of a (wide) pointer to `ty`, and pass its two fields.
                    // Any two ABI-compatible unsized types have the same metadata type and
                    // moreover the same metadata value leads to the same dynamic size and
                    // alignment, so this respects ABI compatibility.
                    let ptr_ty = Ty::new_mut_ptr(cx.tcx, arg.layout.ty);
                    let ptr_layout = cx.layout_of(ptr_ty);
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => {
                    assert!(arg.layout.is_sized());
                    cx.type_ptr()
                }
                PassMode::Cast { cast, pad_i32 } => {
                    // `Cast` means "transmute to `CastType`"; that only makes sense for sized types.
                    assert!(arg.layout.is_sized());
                    // add padding
                    if *pad_i32 {
                        llargument_tys.push(Reg::i32().llvm_type(cx));
                    }
                    // Compute the LLVM type we use for this function from the cast type.
                    // We assume here that ABI-compatible Rust types have the same cast type.
                    cast.llvm_type(cx)
                }
            };
            llargument_tys.push(llarg_ty);
        }

        if self.c_variadic {
            cx.type_variadic_func(&llargument_tys, llreturn_ty)
        } else {
            cx.type_func(&llargument_tys, llreturn_ty)
        }
    }

    /// Returns a pointer type in the target's instruction address space,
    /// suitable for function pointers of this signature.
    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        cx.type_ptr_ext(cx.data_layout().instruction_address_space)
    }

    /// Maps `self.conv` to the LLVM calling convention (see the `From<Conv>`
    /// impl at the bottom of this file).
    fn llvm_cconv(&self) -> llvm::CallConv {
        self.conv.into()
    }

    /// Applies function-level and per-argument attributes implied by this
    /// ABI (noreturn/nounwind, sret/byval, extension, etc.) to the function
    /// declaration `llfn`. Argument indices must line up exactly with the
    /// parameters produced by `llvm_type` above.
    fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) {
        let mut func_attrs = SmallVec::<[_; 3]>::new();
        if self.ret.layout.abi.is_uninhabited() {
            // An uninhabited return type means the function cannot return.
            func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(cx.llcx));
        }
        if !self.can_unwind {
            func_attrs.push(llvm::AttributeKind::NoUnwind.create_attr(cx.llcx));
        }
        if let Conv::RiscvInterrupt { kind } = self.conv {
            func_attrs.push(llvm::CreateAttrStringValue(cx.llcx, "interrupt", kind.as_str()));
        }
        attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &{ func_attrs });

        // `i` tracks the LLVM parameter index; `apply` attaches the given
        // attributes to parameter `i`, bumps the counter, and returns the
        // index it used (needed for sret/byval below).
        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes| {
            attrs.apply_attrs_to_llfn(llvm::AttributePlace::Argument(i), cx, llfn);
            i += 1;
            i - 1
        };
        match &self.ret.mode {
            PassMode::Direct(attrs) => {
                attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
            }
            PassMode::Indirect { attrs, meta_attrs: _, on_stack } => {
                assert!(!on_stack);
                // The indirect return slot is the first parameter; mark it sret.
                let i = apply(attrs);
                let sret = llvm::CreateStructRetAttr(cx.llcx, self.ret.layout.llvm_type(cx));
                attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[sret]);
            }
            PassMode::Cast { cast, pad_i32: _ } => {
                cast.attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
            }
            _ => {}
        }
        for arg in self.args.iter() {
            match &arg.mode {
                PassMode::Ignore => {}
                PassMode::Indirect { attrs, meta_attrs: None, on_stack: true } => {
                    // On-stack passing is expressed via the byval attribute.
                    let i = apply(attrs);
                    let byval = llvm::CreateByValAttr(cx.llcx, arg.layout.llvm_type(cx));
                    attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[byval]);
                }
                PassMode::Direct(attrs)
                | PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
                    apply(attrs);
                }
                PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack } => {
                    assert!(!on_stack);
                    // Wide pointer: data pointer then metadata, two parameters.
                    apply(attrs);
                    apply(meta_attrs);
                }
                PassMode::Pair(a, b) => {
                    apply(a);
                    apply(b);
                }
                PassMode::Cast { cast, pad_i32 } => {
                    if *pad_i32 {
                        // The padding parameter carries no attributes.
                        apply(&ArgAttributes::new());
                    }
                    apply(&cast.attrs);
                }
            }
        }
    }

    /// Call-site counterpart of `apply_attrs_llfn`: applies the same
    /// ABI-implied attributes to a call/invoke instruction, plus call-site
    /// only concerns (return range metadata, calling convention, CMSE, and
    /// elementtype for certain intrinsics).
    fn apply_attrs_callsite(&self, bx: &mut Builder<'_, 'll, 'tcx>, callsite: &'ll Value) {
        let mut func_attrs = SmallVec::<[_; 2]>::new();
        if self.ret.layout.abi.is_uninhabited() {
            func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(bx.cx.llcx));
        }
        if !self.can_unwind {
            func_attrs.push(llvm::AttributeKind::NoUnwind.create_attr(bx.cx.llcx));
        }
        attributes::apply_to_callsite(callsite, llvm::AttributePlace::Function, &{ func_attrs });

        // Same parameter-index bookkeeping as in `apply_attrs_llfn`.
        let mut i = 0;
        let mut apply = |cx: &CodegenCx<'_, '_>, attrs: &ArgAttributes| {
            attrs.apply_attrs_to_callsite(llvm::AttributePlace::Argument(i), cx, callsite);
            i += 1;
            i - 1
        };
        match &self.ret.mode {
            PassMode::Direct(attrs) => {
                attrs.apply_attrs_to_callsite(llvm::AttributePlace::ReturnValue, bx.cx, callsite);
            }
            PassMode::Indirect { attrs, meta_attrs: _, on_stack } => {
                assert!(!on_stack);
                let i = apply(bx.cx, attrs);
                let sret = llvm::CreateStructRetAttr(bx.cx.llcx, self.ret.layout.llvm_type(bx));
                attributes::apply_to_callsite(callsite, llvm::AttributePlace::Argument(i), &[sret]);
            }
            PassMode::Cast { cast, pad_i32: _ } => {
                cast.attrs.apply_attrs_to_callsite(
                    llvm::AttributePlace::ReturnValue,
                    &bx.cx,
                    callsite,
                );
            }
            _ => {}
        }
        if let abi::Abi::Scalar(scalar) = self.ret.layout.abi {
            // If the value is a boolean, the range is 0..2 and that ultimately
            // become 0..0 when the type becomes i1, which would be rejected
            // by the LLVM verifier.
            if let Int(..) = scalar.primitive() {
                if !scalar.is_bool() && !scalar.is_always_valid(bx) {
                    bx.range_metadata(callsite, scalar.valid_range(bx));
                }
            }
        }
        for arg in self.args.iter() {
            match &arg.mode {
                PassMode::Ignore => {}
                PassMode::Indirect { attrs, meta_attrs: None, on_stack: true } => {
                    let i = apply(bx.cx, attrs);
                    let byval = llvm::CreateByValAttr(bx.cx.llcx, arg.layout.llvm_type(bx));
                    attributes::apply_to_callsite(
                        callsite,
                        llvm::AttributePlace::Argument(i),
                        &[byval],
                    );
                }
                PassMode::Direct(attrs)
                | PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
                    apply(bx.cx, attrs);
                }
                PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack: _ } => {
                    apply(bx.cx, attrs);
                    apply(bx.cx, meta_attrs);
                }
                PassMode::Pair(a, b) => {
                    apply(bx.cx, a);
                    apply(bx.cx, b);
                }
                PassMode::Cast { cast, pad_i32 } => {
                    if *pad_i32 {
                        apply(bx.cx, &ArgAttributes::new());
                    }
                    apply(bx.cx, &cast.attrs);
                }
            }
        }

        // Only override the call-site convention when it differs from the
        // default C convention.
        let cconv = self.llvm_cconv();
        if cconv != llvm::CCallConv {
            llvm::SetInstructionCallConv(callsite, cconv);
        }

        if self.conv == Conv::CCmseNonSecureCall {
            // This will probably get ignored on all targets but those supporting the TrustZone-M
            // extension (thumbv8m targets).
            let cmse_nonsecure_call = llvm::CreateAttrString(bx.cx.llcx, "cmse_nonsecure_call");
            attributes::apply_to_callsite(
                callsite,
                llvm::AttributePlace::Function,
                &[cmse_nonsecure_call],
            );
        }

        // Some intrinsics require that an elementtype attribute (with the pointee type of a
        // pointer argument) is added to the callsite.
        let element_type_index = unsafe { llvm::LLVMRustGetElementTypeArgIndex(callsite) };
        if element_type_index >= 0 {
            let arg_ty = self.args[element_type_index as usize].layout.ty;
            let pointee_ty = arg_ty.builtin_deref(true).expect("Must be pointer argument").ty;
            let element_type_attr = unsafe {
                llvm::LLVMRustCreateElementTypeAttr(bx.llcx, bx.layout_of(pointee_ty).llvm_type(bx))
            };
            attributes::apply_to_callsite(
                callsite,
                llvm::AttributePlace::Argument(element_type_index as u32),
                &[element_type_attr],
            );
        }
    }
}
610 | ||
611 | impl<'tcx> AbiBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> { | |
612 | fn get_param(&mut self, index: usize) -> Self::Value { | |
613 | llvm::get_param(self.llfn(), index as c_uint) | |
614 | } | |
615 | } | |
616 | ||
617 | impl From<Conv> for llvm::CallConv { | |
618 | fn from(conv: Conv) -> Self { | |
619 | match conv { | |
620 | Conv::C | Conv::Rust | Conv::CCmseNonSecureCall | Conv::RiscvInterrupt { .. } => { | |
621 | llvm::CCallConv | |
622 | } | |
623 | Conv::Cold => llvm::ColdCallConv, | |
624 | Conv::PreserveMost => llvm::PreserveMost, | |
625 | Conv::PreserveAll => llvm::PreserveAll, | |
626 | Conv::AmdGpuKernel => llvm::AmdGpuKernel, | |
627 | Conv::AvrInterrupt => llvm::AvrInterrupt, | |
628 | Conv::AvrNonBlockingInterrupt => llvm::AvrNonBlockingInterrupt, | |
629 | Conv::ArmAapcs => llvm::ArmAapcsCallConv, | |
630 | Conv::Msp430Intr => llvm::Msp430Intr, | |
631 | Conv::PtxKernel => llvm::PtxKernel, | |
632 | Conv::X86Fastcall => llvm::X86FastcallCallConv, | |
633 | Conv::X86Intr => llvm::X86_Intr, | |
634 | Conv::X86Stdcall => llvm::X86StdcallCallConv, | |
635 | Conv::X86ThisCall => llvm::X86_ThisCall, | |
636 | Conv::X86VectorCall => llvm::X86_VectorCall, | |
637 | Conv::X86_64SysV => llvm::X86_64_SysV, | |
638 | Conv::X86_64Win64 => llvm::X86_64_Win64, | |
639 | } | |
640 | } | |
641 | } |