use crate::common::Funclet;
use crate::context::CodegenCx;
use crate::llvm::{self, BasicBlock, False};
use crate::llvm::{AtomicOrdering, AtomicRmwBinOp, SynchronizationScope};
use crate::llvm_util;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use cstr::cstr;
use libc::{c_char, c_uint};
use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, TypeKind};
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::MemFlags;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_hir::def_id::DefId;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::Span;
use rustc_target::abi::{self, Align, Size};
use rustc_target::spec::{HasTargetSpec, Target};
use std::borrow::Cow;
use std::ffi::CStr;
use std::iter;
use std::ops::{Deref, Range};
use std::ptr;
use tracing::debug;

// All Builders must have an llfn associated with them
#[must_use]
pub struct Builder<'a, 'll, 'tcx> {
    pub llbuilder: &'ll mut llvm::Builder<'ll>,
    pub cx: &'a CodegenCx<'ll, 'tcx>,
}

impl Drop for Builder<'a, 'll, 'tcx> {
    fn drop(&mut self) {
        unsafe {
            llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _));
        }
    }
}

// FIXME(eddyb) use a checked constructor when they become `const fn`.
const EMPTY_C_STR: &CStr = unsafe { CStr::from_bytes_with_nul_unchecked(b"\0") };

/// Empty string, to be used where LLVM expects an instruction name, indicating
/// that the instruction is to be left unnamed (i.e. numbered, in textual IR).
// FIXME(eddyb) pass `&CStr` directly to FFI once it's a thin pointer.
const UNNAMED: *const c_char = EMPTY_C_STR.as_ptr();

impl BackendTypes for Builder<'_, 'll, 'tcx> {
    type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value;
    type Function = <CodegenCx<'ll, 'tcx> as BackendTypes>::Function;
    type BasicBlock = <CodegenCx<'ll, 'tcx> as BackendTypes>::BasicBlock;
    type Type = <CodegenCx<'ll, 'tcx> as BackendTypes>::Type;
    type Funclet = <CodegenCx<'ll, 'tcx> as BackendTypes>::Funclet;

    type DIScope = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIScope;
    type DILocation = <CodegenCx<'ll, 'tcx> as BackendTypes>::DILocation;
    type DIVariable = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIVariable;
}

impl abi::HasDataLayout for Builder<'_, '_, '_> {
    fn data_layout(&self) -> &abi::TargetDataLayout {
        self.cx.data_layout()
    }
}

impl ty::layout::HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.cx.tcx
    }
}

impl ty::layout::HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.cx.param_env()
    }
}

impl HasTargetSpec for Builder<'_, '_, 'tcx> {
    fn target_spec(&self) -> &Target {
        &self.cx.target_spec()
    }
}

impl abi::LayoutOf for Builder<'_, '_, 'tcx> {
    type Ty = Ty<'tcx>;
    type TyAndLayout = TyAndLayout<'tcx>;

    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
        self.cx.layout_of(ty)
    }
}

impl Deref for Builder<'_, 'll, 'tcx> {
    type Target = CodegenCx<'ll, 'tcx>;

    fn deref(&self) -> &Self::Target {
        self.cx
    }
}

impl HasCodegen<'tcx> for Builder<'_, 'll, 'tcx> {
    type CodegenCx = CodegenCx<'ll, 'tcx>;
}

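// Generates thin wrapper methods that forward each value instruction straight
// to the corresponding LLVM-C builder call, e.g. `add(a, b)` expands to
// `llvm::LLVMBuildAdd(self.llbuilder, a, b, UNNAMED)`.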
macro_rules! builder_methods_for_value_instructions {
    ($($name:ident($($arg:ident),*) => $llvm_capi:ident),+ $(,)?) => {
        $(fn $name(&mut self, $($arg: &'ll Value),*) -> &'ll Value {
            unsafe {
                llvm::$llvm_capi(self.llbuilder, $($arg,)* UNNAMED)
            }
        })+
    }
}

impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
    fn build(cx: &'a CodegenCx<'ll, 'tcx>, llbb: &'ll BasicBlock) -> Self {
        let bx = Builder::with_cx(cx);
        unsafe {
            llvm::LLVMPositionBuilderAtEnd(bx.llbuilder, llbb);
        }
        bx
    }

    fn cx(&self) -> &CodegenCx<'ll, 'tcx> {
        self.cx
    }

    fn llbb(&self) -> &'ll BasicBlock {
        unsafe { llvm::LLVMGetInsertBlock(self.llbuilder) }
    }

    fn set_span(&mut self, _span: Span) {}

    fn append_block(cx: &'a CodegenCx<'ll, 'tcx>, llfn: &'ll Value, name: &str) -> &'ll BasicBlock {
        unsafe {
            let name = SmallCStr::new(name);
            llvm::LLVMAppendBasicBlockInContext(cx.llcx, llfn, name.as_ptr())
        }
    }

    fn append_sibling_block(&mut self, name: &str) -> &'ll BasicBlock {
        Self::append_block(self.cx, self.llfn(), name)
    }

    fn build_sibling_block(&mut self, name: &str) -> Self {
        let llbb = self.append_sibling_block(name);
        Self::build(self.cx, llbb)
    }

    fn ret_void(&mut self) {
        unsafe {
            llvm::LLVMBuildRetVoid(self.llbuilder);
        }
    }

    fn ret(&mut self, v: &'ll Value) {
        unsafe {
            llvm::LLVMBuildRet(self.llbuilder, v);
        }
    }

    fn br(&mut self, dest: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMBuildBr(self.llbuilder, dest);
        }
    }

    fn cond_br(
        &mut self,
        cond: &'ll Value,
        then_llbb: &'ll BasicBlock,
        else_llbb: &'ll BasicBlock,
    ) {
        unsafe {
            llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb);
        }
    }

    fn switch(
        &mut self,
        v: &'ll Value,
        else_llbb: &'ll BasicBlock,
        cases: impl ExactSizeIterator<Item = (u128, &'ll BasicBlock)>,
    ) {
        let switch =
            unsafe { llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, cases.len() as c_uint) };
        for (on_val, dest) in cases {
            let on_val = self.const_uint_big(self.val_ty(v), on_val);
            unsafe { llvm::LLVMAddCase(switch, on_val, dest) }
        }
    }

    fn invoke(
        &mut self,
        llfn: &'ll Value,
        args: &[&'ll Value],
        then: &'ll BasicBlock,
        catch: &'ll BasicBlock,
        funclet: Option<&Funclet<'ll>>,
    ) -> &'ll Value {
        debug!("invoke {:?} with args ({:?})", llfn, args);

        let args = self.check_call("invoke", llfn, args);
        let bundle = funclet.map(|funclet| funclet.bundle());
        let bundle = bundle.as_ref().map(|b| &*b.raw);

        unsafe {
            llvm::LLVMRustBuildInvoke(
                self.llbuilder,
                llfn,
                args.as_ptr(),
                args.len() as c_uint,
                then,
                catch,
                bundle,
                UNNAMED,
            )
        }
    }

    fn unreachable(&mut self) {
        unsafe {
            llvm::LLVMBuildUnreachable(self.llbuilder);
        }
    }

    builder_methods_for_value_instructions! {
        add(a, b) => LLVMBuildAdd,
        fadd(a, b) => LLVMBuildFAdd,
        sub(a, b) => LLVMBuildSub,
        fsub(a, b) => LLVMBuildFSub,
        mul(a, b) => LLVMBuildMul,
        fmul(a, b) => LLVMBuildFMul,
        udiv(a, b) => LLVMBuildUDiv,
        exactudiv(a, b) => LLVMBuildExactUDiv,
        sdiv(a, b) => LLVMBuildSDiv,
        exactsdiv(a, b) => LLVMBuildExactSDiv,
        fdiv(a, b) => LLVMBuildFDiv,
        urem(a, b) => LLVMBuildURem,
        srem(a, b) => LLVMBuildSRem,
        frem(a, b) => LLVMBuildFRem,
        shl(a, b) => LLVMBuildShl,
        lshr(a, b) => LLVMBuildLShr,
        ashr(a, b) => LLVMBuildAShr,
        and(a, b) => LLVMBuildAnd,
        or(a, b) => LLVMBuildOr,
        xor(a, b) => LLVMBuildXor,
        neg(x) => LLVMBuildNeg,
        fneg(x) => LLVMBuildFNeg,
        not(x) => LLVMBuildNot,
        unchecked_sadd(x, y) => LLVMBuildNSWAdd,
        unchecked_uadd(x, y) => LLVMBuildNUWAdd,
        unchecked_ssub(x, y) => LLVMBuildNSWSub,
        unchecked_usub(x, y) => LLVMBuildNUWSub,
        unchecked_smul(x, y) => LLVMBuildNSWMul,
        unchecked_umul(x, y) => LLVMBuildNUWMul,
    }

    fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetFastMath(instr);
            instr
        }
    }

    fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetFastMath(instr);
            instr
        }
    }

    fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetFastMath(instr);
            instr
        }
    }

    fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetFastMath(instr);
            instr
        }
    }

    fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetFastMath(instr);
            instr
        }
    }

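    // Checked arithmetic lowers to LLVM's `*.with.overflow.*` intrinsics,
    // which return a `{ result, i1 overflowed }` pair; the two fields are
    // split back out with `extract_value` below.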
    fn checked_binop(
        &mut self,
        oop: OverflowOp,
        ty: Ty<'_>,
        lhs: Self::Value,
        rhs: Self::Value,
    ) -> (Self::Value, Self::Value) {
        use rustc_middle::ty::{Int, Uint};
        use rustc_middle::ty::{IntTy::*, UintTy::*};

        let new_kind = match ty.kind() {
            Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
            Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
            t @ (Uint(_) | Int(_)) => t.clone(),
            _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
        };

        let name = match oop {
            OverflowOp::Add => match new_kind {
                Int(I8) => "llvm.sadd.with.overflow.i8",
                Int(I16) => "llvm.sadd.with.overflow.i16",
                Int(I32) => "llvm.sadd.with.overflow.i32",
                Int(I64) => "llvm.sadd.with.overflow.i64",
                Int(I128) => "llvm.sadd.with.overflow.i128",

                Uint(U8) => "llvm.uadd.with.overflow.i8",
                Uint(U16) => "llvm.uadd.with.overflow.i16",
                Uint(U32) => "llvm.uadd.with.overflow.i32",
                Uint(U64) => "llvm.uadd.with.overflow.i64",
                Uint(U128) => "llvm.uadd.with.overflow.i128",

                _ => unreachable!(),
            },
            OverflowOp::Sub => match new_kind {
                Int(I8) => "llvm.ssub.with.overflow.i8",
                Int(I16) => "llvm.ssub.with.overflow.i16",
                Int(I32) => "llvm.ssub.with.overflow.i32",
                Int(I64) => "llvm.ssub.with.overflow.i64",
                Int(I128) => "llvm.ssub.with.overflow.i128",

                Uint(U8) => "llvm.usub.with.overflow.i8",
                Uint(U16) => "llvm.usub.with.overflow.i16",
                Uint(U32) => "llvm.usub.with.overflow.i32",
                Uint(U64) => "llvm.usub.with.overflow.i64",
                Uint(U128) => "llvm.usub.with.overflow.i128",

                _ => unreachable!(),
            },
            OverflowOp::Mul => match new_kind {
                Int(I8) => "llvm.smul.with.overflow.i8",
                Int(I16) => "llvm.smul.with.overflow.i16",
                Int(I32) => "llvm.smul.with.overflow.i32",
                Int(I64) => "llvm.smul.with.overflow.i64",
                Int(I128) => "llvm.smul.with.overflow.i128",

                Uint(U8) => "llvm.umul.with.overflow.i8",
                Uint(U16) => "llvm.umul.with.overflow.i16",
                Uint(U32) => "llvm.umul.with.overflow.i32",
                Uint(U64) => "llvm.umul.with.overflow.i64",
                Uint(U128) => "llvm.umul.with.overflow.i128",

                _ => unreachable!(),
            },
        };

        let intrinsic = self.get_intrinsic(&name);
        let res = self.call(intrinsic, &[lhs, rhs], None);
        (self.extract_value(res, 0), self.extract_value(res, 1))
    }

    fn from_immediate(&mut self, val: Self::Value) -> Self::Value {
        if self.cx().val_ty(val) == self.cx().type_i1() {
            self.zext(val, self.cx().type_i8())
        } else {
            val
        }
    }

    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: &abi::Scalar) -> Self::Value {
        if scalar.is_bool() {
            return self.trunc(val, self.cx().type_i1());
        }
        val
    }

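    // Fixed-size allocas are emitted through a fresh builder positioned at the
    // start of the function's entry block; allocas outside the entry block
    // would defeat LLVM's mem2reg promotion.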
    fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
        let mut bx = Builder::with_cx(self.cx);
        bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) });
        bx.dynamic_alloca(ty, align)
    }

    fn dynamic_alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
        unsafe {
            let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED);
            llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
            alloca
        }
    }

    fn array_alloca(&mut self, ty: &'ll Type, len: &'ll Value, align: Align) -> &'ll Value {
        unsafe {
            let alloca = llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED);
            llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
            alloca
        }
    }

    fn load(&mut self, ty: &'ll Type, ptr: &'ll Value, align: Align) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED);
            llvm::LLVMSetAlignment(load, align.bytes() as c_uint);
            load
        }
    }

    fn volatile_load(&mut self, ty: &'ll Type, ptr: &'ll Value) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED);
            llvm::LLVMSetVolatile(load, llvm::True);
            load
        }
    }

    fn atomic_load(
        &mut self,
        ty: &'ll Type,
        ptr: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        size: Size,
    ) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMRustBuildAtomicLoad(
                self.llbuilder,
                ty,
                ptr,
                UNNAMED,
                AtomicOrdering::from_generic(order),
            );
            // LLVM requires the alignment of atomic loads to be at least the size of the type.
            llvm::LLVMSetAlignment(load, size.bytes() as c_uint);
            load
        }
    }

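    // Loads a place into an operand, picking the `Ref`, `Immediate`, or `Pair`
    // representation that matches the layout, and attaching range/nonnull
    // metadata where the scalar's valid range allows it.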
    fn load_operand(&mut self, place: PlaceRef<'tcx, &'ll Value>) -> OperandRef<'tcx, &'ll Value> {
        debug!("PlaceRef::load: {:?}", place);

        assert_eq!(place.llextra.is_some(), place.layout.is_unsized());

        if place.layout.is_zst() {
            return OperandRef::new_zst(self, place.layout);
        }

        fn scalar_load_metadata<'a, 'll, 'tcx>(
            bx: &mut Builder<'a, 'll, 'tcx>,
            load: &'ll Value,
            scalar: &abi::Scalar,
        ) {
            let vr = scalar.valid_range.clone();
            match scalar.value {
                abi::Int(..) => {
                    let range = scalar.valid_range_exclusive(bx);
                    if range.start != range.end {
                        bx.range_metadata(load, range);
                    }
                }
                abi::Pointer if vr.start() < vr.end() && !vr.contains(&0) => {
                    bx.nonnull_metadata(load);
                }
                _ => {}
            }
        }

        let val = if let Some(llextra) = place.llextra {
            OperandValue::Ref(place.llval, Some(llextra), place.align)
        } else if place.layout.is_llvm_immediate() {
            let mut const_llval = None;
            unsafe {
                if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
                    if llvm::LLVMIsGlobalConstant(global) == llvm::True {
                        const_llval = llvm::LLVMGetInitializer(global);
                    }
                }
            }
            let llval = const_llval.unwrap_or_else(|| {
                let load = self.load(place.layout.llvm_type(self), place.llval, place.align);
                if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
                    scalar_load_metadata(self, load, scalar);
                }
                load
            });
            OperandValue::Immediate(self.to_immediate(llval, place.layout))
        } else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
            let b_offset = a.value.size(self).align_to(b.value.align(self).abi);

            let mut load = |i, scalar: &abi::Scalar, align| {
                let llptr = self.struct_gep(place.llval, i as u64);
                let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
                let load = self.load(llty, llptr, align);
                scalar_load_metadata(self, load, scalar);
                self.to_immediate_scalar(load, scalar)
            };

            OperandValue::Pair(
                load(0, a, place.align),
                load(1, b, place.align.restrict_for_offset(b_offset)),
            )
        } else {
            OperandValue::Ref(place.llval, None, place.align)
        };

        OperandRef { val, layout: place.layout }
    }

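    // Emits a header/body/next loop that stores `cg_elem` into each of the
    // `count` elements of `dest`; returns the builder positioned in the exit
    // ("next") block.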
    fn write_operand_repeatedly(
        mut self,
        cg_elem: OperandRef<'tcx, &'ll Value>,
        count: u64,
        dest: PlaceRef<'tcx, &'ll Value>,
    ) -> Self {
        let zero = self.const_usize(0);
        let count = self.const_usize(count);
        let start = dest.project_index(&mut self, zero).llval;
        let end = dest.project_index(&mut self, count).llval;

        let mut header_bx = self.build_sibling_block("repeat_loop_header");
        let mut body_bx = self.build_sibling_block("repeat_loop_body");
        let next_bx = self.build_sibling_block("repeat_loop_next");

        self.br(header_bx.llbb());
        let current = header_bx.phi(self.val_ty(start), &[start], &[self.llbb()]);

        let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
        header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());

        let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
        cg_elem
            .val
            .store(&mut body_bx, PlaceRef::new_sized_aligned(current, cg_elem.layout, align));

        let next = body_bx.inbounds_gep(current, &[self.const_usize(1)]);
        body_bx.br(header_bx.llbb());
        header_bx.add_incoming_to_phi(current, next, body_bx.llbb());

        next_bx
    }

    fn range_metadata(&mut self, load: &'ll Value, range: Range<u128>) {
        if self.sess().target.arch == "amdgpu" {
            // amdgpu/LLVM does something weird and thinks an i64 value is
            // split into a v2i32, halving the bitwidth LLVM expects,
            // tripping an assertion. So, for now, just disable this
            // optimization.
            return;
        }

        unsafe {
            let llty = self.cx.val_ty(load);
            let v = [
                self.cx.const_uint_big(llty, range.start),
                self.cx.const_uint_big(llty, range.end),
            ];

            llvm::LLVMSetMetadata(
                load,
                llvm::MD_range as c_uint,
                llvm::LLVMMDNodeInContext(self.cx.llcx, v.as_ptr(), v.len() as c_uint),
            );
        }
    }

    fn nonnull_metadata(&mut self, load: &'ll Value) {
        unsafe {
            llvm::LLVMSetMetadata(
                load,
                llvm::MD_nonnull as c_uint,
                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
            );
        }
    }

    fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
        self.store_with_flags(val, ptr, align, MemFlags::empty())
    }

    fn store_with_flags(
        &mut self,
        val: &'ll Value,
        ptr: &'ll Value,
        align: Align,
        flags: MemFlags,
    ) -> &'ll Value {
        debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
        let ptr = self.check_store(val, ptr);
        unsafe {
            let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
            let align =
                if flags.contains(MemFlags::UNALIGNED) { 1 } else { align.bytes() as c_uint };
            llvm::LLVMSetAlignment(store, align);
            if flags.contains(MemFlags::VOLATILE) {
                llvm::LLVMSetVolatile(store, llvm::True);
            }
            if flags.contains(MemFlags::NONTEMPORAL) {
                // According to LLVM [1] building a nontemporal store must
                // *always* point to a metadata value of the integer 1.
                //
                // [1]: https://llvm.org/docs/LangRef.html#store-instruction
                let one = self.cx.const_i32(1);
                let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
                llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
            }
            store
        }
    }

    fn atomic_store(
        &mut self,
        val: &'ll Value,
        ptr: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        size: Size,
    ) {
        debug!("Store {:?} -> {:?}", val, ptr);
        let ptr = self.check_store(val, ptr);
        unsafe {
            let store = llvm::LLVMRustBuildAtomicStore(
                self.llbuilder,
                val,
                ptr,
                AtomicOrdering::from_generic(order),
            );
            // LLVM requires the alignment of atomic stores to be at least the size of the type.
            llvm::LLVMSetAlignment(store, size.bytes() as c_uint);
        }
    }

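    // GEP ("getelementptr") computes an element address without dereferencing;
    // the `inbounds` variant lets LLVM assume the result stays inside the
    // underlying allocation.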
    fn gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildGEP(
                self.llbuilder,
                ptr,
                indices.as_ptr(),
                indices.len() as c_uint,
                UNNAMED,
            )
        }
    }

    fn inbounds_gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildInBoundsGEP(
                self.llbuilder,
                ptr,
                indices.as_ptr(),
                indices.len() as c_uint,
                UNNAMED,
            )
        }
    }

    fn struct_gep(&mut self, ptr: &'ll Value, idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe { llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, UNNAMED) }
    }

    fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn sext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fptoui_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> Option<&'ll Value> {
        if llvm_util::get_version() >= (12, 0, 0) && !self.fptoint_sat_broken_in_llvm() {
            let src_ty = self.cx.val_ty(val);
            let float_width = self.cx.float_width(src_ty);
            let int_width = self.cx.int_width(dest_ty);
            let name = format!("llvm.fptoui.sat.i{}.f{}", int_width, float_width);
            let intrinsic = self.get_intrinsic(&name);
            return Some(self.call(intrinsic, &[val], None));
        }

        None
    }

    fn fptosi_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> Option<&'ll Value> {
        if llvm_util::get_version() >= (12, 0, 0) && !self.fptoint_sat_broken_in_llvm() {
            let src_ty = self.cx.val_ty(val);
            let float_width = self.cx.float_width(src_ty);
            let int_width = self.cx.int_width(dest_ty);
            let name = format!("llvm.fptosi.sat.i{}.f{}", int_width, float_width);
            let intrinsic = self.get_intrinsic(&name);
            return Some(self.call(intrinsic, &[val], None));
        }

        None
    }

    fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        // On WebAssembly the `fptoui` and `fptosi` instructions currently have
        // poor codegen. The reason for this is that the corresponding wasm
        // instructions, `i32.trunc_f32_s` for example, will trap when the float
        // is out-of-bounds, infinity, or nan. This means that LLVM
        // automatically inserts control flow around `fptoui` and `fptosi`
        // because the LLVM instruction `fptoui` is defined as producing a
        // poison value, not having UB on out-of-bounds values.
        //
        // This method, however, is only used with non-saturating casts that
        // have UB on out-of-bounds values. This means that it's ok if we use
        // the raw wasm instruction since out-of-bounds values can do whatever
        // we like. To ensure that LLVM picks the right instruction we choose
        // the raw wasm intrinsic functions which avoid LLVM inserting all the
        // other control flow automatically.
        if self.sess().target.arch == "wasm32" {
            let src_ty = self.cx.val_ty(val);
            if self.cx.type_kind(src_ty) != TypeKind::Vector {
                let float_width = self.cx.float_width(src_ty);
                let int_width = self.cx.int_width(dest_ty);
                let name = match (int_width, float_width) {
                    (32, 32) => Some("llvm.wasm.trunc.unsigned.i32.f32"),
                    (32, 64) => Some("llvm.wasm.trunc.unsigned.i32.f64"),
                    (64, 32) => Some("llvm.wasm.trunc.unsigned.i64.f32"),
                    (64, 64) => Some("llvm.wasm.trunc.unsigned.i64.f64"),
                    _ => None,
                };
                if let Some(name) = name {
                    let intrinsic = self.get_intrinsic(name);
                    return self.call(intrinsic, &[val], None);
                }
            }
        }
        unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fptosi(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        // see `fptoui` above for why wasm is different here
        if self.sess().target.arch == "wasm32" {
            let src_ty = self.cx.val_ty(val);
            if self.cx.type_kind(src_ty) != TypeKind::Vector {
                let float_width = self.cx.float_width(src_ty);
                let int_width = self.cx.int_width(dest_ty);
                let name = match (int_width, float_width) {
                    (32, 32) => Some("llvm.wasm.trunc.signed.i32.f32"),
                    (32, 64) => Some("llvm.wasm.trunc.signed.i32.f64"),
                    (64, 32) => Some("llvm.wasm.trunc.signed.i64.f32"),
                    (64, 64) => Some("llvm.wasm.trunc.signed.i64.f64"),
                    _ => None,
                };
                if let Some(name) = name {
                    let intrinsic = self.get_intrinsic(name);
                    return self.call(intrinsic, &[val], None);
                }
            }
        }
        unsafe { llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn uitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn sitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fptrunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fpext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn ptrtoint(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn inttoptr(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed) }
    }

    fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    /* Comparisons */

    fn icmp(&mut self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        let op = llvm::IntPredicate::from_generic(op);
        unsafe { llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED) }
    }

    fn fcmp(&mut self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED) }
    }

    /* Miscellaneous instructions */

    fn memcpy(
        &mut self,
        dst: &'ll Value,
        dst_align: Align,
        src: &'ll Value,
        src_align: Align,
        size: &'ll Value,
        flags: MemFlags,
    ) {
        assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
        let size = self.intcast(size, self.type_isize(), false);
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        let dst = self.pointercast(dst, self.type_i8p());
        let src = self.pointercast(src, self.type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemCpy(
                self.llbuilder,
                dst,
                dst_align.bytes() as c_uint,
                src,
                src_align.bytes() as c_uint,
                size,
                is_volatile,
            );
        }
    }

    fn memmove(
        &mut self,
        dst: &'ll Value,
        dst_align: Align,
        src: &'ll Value,
        src_align: Align,
        size: &'ll Value,
        flags: MemFlags,
    ) {
        assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memmove not supported");
        let size = self.intcast(size, self.type_isize(), false);
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        let dst = self.pointercast(dst, self.type_i8p());
        let src = self.pointercast(src, self.type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemMove(
                self.llbuilder,
                dst,
                dst_align.bytes() as c_uint,
                src,
                src_align.bytes() as c_uint,
                size,
                is_volatile,
            );
        }
    }

    fn memset(
        &mut self,
        ptr: &'ll Value,
        fill_byte: &'ll Value,
        size: &'ll Value,
        align: Align,
        flags: MemFlags,
    ) {
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        let ptr = self.pointercast(ptr, self.type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemSet(
                self.llbuilder,
                ptr,
                align.bytes() as c_uint,
                fill_byte,
                size,
                is_volatile,
            );
        }
    }

    fn select(
        &mut self,
        cond: &'ll Value,
        then_val: &'ll Value,
        else_val: &'ll Value,
    ) -> &'ll Value {
        unsafe { llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, UNNAMED) }
    }

    fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
    }

    fn extract_element(&mut self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, UNNAMED) }
    }

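    // Splat idiom: insert the element into lane 0 of an undef vector, then
    // shuffle with an all-zero mask so every lane copies lane 0.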
    fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
        unsafe {
            let elt_ty = self.cx.val_ty(elt);
            let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64));
            let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
            let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64);
            self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty))
        }
    }

    fn extract_value(&mut self, agg_val: &'ll Value, idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe { llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, UNNAMED) }
    }

    fn insert_value(&mut self, agg_val: &'ll Value, elt: &'ll Value, idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe { llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint, UNNAMED) }
    }

    fn landing_pad(
        &mut self,
        ty: &'ll Type,
        pers_fn: &'ll Value,
        num_clauses: usize,
    ) -> &'ll Value {
        // Use LLVMSetPersonalityFn to set the personality. It supports arbitrary
        // Consts, while LLVMBuildLandingPad requires the argument to be a Function
        // (as of LLVM 12). The personality lives on the parent function anyway.
        self.set_personality_fn(pers_fn);
        unsafe {
            llvm::LLVMBuildLandingPad(self.llbuilder, ty, None, num_clauses as c_uint, UNNAMED)
        }
    }

    fn set_cleanup(&mut self, landing_pad: &'ll Value) {
        unsafe {
            llvm::LLVMSetCleanup(landing_pad, llvm::True);
        }
    }

    fn resume(&mut self, exn: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMBuildResume(self.llbuilder, exn) }
    }

    fn cleanup_pad(&mut self, parent: Option<&'ll Value>, args: &[&'ll Value]) -> Funclet<'ll> {
        let name = cstr!("cleanuppad");
        let ret = unsafe {
            llvm::LLVMRustBuildCleanupPad(
                self.llbuilder,
                parent,
                args.len() as c_uint,
                args.as_ptr(),
                name.as_ptr(),
            )
        };
        Funclet::new(ret.expect("LLVM does not have support for cleanuppad"))
    }

    fn cleanup_ret(
        &mut self,
        funclet: &Funclet<'ll>,
        unwind: Option<&'ll BasicBlock>,
    ) -> &'ll Value {
        let ret =
            unsafe { llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind) };
        ret.expect("LLVM does not have support for cleanupret")
    }

    fn catch_pad(&mut self, parent: &'ll Value, args: &[&'ll Value]) -> Funclet<'ll> {
        let name = cstr!("catchpad");
        let ret = unsafe {
            llvm::LLVMRustBuildCatchPad(
                self.llbuilder,
                parent,
                args.len() as c_uint,
                args.as_ptr(),
                name.as_ptr(),
            )
        };
        Funclet::new(ret.expect("LLVM does not have support for catchpad"))
    }

    fn catch_switch(
        &mut self,
        parent: Option<&'ll Value>,
        unwind: Option<&'ll BasicBlock>,
        num_handlers: usize,
    ) -> &'ll Value {
        let name = cstr!("catchswitch");
        let ret = unsafe {
            llvm::LLVMRustBuildCatchSwitch(
                self.llbuilder,
                parent,
                unwind,
                num_handlers as c_uint,
                name.as_ptr(),
            )
        };
        ret.expect("LLVM does not have support for catchswitch")
    }

    fn add_handler(&mut self, catch_switch: &'ll Value, handler: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMRustAddHandler(catch_switch, handler);
        }
    }

    fn set_personality_fn(&mut self, personality: &'ll Value) {
        unsafe {
            llvm::LLVMSetPersonalityFn(self.llfn(), personality);
        }
    }

    // Atomic Operations
    fn atomic_cmpxchg(
        &mut self,
        dst: &'ll Value,
        cmp: &'ll Value,
        src: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        failure_order: rustc_codegen_ssa::common::AtomicOrdering,
        weak: bool,
    ) -> &'ll Value {
        let weak = if weak { llvm::True } else { llvm::False };
        unsafe {
            llvm::LLVMRustBuildAtomicCmpXchg(
                self.llbuilder,
                dst,
                cmp,
                src,
                AtomicOrdering::from_generic(order),
                AtomicOrdering::from_generic(failure_order),
                weak,
            )
        }
    }

    fn atomic_rmw(
        &mut self,
        op: rustc_codegen_ssa::common::AtomicRmwBinOp,
        dst: &'ll Value,
        src: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
    ) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildAtomicRMW(
                self.llbuilder,
                AtomicRmwBinOp::from_generic(op),
                dst,
                src,
                AtomicOrdering::from_generic(order),
                False,
            )
        }
    }

    fn atomic_fence(
        &mut self,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        scope: rustc_codegen_ssa::common::SynchronizationScope,
    ) {
        unsafe {
            llvm::LLVMRustBuildAtomicFence(
                self.llbuilder,
                AtomicOrdering::from_generic(order),
                SynchronizationScope::from_generic(scope),
            );
        }
    }

: &'ll Value
) {
1077 llvm
::LLVMSetMetadata(
1079 llvm
::MD_invariant_load
as c_uint
,
1080 llvm
::LLVMMDNodeInContext(self.cx
.llcx
, ptr
::null(), 0),
1085 fn lifetime_start(&mut self, ptr
: &'ll Value
, size
: Size
) {
1086 self.call_lifetime_intrinsic("llvm.lifetime.start.p0i8", ptr
, size
);
1089 fn lifetime_end(&mut self, ptr
: &'ll Value
, size
: Size
) {
1090 self.call_lifetime_intrinsic("llvm.lifetime.end.p0i8", ptr
, size
);
    fn instrprof_increment(
        &mut self,
        fn_name: &'ll Value,
        hash: &'ll Value,
        num_counters: &'ll Value,
        index: &'ll Value,
    ) {
        debug!(
            "instrprof_increment() with args ({:?}, {:?}, {:?}, {:?})",
            fn_name, hash, num_counters, index
        );

        let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) };
        let args = &[fn_name, hash, num_counters, index];
        let args = self.check_call("call", llfn, args);

        unsafe {
            let _ = llvm::LLVMRustBuildCall(
                self.llbuilder,
                llfn,
                args.as_ptr() as *const &llvm::Value,
                args.len() as c_uint,
                None,
            );
        }
    }

    fn call(
        &mut self,
        llfn: &'ll Value,
        args: &[&'ll Value],
        funclet: Option<&Funclet<'ll>>,
    ) -> &'ll Value {
        debug!("call {:?} with args ({:?})", llfn, args);

        let args = self.check_call("call", llfn, args);
        let bundle = funclet.map(|funclet| funclet.bundle());
        let bundle = bundle.as_ref().map(|b| &*b.raw);

        unsafe {
            llvm::LLVMRustBuildCall(
                self.llbuilder,
                llfn,
                args.as_ptr() as *const &llvm::Value,
                args.len() as c_uint,
                bundle,
            )
        }
    }

    fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn do_not_inline(&mut self, llret: &'ll Value) {
        llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);
    }
}

impl StaticBuilderMethods for Builder<'a, 'll, 'tcx> {
    fn get_static(&mut self, def_id: DefId) -> &'ll Value {
        // Forward to the `get_static` method of `CodegenCx`
        self.cx().get_static(def_id)
    }
}

impl Builder<'a, 'll, 'tcx> {
    fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self {
        // Create a fresh builder from the crate context.
        let llbuilder = unsafe { llvm::LLVMCreateBuilderInContext(cx.llcx) };
        Builder { llbuilder, cx }
    }

    pub fn llfn(&self) -> &'ll Value {
        unsafe { llvm::LLVMGetBasicBlockParent(self.llbb()) }
    }

    fn position_at_start(&mut self, llbb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb);
        }
    }

    pub fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs) }
    }

    pub fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs) }
    }

    pub fn insert_element(
        &mut self,
        vec: &'ll Value,
        elt: &'ll Value,
        idx: &'ll Value,
    ) -> &'ll Value {
        unsafe { llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, UNNAMED) }
    }

    pub fn shuffle_vector(
        &mut self,
        v1: &'ll Value,
        v2: &'ll Value,
        mask: &'ll Value,
    ) -> &'ll Value {
        unsafe { llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, UNNAMED) }
    }

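    // Horizontal vector reductions; the `_fast` variants additionally set the
    // fast-math flag on the emitted instruction.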
    pub fn vector_reduce_fadd(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src) }
    }
    pub fn vector_reduce_fmul(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src) }
    }
    pub fn vector_reduce_fadd_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src);
            llvm::LLVMRustSetFastMath(instr);
            instr
        }
    }
    pub fn vector_reduce_fmul_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src);
            llvm::LLVMRustSetFastMath(instr);
            instr
        }
    }

    pub fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) }
    }
    pub fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) }
    }
    pub fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) }
    }
    pub fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) }
    }
    pub fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) }
    }
    pub fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false) }
    }
    pub fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false) }
    }

    pub fn vector_reduce_fmin_fast(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr =
                llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true);
            llvm::LLVMRustSetFastMath(instr);
            instr
        }
    }
    pub fn vector_reduce_fmax_fast(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr =
                llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true);
            llvm::LLVMRustSetFastMath(instr);
            instr
        }
    }

    pub fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) }
    }
    pub fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) }
    }

    pub fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) {
        unsafe {
            llvm::LLVMAddClause(landing_pad, clause);
        }
    }

    pub fn catch_ret(&mut self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value {
        let ret =
            unsafe { llvm::LLVMRustBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind) };
        ret.expect("LLVM does not have support for catchret")
    }

    fn check_store(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value {
        let dest_ptr_ty = self.cx.val_ty(ptr);
        let stored_ty = self.cx.val_ty(val);
        let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);

        assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);

        if dest_ptr_ty == stored_ptr_ty {
            ptr
        } else {
            debug!(
                "type mismatch in store. \
                    Expected {:?}, got {:?}; inserting bitcast",
                dest_ptr_ty, stored_ptr_ty
            );
            self.bitcast(ptr, stored_ptr_ty)
        }
    }

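    // Compares each argument against the callee's parameter types and injects
    // a bitcast for any mismatch, returning a borrowed or owned argument list.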
    fn check_call<'b>(
        &mut self,
        typ: &str,
        llfn: &'ll Value,
        args: &'b [&'ll Value],
    ) -> Cow<'b, [&'ll Value]> {
        let mut fn_ty = self.cx.val_ty(llfn);
        // Strip off pointers
        while self.cx.type_kind(fn_ty) == TypeKind::Pointer {
            fn_ty = self.cx.element_type(fn_ty);
        }

        assert!(
            self.cx.type_kind(fn_ty) == TypeKind::Function,
            "builder::{} not passed a function, but {:?}",
            typ,
            fn_ty
        );

        let param_tys = self.cx.func_params_types(fn_ty);

        let all_args_match = iter::zip(&param_tys, args.iter().map(|&v| self.val_ty(v)))
            .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);

        if all_args_match {
            return Cow::Borrowed(args);
        }

        let casted_args: Vec<_> = iter::zip(param_tys, args)
            .enumerate()
            .map(|(i, (expected_ty, &actual_val))| {
                let actual_ty = self.val_ty(actual_val);
                if expected_ty != actual_ty {
                    debug!(
                        "type mismatch in function call of {:?}. \
                            Expected {:?} for param {}, got {:?}; injecting bitcast",
                        llfn, expected_ty, i, actual_ty
                    );
                    self.bitcast(actual_val, expected_ty)
                } else {
                    actual_val
                }
            })
            .collect();

        Cow::Owned(casted_args)
    }

    pub fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
    }

    fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) {
        let size = size.bytes();
        if size == 0 {
            return;
        }

        if !self.cx().sess().emit_lifetime_markers() {
            return;
        }

        let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);

        let ptr = self.pointercast(ptr, self.cx.type_i8p());
        self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None);
    }

    fn phi(
        &mut self,
        ty: &'ll Type,
        vals: &[&'ll Value],
        bbs: &[&'ll BasicBlock],
    ) -> &'ll Value {
        assert_eq!(vals.len(), bbs.len());
        let phi = unsafe { llvm::LLVMBuildPhi(self.llbuilder, ty, UNNAMED) };
        unsafe {
            llvm::LLVMAddIncoming(phi, vals.as_ptr(), bbs.as_ptr(), vals.len() as c_uint);
            phi
        }
    }

    fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
        }
    }

    fn fptoint_sat_broken_in_llvm(&self) -> bool {
        match self.tcx.sess.target.arch.as_str() {
            // FIXME - https://bugs.llvm.org/show_bug.cgi?id=50083
            "riscv64" => llvm_util::get_version() < (13, 0, 0),
            _ => false,
        }
    }
}