use crate::common::Funclet;
use crate::context::CodegenCx;
use crate::llvm::{self, BasicBlock, False};
use crate::llvm::{AtomicOrdering, AtomicRmwBinOp, SynchronizationScope};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use libc::{c_char, c_uint};
use rustc_codegen_ssa::base::to_immediate;
use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, TypeKind};
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::MemFlags;
use rustc_data_structures::const_cstr;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_hir::def_id::DefId;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::sym;
use rustc_target::abi::{self, Align, Size};
use rustc_target::spec::{HasTargetSpec, Target};
use std::borrow::Cow;
use std::ffi::CStr;
use std::iter::TrustedLen;
use std::ops::{Deref, Range};
use std::ptr;
use tracing::debug;
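
// A `Builder` wraps a raw LLVM IR builder together with the `CodegenCx` it was
// created from; the `Drop` impl below disposes the underlying LLVM builder.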
// All Builders must have an llfn associated with them
pub struct Builder<'a, 'll, 'tcx> {
    pub llbuilder: &'ll mut llvm::Builder<'ll>,
    pub cx: &'a CodegenCx<'ll, 'tcx>,
}
impl Drop for Builder<'a, 'll, 'tcx> {
    fn drop(&mut self) {
        unsafe {
            llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _));
        }
    }
}
// FIXME(eddyb) use a checked constructor when they become `const fn`.
const EMPTY_C_STR: &CStr = unsafe { CStr::from_bytes_with_nul_unchecked(b"\0") };

/// Empty string, to be used where LLVM expects an instruction name, indicating
/// that the instruction is to be left unnamed (i.e. numbered, in textual IR).
// FIXME(eddyb) pass `&CStr` directly to FFI once it's a thin pointer.
const UNNAMED: *const c_char = EMPTY_C_STR.as_ptr();
impl BackendTypes for Builder<'_, 'll, 'tcx> {
    type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value;
    type Function = <CodegenCx<'ll, 'tcx> as BackendTypes>::Function;
    type BasicBlock = <CodegenCx<'ll, 'tcx> as BackendTypes>::BasicBlock;
    type Type = <CodegenCx<'ll, 'tcx> as BackendTypes>::Type;
    type Funclet = <CodegenCx<'ll, 'tcx> as BackendTypes>::Funclet;

    type DIScope = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIScope;
    type DIVariable = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIVariable;
}
impl abi::HasDataLayout for Builder<'_, '_, '_> {
    fn data_layout(&self) -> &abi::TargetDataLayout {
        self.cx.data_layout()
    }
}

impl ty::layout::HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.cx.tcx
    }
}

impl ty::layout::HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.cx.param_env()
    }
}

impl HasTargetSpec for Builder<'_, '_, 'tcx> {
    fn target_spec(&self) -> &Target {
        &self.cx.target_spec()
    }
}

impl abi::LayoutOf for Builder<'_, '_, 'tcx> {
    type Ty = Ty<'tcx>;
    type TyAndLayout = TyAndLayout<'tcx>;

    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
        self.cx.layout_of(ty)
    }
}

impl Deref for Builder<'_, 'll, 'tcx> {
    type Target = CodegenCx<'ll, 'tcx>;

    fn deref(&self) -> &Self::Target {
        self.cx
    }
}

impl HasCodegen<'tcx> for Builder<'_, 'll, 'tcx> {
    type CodegenCx = CodegenCx<'ll, 'tcx>;
}
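
// Generates thin wrapper methods: each entry forwards its operands to the named
// `LLVMBuild*` C API function and leaves the resulting instruction unnamed.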
macro_rules! builder_methods_for_value_instructions {
    ($($name:ident($($arg:ident),*) => $llvm_capi:ident),+ $(,)?) => {
        $(fn $name(&mut self, $($arg: &'ll Value),*) -> &'ll Value {
            unsafe {
                llvm::$llvm_capi(self.llbuilder, $($arg,)* UNNAMED)
            }
        })+
    }
}
impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
    fn new_block<'b>(cx: &'a CodegenCx<'ll, 'tcx>, llfn: &'ll Value, name: &'b str) -> Self {
        let mut bx = Builder::with_cx(cx);
        let llbb = unsafe {
            let name = SmallCStr::new(name);
            llvm::LLVMAppendBasicBlockInContext(cx.llcx, llfn, name.as_ptr())
        };
        bx.position_at_end(llbb);
        bx
    }
    fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self {
        // Create a fresh builder from the crate context.
        let llbuilder = unsafe { llvm::LLVMCreateBuilderInContext(cx.llcx) };
        Builder { llbuilder, cx }
    }

    fn build_sibling_block(&self, name: &str) -> Self {
        Builder::new_block(self.cx, self.llfn(), name)
    }

    fn llbb(&self) -> &'ll BasicBlock {
        unsafe { llvm::LLVMGetInsertBlock(self.llbuilder) }
    }
    fn position_at_end(&mut self, llbb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb);
        }
    }

    fn ret_void(&mut self) {
        unsafe {
            llvm::LLVMBuildRetVoid(self.llbuilder);
        }
    }

    fn ret(&mut self, v: &'ll Value) {
        unsafe {
            llvm::LLVMBuildRet(self.llbuilder, v);
        }
    }

    fn br(&mut self, dest: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMBuildBr(self.llbuilder, dest);
        }
    }

    fn cond_br(
        &mut self,
        cond: &'ll Value,
        then_llbb: &'ll BasicBlock,
        else_llbb: &'ll BasicBlock,
    ) {
        unsafe {
            llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb);
        }
    }
    fn switch(
        &mut self,
        v: &'ll Value,
        else_llbb: &'ll BasicBlock,
        cases: impl ExactSizeIterator<Item = (u128, &'ll BasicBlock)> + TrustedLen,
    ) {
        let switch =
            unsafe { llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, cases.len() as c_uint) };
        for (on_val, dest) in cases {
            let on_val = self.const_uint_big(self.val_ty(v), on_val);
            unsafe { llvm::LLVMAddCase(switch, on_val, dest) }
        }
    }
    fn invoke(
        &mut self,
        llfn: &'ll Value,
        args: &[&'ll Value],
        then: &'ll BasicBlock,
        catch: &'ll BasicBlock,
        funclet: Option<&Funclet<'ll>>,
    ) -> &'ll Value {
        debug!("invoke {:?} with args ({:?})", llfn, args);

        let args = self.check_call("invoke", llfn, args);
        let bundle = funclet.map(|funclet| funclet.bundle());
        let bundle = bundle.as_ref().map(|b| &*b.raw);

        unsafe {
            llvm::LLVMRustBuildInvoke(
                self.llbuilder,
                llfn,
                args.as_ptr(),
                args.len() as c_uint,
                then,
                catch,
                bundle,
                UNNAMED,
            )
        }
    }

    fn unreachable(&mut self) {
        unsafe {
            llvm::LLVMBuildUnreachable(self.llbuilder);
        }
    }
    builder_methods_for_value_instructions! {
        add(a, b) => LLVMBuildAdd,
        fadd(a, b) => LLVMBuildFAdd,
        sub(a, b) => LLVMBuildSub,
        fsub(a, b) => LLVMBuildFSub,
        mul(a, b) => LLVMBuildMul,
        fmul(a, b) => LLVMBuildFMul,
        udiv(a, b) => LLVMBuildUDiv,
        exactudiv(a, b) => LLVMBuildExactUDiv,
        sdiv(a, b) => LLVMBuildSDiv,
        exactsdiv(a, b) => LLVMBuildExactSDiv,
        fdiv(a, b) => LLVMBuildFDiv,
        urem(a, b) => LLVMBuildURem,
        srem(a, b) => LLVMBuildSRem,
        frem(a, b) => LLVMBuildFRem,
        shl(a, b) => LLVMBuildShl,
        lshr(a, b) => LLVMBuildLShr,
        ashr(a, b) => LLVMBuildAShr,
        and(a, b) => LLVMBuildAnd,
        or(a, b) => LLVMBuildOr,
        xor(a, b) => LLVMBuildXor,
        neg(x) => LLVMBuildNeg,
        fneg(x) => LLVMBuildFNeg,
        not(x) => LLVMBuildNot,
        unchecked_sadd(x, y) => LLVMBuildNSWAdd,
        unchecked_uadd(x, y) => LLVMBuildNUWAdd,
        unchecked_ssub(x, y) => LLVMBuildNSWSub,
        unchecked_usub(x, y) => LLVMBuildNUWSub,
        unchecked_smul(x, y) => LLVMBuildNSWMul,
        unchecked_umul(x, y) => LLVMBuildNUWMul,
    }
    fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }
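
    // `checked_binop` lowers overflow-checked arithmetic to the
    // `llvm.{s,u}{add,sub,mul}.with.overflow.*` intrinsics, which return the
    // result together with an overflow flag, extracted below as a pair.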
    fn checked_binop(
        &mut self,
        oop: OverflowOp,
        ty: Ty<'_>,
        lhs: Self::Value,
        rhs: Self::Value,
    ) -> (Self::Value, Self::Value) {
        use rustc_ast::IntTy::*;
        use rustc_ast::UintTy::*;
        use rustc_middle::ty::{Int, Uint};

        let new_kind = match ty.kind {
            Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.ptr_width)),
            Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.ptr_width)),
            ref t @ (Uint(_) | Int(_)) => t.clone(),
            _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
        };

        let name = match oop {
            OverflowOp::Add => match new_kind {
                Int(I8) => "llvm.sadd.with.overflow.i8",
                Int(I16) => "llvm.sadd.with.overflow.i16",
                Int(I32) => "llvm.sadd.with.overflow.i32",
                Int(I64) => "llvm.sadd.with.overflow.i64",
                Int(I128) => "llvm.sadd.with.overflow.i128",

                Uint(U8) => "llvm.uadd.with.overflow.i8",
                Uint(U16) => "llvm.uadd.with.overflow.i16",
                Uint(U32) => "llvm.uadd.with.overflow.i32",
                Uint(U64) => "llvm.uadd.with.overflow.i64",
                Uint(U128) => "llvm.uadd.with.overflow.i128",

                _ => unreachable!(),
            },
            OverflowOp::Sub => match new_kind {
                Int(I8) => "llvm.ssub.with.overflow.i8",
                Int(I16) => "llvm.ssub.with.overflow.i16",
                Int(I32) => "llvm.ssub.with.overflow.i32",
                Int(I64) => "llvm.ssub.with.overflow.i64",
                Int(I128) => "llvm.ssub.with.overflow.i128",

                Uint(U8) => "llvm.usub.with.overflow.i8",
                Uint(U16) => "llvm.usub.with.overflow.i16",
                Uint(U32) => "llvm.usub.with.overflow.i32",
                Uint(U64) => "llvm.usub.with.overflow.i64",
                Uint(U128) => "llvm.usub.with.overflow.i128",

                _ => unreachable!(),
            },
            OverflowOp::Mul => match new_kind {
                Int(I8) => "llvm.smul.with.overflow.i8",
                Int(I16) => "llvm.smul.with.overflow.i16",
                Int(I32) => "llvm.smul.with.overflow.i32",
                Int(I64) => "llvm.smul.with.overflow.i64",
                Int(I128) => "llvm.smul.with.overflow.i128",

                Uint(U8) => "llvm.umul.with.overflow.i8",
                Uint(U16) => "llvm.umul.with.overflow.i16",
                Uint(U32) => "llvm.umul.with.overflow.i32",
                Uint(U64) => "llvm.umul.with.overflow.i64",
                Uint(U128) => "llvm.umul.with.overflow.i128",

                _ => unreachable!(),
            },
        };

        let intrinsic = self.get_intrinsic(&name);
        let res = self.call(intrinsic, &[lhs, rhs], None);
        (self.extract_value(res, 0), self.extract_value(res, 1))
    }
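
    // Static allocas are emitted at the start of the function's entry block,
    // rather than at the current insertion point, so that LLVM can promote
    // them to SSA values.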
    fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
        let mut bx = Builder::with_cx(self.cx);
        bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) });
        bx.dynamic_alloca(ty, align)
    }

    fn dynamic_alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
        unsafe {
            let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED);
            llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
            alloca
        }
    }

    fn array_alloca(&mut self, ty: &'ll Type, len: &'ll Value, align: Align) -> &'ll Value {
        unsafe {
            let alloca = llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED);
            llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
            alloca
        }
    }
    fn load(&mut self, ptr: &'ll Value, align: Align) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
            llvm::LLVMSetAlignment(load, align.bytes() as c_uint);
            load
        }
    }

    fn volatile_load(&mut self, ptr: &'ll Value) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
            llvm::LLVMSetVolatile(load, llvm::True);
            load
        }
    }

    fn atomic_load(
        &mut self,
        ptr: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        size: Size,
    ) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMRustBuildAtomicLoad(
                self.llbuilder,
                ptr,
                UNNAMED,
                AtomicOrdering::from_generic(order),
            );
            // LLVM requires the alignment of atomic loads to be at least the size of the type.
            llvm::LLVMSetAlignment(load, size.bytes() as c_uint);
            load
        }
    }
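
    // `load_operand` turns a place (pointer plus layout) into an operand,
    // attaching range/nonnull metadata derived from the scalar's valid range
    // and loading `ScalarPair`s one field at a time.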
    fn load_operand(&mut self, place: PlaceRef<'tcx, &'ll Value>) -> OperandRef<'tcx, &'ll Value> {
        debug!("PlaceRef::load: {:?}", place);

        assert_eq!(place.llextra.is_some(), place.layout.is_unsized());

        if place.layout.is_zst() {
            return OperandRef::new_zst(self, place.layout);
        }

        fn scalar_load_metadata<'a, 'll, 'tcx>(
            bx: &mut Builder<'a, 'll, 'tcx>,
            load: &'ll Value,
            scalar: &abi::Scalar,
        ) {
            let vr = scalar.valid_range.clone();
            match scalar.value {
                abi::Int(..) => {
                    let range = scalar.valid_range_exclusive(bx);
                    if range.start != range.end {
                        bx.range_metadata(load, range);
                    }
                }
                abi::Pointer if vr.start() < vr.end() && !vr.contains(&0) => {
                    bx.nonnull_metadata(load);
                }
                _ => {}
            }
        }

        let val = if let Some(llextra) = place.llextra {
            OperandValue::Ref(place.llval, Some(llextra), place.align)
        } else if place.layout.is_llvm_immediate() {
            let mut const_llval = None;
            unsafe {
                if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
                    if llvm::LLVMIsGlobalConstant(global) == llvm::True {
                        const_llval = llvm::LLVMGetInitializer(global);
                    }
                }
            }
            let llval = const_llval.unwrap_or_else(|| {
                let load = self.load(place.llval, place.align);
                if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
                    scalar_load_metadata(self, load, scalar);
                }
                load
            });
            OperandValue::Immediate(to_immediate(self, llval, place.layout))
        } else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
            let b_offset = a.value.size(self).align_to(b.value.align(self).abi);

            let mut load = |i, scalar: &abi::Scalar, align| {
                let llptr = self.struct_gep(place.llval, i as u64);
                let load = self.load(llptr, align);
                scalar_load_metadata(self, load, scalar);
                if scalar.is_bool() { self.trunc(load, self.type_i1()) } else { load }
            };

            OperandValue::Pair(
                load(0, a, place.align),
                load(1, b, place.align.restrict_for_offset(b_offset)),
            )
        } else {
            OperandValue::Ref(place.llval, None, place.align)
        };

        OperandRef { val, layout: place.layout }
    }
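
    // Emits a header/body/next loop that stores `cg_elem` into each of the
    // `count` elements of `dest`; the returned builder is positioned in the
    // exit block.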
    fn write_operand_repeatedly(
        mut self,
        cg_elem: OperandRef<'tcx, &'ll Value>,
        count: u64,
        dest: PlaceRef<'tcx, &'ll Value>,
    ) -> Self {
        let zero = self.const_usize(0);
        let count = self.const_usize(count);
        let start = dest.project_index(&mut self, zero).llval;
        let end = dest.project_index(&mut self, count).llval;

        let mut header_bx = self.build_sibling_block("repeat_loop_header");
        let mut body_bx = self.build_sibling_block("repeat_loop_body");
        let next_bx = self.build_sibling_block("repeat_loop_next");

        self.br(header_bx.llbb());
        let current = header_bx.phi(self.val_ty(start), &[start], &[self.llbb()]);

        let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
        header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());

        let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
        cg_elem
            .val
            .store(&mut body_bx, PlaceRef::new_sized_aligned(current, cg_elem.layout, align));

        let next = body_bx.inbounds_gep(current, &[self.const_usize(1)]);
        body_bx.br(header_bx.llbb());
        header_bx.add_incoming_to_phi(current, next, body_bx.llbb());

        next_bx
    }
    fn range_metadata(&mut self, load: &'ll Value, range: Range<u128>) {
        if self.sess().target.target.arch == "amdgpu" {
            // amdgpu/LLVM does something weird and thinks an i64 value is
            // split into a v2i32, halving the bitwidth LLVM expects,
            // tripping an assertion. So, for now, just disable this
            // optimization.
            return;
        }

        unsafe {
            let llty = self.cx.val_ty(load);
            let v = [
                self.cx.const_uint_big(llty, range.start),
                self.cx.const_uint_big(llty, range.end),
            ];

            llvm::LLVMSetMetadata(
                load,
                llvm::MD_range as c_uint,
                llvm::LLVMMDNodeInContext(self.cx.llcx, v.as_ptr(), v.len() as c_uint),
            );
        }
    }

    fn nonnull_metadata(&mut self, load: &'ll Value) {
        unsafe {
            llvm::LLVMSetMetadata(
                load,
                llvm::MD_nonnull as c_uint,
                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
            );
        }
    }
    fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
        self.store_with_flags(val, ptr, align, MemFlags::empty())
    }

    fn store_with_flags(
        &mut self,
        val: &'ll Value,
        ptr: &'ll Value,
        align: Align,
        flags: MemFlags,
    ) -> &'ll Value {
        debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
        let ptr = self.check_store(val, ptr);
        unsafe {
            let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
            let align =
                if flags.contains(MemFlags::UNALIGNED) { 1 } else { align.bytes() as c_uint };
            llvm::LLVMSetAlignment(store, align);
            if flags.contains(MemFlags::VOLATILE) {
                llvm::LLVMSetVolatile(store, llvm::True);
            }
            if flags.contains(MemFlags::NONTEMPORAL) {
                // According to LLVM [1] building a nontemporal store must
                // *always* point to a metadata value of the integer 1.
                //
                // [1]: http://llvm.org/docs/LangRef.html#store-instruction
                let one = self.cx.const_i32(1);
                let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
                llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
            }
            store
        }
    }
    fn atomic_store(
        &mut self,
        val: &'ll Value,
        ptr: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        size: Size,
    ) {
        debug!("Store {:?} -> {:?}", val, ptr);
        let ptr = self.check_store(val, ptr);
        unsafe {
            let store = llvm::LLVMRustBuildAtomicStore(
                self.llbuilder,
                val,
                ptr,
                AtomicOrdering::from_generic(order),
            );
            // LLVM requires the alignment of atomic stores to be at least the size of the type.
            llvm::LLVMSetAlignment(store, size.bytes() as c_uint);
        }
    }
    fn gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildGEP(
                self.llbuilder,
                ptr,
                indices.as_ptr(),
                indices.len() as c_uint,
                UNNAMED,
            )
        }
    }

    fn inbounds_gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildInBoundsGEP(
                self.llbuilder,
                ptr,
                indices.as_ptr(),
                indices.len() as c_uint,
                UNNAMED,
            )
        }
    }

    fn struct_gep(&mut self, ptr: &'ll Value, idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe { llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, UNNAMED) }
    }
    fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn sext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }
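
    // Saturating float-to-int casts: on wasm32 with the `nontrapping-fptoint`
    // feature these map to a single `llvm.wasm.trunc.saturate.*` intrinsic;
    // otherwise `None` is returned and the caller must saturate by hand.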
    fn fptoui_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> Option<&'ll Value> {
        // WebAssembly has saturating floating point to integer casts if the
        // `nontrapping-fptoint` target feature is activated. We'll use those if
        // they are available.
        if self.sess().target.target.arch == "wasm32"
            && self.sess().target_features.contains(&sym::nontrapping_dash_fptoint)
        {
            let src_ty = self.cx.val_ty(val);
            let float_width = self.cx.float_width(src_ty);
            let int_width = self.cx.int_width(dest_ty);
            let name = match (int_width, float_width) {
                (32, 32) => Some("llvm.wasm.trunc.saturate.unsigned.i32.f32"),
                (32, 64) => Some("llvm.wasm.trunc.saturate.unsigned.i32.f64"),
                (64, 32) => Some("llvm.wasm.trunc.saturate.unsigned.i64.f32"),
                (64, 64) => Some("llvm.wasm.trunc.saturate.unsigned.i64.f64"),
                _ => None,
            };
            if let Some(name) = name {
                let intrinsic = self.get_intrinsic(name);
                return Some(self.call(intrinsic, &[val], None));
            }
        }
        None
    }

    fn fptosi_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> Option<&'ll Value> {
        // WebAssembly has saturating floating point to integer casts if the
        // `nontrapping-fptoint` target feature is activated. We'll use those if
        // they are available.
        if self.sess().target.target.arch == "wasm32"
            && self.sess().target_features.contains(&sym::nontrapping_dash_fptoint)
        {
            let src_ty = self.cx.val_ty(val);
            let float_width = self.cx.float_width(src_ty);
            let int_width = self.cx.int_width(dest_ty);
            let name = match (int_width, float_width) {
                (32, 32) => Some("llvm.wasm.trunc.saturate.signed.i32.f32"),
                (32, 64) => Some("llvm.wasm.trunc.saturate.signed.i32.f64"),
                (64, 32) => Some("llvm.wasm.trunc.saturate.signed.i64.f32"),
                (64, 64) => Some("llvm.wasm.trunc.saturate.signed.i64.f64"),
                _ => None,
            };
            if let Some(name) = name {
                let intrinsic = self.get_intrinsic(name);
                return Some(self.call(intrinsic, &[val], None));
            }
        }
        None
    }
    fn fptosui_may_trap(&self, val: &'ll Value, dest_ty: &'ll Type) -> bool {
        // Most of the time we'll be generating the `fptosi` or `fptoui`
        // instructions for floating-point-to-integer conversions. These
        // instructions by definition in LLVM do not trap. For the WebAssembly
        // target, however, we'll lower in some cases to intrinsic calls instead
        // which may trap. If we detect that this is a situation where we'll be
        // using the intrinsics then we report that the call may trap, which
        // callers might need to handle.
        if !self.wasm_and_missing_nontrapping_fptoint() {
            return false;
        }
        let src_ty = self.cx.val_ty(val);
        let float_width = self.cx.float_width(src_ty);
        let int_width = self.cx.int_width(dest_ty);
        match (int_width, float_width) {
            (32, 32) | (32, 64) | (64, 32) | (64, 64) => true,
            _ => false,
        }
    }
    fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        // When we can, use the native wasm intrinsics which have tighter
        // codegen. Note that this has a semantic difference in that the
        // intrinsic can trap whereas `fptoui` never traps. That difference,
        // however, is handled by `fptosui_may_trap` above.
        //
        // Note that we skip the wasm intrinsics for vector types where `fptoui`
        // must be used instead.
        if self.wasm_and_missing_nontrapping_fptoint() {
            let src_ty = self.cx.val_ty(val);
            if self.cx.type_kind(src_ty) != TypeKind::Vector {
                let float_width = self.cx.float_width(src_ty);
                let int_width = self.cx.int_width(dest_ty);
                let name = match (int_width, float_width) {
                    (32, 32) => Some("llvm.wasm.trunc.unsigned.i32.f32"),
                    (32, 64) => Some("llvm.wasm.trunc.unsigned.i32.f64"),
                    (64, 32) => Some("llvm.wasm.trunc.unsigned.i64.f32"),
                    (64, 64) => Some("llvm.wasm.trunc.unsigned.i64.f64"),
                    _ => None,
                };
                if let Some(name) = name {
                    let intrinsic = self.get_intrinsic(name);
                    return self.call(intrinsic, &[val], None);
                }
            }
        }
        unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fptosi(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        if self.wasm_and_missing_nontrapping_fptoint() {
            let src_ty = self.cx.val_ty(val);
            if self.cx.type_kind(src_ty) != TypeKind::Vector {
                let float_width = self.cx.float_width(src_ty);
                let int_width = self.cx.int_width(dest_ty);
                let name = match (int_width, float_width) {
                    (32, 32) => Some("llvm.wasm.trunc.signed.i32.f32"),
                    (32, 64) => Some("llvm.wasm.trunc.signed.i32.f64"),
                    (64, 32) => Some("llvm.wasm.trunc.signed.i64.f32"),
                    (64, 64) => Some("llvm.wasm.trunc.signed.i64.f64"),
                    _ => None,
                };
                if let Some(name) = name {
                    let intrinsic = self.get_intrinsic(name);
                    return self.call(intrinsic, &[val], None);
                }
            }
        }
        unsafe { llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty, UNNAMED) }
    }
    fn uitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn sitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fptrunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fpext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn ptrtoint(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn inttoptr(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed) }
    }

    fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, UNNAMED) }
    }
    fn icmp(&mut self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        let op = llvm::IntPredicate::from_generic(op);
        unsafe { llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED) }
    }

    fn fcmp(&mut self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED) }
    }

    /* Miscellaneous instructions */
    fn memcpy(
        &mut self,
        dst: &'ll Value,
        dst_align: Align,
        src: &'ll Value,
        src_align: Align,
        size: &'ll Value,
        flags: MemFlags,
    ) {
        if flags.contains(MemFlags::NONTEMPORAL) {
            // HACK(nox): This is inefficient but there is no nontemporal memcpy.
            let val = self.load(src, src_align);
            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
            self.store_with_flags(val, ptr, dst_align, flags);
            return;
        }
        let size = self.intcast(size, self.type_isize(), false);
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        let dst = self.pointercast(dst, self.type_i8p());
        let src = self.pointercast(src, self.type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemCpy(
                self.llbuilder,
                dst,
                dst_align.bytes() as c_uint,
                src,
                src_align.bytes() as c_uint,
                size,
                is_volatile,
            );
        }
    }

    fn memmove(
        &mut self,
        dst: &'ll Value,
        dst_align: Align,
        src: &'ll Value,
        src_align: Align,
        size: &'ll Value,
        flags: MemFlags,
    ) {
        if flags.contains(MemFlags::NONTEMPORAL) {
            // HACK(nox): This is inefficient but there is no nontemporal memmove.
            let val = self.load(src, src_align);
            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
            self.store_with_flags(val, ptr, dst_align, flags);
            return;
        }
        let size = self.intcast(size, self.type_isize(), false);
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        let dst = self.pointercast(dst, self.type_i8p());
        let src = self.pointercast(src, self.type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemMove(
                self.llbuilder,
                dst,
                dst_align.bytes() as c_uint,
                src,
                src_align.bytes() as c_uint,
                size,
                is_volatile,
            );
        }
    }
    fn memset(
        &mut self,
        ptr: &'ll Value,
        fill_byte: &'ll Value,
        size: &'ll Value,
        align: Align,
        flags: MemFlags,
    ) {
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        let ptr = self.pointercast(ptr, self.type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemSet(
                self.llbuilder,
                ptr,
                align.bytes() as c_uint,
                fill_byte,
                size,
                is_volatile,
            );
        }
    }

    fn select(
        &mut self,
        cond: &'ll Value,
        then_val: &'ll Value,
        else_val: &'ll Value,
    ) -> &'ll Value {
        unsafe { llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, UNNAMED) }
    }
    fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
    }

    fn extract_element(&mut self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, UNNAMED) }
    }

    fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
        unsafe {
            let elt_ty = self.cx.val_ty(elt);
            let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64));
            let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
            let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64);
            self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty))
        }
    }

    fn extract_value(&mut self, agg_val: &'ll Value, idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe { llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, UNNAMED) }
    }

    fn insert_value(&mut self, agg_val: &'ll Value, elt: &'ll Value, idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe { llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint, UNNAMED) }
    }
    fn landing_pad(
        &mut self,
        ty: &'ll Type,
        pers_fn: &'ll Value,
        num_clauses: usize,
    ) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn, num_clauses as c_uint, UNNAMED)
        }
    }

    fn set_cleanup(&mut self, landing_pad: &'ll Value) {
        unsafe {
            llvm::LLVMSetCleanup(landing_pad, llvm::True);
        }
    }

    fn resume(&mut self, exn: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMBuildResume(self.llbuilder, exn) }
    }
    fn cleanup_pad(&mut self, parent: Option<&'ll Value>, args: &[&'ll Value]) -> Funclet<'ll> {
        let name = const_cstr!("cleanuppad");
        let ret = unsafe {
            llvm::LLVMRustBuildCleanupPad(
                self.llbuilder,
                parent,
                args.len() as c_uint,
                args.as_ptr(),
                name.as_ptr(),
            )
        };
        Funclet::new(ret.expect("LLVM does not have support for cleanuppad"))
    }

    fn cleanup_ret(
        &mut self,
        funclet: &Funclet<'ll>,
        unwind: Option<&'ll BasicBlock>,
    ) -> &'ll Value {
        let ret =
            unsafe { llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind) };
        ret.expect("LLVM does not have support for cleanupret")
    }

    fn catch_pad(&mut self, parent: &'ll Value, args: &[&'ll Value]) -> Funclet<'ll> {
        let name = const_cstr!("catchpad");
        let ret = unsafe {
            llvm::LLVMRustBuildCatchPad(
                self.llbuilder,
                parent,
                args.len() as c_uint,
                args.as_ptr(),
                name.as_ptr(),
            )
        };
        Funclet::new(ret.expect("LLVM does not have support for catchpad"))
    }

    fn catch_switch(
        &mut self,
        parent: Option<&'ll Value>,
        unwind: Option<&'ll BasicBlock>,
        num_handlers: usize,
    ) -> &'ll Value {
        let name = const_cstr!("catchswitch");
        let ret = unsafe {
            llvm::LLVMRustBuildCatchSwitch(
                self.llbuilder,
                parent,
                unwind,
                num_handlers as c_uint,
                name.as_ptr(),
            )
        };
        ret.expect("LLVM does not have support for catchswitch")
    }
    fn add_handler(&mut self, catch_switch: &'ll Value, handler: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMRustAddHandler(catch_switch, handler);
        }
    }

    fn set_personality_fn(&mut self, personality: &'ll Value) {
        unsafe {
            llvm::LLVMSetPersonalityFn(self.llfn(), personality);
        }
    }
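
    // The atomic builders translate the backend-independent orderings and
    // scopes from `rustc_codegen_ssa::common` into LLVM's own enums via
    // `from_generic`.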
    // Atomic Operations
    fn atomic_cmpxchg(
        &mut self,
        dst: &'ll Value,
        cmp: &'ll Value,
        src: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        failure_order: rustc_codegen_ssa::common::AtomicOrdering,
        weak: bool,
    ) -> &'ll Value {
        let weak = if weak { llvm::True } else { llvm::False };
        unsafe {
            llvm::LLVMRustBuildAtomicCmpXchg(
                self.llbuilder,
                dst,
                cmp,
                src,
                AtomicOrdering::from_generic(order),
                AtomicOrdering::from_generic(failure_order),
                weak,
            )
        }
    }

    fn atomic_rmw(
        &mut self,
        op: rustc_codegen_ssa::common::AtomicRmwBinOp,
        dst: &'ll Value,
        src: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
    ) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildAtomicRMW(
                self.llbuilder,
                AtomicRmwBinOp::from_generic(op),
                dst,
                src,
                AtomicOrdering::from_generic(order),
                False,
            )
        }
    }

    fn atomic_fence(
        &mut self,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        scope: rustc_codegen_ssa::common::SynchronizationScope,
    ) {
        unsafe {
            llvm::LLVMRustBuildAtomicFence(
                self.llbuilder,
                AtomicOrdering::from_generic(order),
                SynchronizationScope::from_generic(scope),
            );
        }
    }
    fn set_invariant_load(&mut self, load: &'ll Value) {
        unsafe {
            llvm::LLVMSetMetadata(
                load,
                llvm::MD_invariant_load as c_uint,
                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
            );
        }
    }

    fn lifetime_start(&mut self, ptr: &'ll Value, size: Size) {
        self.call_lifetime_intrinsic("llvm.lifetime.start.p0i8", ptr, size);
    }

    fn lifetime_end(&mut self, ptr: &'ll Value, size: Size) {
        self.call_lifetime_intrinsic("llvm.lifetime.end.p0i8", ptr, size);
    }
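
    // Emits a call to `llvm.instrprof.increment` for coverage instrumentation;
    // the intrinsic takes the function name, a hash, the number of counters,
    // and the index of the counter to bump.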
    fn instrprof_increment(
        &mut self,
        fn_name: &'ll Value,
        hash: &'ll Value,
        num_counters: &'ll Value,
        index: &'ll Value,
    ) {
        debug!(
            "instrprof_increment() with args ({:?}, {:?}, {:?}, {:?})",
            fn_name, hash, num_counters, index
        );

        let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) };
        let args = &[fn_name, hash, num_counters, index];
        let args = self.check_call("call", llfn, args);

        unsafe {
            let _ = llvm::LLVMRustBuildCall(
                self.llbuilder,
                llfn,
                args.as_ptr() as *const &llvm::Value,
                args.len() as c_uint,
                None,
            );
        }
    }
    fn call(
        &mut self,
        llfn: &'ll Value,
        args: &[&'ll Value],
        funclet: Option<&Funclet<'ll>>,
    ) -> &'ll Value {
        debug!("call {:?} with args ({:?})", llfn, args);

        let args = self.check_call("call", llfn, args);
        let bundle = funclet.map(|funclet| funclet.bundle());
        let bundle = bundle.as_ref().map(|b| &*b.raw);

        unsafe {
            llvm::LLVMRustBuildCall(
                self.llbuilder,
                llfn,
                args.as_ptr() as *const &llvm::Value,
                args.len() as c_uint,
                bundle,
            )
        }
    }

    fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }
    fn cx(&self) -> &CodegenCx<'ll, 'tcx> {
        self.cx
    }

    unsafe fn delete_basic_block(&mut self, bb: &'ll BasicBlock) {
        llvm::LLVMDeleteBasicBlock(bb);
    }

    fn do_not_inline(&mut self, llret: &'ll Value) {
        llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);
    }
}
impl StaticBuilderMethods for Builder<'a, 'll, 'tcx> {
    fn get_static(&mut self, def_id: DefId) -> &'ll Value {
        // Forward to the `get_static` method of `CodegenCx`
        self.cx().get_static(def_id)
    }
}
impl Builder<'a, 'll, 'tcx> {
    pub fn llfn(&self) -> &'ll Value {
        unsafe { llvm::LLVMGetBasicBlockParent(self.llbb()) }
    }

    fn position_at_start(&mut self, llbb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb);
        }
    }

    pub fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs) }
    }

    pub fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs) }
    }
    pub fn insert_element(
        &mut self,
        vec: &'ll Value,
        elt: &'ll Value,
        idx: &'ll Value,
    ) -> &'ll Value {
        unsafe { llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, UNNAMED) }
    }

    pub fn shuffle_vector(
        &mut self,
        v1: &'ll Value,
        v2: &'ll Value,
        mask: &'ll Value,
    ) -> &'ll Value {
        unsafe { llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, UNNAMED) }
    }
    pub fn vector_reduce_fadd(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src) }
    }
    pub fn vector_reduce_fmul(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src) }
    }
    pub fn vector_reduce_fadd_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }
    pub fn vector_reduce_fmul_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }
    pub fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) }
    }
    pub fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) }
    }
    pub fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) }
    }
    pub fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) }
    }
    pub fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) }
    }
    pub fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false) }
    }
    pub fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false) }
    }
    pub fn vector_reduce_fmin_fast(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr =
                llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }
    pub fn vector_reduce_fmax_fast(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr =
                llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }
    pub fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) }
    }
    pub fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) }
    }
    pub fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) {
        unsafe {
            llvm::LLVMAddClause(landing_pad, clause);
        }
    }

    pub fn catch_ret(&mut self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value {
        let ret =
            unsafe { llvm::LLVMRustBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind) };
        ret.expect("LLVM does not have support for catchret")
    }
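
    // `check_store` and `check_call` defensively compare LLVM-level types and
    // insert bitcasts where rustc's notion of a type disagrees with LLVM's.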
    fn check_store(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value {
        let dest_ptr_ty = self.cx.val_ty(ptr);
        let stored_ty = self.cx.val_ty(val);
        let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);

        assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);

        if dest_ptr_ty == stored_ptr_ty {
            ptr
        } else {
            debug!(
                "type mismatch in store. \
                    Expected {:?}, got {:?}; inserting bitcast",
                dest_ptr_ty, stored_ptr_ty
            );
            self.bitcast(ptr, stored_ptr_ty)
        }
    }
    fn check_call<'b>(
        &mut self,
        typ: &str,
        llfn: &'ll Value,
        args: &'b [&'ll Value],
    ) -> Cow<'b, [&'ll Value]> {
        let mut fn_ty = self.cx.val_ty(llfn);
        // Strip off pointers
        while self.cx.type_kind(fn_ty) == TypeKind::Pointer {
            fn_ty = self.cx.element_type(fn_ty);
        }

        assert!(
            self.cx.type_kind(fn_ty) == TypeKind::Function,
            "builder::{} not passed a function, but {:?}",
            typ,
            fn_ty
        );

        let param_tys = self.cx.func_params_types(fn_ty);

        let all_args_match = param_tys
            .iter()
            .zip(args.iter().map(|&v| self.val_ty(v)))
            .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);

        if all_args_match {
            return Cow::Borrowed(args);
        }

        let casted_args: Vec<_> = param_tys
            .into_iter()
            .zip(args.iter())
            .enumerate()
            .map(|(i, (expected_ty, &actual_val))| {
                let actual_ty = self.val_ty(actual_val);
                if expected_ty != actual_ty {
                    debug!(
                        "type mismatch in function call of {:?}. \
                            Expected {:?} for param {}, got {:?}; injecting bitcast",
                        llfn, expected_ty, i, actual_ty
                    );
                    self.bitcast(actual_val, expected_ty)
                } else {
                    actual_val
                }
            })
            .collect();

        Cow::Owned(casted_args)
    }
    pub fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
    }
    fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) {
        let size = size.bytes();
        if size == 0 {
            return;
        }

        if !self.cx().sess().emit_lifetime_markers() {
            return;
        }

        let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);

        let ptr = self.pointercast(ptr, self.cx.type_i8p());
        self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None);
    }
    fn phi(
        &mut self,
        ty: &'ll Type,
        vals: &[&'ll Value],
        bbs: &[&'ll BasicBlock],
    ) -> &'ll Value {
        assert_eq!(vals.len(), bbs.len());
        let phi = unsafe { llvm::LLVMBuildPhi(self.llbuilder, ty, UNNAMED) };
        unsafe {
            llvm::LLVMAddIncoming(phi, vals.as_ptr(), bbs.as_ptr(), vals.len() as c_uint);
            phi
        }
    }
    fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
        }
    }

    fn wasm_and_missing_nontrapping_fptoint(&self) -> bool {
        self.sess().target.target.arch == "wasm32"
            && !self.sess().target_features.contains(&sym::nontrapping_dash_fptoint)
    }
}