use crate::common::Funclet;
use crate::context::CodegenCx;
use crate::llvm::{self, BasicBlock, False};
use crate::llvm::{AtomicOrdering, AtomicRmwBinOp, SynchronizationScope};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use libc::{c_char, c_uint};
use log::debug;
use rustc_codegen_ssa::base::to_immediate;
use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, TypeKind};
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::MemFlags;
use rustc_data_structures::const_cstr;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_hir::def_id::DefId;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_session::config::{self, Sanitizer};
use rustc_target::abi::{self, Align, Size};
use rustc_target::spec::{HasTargetSpec, Target};
use std::borrow::Cow;
use std::ffi::CStr;
use std::iter::TrustedLen;
use std::ops::{Deref, Range};
use std::ptr;

// All Builders must have an llfn associated with them
#[must_use]
pub struct Builder<'a, 'll, 'tcx> {
    pub llbuilder: &'ll mut llvm::Builder<'ll>,
    pub cx: &'a CodegenCx<'ll, 'tcx>,
}

impl Drop for Builder<'a, 'll, 'tcx> {
    fn drop(&mut self) {
        unsafe {
            llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _));
        }
    }
}

// FIXME(eddyb) use a checked constructor when they become `const fn`.
const EMPTY_C_STR: &CStr = unsafe { CStr::from_bytes_with_nul_unchecked(b"\0") };

/// Empty string, to be used where LLVM expects an instruction name, indicating
/// that the instruction is to be left unnamed (i.e. numbered, in textual IR).
// FIXME(eddyb) pass `&CStr` directly to FFI once it's a thin pointer.
const UNNAMED: *const c_char = EMPTY_C_STR.as_ptr();

impl BackendTypes for Builder<'_, 'll, 'tcx> {
    type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value;
    type Function = <CodegenCx<'ll, 'tcx> as BackendTypes>::Function;
    type BasicBlock = <CodegenCx<'ll, 'tcx> as BackendTypes>::BasicBlock;
    type Type = <CodegenCx<'ll, 'tcx> as BackendTypes>::Type;
    type Funclet = <CodegenCx<'ll, 'tcx> as BackendTypes>::Funclet;

    type DIScope = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIScope;
    type DIVariable = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIVariable;
}

impl abi::HasDataLayout for Builder<'_, '_, '_> {
    fn data_layout(&self) -> &abi::TargetDataLayout {
        self.cx.data_layout()
    }
}

impl ty::layout::HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.cx.tcx
    }
}

impl ty::layout::HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.cx.param_env()
    }
}

impl HasTargetSpec for Builder<'_, '_, 'tcx> {
    fn target_spec(&self) -> &Target {
        &self.cx.target_spec()
    }
}

impl abi::LayoutOf for Builder<'_, '_, 'tcx> {
    type Ty = Ty<'tcx>;
    type TyAndLayout = TyAndLayout<'tcx>;

    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
        self.cx.layout_of(ty)
    }
}

impl Deref for Builder<'_, 'll, 'tcx> {
    type Target = CodegenCx<'ll, 'tcx>;

    fn deref(&self) -> &Self::Target {
        self.cx
    }
}

impl HasCodegen<'tcx> for Builder<'_, 'll, 'tcx> {
    type CodegenCx = CodegenCx<'ll, 'tcx>;
}

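// Generates thin `BuilderMethods` wrappers around the LLVM-C value
// instruction builders listed below; e.g. `add(a, b)` simply forwards to
// `LLVMBuildAdd` with an unnamed result.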
macro_rules! builder_methods_for_value_instructions {
    ($($name:ident($($arg:ident),*) => $llvm_capi:ident),+ $(,)?) => {
        $(fn $name(&mut self, $($arg: &'ll Value),*) -> &'ll Value {
            unsafe {
                llvm::$llvm_capi(self.llbuilder, $($arg,)* UNNAMED)
            }
        })+
    }
}

impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
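    // Appends a fresh basic block named `name` to `llfn` and returns a
    // builder positioned at the end of that block.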
    fn new_block<'b>(cx: &'a CodegenCx<'ll, 'tcx>, llfn: &'ll Value, name: &'b str) -> Self {
        let mut bx = Builder::with_cx(cx);
        let llbb = unsafe {
            let name = SmallCStr::new(name);
            llvm::LLVMAppendBasicBlockInContext(cx.llcx, llfn, name.as_ptr())
        };
        bx.position_at_end(llbb);
        bx
    }

    fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self {
        // Create a fresh builder from the crate context.
        let llbuilder = unsafe { llvm::LLVMCreateBuilderInContext(cx.llcx) };
        Builder { llbuilder, cx }
    }

    fn build_sibling_block(&self, name: &str) -> Self {
        Builder::new_block(self.cx, self.llfn(), name)
    }

    fn llbb(&self) -> &'ll BasicBlock {
        unsafe { llvm::LLVMGetInsertBlock(self.llbuilder) }
    }

    fn position_at_end(&mut self, llbb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb);
        }
    }

    fn ret_void(&mut self) {
        unsafe {
            llvm::LLVMBuildRetVoid(self.llbuilder);
        }
    }

    fn ret(&mut self, v: &'ll Value) {
        unsafe {
            llvm::LLVMBuildRet(self.llbuilder, v);
        }
    }

    fn br(&mut self, dest: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMBuildBr(self.llbuilder, dest);
        }
    }

    fn cond_br(
        &mut self,
        cond: &'ll Value,
        then_llbb: &'ll BasicBlock,
        else_llbb: &'ll BasicBlock,
    ) {
        unsafe {
            llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb);
        }
    }

    fn switch(
        &mut self,
        v: &'ll Value,
        else_llbb: &'ll BasicBlock,
        cases: impl ExactSizeIterator<Item = (u128, &'ll BasicBlock)> + TrustedLen,
    ) {
        let switch =
            unsafe { llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, cases.len() as c_uint) };
        for (on_val, dest) in cases {
            let on_val = self.const_uint_big(self.val_ty(v), on_val);
            unsafe { llvm::LLVMAddCase(switch, on_val, dest) }
        }
    }

    fn invoke(
        &mut self,
        llfn: &'ll Value,
        args: &[&'ll Value],
        then: &'ll BasicBlock,
        catch: &'ll BasicBlock,
        funclet: Option<&Funclet<'ll>>,
    ) -> &'ll Value {
        debug!("invoke {:?} with args ({:?})", llfn, args);

        let args = self.check_call("invoke", llfn, args);
        let bundle = funclet.map(|funclet| funclet.bundle());
        let bundle = bundle.as_ref().map(|b| &*b.raw);

        unsafe {
            llvm::LLVMRustBuildInvoke(
                self.llbuilder,
                llfn,
                args.as_ptr(),
                args.len() as c_uint,
                then,
                catch,
                bundle,
                UNNAMED,
            )
        }
    }

    fn unreachable(&mut self) {
        unsafe {
            llvm::LLVMBuildUnreachable(self.llbuilder);
        }
    }

    builder_methods_for_value_instructions! {
        add(a, b) => LLVMBuildAdd,
        fadd(a, b) => LLVMBuildFAdd,
        sub(a, b) => LLVMBuildSub,
        fsub(a, b) => LLVMBuildFSub,
        mul(a, b) => LLVMBuildMul,
        fmul(a, b) => LLVMBuildFMul,
        udiv(a, b) => LLVMBuildUDiv,
        exactudiv(a, b) => LLVMBuildExactUDiv,
        sdiv(a, b) => LLVMBuildSDiv,
        exactsdiv(a, b) => LLVMBuildExactSDiv,
        fdiv(a, b) => LLVMBuildFDiv,
        urem(a, b) => LLVMBuildURem,
        srem(a, b) => LLVMBuildSRem,
        frem(a, b) => LLVMBuildFRem,
        shl(a, b) => LLVMBuildShl,
        lshr(a, b) => LLVMBuildLShr,
        ashr(a, b) => LLVMBuildAShr,
        and(a, b) => LLVMBuildAnd,
        or(a, b) => LLVMBuildOr,
        xor(a, b) => LLVMBuildXor,
        neg(x) => LLVMBuildNeg,
        fneg(x) => LLVMBuildFNeg,
        not(x) => LLVMBuildNot,
        unchecked_sadd(x, y) => LLVMBuildNSWAdd,
        unchecked_uadd(x, y) => LLVMBuildNUWAdd,
        unchecked_ssub(x, y) => LLVMBuildNSWSub,
        unchecked_usub(x, y) => LLVMBuildNUWSub,
        unchecked_smul(x, y) => LLVMBuildNSWMul,
        unchecked_umul(x, y) => LLVMBuildNUWMul,
    }

    fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

    fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }

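    // Lowers a checked integer binary op to the matching
    // `llvm.{s,u}{add,sub,mul}.with.overflow.iN` intrinsic, then unpacks the
    // returned `{ iN, i1 }` aggregate into (result, overflow-flag) values.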
    fn checked_binop(
        &mut self,
        oop: OverflowOp,
        ty: Ty<'_>,
        lhs: Self::Value,
        rhs: Self::Value,
    ) -> (Self::Value, Self::Value) {
        use rustc_ast::ast::IntTy::*;
        use rustc_ast::ast::UintTy::*;
        use rustc_middle::ty::{Int, Uint};

        let new_kind = match ty.kind {
            Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.ptr_width)),
            Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.ptr_width)),
            ref t @ (Uint(_) | Int(_)) => t.clone(),
            _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
        };

        let name = match oop {
            OverflowOp::Add => match new_kind {
                Int(I8) => "llvm.sadd.with.overflow.i8",
                Int(I16) => "llvm.sadd.with.overflow.i16",
                Int(I32) => "llvm.sadd.with.overflow.i32",
                Int(I64) => "llvm.sadd.with.overflow.i64",
                Int(I128) => "llvm.sadd.with.overflow.i128",

                Uint(U8) => "llvm.uadd.with.overflow.i8",
                Uint(U16) => "llvm.uadd.with.overflow.i16",
                Uint(U32) => "llvm.uadd.with.overflow.i32",
                Uint(U64) => "llvm.uadd.with.overflow.i64",
                Uint(U128) => "llvm.uadd.with.overflow.i128",

                _ => unreachable!(),
            },
            OverflowOp::Sub => match new_kind {
                Int(I8) => "llvm.ssub.with.overflow.i8",
                Int(I16) => "llvm.ssub.with.overflow.i16",
                Int(I32) => "llvm.ssub.with.overflow.i32",
                Int(I64) => "llvm.ssub.with.overflow.i64",
                Int(I128) => "llvm.ssub.with.overflow.i128",

                Uint(U8) => "llvm.usub.with.overflow.i8",
                Uint(U16) => "llvm.usub.with.overflow.i16",
                Uint(U32) => "llvm.usub.with.overflow.i32",
                Uint(U64) => "llvm.usub.with.overflow.i64",
                Uint(U128) => "llvm.usub.with.overflow.i128",

                _ => unreachable!(),
            },
            OverflowOp::Mul => match new_kind {
                Int(I8) => "llvm.smul.with.overflow.i8",
                Int(I16) => "llvm.smul.with.overflow.i16",
                Int(I32) => "llvm.smul.with.overflow.i32",
                Int(I64) => "llvm.smul.with.overflow.i64",
                Int(I128) => "llvm.smul.with.overflow.i128",

                Uint(U8) => "llvm.umul.with.overflow.i8",
                Uint(U16) => "llvm.umul.with.overflow.i16",
                Uint(U32) => "llvm.umul.with.overflow.i32",
                Uint(U64) => "llvm.umul.with.overflow.i64",
                Uint(U128) => "llvm.umul.with.overflow.i128",

                _ => unreachable!(),
            },
        };

        let intrinsic = self.get_intrinsic(&name);
        let res = self.call(intrinsic, &[lhs, rhs], None);
        (self.extract_value(res, 0), self.extract_value(res, 1))
    }

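    // Static allocas are placed in the function's entry block so that LLVM
    // can treat them as ordinary stack slots (and promote them to registers);
    // `dynamic_alloca` below builds at the current insertion point instead.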
    fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
        let mut bx = Builder::with_cx(self.cx);
        bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) });
        bx.dynamic_alloca(ty, align)
    }

    fn dynamic_alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
        unsafe {
            let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED);
            llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
            alloca
        }
    }

    fn array_alloca(&mut self, ty: &'ll Type, len: &'ll Value, align: Align) -> &'ll Value {
        unsafe {
            let alloca = llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED);
            llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
            alloca
        }
    }

    fn load(&mut self, ptr: &'ll Value, align: Align) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
            llvm::LLVMSetAlignment(load, align.bytes() as c_uint);
            load
        }
    }

    fn volatile_load(&mut self, ptr: &'ll Value) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
            llvm::LLVMSetVolatile(load, llvm::True);
            load
        }
    }

    fn atomic_load(
        &mut self,
        ptr: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        size: Size,
    ) -> &'ll Value {
        unsafe {
            let load = llvm::LLVMRustBuildAtomicLoad(
                self.llbuilder,
                ptr,
                UNNAMED,
                AtomicOrdering::from_generic(order),
            );
            // LLVM requires the alignment of atomic loads to be at least the size of the type.
            llvm::LLVMSetAlignment(load, size.bytes() as c_uint);
            load
        }
    }

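    // Loads a place into an operand, dispatching on its layout: ZSTs need no
    // code, immediates are loaded (or read straight from a constant global's
    // initializer) with range/nonnull metadata attached, scalar pairs are
    // loaded field by field, and anything else stays behind a reference.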
    fn load_operand(&mut self, place: PlaceRef<'tcx, &'ll Value>) -> OperandRef<'tcx, &'ll Value> {
        debug!("PlaceRef::load: {:?}", place);

        assert_eq!(place.llextra.is_some(), place.layout.is_unsized());

        if place.layout.is_zst() {
            return OperandRef::new_zst(self, place.layout);
        }

        fn scalar_load_metadata<'a, 'll, 'tcx>(
            bx: &mut Builder<'a, 'll, 'tcx>,
            load: &'ll Value,
            scalar: &abi::Scalar,
        ) {
            let vr = scalar.valid_range.clone();
            match scalar.value {
                abi::Int(..) => {
                    let range = scalar.valid_range_exclusive(bx);
                    if range.start != range.end {
                        bx.range_metadata(load, range);
                    }
                }
                abi::Pointer if vr.start() < vr.end() && !vr.contains(&0) => {
                    bx.nonnull_metadata(load);
                }
                _ => {}
            }
        }

        let val = if let Some(llextra) = place.llextra {
            OperandValue::Ref(place.llval, Some(llextra), place.align)
        } else if place.layout.is_llvm_immediate() {
            let mut const_llval = None;
            unsafe {
                if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
                    if llvm::LLVMIsGlobalConstant(global) == llvm::True {
                        const_llval = llvm::LLVMGetInitializer(global);
                    }
                }
            }
            let llval = const_llval.unwrap_or_else(|| {
                let load = self.load(place.llval, place.align);
                if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
                    scalar_load_metadata(self, load, scalar);
                }
                load
            });
            OperandValue::Immediate(to_immediate(self, llval, place.layout))
        } else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
            let b_offset = a.value.size(self).align_to(b.value.align(self).abi);

            let mut load = |i, scalar: &abi::Scalar, align| {
                let llptr = self.struct_gep(place.llval, i as u64);
                let load = self.load(llptr, align);
                scalar_load_metadata(self, load, scalar);
                if scalar.is_bool() { self.trunc(load, self.type_i1()) } else { load }
            };

            OperandValue::Pair(
                load(0, a, place.align),
                load(1, b, place.align.restrict_for_offset(b_offset)),
            )
        } else {
            OperandValue::Ref(place.llval, None, place.align)
        };

        OperandRef { val, layout: place.layout }
    }

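    // Stores `cg_elem` into each of the `count` elements of `dest` by
    // emitting a header/body/next loop that advances a phi'd element pointer
    // until it reaches the one-past-the-end pointer.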
    fn write_operand_repeatedly(
        mut self,
        cg_elem: OperandRef<'tcx, &'ll Value>,
        count: u64,
        dest: PlaceRef<'tcx, &'ll Value>,
    ) -> Self {
        let zero = self.const_usize(0);
        let count = self.const_usize(count);
        let start = dest.project_index(&mut self, zero).llval;
        let end = dest.project_index(&mut self, count).llval;

        let mut header_bx = self.build_sibling_block("repeat_loop_header");
        let mut body_bx = self.build_sibling_block("repeat_loop_body");
        let next_bx = self.build_sibling_block("repeat_loop_next");

        self.br(header_bx.llbb());
        let current = header_bx.phi(self.val_ty(start), &[start], &[self.llbb()]);

        let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
        header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());

        let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
        cg_elem
            .val
            .store(&mut body_bx, PlaceRef::new_sized_aligned(current, cg_elem.layout, align));

        let next = body_bx.inbounds_gep(current, &[self.const_usize(1)]);
        body_bx.br(header_bx.llbb());
        header_bx.add_incoming_to_phi(current, next, body_bx.llbb());

        next_bx
    }

    fn range_metadata(&mut self, load: &'ll Value, range: Range<u128>) {
        if self.sess().target.target.arch == "amdgpu" {
            // amdgpu/LLVM does something weird and thinks an i64 value is
            // split into a v2i32, halving the bitwidth LLVM expects,
            // tripping an assertion. So, for now, just disable this
            // optimization.
            return;
        }

        unsafe {
            let llty = self.cx.val_ty(load);
            let v = [
                self.cx.const_uint_big(llty, range.start),
                self.cx.const_uint_big(llty, range.end),
            ];

            llvm::LLVMSetMetadata(
                load,
                llvm::MD_range as c_uint,
                llvm::LLVMMDNodeInContext(self.cx.llcx, v.as_ptr(), v.len() as c_uint),
            );
        }
    }

    fn nonnull_metadata(&mut self, load: &'ll Value) {
        unsafe {
            llvm::LLVMSetMetadata(
                load,
                llvm::MD_nonnull as c_uint,
                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
            );
        }
    }

    fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
        self.store_with_flags(val, ptr, align, MemFlags::empty())
    }

    fn store_with_flags(
        &mut self,
        val: &'ll Value,
        ptr: &'ll Value,
        align: Align,
        flags: MemFlags,
    ) -> &'ll Value {
        debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
        let ptr = self.check_store(val, ptr);
        unsafe {
            let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
            let align =
                if flags.contains(MemFlags::UNALIGNED) { 1 } else { align.bytes() as c_uint };
            llvm::LLVMSetAlignment(store, align);
            if flags.contains(MemFlags::VOLATILE) {
                llvm::LLVMSetVolatile(store, llvm::True);
            }
            if flags.contains(MemFlags::NONTEMPORAL) {
                // According to LLVM [1], the `!nontemporal` metadata on a
                // store must *always* point to a metadata node holding the
                // integer value 1.
                //
                // [1]: http://llvm.org/docs/LangRef.html#store-instruction
                let one = self.cx.const_i32(1);
                let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
                llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
            }
            store
        }
    }

    fn atomic_store(
        &mut self,
        val: &'ll Value,
        ptr: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        size: Size,
    ) {
        debug!("Store {:?} -> {:?}", val, ptr);
        let ptr = self.check_store(val, ptr);
        unsafe {
            let store = llvm::LLVMRustBuildAtomicStore(
                self.llbuilder,
                val,
                ptr,
                AtomicOrdering::from_generic(order),
            );
            // LLVM requires the alignment of atomic stores to be at least the size of the type.
            llvm::LLVMSetAlignment(store, size.bytes() as c_uint);
        }
    }

    fn gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildGEP(
                self.llbuilder,
                ptr,
                indices.as_ptr(),
                indices.len() as c_uint,
                UNNAMED,
            )
        }
    }

    fn inbounds_gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildInBoundsGEP(
                self.llbuilder,
                ptr,
                indices.as_ptr(),
                indices.len() as c_uint,
                UNNAMED,
            )
        }
    }

    fn struct_gep(&mut self, ptr: &'ll Value, idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe { llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, UNNAMED) }
    }

    /* Casts */
    fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn sext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fptosi(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn uitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn sitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fptrunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn fpext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn ptrtoint(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn inttoptr(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed) }
    }

    fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    /* Comparisons */
    fn icmp(&mut self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        let op = llvm::IntPredicate::from_generic(op);
        unsafe { llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED) }
    }

    fn fcmp(&mut self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED) }
    }

    /* Miscellaneous instructions */
    fn memcpy(
        &mut self,
        dst: &'ll Value,
        dst_align: Align,
        src: &'ll Value,
        src_align: Align,
        size: &'ll Value,
        flags: MemFlags,
    ) {
        if flags.contains(MemFlags::NONTEMPORAL) {
            // HACK(nox): This is inefficient but there is no nontemporal memcpy.
            let val = self.load(src, src_align);
            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
            self.store_with_flags(val, ptr, dst_align, flags);
            return;
        }
        let size = self.intcast(size, self.type_isize(), false);
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        let dst = self.pointercast(dst, self.type_i8p());
        let src = self.pointercast(src, self.type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemCpy(
                self.llbuilder,
                dst,
                dst_align.bytes() as c_uint,
                src,
                src_align.bytes() as c_uint,
                size,
                is_volatile,
            );
        }
    }

    fn memmove(
        &mut self,
        dst: &'ll Value,
        dst_align: Align,
        src: &'ll Value,
        src_align: Align,
        size: &'ll Value,
        flags: MemFlags,
    ) {
        if flags.contains(MemFlags::NONTEMPORAL) {
            // HACK(nox): This is inefficient but there is no nontemporal memmove.
            let val = self.load(src, src_align);
            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
            self.store_with_flags(val, ptr, dst_align, flags);
            return;
        }
        let size = self.intcast(size, self.type_isize(), false);
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        let dst = self.pointercast(dst, self.type_i8p());
        let src = self.pointercast(src, self.type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemMove(
                self.llbuilder,
                dst,
                dst_align.bytes() as c_uint,
                src,
                src_align.bytes() as c_uint,
                size,
                is_volatile,
            );
        }
    }

    fn memset(
        &mut self,
        ptr: &'ll Value,
        fill_byte: &'ll Value,
        size: &'ll Value,
        align: Align,
        flags: MemFlags,
    ) {
        let is_volatile = flags.contains(MemFlags::VOLATILE);
        let ptr = self.pointercast(ptr, self.type_i8p());
        unsafe {
            llvm::LLVMRustBuildMemSet(
                self.llbuilder,
                ptr,
                align.bytes() as c_uint,
                fill_byte,
                size,
                is_volatile,
            );
        }
    }

    fn select(
        &mut self,
        cond: &'ll Value,
        then_val: &'ll Value,
        else_val: &'ll Value,
    ) -> &'ll Value {
        unsafe { llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, UNNAMED) }
    }

    #[allow(dead_code)]
    fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
    }

    fn extract_element(&mut self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, UNNAMED) }
    }

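    // Splats `elt` across `num_elts` lanes: insert it into lane 0 of an
    // undef vector, then shuffle with an all-zeros mask so that every
    // result lane copies lane 0.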
    fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
        unsafe {
            let elt_ty = self.cx.val_ty(elt);
            let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64));
            let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
            let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64);
            self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty))
        }
    }

    fn extract_value(&mut self, agg_val: &'ll Value, idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe { llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, UNNAMED) }
    }

    fn insert_value(&mut self, agg_val: &'ll Value, elt: &'ll Value, idx: u64) -> &'ll Value {
        assert_eq!(idx as c_uint as u64, idx);
        unsafe { llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint, UNNAMED) }
    }

    fn landing_pad(
        &mut self,
        ty: &'ll Type,
        pers_fn: &'ll Value,
        num_clauses: usize,
    ) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn, num_clauses as c_uint, UNNAMED)
        }
    }

    fn set_cleanup(&mut self, landing_pad: &'ll Value) {
        unsafe {
            llvm::LLVMSetCleanup(landing_pad, llvm::True);
        }
    }

    fn resume(&mut self, exn: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMBuildResume(self.llbuilder, exn) }
    }

    fn cleanup_pad(&mut self, parent: Option<&'ll Value>, args: &[&'ll Value]) -> Funclet<'ll> {
        let name = const_cstr!("cleanuppad");
        let ret = unsafe {
            llvm::LLVMRustBuildCleanupPad(
                self.llbuilder,
                parent,
                args.len() as c_uint,
                args.as_ptr(),
                name.as_ptr(),
            )
        };
        Funclet::new(ret.expect("LLVM does not have support for cleanuppad"))
    }

    fn cleanup_ret(
        &mut self,
        funclet: &Funclet<'ll>,
        unwind: Option<&'ll BasicBlock>,
    ) -> &'ll Value {
        let ret =
            unsafe { llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind) };
        ret.expect("LLVM does not have support for cleanupret")
    }

    fn catch_pad(&mut self, parent: &'ll Value, args: &[&'ll Value]) -> Funclet<'ll> {
        let name = const_cstr!("catchpad");
        let ret = unsafe {
            llvm::LLVMRustBuildCatchPad(
                self.llbuilder,
                parent,
                args.len() as c_uint,
                args.as_ptr(),
                name.as_ptr(),
            )
        };
        Funclet::new(ret.expect("LLVM does not have support for catchpad"))
    }

    fn catch_switch(
        &mut self,
        parent: Option<&'ll Value>,
        unwind: Option<&'ll BasicBlock>,
        num_handlers: usize,
    ) -> &'ll Value {
        let name = const_cstr!("catchswitch");
        let ret = unsafe {
            llvm::LLVMRustBuildCatchSwitch(
                self.llbuilder,
                parent,
                unwind,
                num_handlers as c_uint,
                name.as_ptr(),
            )
        };
        ret.expect("LLVM does not have support for catchswitch")
    }

    fn add_handler(&mut self, catch_switch: &'ll Value, handler: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMRustAddHandler(catch_switch, handler);
        }
    }

    fn set_personality_fn(&mut self, personality: &'ll Value) {
        unsafe {
            llvm::LLVMSetPersonalityFn(self.llfn(), personality);
        }
    }

    // Atomic Operations
    fn atomic_cmpxchg(
        &mut self,
        dst: &'ll Value,
        cmp: &'ll Value,
        src: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        failure_order: rustc_codegen_ssa::common::AtomicOrdering,
        weak: bool,
    ) -> &'ll Value {
        let weak = if weak { llvm::True } else { llvm::False };
        unsafe {
            llvm::LLVMRustBuildAtomicCmpXchg(
                self.llbuilder,
                dst,
                cmp,
                src,
                AtomicOrdering::from_generic(order),
                AtomicOrdering::from_generic(failure_order),
                weak,
            )
        }
    }
    fn atomic_rmw(
        &mut self,
        op: rustc_codegen_ssa::common::AtomicRmwBinOp,
        dst: &'ll Value,
        src: &'ll Value,
        order: rustc_codegen_ssa::common::AtomicOrdering,
    ) -> &'ll Value {
        unsafe {
            llvm::LLVMBuildAtomicRMW(
                self.llbuilder,
                AtomicRmwBinOp::from_generic(op),
                dst,
                src,
                AtomicOrdering::from_generic(order),
                False,
            )
        }
    }

    fn atomic_fence(
        &mut self,
        order: rustc_codegen_ssa::common::AtomicOrdering,
        scope: rustc_codegen_ssa::common::SynchronizationScope,
    ) {
        unsafe {
            llvm::LLVMRustBuildAtomicFence(
                self.llbuilder,
                AtomicOrdering::from_generic(order),
                SynchronizationScope::from_generic(scope),
            );
        }
    }

    fn set_invariant_load(&mut self, load: &'ll Value) {
        unsafe {
            llvm::LLVMSetMetadata(
                load,
                llvm::MD_invariant_load as c_uint,
                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
            );
        }
    }

    fn lifetime_start(&mut self, ptr: &'ll Value, size: Size) {
        self.call_lifetime_intrinsic("llvm.lifetime.start.p0i8", ptr, size);
    }

    fn lifetime_end(&mut self, ptr: &'ll Value, size: Size) {
        self.call_lifetime_intrinsic("llvm.lifetime.end.p0i8", ptr, size);
    }

    fn call(
        &mut self,
        llfn: &'ll Value,
        args: &[&'ll Value],
        funclet: Option<&Funclet<'ll>>,
    ) -> &'ll Value {
        debug!("call {:?} with args ({:?})", llfn, args);

        let args = self.check_call("call", llfn, args);
        let bundle = funclet.map(|funclet| funclet.bundle());
        let bundle = bundle.as_ref().map(|b| &*b.raw);

        unsafe {
            llvm::LLVMRustBuildCall(
                self.llbuilder,
                llfn,
                args.as_ptr() as *const &llvm::Value,
                args.len() as c_uint,
                bundle,
            )
        }
    }

    fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, UNNAMED) }
    }

    fn cx(&self) -> &CodegenCx<'ll, 'tcx> {
        self.cx
    }

    unsafe fn delete_basic_block(&mut self, bb: &'ll BasicBlock) {
        llvm::LLVMDeleteBasicBlock(bb);
    }

    fn do_not_inline(&mut self, llret: &'ll Value) {
        llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);
    }
}

impl StaticBuilderMethods for Builder<'a, 'll, 'tcx> {
    fn get_static(&mut self, def_id: DefId) -> &'ll Value {
        // Forward to the `get_static` method of `CodegenCx`
        self.cx().get_static(def_id)
    }
}

impl Builder<'a, 'll, 'tcx> {
    pub fn llfn(&self) -> &'ll Value {
        unsafe { llvm::LLVMGetBasicBlockParent(self.llbb()) }
    }

    fn position_at_start(&mut self, llbb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb);
        }
    }

    pub fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs) }
    }

    pub fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs) }
    }

    pub fn insert_element(
        &mut self,
        vec: &'ll Value,
        elt: &'ll Value,
        idx: &'ll Value,
    ) -> &'ll Value {
        unsafe { llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, UNNAMED) }
    }

    pub fn shuffle_vector(
        &mut self,
        v1: &'ll Value,
        v2: &'ll Value,
        mask: &'ll Value,
    ) -> &'ll Value {
        unsafe { llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, UNNAMED) }
    }

    pub fn vector_reduce_fadd(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src) }
    }
    pub fn vector_reduce_fmul(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src) }
    }
    pub fn vector_reduce_fadd_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }
    pub fn vector_reduce_fmul_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }
    pub fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) }
    }
    pub fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) }
    }
    pub fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) }
    }
    pub fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) }
    }
    pub fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) }
    }
    pub fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false)
        }
    }
    pub fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false)
        }
    }
    pub fn vector_reduce_fmin_fast(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr =
                llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }
    pub fn vector_reduce_fmax_fast(&mut self, src: &'ll Value) -> &'ll Value {
        unsafe {
            let instr =
                llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true);
            llvm::LLVMRustSetHasUnsafeAlgebra(instr);
            instr
        }
    }
    pub fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) }
    }
    pub fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
        unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) }
    }

    pub fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) {
        unsafe {
            llvm::LLVMAddClause(landing_pad, clause);
        }
    }

    pub fn catch_ret(&mut self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value {
        let ret =
            unsafe { llvm::LLVMRustBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind) };
        ret.expect("LLVM does not have support for catchret")
    }

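    // Stores may be emitted through a pointer whose pointee type differs
    // from the stored value's type; in that case, bitcast the pointer so the
    // store type-checks.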
    fn check_store(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value {
        let dest_ptr_ty = self.cx.val_ty(ptr);
        let stored_ty = self.cx.val_ty(val);
        let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);

        assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);

        if dest_ptr_ty == stored_ptr_ty {
            ptr
        } else {
            debug!(
                "type mismatch in store. \
                    Expected {:?}, got {:?}; inserting bitcast",
                dest_ptr_ty, stored_ptr_ty
            );
            self.bitcast(ptr, stored_ptr_ty)
        }
    }

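    // Checks that `llfn` really is a function and that `args` match its
    // parameter types, bitcasting any mismatched arguments so the resulting
    // call is well-typed.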
    fn check_call<'b>(
        &mut self,
        typ: &str,
        llfn: &'ll Value,
        args: &'b [&'ll Value],
    ) -> Cow<'b, [&'ll Value]> {
        let mut fn_ty = self.cx.val_ty(llfn);
        // Strip off pointers
        while self.cx.type_kind(fn_ty) == TypeKind::Pointer {
            fn_ty = self.cx.element_type(fn_ty);
        }

        assert!(
            self.cx.type_kind(fn_ty) == TypeKind::Function,
            "builder::{} not passed a function, but {:?}",
            typ,
            fn_ty
        );

        let param_tys = self.cx.func_params_types(fn_ty);

        let all_args_match = param_tys
            .iter()
            .zip(args.iter().map(|&v| self.val_ty(v)))
            .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);

        if all_args_match {
            return Cow::Borrowed(args);
        }

        let casted_args: Vec<_> = param_tys
            .into_iter()
            .zip(args.iter())
            .enumerate()
            .map(|(i, (expected_ty, &actual_val))| {
                let actual_ty = self.val_ty(actual_val);
                if expected_ty != actual_ty {
                    debug!(
                        "type mismatch in function call of {:?}. \
                            Expected {:?} for param {}, got {:?}; injecting bitcast",
                        llfn, expected_ty, i, actual_ty
                    );
                    self.bitcast(actual_val, expected_ty)
                } else {
                    actual_val
                }
            })
            .collect();

        Cow::Owned(casted_args)
    }

    pub fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
    }

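    // Emits `llvm.lifetime.{start,end}` markers only when they are useful:
    // never for zero-sized regions, and otherwise only when optimizing or
    // when a sanitizer that relies on them (ASan/MSan) is enabled.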
    fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) {
        let size = size.bytes();
        if size == 0 {
            return;
        }

        let opts = &self.cx.sess().opts;
        let emit = match opts.debugging_opts.sanitizer {
            // Some sanitizers use lifetime intrinsics. When they are in use,
            // emit lifetime intrinsics regardless of optimization level.
            Some(Sanitizer::Address | Sanitizer::Memory) => true,
            _ => opts.optimize != config::OptLevel::No,
        };
        if !emit {
            return;
        }

        let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);

        let ptr = self.pointercast(ptr, self.cx.type_i8p());
        self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None);
    }

    fn phi(&mut self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value {
        assert_eq!(vals.len(), bbs.len());
        let phi = unsafe { llvm::LLVMBuildPhi(self.llbuilder, ty, UNNAMED) };
        unsafe {
            llvm::LLVMAddIncoming(phi, vals.as_ptr(), bbs.as_ptr(), vals.len() as c_uint);
            phi
        }
    }

    fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) {
        unsafe {
            llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
        }
    }
}
1277 }