// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_upper_case_globals)]

use llvm;
use llvm::{SequentiallyConsistent, Acquire, Release, AtomicXchg, ValueRef, TypeKind};
use middle::subst;
use middle::subst::FnSpace;
use trans::adt;
use trans::base::*;
use trans::build::*;
use trans::callee;
use trans::cleanup;
use trans::cleanup::CleanupMethods;
use trans::common::*;
use trans::datum::*;
use trans::debuginfo::DebugLoc;
use trans::expr;
use trans::glue;
use trans::type_of::*;
use trans::type_of;
use trans::machine;
use trans::machine::llsize_of;
use trans::type_::Type;
use middle::ty::{self, Ty};
use syntax::abi::RustIntrinsic;
use syntax::ast;
use syntax::parse::token;
use util::ppaux::{Repr, ty_to_string};

pub fn get_simple_intrinsic(ccx: &CrateContext, item: &ast::ForeignItem) -> Option<ValueRef> {
    let name = match &token::get_ident(item.ident)[..] {
41 "sqrtf32" => "llvm.sqrt.f32",
42 "sqrtf64" => "llvm.sqrt.f64",
43 "powif32" => "llvm.powi.f32",
44 "powif64" => "llvm.powi.f64",
45 "sinf32" => "llvm.sin.f32",
46 "sinf64" => "llvm.sin.f64",
47 "cosf32" => "llvm.cos.f32",
48 "cosf64" => "llvm.cos.f64",
49 "powf32" => "llvm.pow.f32",
50 "powf64" => "llvm.pow.f64",
51 "expf32" => "llvm.exp.f32",
52 "expf64" => "llvm.exp.f64",
53 "exp2f32" => "llvm.exp2.f32",
54 "exp2f64" => "llvm.exp2.f64",
55 "logf32" => "llvm.log.f32",
56 "logf64" => "llvm.log.f64",
57 "log10f32" => "llvm.log10.f32",
58 "log10f64" => "llvm.log10.f64",
59 "log2f32" => "llvm.log2.f32",
60 "log2f64" => "llvm.log2.f64",
61 "fmaf32" => "llvm.fma.f32",
62 "fmaf64" => "llvm.fma.f64",
63 "fabsf32" => "llvm.fabs.f32",
64 "fabsf64" => "llvm.fabs.f64",
65 "copysignf32" => "llvm.copysign.f32",
66 "copysignf64" => "llvm.copysign.f64",
67 "floorf32" => "llvm.floor.f32",
68 "floorf64" => "llvm.floor.f64",
69 "ceilf32" => "llvm.ceil.f32",
70 "ceilf64" => "llvm.ceil.f64",
71 "truncf32" => "llvm.trunc.f32",
72 "truncf64" => "llvm.trunc.f64",
73 "rintf32" => "llvm.rint.f32",
74 "rintf64" => "llvm.rint.f64",
75 "nearbyintf32" => "llvm.nearbyint.f32",
76 "nearbyintf64" => "llvm.nearbyint.f64",
77 "roundf32" => "llvm.round.f32",
78 "roundf64" => "llvm.round.f64",
79 "ctpop8" => "llvm.ctpop.i8",
80 "ctpop16" => "llvm.ctpop.i16",
81 "ctpop32" => "llvm.ctpop.i32",
82 "ctpop64" => "llvm.ctpop.i64",
83 "bswap16" => "llvm.bswap.i16",
84 "bswap32" => "llvm.bswap.i32",
85 "bswap64" => "llvm.bswap.i64",
86 "assume" => "llvm.assume",
        _ => return None
    };
    Some(ccx.get_intrinsic(&name))
}
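
// For example, a declaration like `fn sqrtf32(x: f32) -> f32` in an
// `extern "rust-intrinsic"` block maps directly to `llvm.sqrt.f32` via the
// table above; intrinsics without a one-to-one LLVM equivalent fall through
// to `trans_intrinsic_call` below.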

/// Performs late verification that intrinsics are used correctly. At present,
/// the only intrinsic that needs such verification is `transmute`.
pub fn check_intrinsics(ccx: &CrateContext) {
    let mut last_failing_id = None;
    for transmute_restriction in &*ccx.tcx().transmute_restrictions.borrow() {
        // Sometimes, a single call to transmute will push multiple
        // type pairs to test in order to exhaustively test the
        // possibility around a type parameter. If one of those fails,
        // there is no sense reporting errors on the others.
        if last_failing_id == Some(transmute_restriction.id) {
            continue;
        }

        debug!("transmute_restriction: {}", transmute_restriction.repr(ccx.tcx()));

        assert!(!ty::type_has_params(transmute_restriction.substituted_from));
        assert!(!ty::type_has_params(transmute_restriction.substituted_to));

        let llfromtype = type_of::sizing_type_of(ccx,
                                                 transmute_restriction.substituted_from);
        let lltotype = type_of::sizing_type_of(ccx,
                                               transmute_restriction.substituted_to);
        let from_type_size = machine::llbitsize_of_real(ccx, llfromtype);
        let to_type_size = machine::llbitsize_of_real(ccx, lltotype);
        if from_type_size != to_type_size {
            last_failing_id = Some(transmute_restriction.id);

            if transmute_restriction.original_from != transmute_restriction.substituted_from {
                ccx.sess().span_err(
                    transmute_restriction.span,
                    &format!("transmute called on types with potentially different sizes: \
                              {} (could be {} bit{}) to {} (could be {} bit{})",
                             ty_to_string(ccx.tcx(), transmute_restriction.original_from),
                             from_type_size as usize,
                             if from_type_size == 1 {""} else {"s"},
                             ty_to_string(ccx.tcx(), transmute_restriction.original_to),
                             to_type_size as usize,
                             if to_type_size == 1 {""} else {"s"}));
            } else {
                ccx.sess().span_err(
                    transmute_restriction.span,
                    &format!("transmute called on types with different sizes: \
                              {} ({} bit{}) to {} ({} bit{})",
                             ty_to_string(ccx.tcx(), transmute_restriction.original_from),
                             from_type_size as usize,
                             if from_type_size == 1 {""} else {"s"},
                             ty_to_string(ccx.tcx(), transmute_restriction.original_to),
                             to_type_size as usize,
                             if to_type_size == 1 {""} else {"s"}));
            }
        }
    }
    ccx.sess().abort_if_errors();
}
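
// Illustrative example: for a generic `unsafe fn cast<T>(x: T) -> u64 { mem::transmute(x) }`
// instantiated at `T = u32`, the restriction pair records original type `T` and
// substituted type `u32` (32 bits) against `u64` (64 bits), so the check above
// emits the "potentially different sizes" error.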

/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
/// add them to librustc_trans/trans/context.rs
pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                                            node: ast::NodeId,
                                            callee_ty: Ty<'tcx>,
                                            cleanup_scope: cleanup::CustomScopeIndex,
                                            args: callee::CallArgs<'a, 'tcx>,
                                            dest: expr::Dest,
                                            substs: subst::Substs<'tcx>,
                                            call_info: NodeIdAndSpan)
                                            -> Result<'blk, 'tcx> {
    let fcx = bcx.fcx;
    let ccx = fcx.ccx;
    let tcx = bcx.tcx();

    let _icx = push_ctxt("trans_intrinsic_call");

    let ret_ty = match callee_ty.sty {
        ty::ty_bare_fn(_, ref f) => {
            ty::erase_late_bound_regions(bcx.tcx(), &f.sig.output())
        }
        _ => panic!("expected bare_fn in trans_intrinsic_call")
    };
    let foreign_item = tcx.map.expect_foreign_item(node);
    let name = token::get_ident(foreign_item.ident);

    // For `transmute` we can just trans the input expr directly into dest
    if &name[..] == "transmute" {
        let llret_ty = type_of::type_of(ccx, ret_ty.unwrap());
        match args {
            callee::ArgExprs(arg_exprs) => {
                assert_eq!(arg_exprs.len(), 1);

                let (in_type, out_type) = (*substs.types.get(FnSpace, 0),
                                           *substs.types.get(FnSpace, 1));
                let llintype = type_of::type_of(ccx, in_type);
                let llouttype = type_of::type_of(ccx, out_type);

                let in_type_size = machine::llbitsize_of_real(ccx, llintype);
                let out_type_size = machine::llbitsize_of_real(ccx, llouttype);

                // This should be caught by the intrinsicck pass
                assert_eq!(in_type_size, out_type_size);

                let nonpointer_nonaggregate = |llkind: TypeKind| -> bool {
                    use llvm::TypeKind::*;
                    match llkind {
                        Half | Float | Double | X86_FP80 | FP128 |
                        PPC_FP128 | Integer | Vector | X86_MMX => true,
                        _ => false
                    }
                };

                // An approximation to which types can be directly cast via
                // LLVM's bitcast. This doesn't cover pointer -> pointer casts,
                // but does, importantly, cover SIMD types.
                let in_kind = llintype.kind();
                let ret_kind = llret_ty.kind();
                let bitcast_compatible =
                    (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || {
                        in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer
                    };
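                // (For example, `u32x4` and `u64x2` are both 128-bit `Vector`
                // kinds, so a transmute between them takes the by-value path
                // below, while `&T` -> `&U` takes the pointer-to-pointer path.)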

                let dest = if bitcast_compatible {
                    // if we're here, the type is scalar-like (a primitive, a
                    // SIMD type or a pointer), and so can be handled as a
                    // by-value ValueRef and can also be directly bitcast to the
                    // target type. Doing this special case makes conversions
                    // like `u32x4` -> `u64x2` much nicer for LLVM and so more
                    // efficient (these are done efficiently implicitly in C
                    // with the `__m128i` type and so this means Rust doesn't
                    // have to be slower than C).
                    let expr = &*arg_exprs[0];
                    let datum = unpack_datum!(bcx, expr::trans(bcx, expr));
                    let datum = unpack_datum!(bcx,
                                              datum.to_rvalue_datum(bcx, "transmute_temp"));
                    let val = if datum.kind.is_by_ref() {
                        load_ty(bcx, datum.val, datum.ty)
                    } else {
                        datum.val
                    };

                    let cast_val = BitCast(bcx, val, llret_ty);

                    match dest {
                        expr::SaveIn(d) => {
                            // this often occurs in a sequence like `Store(val,
                            // d); val2 = Load(d)`, so disappears easily.
                            Store(bcx, cast_val, d);
                        }
                        expr::Ignore => {}
                    }
                    dest
                } else {
                    // The types are too complicated to do with a by-value
                    // bitcast, so pointer cast instead. We need to cast the
                    // dest so the types work out.
                    let dest = match dest {
                        expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
                        expr::Ignore => expr::Ignore
                    };
                    bcx = expr::trans_into(bcx, &*arg_exprs[0], dest);
                    dest
                };

                fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
                fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

                return match dest {
                    expr::SaveIn(d) => Result::new(bcx, d),
                    expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
                };
            }

            _ => {
                ccx.sess().bug("expected expr as argument for transmute");
            }
        }
    }

    // Push the arguments.
    let mut llargs = Vec::new();
    bcx = callee::trans_args(bcx,
                             args,
                             callee_ty,
                             &mut llargs,
                             cleanup::CustomScope(cleanup_scope),
                             false,
                             RustIntrinsic);

    fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();

    let call_debug_location = DebugLoc::At(call_info.id, call_info.span);

    // These are the only intrinsic functions that diverge.
    if &name[..] == "abort" {
        let llfn = ccx.get_intrinsic(&("llvm.trap"));
        Call(bcx, llfn, &[], None, call_debug_location);
        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
        Unreachable(bcx);
        return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
    } else if &name[..] == "unreachable" {
        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
        Unreachable(bcx);
        return Result::new(bcx, C_nil(ccx));
    }

    let ret_ty = match ret_ty {
        ty::FnConverging(ret_ty) => ret_ty,
        ty::FnDiverging => unreachable!()
    };

    let llret_ty = type_of::type_of(ccx, ret_ty);

    // Get location to store the result. If the user does
    // not care about the result, just make a stack slot
    let llresult = match dest {
        expr::SaveIn(d) => d,
        expr::Ignore => {
            if !type_is_zero_size(ccx, ret_ty) {
                alloc_ty(bcx, ret_ty, "intrinsic_result")
            } else {
                C_undef(llret_ty.ptr_to())
            }
        }
    };

    let simple = get_simple_intrinsic(ccx, &*foreign_item);
    let llval = match (simple, &name[..]) {
        (Some(llfn), _) => {
            Call(bcx, llfn, &llargs, None, call_debug_location)
        }
        (_, "breakpoint") => {
            let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
            Call(bcx, llfn, &[], None, call_debug_location)
        }
        (_, "size_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
        }
        (_, "size_of_val") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_sized(tcx, tp_ty) {
                let info = Load(bcx, expr::get_len(bcx, llargs[0]));
                let (llsize, _) = glue::size_and_align_of_dst(bcx, tp_ty, info);
                llsize
            } else {
                let lltp_ty = type_of::type_of(ccx, tp_ty);
                C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
            }
        }
        (_, "min_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_uint(ccx, type_of::align_of(ccx, tp_ty))
        }
        (_, "min_align_of_val") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_sized(tcx, tp_ty) {
                let info = Load(bcx, expr::get_len(bcx, llargs[0]));
                let (_, llalign) = glue::size_and_align_of_dst(bcx, tp_ty, info);
                llalign
            } else {
                C_uint(ccx, type_of::align_of(ccx, tp_ty))
            }
        }
        (_, "pref_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
        }
        (_, "move_val_init") => {
            // Create a datum reflecting the value being moved.
            // Use `appropriate_mode` so that the datum is by ref
            // if the value is non-immediate. Note that, with
            // intrinsics, there are no argument cleanups to
            // concern ourselves with, so we can use an rvalue datum.
            let tp_ty = *substs.types.get(FnSpace, 0);
            let mode = appropriate_rvalue_mode(ccx, tp_ty);
            let src = Datum {
                val: llargs[1],
                ty: tp_ty,
                kind: Rvalue::new(mode)
            };
            bcx = src.store_to(bcx, llargs[0]);
            C_nil(ccx)
        }
        (_, "drop_in_place") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            glue::drop_ty(bcx, llargs[0], tp_ty, call_debug_location);
            C_nil(ccx)
        }
        (_, "type_name") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let ty_name = token::intern_and_get_ident(&ty_to_string(ccx.tcx(), tp_ty));
            C_str_slice(ccx, ty_name)
        }
        (_, "type_id") => {
            let hash = ty::hash_crate_independent(
                ccx.tcx(),
                *substs.types.get(FnSpace, 0),
                &ccx.link_meta().crate_hash);
            C_u64(ccx, hash)
        }
        (_, "init_dropped") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !return_type_is_void(ccx, tp_ty) {
                drop_done_fill_mem(bcx, llresult, tp_ty);
            }
            C_nil(ccx)
        }
        (_, "init") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !return_type_is_void(ccx, tp_ty) {
                // Just zero out the stack slot. (See comment on base::memzero for explanation)
                init_zero_mem(bcx, llresult, tp_ty);
            }
            C_nil(ccx)
        }
        // Effectively no-ops
        (_, "uninit") | (_, "forget") => {
            C_nil(ccx)
        }
        (_, "needs_drop") => {
            let tp_ty = *substs.types.get(FnSpace, 0);

            C_bool(ccx, bcx.fcx.type_needs_drop(tp_ty))
        }
        (_, "offset") => {
            let ptr = llargs[0];
            let offset = llargs[1];
            InBoundsGEP(bcx, ptr, &[offset])
        }
        (_, "copy_nonoverlapping") => {
            copy_intrinsic(bcx,
                           false,
                           false,
                           *substs.types.get(FnSpace, 0),
                           llargs[1],
                           llargs[0],
                           llargs[2],
                           call_debug_location)
        }
        (_, "copy") => {
            copy_intrinsic(bcx,
                           true,
                           false,
                           *substs.types.get(FnSpace, 0),
                           llargs[1],
                           llargs[0],
                           llargs[2],
                           call_debug_location)
        }
        (_, "write_bytes") => {
            memset_intrinsic(bcx,
                             false,
                             *substs.types.get(FnSpace, 0),
                             llargs[0],
                             llargs[1],
                             llargs[2],
                             call_debug_location)
        }
        (_, "volatile_copy_nonoverlapping_memory") => {
            copy_intrinsic(bcx,
                           false,
                           true,
                           *substs.types.get(FnSpace, 0),
                           llargs[0],
                           llargs[1],
                           llargs[2],
                           call_debug_location)
        }
        (_, "volatile_copy_memory") => {
            copy_intrinsic(bcx,
                           true,
                           true,
                           *substs.types.get(FnSpace, 0),
                           llargs[0],
                           llargs[1],
                           llargs[2],
                           call_debug_location)
        }
        (_, "volatile_set_memory") => {
            memset_intrinsic(bcx,
                             true,
                             *substs.types.get(FnSpace, 0),
                             llargs[0],
                             llargs[1],
                             llargs[2],
                             call_debug_location)
        }
        (_, "volatile_load") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
            let load = VolatileLoad(bcx, ptr);
            unsafe {
                llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty));
            }
            from_arg_ty(bcx, load, tp_ty)
        }
        (_, "volatile_store") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
            let val = to_arg_ty(bcx, llargs[1], tp_ty);
            let store = VolatileStore(bcx, val, ptr);
            unsafe {
                llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty));
            }
            C_nil(ccx)
        }

        (_, "ctlz8") => count_zeros_intrinsic(bcx,
                                              "llvm.ctlz.i8",
                                              llargs[0],
                                              call_debug_location),
        (_, "ctlz16") => count_zeros_intrinsic(bcx,
                                               "llvm.ctlz.i16",
                                               llargs[0],
                                               call_debug_location),
        (_, "ctlz32") => count_zeros_intrinsic(bcx,
                                               "llvm.ctlz.i32",
                                               llargs[0],
                                               call_debug_location),
        (_, "ctlz64") => count_zeros_intrinsic(bcx,
                                               "llvm.ctlz.i64",
                                               llargs[0],
                                               call_debug_location),
        (_, "cttz8") => count_zeros_intrinsic(bcx,
                                              "llvm.cttz.i8",
                                              llargs[0],
                                              call_debug_location),
        (_, "cttz16") => count_zeros_intrinsic(bcx,
                                               "llvm.cttz.i16",
                                               llargs[0],
                                               call_debug_location),
        (_, "cttz32") => count_zeros_intrinsic(bcx,
                                               "llvm.cttz.i32",
                                               llargs[0],
                                               call_debug_location),
        (_, "cttz64") => count_zeros_intrinsic(bcx,
                                               "llvm.cttz.i64",
                                               llargs[0],
                                               call_debug_location),

        (_, "i8_add_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.sadd.with.overflow.i8",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i16_add_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.sadd.with.overflow.i16",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i32_add_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.sadd.with.overflow.i32",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i64_add_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.sadd.with.overflow.i64",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),

        (_, "u8_add_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.uadd.with.overflow.i8",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u16_add_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.uadd.with.overflow.i16",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u32_add_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.uadd.with.overflow.i32",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u64_add_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.uadd.with.overflow.i64",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i8_sub_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.ssub.with.overflow.i8",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i16_sub_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.ssub.with.overflow.i16",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i32_sub_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.ssub.with.overflow.i32",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i64_sub_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.ssub.with.overflow.i64",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u8_sub_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.usub.with.overflow.i8",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u16_sub_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.usub.with.overflow.i16",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u32_sub_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.usub.with.overflow.i32",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u64_sub_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.usub.with.overflow.i64",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i8_mul_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.smul.with.overflow.i8",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i16_mul_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.smul.with.overflow.i16",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i32_mul_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.smul.with.overflow.i32",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i64_mul_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.smul.with.overflow.i64",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u8_mul_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.umul.with.overflow.i8",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u16_mul_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.umul.with.overflow.i16",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u32_mul_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.umul.with.overflow.i32",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u64_mul_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.umul.with.overflow.i64",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),

        (_, "unchecked_udiv") => UDiv(bcx, llargs[0], llargs[1], call_debug_location),
        (_, "unchecked_sdiv") => SDiv(bcx, llargs[0], llargs[1], call_debug_location),
        (_, "unchecked_urem") => URem(bcx, llargs[0], llargs[1], call_debug_location),
        (_, "unchecked_srem") => SRem(bcx, llargs[0], llargs[1], call_debug_location),

        (_, "overflowing_add") => Add(bcx, llargs[0], llargs[1], call_debug_location),
        (_, "overflowing_sub") => Sub(bcx, llargs[0], llargs[1], call_debug_location),
        (_, "overflowing_mul") => Mul(bcx, llargs[0], llargs[1], call_debug_location),

        (_, "return_address") => {
            if !fcx.caller_expects_out_pointer {
                tcx.sess.span_err(call_info.span,
                                  "invalid use of `return_address` intrinsic: function \
                                   does not use out pointer");
                C_null(Type::i8p(ccx))
            } else {
                PointerCast(bcx, llvm::get_param(fcx.llfn, 0), Type::i8p(ccx))
            }
        }

        (_, "discriminant_value") => {
            let val_ty = substs.types.get(FnSpace, 0);
            match val_ty.sty {
                ty::ty_enum(..) => {
                    let repr = adt::represent_type(ccx, *val_ty);
                    adt::trans_get_discr(bcx, &*repr, llargs[0], Some(llret_ty))
                }
                _ => C_null(llret_ty)
            }
        }

        // This requires that atomic intrinsics follow a specific naming pattern:
        // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
        (_, name) if name.starts_with("atomic_") => {
            let split: Vec<&str> = name.split('_').collect();
            assert!(split.len() >= 2, "Atomic intrinsic not correct format");

            let order = if split.len() == 2 {
                llvm::SequentiallyConsistent
            } else {
                match split[2] {
                    "unordered" => llvm::Unordered,
                    "relaxed" => llvm::Monotonic,
                    "acq" => llvm::Acquire,
                    "rel" => llvm::Release,
                    "acqrel" => llvm::AcquireRelease,
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                }
            };

            match split[1] {
                "cxchg" => {
                    // See include/llvm/IR/Instructions.h for their implementation
                    // of this, I assume that it's good enough for us to use for
                    // now.
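                    // (E.g. a `Release` success ordering below pairs with a
                    // `Monotonic` failure ordering: the failure ordering may
                    // not be stronger than the success ordering and may not
                    // include a release.)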
                    let strongest_failure_ordering = match order {
                        llvm::NotAtomic | llvm::Unordered =>
                            ccx.sess().fatal("cmpxchg must be atomic"),

                        llvm::Monotonic | llvm::Release =>
                            llvm::Monotonic,

                        llvm::Acquire | llvm::AcquireRelease =>
                            llvm::Acquire,

                        llvm::SequentiallyConsistent =>
                            llvm::SequentiallyConsistent
                    };

                    let tp_ty = *substs.types.get(FnSpace, 0);
                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
                    let cmp = to_arg_ty(bcx, llargs[1], tp_ty);
                    let src = to_arg_ty(bcx, llargs[2], tp_ty);
                    let res = AtomicCmpXchg(bcx, ptr, cmp, src, order,
                                            strongest_failure_ordering);
                    ExtractValue(bcx, res, 0)
                }

                "load" => {
                    let tp_ty = *substs.types.get(FnSpace, 0);
                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
                    from_arg_ty(bcx, AtomicLoad(bcx, ptr, order), tp_ty)
                }
                "store" => {
                    let tp_ty = *substs.types.get(FnSpace, 0);
                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
                    let val = to_arg_ty(bcx, llargs[1], tp_ty);
                    AtomicStore(bcx, val, ptr, order);
                    C_nil(ccx)
                }

                "fence" => {
                    AtomicFence(bcx, order, llvm::CrossThread);
                    C_nil(ccx)
                }
800 "singlethreadfence" => {
801 AtomicFence(bcx
, order
, llvm
::SingleThread
);

                // These are all AtomicRMW ops
                op => {
                    let atom_op = match op {
808 "xchg" => llvm
::AtomicXchg
,
809 "xadd" => llvm
::AtomicAdd
,
810 "xsub" => llvm
::AtomicSub
,
811 "and" => llvm
::AtomicAnd
,
812 "nand" => llvm
::AtomicNand
,
813 "or" => llvm
::AtomicOr
,
814 "xor" => llvm
::AtomicXor
,
815 "max" => llvm
::AtomicMax
,
816 "min" => llvm
::AtomicMin
,
817 "umax" => llvm
::AtomicUMax
,
818 "umin" => llvm
::AtomicUMin
,
819 _
=> ccx
.sess().fatal("unknown atomic operation")

                    let tp_ty = *substs.types.get(FnSpace, 0);
                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
                    let val = to_arg_ty(bcx, llargs[1], tp_ty);
                    AtomicRMW(bcx, atom_op, ptr, val, order)
                }
            }
        }

        (_, _) => ccx.sess().span_bug(foreign_item.span, "unknown intrinsic")
    };

    if val_ty(llval) != Type::void(ccx) &&
       machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
        store_ty(bcx, llval, llresult, ret_ty);
    }

    // If we made a temporary stack slot, let's clean it up
    match dest {
        expr::Ignore => {
            bcx = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location);
        }
        expr::SaveIn(_) => {}
    }

    fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

    Result::new(bcx, llresult)
}

fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              allow_overlap: bool,
                              volatile: bool,
                              tp_ty: Ty<'tcx>,
                              dst: ValueRef,
                              src: ValueRef,
                              count: ValueRef,
                              call_debug_location: DebugLoc)
                              -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());
    let name = if allow_overlap {
        if int_size == 32 {
            "llvm.memmove.p0i8.p0i8.i32"
        } else {
            "llvm.memmove.p0i8.p0i8.i64"
        }
    } else {
        if int_size == 32 {
            "llvm.memcpy.p0i8.p0i8.i32"
        } else {
            "llvm.memcpy.p0i8.p0i8.i64"
        }
    };

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx,
         llfn,
         &[dst_ptr,
           src_ptr,
           Mul(bcx, size, count, DebugLoc::None),
           align,
           C_bool(ccx, volatile)],
         None,
         call_debug_location)
}

fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                volatile: bool,
                                tp_ty: Ty<'tcx>,
                                dst: ValueRef,
                                val: ValueRef,
                                count: ValueRef,
                                call_debug_location: DebugLoc)
                                -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let name = if machine::llbitsize_of_real(ccx, ccx.int_type()) == 32 {
        "llvm.memset.p0i8.i32"
    } else {
        "llvm.memset.p0i8.i64"
    };

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx,
         llfn,
         &[dst_ptr,
           val,
           Mul(bcx, size, count, DebugLoc::None),
           align,
           C_bool(ccx, volatile)],
         None,
         call_debug_location)
}

fn count_zeros_intrinsic(bcx: Block,
                         name: &'static str,
                         val: ValueRef,
                         call_debug_location: DebugLoc)
                         -> ValueRef {
    let y = C_bool(bcx.ccx(), false);
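    // `y` is the `is_zero_undef` flag of llvm.ctlz/llvm.cttz; passing `false`
    // keeps the result defined (the full bit width) for a zero input.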
    let llfn = bcx.ccx().get_intrinsic(&name);
    Call(bcx, llfn, &[val, y], None, call_debug_location)
}

fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                       name: &'static str,
                                       t: Ty<'tcx>,
                                       a: ValueRef,
                                       b: ValueRef,
                                       call_debug_location: DebugLoc)
                                       -> ValueRef {
    let llfn = bcx.ccx().get_intrinsic(&name);

    // Convert `i1` to a `bool`, and write it to the out parameter
    let val = Call(bcx, llfn, &[a, b], None, call_debug_location);
    let result = ExtractValue(bcx, val, 0);
    let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
    let ret = C_undef(type_of::type_of(bcx.ccx(), t));
    let ret = InsertValue(bcx, ret, result, 0);
    let ret = InsertValue(bcx, ret, overflow, 1);
    if type_is_immediate(bcx.ccx(), t) {
        let tmp = alloc_ty(bcx, t, "tmp");
        Store(bcx, ret, tmp);
        load_ty(bcx, tmp, t)
    } else {
        ret
    }
}