// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![allow(non_upper_case_globals)]

use arena::TypedArena;
use intrinsics::{self, Intrinsic};
use llvm::{ValueRef, TypeKind};
use rustc::ty::subst::FnSpace;
use abi::{Abi, FnType};
use callee::{self, Callee};
use cleanup::CleanupMethods;
use debuginfo::DebugLoc;
use rustc::ty::{self, Ty};
use syntax::parse::token;
use rustc::session::Session;
use rustc_const_eval::fatal_const_eval_err;
use syntax_pos::{Span, DUMMY_SP};

use std::cmp::Ordering;

fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
    let llvm_name = match name {
        "sqrtf32" => "llvm.sqrt.f32",
        "sqrtf64" => "llvm.sqrt.f64",
        "powif32" => "llvm.powi.f32",
        "powif64" => "llvm.powi.f64",
        "sinf32" => "llvm.sin.f32",
        "sinf64" => "llvm.sin.f64",
        "cosf32" => "llvm.cos.f32",
        "cosf64" => "llvm.cos.f64",
        "powf32" => "llvm.pow.f32",
        "powf64" => "llvm.pow.f64",
        "expf32" => "llvm.exp.f32",
        "expf64" => "llvm.exp.f64",
        "exp2f32" => "llvm.exp2.f32",
        "exp2f64" => "llvm.exp2.f64",
        "logf32" => "llvm.log.f32",
        "logf64" => "llvm.log.f64",
        "log10f32" => "llvm.log10.f32",
        "log10f64" => "llvm.log10.f64",
        "log2f32" => "llvm.log2.f32",
        "log2f64" => "llvm.log2.f64",
        "fmaf32" => "llvm.fma.f32",
        "fmaf64" => "llvm.fma.f64",
        "fabsf32" => "llvm.fabs.f32",
        "fabsf64" => "llvm.fabs.f64",
        "copysignf32" => "llvm.copysign.f32",
        "copysignf64" => "llvm.copysign.f64",
        "floorf32" => "llvm.floor.f32",
        "floorf64" => "llvm.floor.f64",
        "ceilf32" => "llvm.ceil.f32",
        "ceilf64" => "llvm.ceil.f64",
        "truncf32" => "llvm.trunc.f32",
        "truncf64" => "llvm.trunc.f64",
        "rintf32" => "llvm.rint.f32",
        "rintf64" => "llvm.rint.f64",
        "nearbyintf32" => "llvm.nearbyint.f32",
        "nearbyintf64" => "llvm.nearbyint.f64",
        "roundf32" => "llvm.round.f32",
        "roundf64" => "llvm.round.f64",
        "assume" => "llvm.assume",
        _ => return None
    };
    Some(ccx.get_intrinsic(&llvm_name))
}
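
// For example (a sketch; `ccx` stands for whatever `CrateContext` is in
// scope at the call site):
//
//     get_simple_intrinsic(ccx, "sqrtf32")    // Some(declared `llvm.sqrt.f32`)
//     get_simple_intrinsic(ccx, "transmute")  // None: handled specially below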

/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
/// add them to librustc_trans/trans/context.rs
pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                                            callee_ty: Ty<'tcx>,
                                            fn_ty: &FnType,
                                            args: callee::CallArgs<'a, 'tcx>,
                                            dest: expr::Dest,
                                            call_debug_location: DebugLoc)
                                            -> Result<'blk, 'tcx> {
    let fcx = bcx.fcx;
    let ccx = fcx.ccx;
    let tcx = bcx.tcx();

    let _icx = push_ctxt("trans_intrinsic_call");

    let (def_id, substs, sig) = match callee_ty.sty {
        ty::TyFnDef(def_id, substs, fty) => {
            let sig = tcx.erase_late_bound_regions(&fty.sig);
            (def_id, substs, tcx.normalize_associated_type(&sig))
        }
        _ => bug!("expected fn item type, found {}", callee_ty)
    };
    let arg_tys = sig.inputs;
    let ret_ty = sig.output;
    let name = tcx.item_name(def_id).as_str();

    let span = match call_debug_location {
        DebugLoc::At(_, span) | DebugLoc::ScopeAt(_, span) => span,
        DebugLoc::None => {
            span_bug!(fcx.span.unwrap_or(DUMMY_SP),
                      "intrinsic `{}` called with missing span", name);
        }
    };

    let cleanup_scope = fcx.push_custom_cleanup_scope();

    // For `transmute` we can just trans the input expr directly into dest
    if name == "transmute" {
        let llret_ty = type_of::type_of(ccx, ret_ty);
        match args {
            callee::ArgExprs(arg_exprs) => {
                assert_eq!(arg_exprs.len(), 1);

                let (in_type, out_type) = (*substs.types.get(FnSpace, 0),
                                           *substs.types.get(FnSpace, 1));
                let llintype = type_of::type_of(ccx, in_type);
                let llouttype = type_of::type_of(ccx, out_type);

                let in_type_size = machine::llbitsize_of_real(ccx, llintype);
                let out_type_size = machine::llbitsize_of_real(ccx, llouttype);

                if let ty::TyFnDef(def_id, substs, _) = in_type.sty {
                    if out_type_size != 0 {
                        // FIXME #19925 Remove this hack after a release cycle.
                        let _ = unpack_datum!(bcx, expr::trans(bcx, &arg_exprs[0]));
                        let llfn = Callee::def(ccx, def_id, substs).reify(ccx).val;
                        let llfnty = val_ty(llfn);
                        let llresult = match dest {
                            expr::SaveIn(d) => d,
                            expr::Ignore => alloc_ty(bcx, out_type, "ret")
                        };
                        Store(bcx, llfn, PointerCast(bcx, llresult, llfnty.ptr_to()));
                        if dest == expr::Ignore {
                            bcx = glue::drop_ty(bcx, llresult, out_type,
                                                call_debug_location);
                        }
                        fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
                        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
                        return Result::new(bcx, llresult);
                    }
                }

                // This should be caught by the intrinsicck pass
                assert_eq!(in_type_size, out_type_size);

                let nonpointer_nonaggregate = |llkind: TypeKind| -> bool {
                    use llvm::TypeKind::*;
                    match llkind {
                        Half | Float | Double | X86_FP80 | FP128 |
                        PPC_FP128 | Integer | Vector | X86_MMX => true,
                        _ => false
                    }
                };

                // An approximation to which types can be directly cast via
                // LLVM's bitcast. This doesn't cover pointer -> pointer casts,
                // but does, importantly, cover SIMD types.
                let in_kind = llintype.kind();
                let ret_kind = llret_ty.kind();
                let bitcast_compatible =
                    (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || {
                        in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer
                    };

                let dest = if bitcast_compatible {
                    // if we're here, the type is scalar-like (a primitive, a
                    // SIMD type or a pointer), and so can be handled as a
                    // by-value ValueRef and can also be directly bitcast to the
                    // target type. Doing this special case makes conversions
                    // like `u32x4` -> `u64x2` much nicer for LLVM and so more
                    // efficient (these are done efficiently implicitly in C
                    // with the `__m128i` type and so this means Rust doesn't
                    // lose out there).
                    let expr = &arg_exprs[0];
                    let datum = unpack_datum!(bcx, expr::trans(bcx, expr));
                    let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "transmute_temp"));
                    let val = if datum.kind.is_by_ref() {
                        load_ty(bcx, datum.val, datum.ty)
                    } else {
                        from_immediate(bcx, datum.val)
                    };

                    let cast_val = BitCast(bcx, val, llret_ty);

                    match dest {
                        expr::SaveIn(d) => {
                            // this often occurs in a sequence like `Store(val,
                            // d); val2 = Load(d)`, so disappears easily.
                            Store(bcx, cast_val, d);
                        }
                        expr::Ignore => {}
                    }
                    dest
                } else {
                    // The types are too complicated to do with a by-value
                    // bitcast, so pointer cast instead. We need to cast the
                    // dest so the types work out.
                    let dest = match dest {
                        expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
                        expr::Ignore => expr::Ignore
                    };
                    bcx = expr::trans_into(bcx, &arg_exprs[0], dest);
                    dest
                };

                fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
                fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

                return match dest {
                    expr::SaveIn(d) => Result::new(bcx, d),
                    expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
                };
            }

            _ => {
                bug!("expected expr as argument for transmute");
            }
        }
    }

    // For `move_val_init` we can evaluate the destination address
    // (the first argument) and then trans the source value (the
    // second argument) directly into the resulting destination
    // address.
    if name == "move_val_init" {
        if let callee::ArgExprs(ref exprs) = args {
            let (dest_expr, source_expr) = if exprs.len() != 2 {
                bug!("expected two exprs as arguments for `move_val_init` intrinsic");
            } else {
                (&exprs[0], &exprs[1])
            };

            // evaluate destination address
            let dest_datum = unpack_datum!(bcx, expr::trans(bcx, dest_expr));
            let dest_datum = unpack_datum!(
                bcx, dest_datum.to_rvalue_datum(bcx, "arg"));
            let dest_datum = unpack_datum!(
                bcx, dest_datum.to_appropriate_datum(bcx));

            // `expr::trans_into(bcx, expr, dest)` is equiv to
            //
            // `trans(bcx, expr).store_to_dest(dest)`,
            //
            // which for `dest == expr::SaveIn(addr)`, is equivalent to:
            //
            // `trans(bcx, expr).store_to(bcx, addr)`.
            let lldest = expr::Dest::SaveIn(dest_datum.val);
            bcx = expr::trans_into(bcx, source_expr, lldest);

            let llresult = C_nil(ccx);
            fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

            return Result::new(bcx, llresult);
        } else {
            bug!("expected two exprs as arguments for `move_val_init` intrinsic");
        }
    }

    // save the actual AST arguments for later (some places need to do
    // const-evaluation on them)
    let expr_arguments = match args {
        callee::ArgExprs(args) => Some(args),
        _ => None,
    };

    // Push the arguments.
    let mut llargs = Vec::new();
    bcx = callee::trans_args(bcx,
                             Abi::RustIntrinsic,
                             fn_ty,
                             &mut callee::Intrinsic,
                             args,
                             &mut llargs,
                             cleanup::CustomScope(cleanup_scope));

    fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();

    // These are the only intrinsic functions that diverge.
    if name == "abort" {
        let llfn = ccx.get_intrinsic(&("llvm.trap"));
        Call(bcx, llfn, &[], call_debug_location);
        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
        Unreachable(bcx);
        return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
    } else if &name[..] == "unreachable" {
        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
        Unreachable(bcx);
        return Result::new(bcx, C_nil(ccx));
    }

    let llret_ty = type_of::type_of(ccx, ret_ty);

    // Get location to store the result. If the user does
    // not care about the result, just make a stack slot
    let llresult = match dest {
        expr::SaveIn(d) => d,
        expr::Ignore => {
            if !type_is_zero_size(ccx, ret_ty) {
                let llresult = alloc_ty(bcx, ret_ty, "intrinsic_result");
                call_lifetime_start(bcx, llresult);
                llresult
            } else {
                C_undef(llret_ty.ptr_to())
            }
        }
    };

    let simple = get_simple_intrinsic(ccx, &name);
    let llval = match (simple, &name[..]) {
        (Some(llfn), _) => {
            Call(bcx, llfn, &llargs, call_debug_location)
        }
        (_, "try") => {
            bcx = try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult,
                                call_debug_location);
            C_nil(ccx)
        }
        (_, "breakpoint") => {
            let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
            Call(bcx, llfn, &[], call_debug_location)
        }
        (_, "size_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
        }
        (_, "size_of_val") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_sized(tcx, tp_ty) {
                let (llsize, _) =
                    glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]);
                llsize
            } else {
                let lltp_ty = type_of::type_of(ccx, tp_ty);
                C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
            }
        }
        (_, "min_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_uint(ccx, type_of::align_of(ccx, tp_ty))
        }
        (_, "min_align_of_val") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_sized(tcx, tp_ty) {
                let (_, llalign) =
                    glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]);
                llalign
            } else {
                C_uint(ccx, type_of::align_of(ccx, tp_ty))
            }
        }
        (_, "pref_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
        }
        (_, "drop_in_place") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let ptr = if type_is_sized(tcx, tp_ty) {
                llargs[0]
            } else {
                let scratch = rvalue_scratch_datum(bcx, tp_ty, "tmp");
                Store(bcx, llargs[0], expr::get_dataptr(bcx, scratch.val));
                Store(bcx, llargs[1], expr::get_meta(bcx, scratch.val));
                fcx.schedule_lifetime_end(cleanup::CustomScope(cleanup_scope), scratch.val);
                scratch.val
            };
            glue::drop_ty(bcx, ptr, tp_ty, call_debug_location);
            C_nil(ccx)
        }
        (_, "type_name") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let ty_name = token::intern_and_get_ident(&tp_ty.to_string());
            C_str_slice(ccx, ty_name)
        }
        (_, "type_id") => {
            C_u64(ccx, ccx.tcx().type_id_hash(*substs.types.get(FnSpace, 0)))
        }
        (_, "init_dropped") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_zero_size(ccx, tp_ty) {
                drop_done_fill_mem(bcx, llresult, tp_ty);
            }
            C_nil(ccx)
        }
        (_, "init") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_zero_size(ccx, tp_ty) {
                // Just zero out the stack slot. (See comment on base::memzero for explanation)
                init_zero_mem(bcx, llresult, tp_ty);
            }
            C_nil(ccx)
        }
        // Effectively no-ops
        (_, "uninit") | (_, "forget") => {
            C_nil(ccx)
        }
        (_, "needs_drop") => {
            let tp_ty = *substs.types.get(FnSpace, 0);

            C_bool(ccx, bcx.fcx.type_needs_drop(tp_ty))
        }
        (_, "offset") => {
            let ptr = llargs[0];
            let offset = llargs[1];
            InBoundsGEP(bcx, ptr, &[offset])
        }
        (_, "arith_offset") => {
            let ptr = llargs[0];
            let offset = llargs[1];
            GEP(bcx, ptr, &[offset])
        }

        (_, "copy_nonoverlapping") => {
            copy_intrinsic(bcx,
                           false,
                           false,
                           *substs.types.get(FnSpace, 0),
                           llargs[1],
                           llargs[0],
                           llargs[2],
                           call_debug_location)
        }
        (_, "copy") => {
            copy_intrinsic(bcx,
                           true,
                           false,
                           *substs.types.get(FnSpace, 0),
                           llargs[1],
                           llargs[0],
                           llargs[2],
                           call_debug_location)
        }
        (_, "write_bytes") => {
            memset_intrinsic(bcx,
                             false,
                             *substs.types.get(FnSpace, 0),
                             llargs[0],
                             llargs[1],
                             llargs[2],
                             call_debug_location)
        }

        (_, "volatile_copy_nonoverlapping_memory") => {
            copy_intrinsic(bcx,
                           false,
                           true,
                           *substs.types.get(FnSpace, 0),
                           llargs[0],
                           llargs[1],
                           llargs[2],
                           call_debug_location)
        }
        (_, "volatile_copy_memory") => {
            copy_intrinsic(bcx,
                           true,
                           true,
                           *substs.types.get(FnSpace, 0),
                           llargs[0],
                           llargs[1],
                           llargs[2],
                           call_debug_location)
        }
        (_, "volatile_set_memory") => {
            memset_intrinsic(bcx,
                             true,
                             *substs.types.get(FnSpace, 0),
                             llargs[0],
                             llargs[1],
                             llargs[2],
                             call_debug_location)
        }
        (_, "volatile_load") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let mut ptr = llargs[0];
            if let Some(ty) = fn_ty.ret.cast {
                ptr = PointerCast(bcx, ptr, ty.ptr_to());
            }
            let load = VolatileLoad(bcx, ptr);
            unsafe {
                llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty));
            }
            to_immediate(bcx, load, tp_ty)
        }
        (_, "volatile_store") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if type_is_fat_ptr(bcx.tcx(), tp_ty) {
                VolatileStore(bcx, llargs[1], expr::get_dataptr(bcx, llargs[0]));
                VolatileStore(bcx, llargs[2], expr::get_meta(bcx, llargs[0]));
            } else {
                let val = if fn_ty.args[1].is_indirect() {
                    Load(bcx, llargs[1])
                } else {
                    from_immediate(bcx, llargs[1])
                };
                let ptr = PointerCast(bcx, llargs[0], val_ty(val).ptr_to());
                let store = VolatileStore(bcx, val, ptr);
                unsafe {
                    llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty));
                }
            }
            C_nil(ccx)
        }

        (_, "ctlz") | (_, "cttz") | (_, "ctpop") | (_, "bswap") |
        (_, "add_with_overflow") | (_, "sub_with_overflow") | (_, "mul_with_overflow") |
        (_, "overflowing_add") | (_, "overflowing_sub") | (_, "overflowing_mul") |
        (_, "unchecked_div") | (_, "unchecked_rem") => {
            let sty = &arg_tys[0].sty;
            match int_type_width_signed(sty, ccx) {
                Some((width, signed)) =>
                    match &*name {
                        "ctlz" => count_zeros_intrinsic(bcx, &format!("llvm.ctlz.i{}", width),
                                                        llargs[0], call_debug_location),
                        "cttz" => count_zeros_intrinsic(bcx, &format!("llvm.cttz.i{}", width),
                                                        llargs[0], call_debug_location),
                        "ctpop" => Call(bcx, ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
                                        &llargs, call_debug_location),
                        "bswap" => {
                            if width == 8 {
                                llargs[0] // byte swap a u8/i8 is just a no-op
                            } else {
                                Call(bcx, ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
                                     &llargs, call_debug_location)
                            }
                        }
                        "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
                            let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
                                                    if signed { 's' } else { 'u' },
                                                    &name[..3], width);
                            with_overflow_intrinsic(bcx, &intrinsic, llargs[0], llargs[1], llresult,
                                                    call_debug_location)
                        },
                        "overflowing_add" => Add(bcx, llargs[0], llargs[1], call_debug_location),
                        "overflowing_sub" => Sub(bcx, llargs[0], llargs[1], call_debug_location),
                        "overflowing_mul" => Mul(bcx, llargs[0], llargs[1], call_debug_location),
                        "unchecked_div" =>
                            if signed {
                                SDiv(bcx, llargs[0], llargs[1], call_debug_location)
                            } else {
                                UDiv(bcx, llargs[0], llargs[1], call_debug_location)
                            },
                        "unchecked_rem" =>
                            if signed {
                                SRem(bcx, llargs[0], llargs[1], call_debug_location)
                            } else {
                                URem(bcx, llargs[0], llargs[1], call_debug_location)
                            },
                        _ => bug!(),
                    },
                None => {
                    span_invalid_monomorphization_error(
                        tcx.sess, span,
                        &format!("invalid monomorphization of `{}` intrinsic: \
                                  expected basic integer type, found `{}`", name, sty));
                    C_nil(ccx)
                }
            }
        }
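
        // As a concrete sketch of the name mangling above:
        // `add_with_overflow` on `i32` becomes `llvm.sadd.with.overflow.i32`,
        // while on `u32` it becomes `llvm.uadd.with.overflow.i32`; the
        // signedness only changes the `s`/`u` prefix, not the width.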

        (_, "fadd_fast") | (_, "fsub_fast") | (_, "fmul_fast") | (_, "fdiv_fast") |
        (_, "frem_fast") => {
            let sty = &arg_tys[0].sty;
            match float_type_width(sty) {
                Some(_width) =>
                    match &*name {
                        "fadd_fast" => FAddFast(bcx, llargs[0], llargs[1], call_debug_location),
                        "fsub_fast" => FSubFast(bcx, llargs[0], llargs[1], call_debug_location),
                        "fmul_fast" => FMulFast(bcx, llargs[0], llargs[1], call_debug_location),
                        "fdiv_fast" => FDivFast(bcx, llargs[0], llargs[1], call_debug_location),
                        "frem_fast" => FRemFast(bcx, llargs[0], llargs[1], call_debug_location),
                        _ => bug!(),
                    },
                None => {
                    span_invalid_monomorphization_error(
                        tcx.sess, span,
                        &format!("invalid monomorphization of `{}` intrinsic: \
                                  expected basic float type, found `{}`", name, sty));
                    C_nil(ccx)
                }
            }
        }

        (_, "discriminant_value") => {
            let val_ty = substs.types.get(FnSpace, 0);
            match val_ty.sty {
                ty::TyEnum(..) => {
                    let repr = adt::represent_type(ccx, *val_ty);
                    adt::trans_get_discr(bcx, &repr, llargs[0],
                                         Some(llret_ty), true)
                }
                _ => C_null(llret_ty)
            }
        }
        (_, name) if name.starts_with("simd_") => {
            generic_simd_intrinsic(bcx, name,
                                   substs,
                                   callee_ty,
                                   expr_arguments,
                                   &llargs,
                                   ret_ty, llret_ty,
                                   call_debug_location,
                                   span)
        }

        // This requires that atomic intrinsics follow a specific naming pattern:
        // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
        (_, name) if name.starts_with("atomic_") => {
            use llvm::AtomicOrdering::*;

            let split: Vec<&str> = name.split('_').collect();

            let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
            let (order, failorder) = match split.len() {
                2 => (SequentiallyConsistent, SequentiallyConsistent),
                3 => match split[2] {
                    "unordered" => (Unordered, Unordered),
                    "relaxed" => (Monotonic, Monotonic),
                    "acq" => (Acquire, Acquire),
                    "rel" => (Release, Monotonic),
                    "acqrel" => (AcquireRelease, Acquire),
                    "failrelaxed" if is_cxchg =>
                        (SequentiallyConsistent, Monotonic),
                    "failacq" if is_cxchg =>
                        (SequentiallyConsistent, Acquire),
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                },
                4 => match (split[2], split[3]) {
                    ("acq", "failrelaxed") if is_cxchg =>
                        (Acquire, Monotonic),
                    ("acqrel", "failrelaxed") if is_cxchg =>
                        (AcquireRelease, Monotonic),
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                },
                _ => ccx.sess().fatal("Atomic intrinsic not in correct format"),
            };
666 "cxchg" | "cxchgweak" => {
667 let sty
= &substs
.types
.get(FnSpace
, 0).sty
;
668 if int_type_width_signed(sty
, ccx
).is_some() {
669 let weak
= if split
[1] == "cxchgweak" { llvm::True }
else { llvm::False }
;
670 let val
= AtomicCmpXchg(bcx
, llargs
[0], llargs
[1], llargs
[2],
671 order
, failorder
, weak
);
672 let result
= ExtractValue(bcx
, val
, 0);
673 let success
= ZExt(bcx
, ExtractValue(bcx
, val
, 1), Type
::bool(bcx
.ccx()));
674 Store(bcx
, result
, StructGEP(bcx
, llresult
, 0));
675 Store(bcx
, success
, StructGEP(bcx
, llresult
, 1));
677 span_invalid_monomorphization_error(
679 &format
!("invalid monomorphization of `{}` intrinsic: \
680 expected basic integer type, found `{}`", name
, sty
));
686 let sty
= &substs
.types
.get(FnSpace
, 0).sty
;
687 if int_type_width_signed(sty
, ccx
).is_some() {
688 AtomicLoad(bcx
, llargs
[0], order
)
690 span_invalid_monomorphization_error(
692 &format
!("invalid monomorphization of `{}` intrinsic: \
693 expected basic integer type, found `{}`", name
, sty
));
699 let sty
= &substs
.types
.get(FnSpace
, 0).sty
;
700 if int_type_width_signed(sty
, ccx
).is_some() {
701 AtomicStore(bcx
, llargs
[1], llargs
[0], order
);
703 span_invalid_monomorphization_error(
705 &format
!("invalid monomorphization of `{}` intrinsic: \
706 expected basic integer type, found `{}`", name
, sty
));
712 AtomicFence(bcx
, order
, llvm
::SynchronizationScope
::CrossThread
);
716 "singlethreadfence" => {
717 AtomicFence(bcx
, order
, llvm
::SynchronizationScope
::SingleThread
);
721 // These are all AtomicRMW ops
723 let atom_op
= match op
{
724 "xchg" => llvm
::AtomicXchg
,
725 "xadd" => llvm
::AtomicAdd
,
726 "xsub" => llvm
::AtomicSub
,
727 "and" => llvm
::AtomicAnd
,
728 "nand" => llvm
::AtomicNand
,
729 "or" => llvm
::AtomicOr
,
730 "xor" => llvm
::AtomicXor
,
731 "max" => llvm
::AtomicMax
,
732 "min" => llvm
::AtomicMin
,
733 "umax" => llvm
::AtomicUMax
,
734 "umin" => llvm
::AtomicUMin
,
735 _
=> ccx
.sess().fatal("unknown atomic operation")
738 let sty
= &substs
.types
.get(FnSpace
, 0).sty
;
739 if int_type_width_signed(sty
, ccx
).is_some() {
740 AtomicRMW(bcx
, atom_op
, llargs
[0], llargs
[1], order
)
742 span_invalid_monomorphization_error(
744 &format
!("invalid monomorphization of `{}` intrinsic: \
745 expected basic integer type, found `{}`", name
, sty
));

        (_, _) => {
            let intr = match Intrinsic::find(&name) {
                Some(intr) => intr,
                None => bug!("unknown intrinsic '{}'", name),
            };
            fn one<T>(x: Vec<T>) -> T {
                assert_eq!(x.len(), 1);
                x.into_iter().next().unwrap()
            }
            fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type,
                          any_changes_needed: &mut bool) -> Vec<Type> {
                use intrinsics::Type::*;
                match *t {
                    Void => vec![Type::void(ccx)],
                    Integer(_signed, width, llvm_width) => {
                        *any_changes_needed |= width != llvm_width;
                        vec![Type::ix(ccx, llvm_width as u64)]
                    }
                    Float(x) => {
                        match x {
                            32 => vec![Type::f32(ccx)],
                            64 => vec![Type::f64(ccx)],
                            _ => bug!()
                        }
                    }
                    Pointer(ref t, ref llvm_elem, _const) => {
                        *any_changes_needed |= llvm_elem.is_some();

                        let t = llvm_elem.as_ref().unwrap_or(t);
                        let elem = one(ty_to_type(ccx, t,
                                                  any_changes_needed));
                        vec![elem.ptr_to()]
                    }
                    Vector(ref t, ref llvm_elem, length) => {
                        *any_changes_needed |= llvm_elem.is_some();

                        let t = llvm_elem.as_ref().unwrap_or(t);
                        let elem = one(ty_to_type(ccx, t,
                                                  any_changes_needed));
                        vec![Type::vector(&elem,
                                          length as u64)]
                    }
                    Aggregate(false, ref contents) => {
                        let elems = contents.iter()
                                            .map(|t| one(ty_to_type(ccx, t, any_changes_needed)))
                                            .collect::<Vec<_>>();
                        vec![Type::struct_(ccx, &elems, false)]
                    }
                    Aggregate(true, ref contents) => {
                        *any_changes_needed = true;
                        contents.iter()
                                .flat_map(|t| ty_to_type(ccx, t, any_changes_needed))
                                .collect()
                    }
                }
            }
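
            // A sketch of the flattening rule above: an `Aggregate(true, ..)`
            // type standing for a pair `(i32, i32)` expands to two `i32`
            // entries in the returned Vec (and flags `any_changes_needed`),
            // whereas `Aggregate(false, ..)` stays a single LLVM struct type.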

            // This allows an argument list like `foo, (bar, baz),
            // qux` to be converted into `foo, bar, baz, qux`, integer
            // arguments to be truncated as needed and pointers to be
            // cast.
            fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                            t: &intrinsics::Type,
                                            arg_type: Ty<'tcx>,
                                            llarg: ValueRef)
                                            -> Vec<ValueRef>
            {
                match *t {
                    intrinsics::Type::Aggregate(true, ref contents) => {
                        // We found a tuple that needs squishing! So
                        // run over the tuple and load each field.
                        //
                        // This assumes the type is "simple", i.e. no
                        // destructors, and the contents are SIMD
                        // etc.
                        assert!(!bcx.fcx.type_needs_drop(arg_type));

                        let repr = adt::represent_type(bcx.ccx(), arg_type);
                        let repr_ptr = &repr;
                        let arg = adt::MaybeSizedValue::sized(llarg);
                        (0..contents.len())
                            .map(|i| {
                                Load(bcx, adt::trans_field_ptr(bcx, repr_ptr, arg, Disr(0), i))
                            })
                            .collect()
                    }
                    intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
                        let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
                        vec![PointerCast(bcx, llarg,
                                         llvm_elem.ptr_to())]
                    }
                    intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
                        let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
                        vec![BitCast(bcx, llarg,
                                     Type::vector(&llvm_elem, length as u64))]
                    }
                    intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
                        // the LLVM intrinsic uses a smaller integer
                        // size than the C intrinsic's signature, so
                        // we have to trim it down here.
                        vec![Trunc(bcx, llarg, Type::ix(bcx.ccx(), llvm_width as u64))]
                    }
                    _ => vec![llarg],
                }
            }

            let mut any_changes_needed = false;
            let inputs = intr.inputs.iter()
                                    .flat_map(|t| ty_to_type(ccx, t, &mut any_changes_needed))
                                    .collect::<Vec<_>>();

            let mut out_changes = false;
            let outputs = one(ty_to_type(ccx, &intr.output, &mut out_changes));
            // outputting a flattened aggregate is nonsense
            assert!(!out_changes);

            let llargs = if !any_changes_needed {
                // no aggregates to flatten, so no change needed
                llargs
            } else {
                // there are some aggregates that need to be flattened
                // in the LLVM call, so we need to run over the types
                // again to find them and extract the arguments
                intr.inputs.iter()
                           .zip(&llargs)
                           .zip(&arg_tys)
                           .flat_map(|((t, llarg), ty)| modify_as_needed(bcx, t, ty, *llarg))
                           .collect()
            };
            assert_eq!(inputs.len(), llargs.len());

            let val = match intr.definition {
                intrinsics::IntrinsicDef::Named(name) => {
                    let f = declare::declare_cfn(ccx,
                                                 name,
                                                 Type::func(&inputs, &outputs));
                    Call(bcx, f, &llargs, call_debug_location)
                }
            };

            match intr.output {
                intrinsics::Type::Aggregate(flatten, ref elems) => {
                    // the output is a tuple so we need to munge it properly
                    assert!(!flatten);

                    for i in 0..elems.len() {
                        let val = ExtractValue(bcx, val, i);
                        Store(bcx, val, StructGEP(bcx, llresult, i));
                    }
                    C_nil(ccx)
                }
                _ => val,
            }
        }
    };

    if val_ty(llval) != Type::void(ccx) &&
       machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
        if let Some(ty) = fn_ty.ret.cast {
            let ptr = PointerCast(bcx, llresult, ty.ptr_to());
            let store = Store(bcx, llval, ptr);
            unsafe {
                llvm::LLVMSetAlignment(store, type_of::align_of(ccx, ret_ty));
            }
        } else {
            store_ty(bcx, llval, llresult, ret_ty);
        }
    }

    // If we made a temporary stack slot, let's clean it up
    match dest {
        expr::Ignore => {
            bcx = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location);
            call_lifetime_end(bcx, llresult);
        }
        expr::SaveIn(_) => {}
    }

    fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

    Result::new(bcx, llresult)
}

fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              allow_overlap: bool,
                              volatile: bool,
                              tp_ty: Ty<'tcx>,
                              dst: ValueRef,
                              src: ValueRef,
                              count: ValueRef,
                              call_debug_location: DebugLoc)
                              -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());

    let operation = if allow_overlap {
        "memmove"
    } else {
        "memcpy"
    };

    let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, int_size);

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx,
         llfn,
         &[dst_ptr,
           src_ptr,
           Mul(bcx, size, count, DebugLoc::None),
           align,
           C_bool(ccx, volatile)],
         call_debug_location)
}
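
// On a 64-bit target, for example, a non-overlapping copy resolves to
// `llvm.memcpy.p0i8.p0i8.i64` and an overlapping one to
// `llvm.memmove.p0i8.p0i8.i64`, with the byte length computed as
// `size * count`.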

fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                volatile: bool,
                                tp_ty: Ty<'tcx>,
                                dst: ValueRef,
                                val: ValueRef,
                                count: ValueRef,
                                call_debug_location: DebugLoc)
                                -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());

    let name = format!("llvm.memset.p0i8.i{}", int_size);

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx,
         llfn,
         &[dst_ptr,
           val,
           Mul(bcx, size, count, DebugLoc::None),
           align,
           C_bool(ccx, volatile)],
         call_debug_location)
}
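
// Likewise, `write_bytes::<T>(dst, val, count)` on a 64-bit target lowers to
// `llvm.memset.p0i8.i64`, filling `size_of::<T>() * count` bytes with `val`.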

fn count_zeros_intrinsic(bcx: Block,
                         name: &str,
                         val: ValueRef,
                         call_debug_location: DebugLoc)
                         -> ValueRef {
    let y = C_bool(bcx.ccx(), false);
    let llfn = bcx.ccx().get_intrinsic(&name);
    Call(bcx, llfn, &[val, y], call_debug_location)
}
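
// The extra `i1 false` argument is LLVM's `is_zero_undef` flag for
// `llvm.ctlz.*`/`llvm.cttz.*`: passing `false` requests a defined result
// (the full bit width) when the input is zero.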

fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                       name: &str,
                                       a: ValueRef,
                                       b: ValueRef,
                                       out: ValueRef,
                                       call_debug_location: DebugLoc)
                                       -> ValueRef {
    let llfn = bcx.ccx().get_intrinsic(&name);

    // Convert `i1` to a `bool`, and write it to the out parameter
    let val = Call(bcx, llfn, &[a, b], call_debug_location);
    let result = ExtractValue(bcx, val, 0);
    let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
    Store(bcx, result, StructGEP(bcx, out, 0));
    Store(bcx, overflow, StructGEP(bcx, out, 1));

    C_nil(bcx.ccx())
}
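
// For instance, `llvm.sadd.with.overflow.i32` yields a `{ i32, i1 }` pair;
// the code above unpacks it and widens the `i1` overflow bit so the out
// parameter holds a Rust-visible `(i32, bool)` tuple.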

fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                             func: ValueRef,
                             data: ValueRef,
                             local_ptr: ValueRef,
                             dest: ValueRef,
                             dloc: DebugLoc) -> Block<'blk, 'tcx> {
    if bcx.sess().no_landing_pads() {
        Call(bcx, func, &[data], dloc);
        Store(bcx, C_null(Type::i8p(bcx.ccx())), dest);
        bcx
    } else if wants_msvc_seh(bcx.sess()) {
        trans_msvc_try(bcx, func, data, local_ptr, dest, dloc)
    } else {
        trans_gnu_try(bcx, func, data, local_ptr, dest, dloc)
    }
}

// MSVC's definition of the `rust_try` function.
//
// This implementation uses the new exception handling instructions in LLVM
// which have support in LLVM for SEH on MSVC targets. Although these
// instructions are meant to work for all targets, as of the time of this
// writing LLVM does not recommend their usage as the old ones are still
// more optimized.
fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              func: ValueRef,
                              data: ValueRef,
                              local_ptr: ValueRef,
                              dest: ValueRef,
                              dloc: DebugLoc) -> Block<'blk, 'tcx> {
    let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
        let ccx = bcx.ccx();
        let dloc = DebugLoc::None;

        SetPersonalityFn(bcx, bcx.fcx.eh_personality());

        let normal = bcx.fcx.new_temp_block("normal");
        let catchswitch = bcx.fcx.new_temp_block("catchswitch");
        let catchpad = bcx.fcx.new_temp_block("catchpad");
        let caught = bcx.fcx.new_temp_block("caught");

        let func = llvm::get_param(bcx.fcx.llfn, 0);
        let data = llvm::get_param(bcx.fcx.llfn, 1);
        let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);

        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%func, %data, %ptr) {
        //      %slot = alloca i64*
        //      invoke %func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad] unwind to caller
        //
        //   catchpad:
        //      %tok = catchpad within %cs [%type_descriptor, 0, %slot]
        //      %ptr[0] = %slot[0]
        //      %ptr[1] = %slot[1]
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        // This structure follows the basic usage of throw/try/catch in LLVM.
        // For example, compile this C++ snippet to see what LLVM generates:
        //
        //      #include <stdint.h>
        //
        //      int bar(void (*foo)(void), uint64_t *ret) {
        //          try {
        //              foo();
        //              return 0;
        //          } catch(uint64_t a[2]) {
        //              ret[0] = a[0];
        //              ret[1] = a[1];
        //              return 1;
        //          }
        //      }
        //
        // More information can be found in libstd's seh.rs implementation.
        let i64p = Type::i64(ccx).ptr_to();
        let slot = Alloca(bcx, i64p, "slot");
        Invoke(bcx, func, &[data], normal.llbb, catchswitch.llbb, dloc);

        Ret(normal, C_i32(ccx, 0), dloc);

        let cs = CatchSwitch(catchswitch, None, None, 1);
        AddHandler(catchswitch, cs, catchpad.llbb);

        let tcx = ccx.tcx();
        let tydesc = match tcx.lang_items.msvc_try_filter() {
            Some(did) => ::consts::get_static(ccx, did).to_llref(),
            None => bug!("msvc_try_filter not defined"),
        };
        let tok = CatchPad(catchpad, cs, &[tydesc, C_i32(ccx, 0), slot]);
        let addr = Load(catchpad, slot);
        let arg1 = Load(catchpad, addr);
        let val1 = C_i32(ccx, 1);
        let arg2 = Load(catchpad, InBoundsGEP(catchpad, addr, &[val1]));
        let local_ptr = BitCast(catchpad, local_ptr, i64p);
        Store(catchpad, arg1, local_ptr);
        Store(catchpad, arg2, InBoundsGEP(catchpad, local_ptr, &[val1]));
        CatchRet(catchpad, tok, caught.llbb);

        Ret(caught, C_i32(ccx, 1), dloc);
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc);
    Store(bcx, ret, dest);
    bcx
}

// Definition of the standard "try" function for Rust using the GNU-like model
// of exceptions (e.g. the normal semantics of LLVM's landingpad and invoke
// instructions).
//
// This translation is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is done
// because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                             func: ValueRef,
                             data: ValueRef,
                             local_ptr: ValueRef,
                             dest: ValueRef,
                             dloc: DebugLoc) -> Block<'blk, 'tcx> {
    let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
        let ccx = bcx.ccx();
        let dloc = DebugLoc::None;

        // Translates the shims described above:
        //
        //   bcx:
        //      invoke %func(%args...) normal %normal unwind %catch
        //
        //   normal:
        //      ret 0
        //
        //   catch:
        //      (ptr, _) = landingpad
        //      store ptr, %local_ptr
        //      ret 1
        //
        // Note that the `local_ptr` data passed into the `try` intrinsic is
        // expected to be `*mut *mut u8` for this to actually work, but that's
        // managed by the standard library.
        let then = bcx.fcx.new_temp_block("then");
        let catch = bcx.fcx.new_temp_block("catch");

        let func = llvm::get_param(bcx.fcx.llfn, 0);
        let data = llvm::get_param(bcx.fcx.llfn, 1);
        let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);
        Invoke(bcx, func, &[data], then.llbb, catch.llbb, dloc);
        Ret(then, C_i32(ccx, 0), dloc);

        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        // rust_try ignores the selector.
        let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
                                    false);
        let vals = LandingPad(catch, lpad_ty, bcx.fcx.eh_personality(), 1);
        AddClause(catch, vals, C_null(Type::i8p(ccx)));
        let ptr = ExtractValue(catch, vals, 0);
        Store(catch, ptr, BitCast(catch, local_ptr, Type::i8p(ccx).ptr_to()));
        Ret(catch, C_i32(ccx, 1), dloc);
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc);
    Store(bcx, ret, dest);
    bcx
}

// Helper function to give a Block to a closure to translate a shim function.
// This is currently primarily used for the `try` intrinsic functions above.
fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
                    name: &str,
                    inputs: Vec<Ty<'tcx>>,
                    output: Ty<'tcx>,
                    trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
                    -> ValueRef {
    let ccx = fcx.ccx;
    let sig = ty::FnSig {
        inputs: inputs,
        output: output,
        variadic: false,
    };
    let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]);

    let rust_fn_ty = ccx.tcx().mk_fn_ptr(ccx.tcx().mk_bare_fn(ty::BareFnTy {
        unsafety: hir::Unsafety::Unsafe,
        abi: Abi::Rust,
        sig: ty::Binder(sig)
    }));
    let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty);
    let (fcx, block_arena);
    block_arena = TypedArena::new();
    fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena);
    let bcx = fcx.init(true, None);
    trans(bcx);
    fcx.cleanup();
    llfn
}

// Helper function used to get a handle to the `__rust_try` function used to
// catch exceptions.
//
// This function is only generated once and is then cached.
fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
                             trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
                             -> ValueRef {
    let ccx = fcx.ccx;
    if let Some(llfn) = ccx.rust_try_fn().get() {
        return llfn;
    }

    // Define the type up front for the signature of the rust_try function.
    let tcx = ccx.tcx();
    let i8p = tcx.mk_mut_ptr(tcx.types.i8);
    let fn_ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy {
        unsafety: hir::Unsafety::Unsafe,
        abi: Abi::Rust,
        sig: ty::Binder(ty::FnSig {
            inputs: vec![i8p],
            output: tcx.mk_nil(),
            variadic: false,
        }),
    }));
    let output = tcx.types.i32;
    let rust_try = gen_fn(fcx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans);
    ccx.rust_try_fn().set(Some(rust_try));
    rust_try
}

fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
    span_err!(a, b, E0511, "{}", c);
}

fn generic_simd_intrinsic<'blk, 'tcx, 'a>
    (bcx: Block<'blk, 'tcx>,
     name: &str,
     substs: &'tcx subst::Substs<'tcx>,
     callee_ty: Ty<'tcx>,
     args: Option<&[P<hir::Expr>]>,
     llargs: &[ValueRef],
     ret_ty: Ty<'tcx>,
     llret_ty: Type,
     call_debug_location: DebugLoc,
     span: Span) -> ValueRef
{
    // macros for error handling:
    macro_rules! emit_error {
        ($msg: tt) => {
            emit_error!($msg, )
        };
        ($msg: tt, $($fmt: tt)*) => {
            span_invalid_monomorphization_error(
                bcx.sess(), span,
                &format!(concat!("invalid monomorphization of `{}` intrinsic: ",
                                 $msg),
                         name, $($fmt)*));
        }
    }
    macro_rules! require {
        ($cond: expr, $($fmt: tt)*) => {
            if !$cond {
                emit_error!($($fmt)*);
                return C_nil(bcx.ccx())
            }
        }
    }
    macro_rules! require_simd {
        ($ty: expr, $position: expr) => {
            require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
        }
    }

    let tcx = bcx.tcx();
    let sig = tcx.erase_late_bound_regions(callee_ty.fn_sig());
    let sig = tcx.normalize_associated_type(&sig);
    let arg_tys = sig.inputs;

    // every intrinsic takes a SIMD vector as its first argument
    require_simd!(arg_tys[0], "input");
    let in_ty = arg_tys[0];
    let in_elem = arg_tys[0].simd_type(tcx);
    let in_len = arg_tys[0].simd_size(tcx);

    let comparison = match name {
        "simd_eq" => Some(hir::BiEq),
        "simd_ne" => Some(hir::BiNe),
        "simd_lt" => Some(hir::BiLt),
        "simd_le" => Some(hir::BiLe),
        "simd_gt" => Some(hir::BiGt),
        "simd_ge" => Some(hir::BiGe),
        _ => None
    };

    if let Some(cmp_op) = comparison {
        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(in_len == out_len,
                 "expected return type with length {} (same as input type `{}`), \
                  found `{}` with length {}",
                 in_len, in_ty,
                 ret_ty, out_len);
        require!(llret_ty.element_type().kind() == llvm::Integer,
                 "expected return type with integer elements, found `{}` with non-integer `{}`",
                 ret_ty,
                 ret_ty.simd_type(tcx));

        return compare_simd_types(bcx,
                                  llargs[0],
                                  llargs[1],
                                  in_elem,
                                  llret_ty,
                                  cmp_op,
                                  call_debug_location)
    }

    if name.starts_with("simd_shuffle") {
        let n: usize = match name["simd_shuffle".len()..].parse() {
            Ok(n) => n,
            Err(_) => span_bug!(span,
                                "bad `simd_shuffle` instruction only caught in trans?")
        };

        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(out_len == n,
                 "expected return type of length {}, found `{}` with length {}",
                 n, ret_ty, out_len);
        require!(in_elem == ret_ty.simd_type(tcx),
                 "expected return element type `{}` (element of input `{}`), \
                  found `{}` with element type `{}`",
                 in_elem, in_ty,
                 ret_ty, ret_ty.simd_type(tcx));

        let total_len = in_len as u64 * 2;

        let vector = match args {
            Some(args) => {
                match consts::const_expr(bcx.ccx(), &args[2], substs, None,
                                         // this should probably help simd error reporting
                                         consts::TrueConst::Yes) {
                    Ok((vector, _)) => vector,
                    Err(err) => {
                        fatal_const_eval_err(bcx.tcx(), err.as_inner(), span,
                                             "shuffle indices");
                    }
                }
            }
            None => llargs[2]
        };

        let indices: Option<Vec<_>> = (0..n)
            .map(|i| {
                let arg_idx = i;
                let val = const_get_elt(vector, &[i as libc::c_uint]);
                match const_to_opt_uint(val) {
                    None => {
                        emit_error!("shuffle index #{} is not a constant", arg_idx);
                        None
                    }
                    Some(idx) if idx >= total_len => {
                        emit_error!("shuffle index #{} is out of bounds (limit {})",
                                    arg_idx, total_len);
                        None
                    }
                    Some(idx) => Some(C_i32(bcx.ccx(), idx as i32)),
                }
            })
            .collect();
        let indices = match indices {
            Some(i) => i,
            None => return C_null(llret_ty)
        };

        return ShuffleVector(bcx, llargs[0], llargs[1], C_vector(&indices))
    }
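
    // As a sketch: `simd_shuffle4::<f32x4, f32x4>(a, b, [0, 4, 1, 5])`
    // parses `n == 4` out of the name and indexes into the 8-lane
    // concatenation of `a` and `b`, so every index must be below
    // `total_len == 8`. (`f32x4` is an illustrative SIMD type.)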

    if name == "simd_insert" {
        require!(in_elem == arg_tys[2],
                 "expected inserted type `{}` (element of input `{}`), found `{}`",
                 in_elem, in_ty, arg_tys[2]);
        return InsertElement(bcx, llargs[0], llargs[2], llargs[1])
    }
    if name == "simd_extract" {
        require!(ret_ty == in_elem,
                 "expected return type `{}` (element of input `{}`), found `{}`",
                 in_elem, in_ty, ret_ty);
        return ExtractElement(bcx, llargs[0], llargs[1])
    }

    if name == "simd_cast" {
        require_simd!(ret_ty, "return");
        let out_len = ret_ty.simd_size(tcx);
        require!(in_len == out_len,
                 "expected return type with length {} (same as input type `{}`), \
                  found `{}` with length {}",
                 in_len, in_ty,
                 ret_ty, out_len);
        // casting cares about nominal type, not just structural type
        let out_elem = ret_ty.simd_type(tcx);

        if in_elem == out_elem { return llargs[0]; }

        enum Style { Float, Int(/* is signed? */ bool), Unsupported }

        let (in_style, in_width) = match in_elem.sty {
            // vectors of pointer-sized integers should've been
            // disallowed before here, so this unwrap is safe.
            ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::TyFloat(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0)
        };
        let (out_style, out_width) = match out_elem.sty {
            ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::TyFloat(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0)
        };

        match (in_style, out_style) {
            (Style::Int(in_is_signed), Style::Int(_)) => {
                return match in_width.cmp(&out_width) {
                    Ordering::Greater => Trunc(bcx, llargs[0], llret_ty),
                    Ordering::Equal => llargs[0],
                    Ordering::Less => if in_is_signed {
                        SExt(bcx, llargs[0], llret_ty)
                    } else {
                        ZExt(bcx, llargs[0], llret_ty)
                    }
                }
            }
            (Style::Int(in_is_signed), Style::Float) => {
                return if in_is_signed {
                    SIToFP(bcx, llargs[0], llret_ty)
                } else {
                    UIToFP(bcx, llargs[0], llret_ty)
                }
            }
            (Style::Float, Style::Int(out_is_signed)) => {
                return if out_is_signed {
                    FPToSI(bcx, llargs[0], llret_ty)
                } else {
                    FPToUI(bcx, llargs[0], llret_ty)
                }
            }
            (Style::Float, Style::Float) => {
                return match in_width.cmp(&out_width) {
                    Ordering::Greater => FPTrunc(bcx, llargs[0], llret_ty),
                    Ordering::Equal => llargs[0],
                    Ordering::Less => FPExt(bcx, llargs[0], llret_ty)
                }
            }
            _ => {/* Unsupported. Fallthrough. */}
        }
        require!(false,
                 "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
                 in_ty, in_elem,
                 ret_ty, out_elem);
    }

    macro_rules! arith {
        ($($name: ident: $($($p: ident),* => $call: expr),*;)*) => {
            $(
                if name == stringify!($name) {
                    match in_elem.sty {
                        $(
                            $(ty::$p(_))|* => {
                                return $call(bcx, llargs[0], llargs[1], call_debug_location)
                            }
                            )*
                        _ => {},
                    }
                    require!(false,
                             "unsupported operation on `{}` with element `{}`",
                             in_ty,
                             in_elem)
                })*
        }
    }
    arith! {
        simd_add: TyUint, TyInt => Add, TyFloat => FAdd;
        simd_sub: TyUint, TyInt => Sub, TyFloat => FSub;
        simd_mul: TyUint, TyInt => Mul, TyFloat => FMul;
        simd_div: TyFloat => FDiv;
        simd_shl: TyUint, TyInt => Shl;
        simd_shr: TyUint => LShr, TyInt => AShr;
        simd_and: TyUint, TyInt => And;
        simd_or: TyUint, TyInt => Or;
        simd_xor: TyUint, TyInt => Xor;
    }
    span_bug!(span, "unknown SIMD intrinsic");
}

// Returns the width of an int TypeVariant, and if it's signed or not
// Returns None if the type is not an integer
fn int_type_width_signed<'tcx>(sty: &ty::TypeVariants<'tcx>, ccx: &CrateContext)
        -> Option<(u64, bool)> {
    use rustc::ty::{TyInt, TyUint};
    match *sty {
        TyInt(t) => Some((match t {
            ast::IntTy::Is => {
                match &ccx.tcx().sess.target.target.target_pointer_width[..] {
                    "16" => 16,
                    "32" => 32,
                    "64" => 64,
                    tws => bug!("Unsupported target word size for isize: {}", tws),
                }
            },
            ast::IntTy::I8 => 8,
            ast::IntTy::I16 => 16,
            ast::IntTy::I32 => 32,
            ast::IntTy::I64 => 64,
        }, true)),
        TyUint(t) => Some((match t {
            ast::UintTy::Us => {
                match &ccx.tcx().sess.target.target.target_pointer_width[..] {
                    "16" => 16,
                    "32" => 32,
                    "64" => 64,
                    tws => bug!("Unsupported target word size for usize: {}", tws),
                }
            },
            ast::UintTy::U8 => 8,
            ast::UintTy::U16 => 16,
            ast::UintTy::U32 => 32,
            ast::UintTy::U64 => 64,
        }, false)),
        _ => None,
    }
}
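
// For example, `TyInt(ast::IntTy::I32)` yields `Some((32, true))`, and on a
// 64-bit target `TyUint(ast::UintTy::Us)` (i.e. `usize`) yields
// `Some((64, false))`; a float type falls through to `None`.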

// Returns the width of a float TypeVariant
// Returns None if the type is not a float
fn float_type_width<'tcx>(sty: &ty::TypeVariants<'tcx>)
        -> Option<u64> {
    use rustc::ty::TyFloat;
    match *sty {
        TyFloat(t) => Some(match t {
            ast::FloatTy::F32 => 32,
            ast::FloatTy::F64 => 64,
        }),
        _ => None,
    }
}