// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![allow(non_upper_case_globals)]
use arena::TypedArena;
use intrinsics::{self, Intrinsic};
use llvm::{ValueRef, TypeKind};
use rustc::ty::subst::FnSpace;
use abi::{Abi, FnType};
use callee::{self, Callee};
use cleanup::CleanupMethods;
use debuginfo::DebugLoc;
use rustc::ty::{self, Ty};
use rustc::ty::subst::Substs;
use syntax::parse::token;
use rustc::session::Session;
use syntax::codemap::{Span, DUMMY_SP};
use std::cmp::Ordering;
fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
    let llvm_name = match name {
        "sqrtf32" => "llvm.sqrt.f32",
        "sqrtf64" => "llvm.sqrt.f64",
        "powif32" => "llvm.powi.f32",
        "powif64" => "llvm.powi.f64",
        "sinf32" => "llvm.sin.f32",
        "sinf64" => "llvm.sin.f64",
        "cosf32" => "llvm.cos.f32",
        "cosf64" => "llvm.cos.f64",
        "powf32" => "llvm.pow.f32",
        "powf64" => "llvm.pow.f64",
        "expf32" => "llvm.exp.f32",
        "expf64" => "llvm.exp.f64",
        "exp2f32" => "llvm.exp2.f32",
        "exp2f64" => "llvm.exp2.f64",
        "logf32" => "llvm.log.f32",
        "logf64" => "llvm.log.f64",
        "log10f32" => "llvm.log10.f32",
        "log10f64" => "llvm.log10.f64",
        "log2f32" => "llvm.log2.f32",
        "log2f64" => "llvm.log2.f64",
        "fmaf32" => "llvm.fma.f32",
        "fmaf64" => "llvm.fma.f64",
        "fabsf32" => "llvm.fabs.f32",
        "fabsf64" => "llvm.fabs.f64",
        "copysignf32" => "llvm.copysign.f32",
        "copysignf64" => "llvm.copysign.f64",
        "floorf32" => "llvm.floor.f32",
        "floorf64" => "llvm.floor.f64",
        "ceilf32" => "llvm.ceil.f32",
        "ceilf64" => "llvm.ceil.f64",
        "truncf32" => "llvm.trunc.f32",
        "truncf64" => "llvm.trunc.f64",
        "rintf32" => "llvm.rint.f32",
        "rintf64" => "llvm.rint.f64",
        "nearbyintf32" => "llvm.nearbyint.f32",
        "nearbyintf64" => "llvm.nearbyint.f64",
        "roundf32" => "llvm.round.f32",
        "roundf64" => "llvm.round.f64",
        "assume" => "llvm.assume",
        _ => return None
    };
    Some(ccx.get_intrinsic(&llvm_name))
}
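// For example, `get_simple_intrinsic(ccx, "sqrtf32")` yields the declaration
// of `llvm.sqrt.f32` registered in trans/context.rs (an illustrative note;
// see `CrateContext::get_intrinsic`).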
/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
/// add them to librustc_trans/trans/context.rs.
pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                                            callee_ty: Ty<'tcx>,
                                            fn_ty: &FnType,
                                            args: callee::CallArgs<'a, 'tcx>,
                                            dest: expr::Dest,
                                            call_debug_location: DebugLoc)
                                            -> Result<'blk, 'tcx> {
    let fcx = bcx.fcx;
    let ccx = fcx.ccx;
    let tcx = bcx.tcx();

    let _icx = push_ctxt("trans_intrinsic_call");
    let (def_id, substs, sig) = match callee_ty.sty {
        ty::TyFnDef(def_id, substs, fty) => {
            let sig = tcx.erase_late_bound_regions(&fty.sig);
            (def_id, substs, infer::normalize_associated_type(tcx, &sig))
        }
        _ => bug!("expected fn item type, found {}", callee_ty)
    };
    let arg_tys = sig.inputs;
    let ret_ty = sig.output;
    let name = tcx.item_name(def_id).as_str();

    let span = match call_debug_location {
        DebugLoc::At(_, span) => span,
        DebugLoc::None => fcx.span.unwrap_or(DUMMY_SP)
    };

    let cleanup_scope = fcx.push_custom_cleanup_scope();
    // For `transmute` we can just trans the input expr directly into dest
    if name == "transmute" {
        let llret_ty = type_of::type_of(ccx, ret_ty.unwrap());
        match args {
            callee::ArgExprs(arg_exprs) => {
                assert_eq!(arg_exprs.len(), 1);

                let (in_type, out_type) = (*substs.types.get(FnSpace, 0),
                                           *substs.types.get(FnSpace, 1));
                let llintype = type_of::type_of(ccx, in_type);
                let llouttype = type_of::type_of(ccx, out_type);

                let in_type_size = machine::llbitsize_of_real(ccx, llintype);
                let out_type_size = machine::llbitsize_of_real(ccx, llouttype);
                if let ty::TyFnDef(def_id, substs, _) = in_type.sty {
                    if out_type_size != 0 {
                        // FIXME #19925 Remove this hack after a release cycle.
                        let _ = unpack_datum!(bcx, expr::trans(bcx, &arg_exprs[0]));
                        let llfn = Callee::def(ccx, def_id, substs).reify(ccx).val;
                        let llfnty = val_ty(llfn);
                        let llresult = match dest {
                            expr::SaveIn(d) => d,
                            expr::Ignore => alloc_ty(bcx, out_type, "ret")
                        };
                        Store(bcx, llfn, PointerCast(bcx, llresult, llfnty.ptr_to()));
                        if dest == expr::Ignore {
                            bcx = glue::drop_ty(bcx, llresult, out_type,
                                                call_debug_location);
                        }
                        fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
                        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
                        return Result::new(bcx, llresult);
                    }
                }
                // This should be caught by the intrinsicck pass
                assert_eq!(in_type_size, out_type_size);

                let nonpointer_nonaggregate = |llkind: TypeKind| -> bool {
                    use llvm::TypeKind::*;
                    match llkind {
                        Half | Float | Double | X86_FP80 | FP128 |
                        PPC_FP128 | Integer | Vector | X86_MMX => true,
                        _ => false
                    }
                };
                // An approximation to which types can be directly cast via
                // LLVM's bitcast. This doesn't cover pointer -> pointer casts,
                // but does, importantly, cover SIMD types.
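                // For example (an illustration of the check below):
                // `transmute::<u32x4, u64x2>` is bitcast-compatible and becomes
                // a single `bitcast`, while a transmute between two large
                // structs takes the pointer-cast path instead.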
                let in_kind = llintype.kind();
                let ret_kind = llret_ty.kind();
                let bitcast_compatible =
                    (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || {
                        in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer
                    };

                let dest = if bitcast_compatible {
                    // if we're here, the type is scalar-like (a primitive, a
                    // SIMD type or a pointer), and so can be handled as a
                    // by-value ValueRef and can also be directly bitcast to the
                    // target type. Doing this special case makes conversions
                    // like `u32x4` -> `u64x2` much nicer for LLVM and so more
                    // efficient (these are done efficiently implicitly in C
                    // with the `__m128i` type and so this means Rust doesn't
                    // lose out there).
                    let expr = &arg_exprs[0];
                    let datum = unpack_datum!(bcx, expr::trans(bcx, expr));
                    let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "transmute_temp"));
                    let val = if datum.kind.is_by_ref() {
                        load_ty(bcx, datum.val, datum.ty)
                    } else {
                        from_immediate(bcx, datum.val)
                    };

                    let cast_val = BitCast(bcx, val, llret_ty);

                    match dest {
                        expr::SaveIn(d) => {
                            // this often occurs in a sequence like `Store(val,
                            // d); val2 = Load(d)`, so disappears easily.
                            Store(bcx, cast_val, d);
                        }
                        expr::Ignore => {}
                    }
                    dest
                } else {
                    // The types are too complicated to do with a by-value
                    // bitcast, so pointer cast instead. We need to cast the
                    // dest so the types work out.
                    let dest = match dest {
                        expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
                        expr::Ignore => expr::Ignore
                    };
                    bcx = expr::trans_into(bcx, &arg_exprs[0], dest);
                    dest
                };

                fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
                fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

                return match dest {
                    expr::SaveIn(d) => Result::new(bcx, d),
                    expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
                };
            }

            _ => {
                bug!("expected expr as argument for transmute");
            }
        }
    }
    // For `move_val_init` we can evaluate the destination address
    // (the first argument) and then trans the source value (the
    // second argument) directly into the resulting destination
    // address.
    if name == "move_val_init" {
        if let callee::ArgExprs(ref exprs) = args {
            let (dest_expr, source_expr) = if exprs.len() != 2 {
                bug!("expected two exprs as arguments for `move_val_init` intrinsic");
            } else {
                (&exprs[0], &exprs[1])
            };

            // evaluate destination address
            let dest_datum = unpack_datum!(bcx, expr::trans(bcx, dest_expr));
            let dest_datum = unpack_datum!(
                bcx, dest_datum.to_rvalue_datum(bcx, "arg"));
            let dest_datum = unpack_datum!(
                bcx, dest_datum.to_appropriate_datum(bcx));

            // `expr::trans_into(bcx, expr, dest)` is equiv to
            //
            // `trans(bcx, expr).store_to_dest(dest)`,
            //
            // which for `dest == expr::SaveIn(addr)`, is equivalent to:
            //
            // `trans(bcx, expr).store_to(bcx, addr)`.
            let lldest = expr::Dest::SaveIn(dest_datum.val);
            bcx = expr::trans_into(bcx, source_expr, lldest);

            let llresult = C_nil(ccx);
            fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

            return Result::new(bcx, llresult);
        } else {
            bug!("expected two exprs as arguments for `move_val_init` intrinsic");
        }
    }
    // save the actual AST arguments for later (some places need to do
    // const-evaluation on them)
    let expr_arguments = match args {
        callee::ArgExprs(args) => Some(args),
        _ => None
    };

    // Push the arguments.
    let mut llargs = Vec::new();
    bcx = callee::trans_args(bcx,
                             Abi::RustIntrinsic,
                             fn_ty,
                             &mut callee::Intrinsic,
                             args,
                             &mut llargs,
                             cleanup::CustomScope(cleanup_scope));

    fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
    // These are the only intrinsic functions that diverge.
    if name == "abort" {
        let llfn = ccx.get_intrinsic(&("llvm.trap"));
        Call(bcx, llfn, &[], call_debug_location);
        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
        Unreachable(bcx);
        return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
    } else if &name[..] == "unreachable" {
        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
        Unreachable(bcx);
        return Result::new(bcx, C_nil(ccx));
    }
    let ret_ty = match ret_ty {
        ty::FnConverging(ret_ty) => ret_ty,
        ty::FnDiverging => bug!()
    };

    let llret_ty = type_of::type_of(ccx, ret_ty);

    // Get location to store the result. If the user does
    // not care about the result, just make a stack slot
    let llresult = match dest {
        expr::SaveIn(d) => d,
        expr::Ignore => {
            if !type_is_zero_size(ccx, ret_ty) {
                let llresult = alloc_ty(bcx, ret_ty, "intrinsic_result");
                call_lifetime_start(bcx, llresult);
                llresult
            } else {
                C_undef(llret_ty.ptr_to())
            }
        }
    };
    let simple = get_simple_intrinsic(ccx, &name);
    let llval = match (simple, &name[..]) {
        (Some(llfn), _) => {
            Call(bcx, llfn, &llargs, call_debug_location)
        }
        (_, "try") => {
            bcx = try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult,
                                call_debug_location);
            C_nil(ccx)
        }
        (_, "breakpoint") => {
            let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
            Call(bcx, llfn, &[], call_debug_location)
        }
        (_, "size_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
        }
        (_, "size_of_val") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_sized(tcx, tp_ty) {
                let (llsize, _) =
                    glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]);
                llsize
            } else {
                let lltp_ty = type_of::type_of(ccx, tp_ty);
                C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
            }
        }
        (_, "min_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_uint(ccx, type_of::align_of(ccx, tp_ty))
        }
        (_, "min_align_of_val") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_sized(tcx, tp_ty) {
                let (_, llalign) =
                    glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]);
                llalign
            } else {
                C_uint(ccx, type_of::align_of(ccx, tp_ty))
            }
        }
        (_, "pref_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
        }
        (_, "drop_in_place") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let ptr = if type_is_sized(tcx, tp_ty) {
                llargs[0]
            } else {
                let scratch = rvalue_scratch_datum(bcx, tp_ty, "tmp");
                Store(bcx, llargs[0], expr::get_dataptr(bcx, scratch.val));
                Store(bcx, llargs[1], expr::get_meta(bcx, scratch.val));
                fcx.schedule_lifetime_end(cleanup::CustomScope(cleanup_scope), scratch.val);
                scratch.val
            };
            glue::drop_ty(bcx, ptr, tp_ty, call_debug_location);
            C_nil(ccx)
        }
        (_, "type_name") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let ty_name = token::intern_and_get_ident(&tp_ty.to_string());
            C_str_slice(ccx, ty_name)
        }
        (_, "type_id") => {
            let hash = ccx.tcx().hash_crate_independent(*substs.types.get(FnSpace, 0),
                                                        &ccx.link_meta().crate_hash);
            C_u64(ccx, hash)
        }
        (_, "init_dropped") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_zero_size(ccx, tp_ty) {
                drop_done_fill_mem(bcx, llresult, tp_ty);
            }
            C_nil(ccx)
        }
        (_, "init") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_zero_size(ccx, tp_ty) {
                // Just zero out the stack slot. (See comment on base::memzero for explanation)
                init_zero_mem(bcx, llresult, tp_ty);
            }
            C_nil(ccx)
        }
        // Effectively no-ops
        (_, "uninit") | (_, "forget") => {
            C_nil(ccx)
        }
        (_, "needs_drop") => {
            let tp_ty = *substs.types.get(FnSpace, 0);

            C_bool(ccx, bcx.fcx.type_needs_drop(tp_ty))
        }
        (_, "offset") => {
            let ptr = llargs[0];
            let offset = llargs[1];
            InBoundsGEP(bcx, ptr, &[offset])
        }
        (_, "arith_offset") => {
            let ptr = llargs[0];
            let offset = llargs[1];
            GEP(bcx, ptr, &[offset])
        }
        (_, "copy_nonoverlapping") => {
            copy_intrinsic(bcx,
                           false,
                           false,
                           *substs.types.get(FnSpace, 0),
                           llargs[1],
                           llargs[0],
                           llargs[2],
                           call_debug_location)
        }
        (_, "copy") => {
            copy_intrinsic(bcx,
                           true,
                           false,
                           *substs.types.get(FnSpace, 0),
                           llargs[1],
                           llargs[0],
                           llargs[2],
                           call_debug_location)
        }
        (_, "write_bytes") => {
            memset_intrinsic(bcx,
                             false,
                             *substs.types.get(FnSpace, 0),
                             llargs[0],
                             llargs[1],
                             llargs[2],
                             call_debug_location)
        }
        (_, "volatile_copy_nonoverlapping_memory") => {
            copy_intrinsic(bcx,
                           false,
                           true,
                           *substs.types.get(FnSpace, 0),
                           llargs[0],
                           llargs[1],
                           llargs[2],
                           call_debug_location)
        }
        (_, "volatile_copy_memory") => {
            copy_intrinsic(bcx,
                           true,
                           true,
                           *substs.types.get(FnSpace, 0),
                           llargs[0],
                           llargs[1],
                           llargs[2],
                           call_debug_location)
        }
        (_, "volatile_set_memory") => {
            memset_intrinsic(bcx,
                             true,
                             *substs.types.get(FnSpace, 0),
                             llargs[0],
                             llargs[1],
                             llargs[2],
                             call_debug_location)
        }
        (_, "volatile_load") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let mut ptr = llargs[0];
            if let Some(ty) = fn_ty.ret.cast {
                ptr = PointerCast(bcx, ptr, ty.ptr_to());
            }
            let load = VolatileLoad(bcx, ptr);
            unsafe {
                llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty));
            }
            to_immediate(bcx, load, tp_ty)
        }
        (_, "volatile_store") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if type_is_fat_ptr(bcx.tcx(), tp_ty) {
                VolatileStore(bcx, llargs[1], expr::get_dataptr(bcx, llargs[0]));
                VolatileStore(bcx, llargs[2], expr::get_meta(bcx, llargs[0]));
            } else {
                let val = if fn_ty.args[1].is_indirect() {
                    Load(bcx, llargs[1])
                } else {
                    from_immediate(bcx, llargs[1])
                };
                let ptr = PointerCast(bcx, llargs[0], val_ty(val).ptr_to());
                let store = VolatileStore(bcx, val, ptr);
                unsafe {
                    llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty));
                }
            }
            C_nil(ccx)
        }
        (_, "ctlz") | (_, "cttz") | (_, "ctpop") | (_, "bswap") |
        (_, "add_with_overflow") | (_, "sub_with_overflow") | (_, "mul_with_overflow") |
        (_, "overflowing_add") | (_, "overflowing_sub") | (_, "overflowing_mul") |
        (_, "unchecked_div") | (_, "unchecked_rem") => {
            let sty = &arg_tys[0].sty;
            match int_type_width_signed(sty, ccx) {
                Some((width, signed)) =>
                    match &*name {
                        "ctlz" => count_zeros_intrinsic(bcx, &format!("llvm.ctlz.i{}", width),
                                                        llargs[0], call_debug_location),
                        "cttz" => count_zeros_intrinsic(bcx, &format!("llvm.cttz.i{}", width),
                                                        llargs[0], call_debug_location),
                        "ctpop" => Call(bcx, ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
                                        &llargs, call_debug_location),
                        "bswap" => {
                            if width == 8 {
                                llargs[0] // byte swap a u8/i8 is just a no-op
                            } else {
                                Call(bcx, ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
                                     &llargs, call_debug_location)
                            }
                        }
                        "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
                            let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
                                                    if signed { 's' } else { 'u' },
                                                    &name[..3], width);
                            with_overflow_intrinsic(bcx, &intrinsic, llargs[0], llargs[1], llresult,
                                                    call_debug_location)
                        },
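                        // For instance, `add_with_overflow` on a signed 32-bit
                        // integer selects `llvm.sadd.with.overflow.i32`
                        // (an illustration of the format! above).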
                        "overflowing_add" => Add(bcx, llargs[0], llargs[1], call_debug_location),
                        "overflowing_sub" => Sub(bcx, llargs[0], llargs[1], call_debug_location),
                        "overflowing_mul" => Mul(bcx, llargs[0], llargs[1], call_debug_location),
                        "unchecked_div" =>
                            if signed {
                                SDiv(bcx, llargs[0], llargs[1], call_debug_location)
                            } else {
                                UDiv(bcx, llargs[0], llargs[1], call_debug_location)
                            },
                        "unchecked_rem" =>
                            if signed {
                                SRem(bcx, llargs[0], llargs[1], call_debug_location)
                            } else {
                                URem(bcx, llargs[0], llargs[1], call_debug_location)
                            },
                        _ => bug!(),
                    },
                None => {
                    span_invalid_monomorphization_error(
                        tcx.sess, span,
                        &format!("invalid monomorphization of `{}` intrinsic: \
                                  expected basic integer type, found `{}`", name, sty));
                    C_nil(ccx)
                }
            }
        }
        (_, "fadd_fast") | (_, "fsub_fast") | (_, "fmul_fast") | (_, "fdiv_fast") |
        (_, "frem_fast") => {
            let sty = &arg_tys[0].sty;
            match float_type_width(sty) {
                Some(_width) =>
                    match &*name {
                        "fadd_fast" => FAddFast(bcx, llargs[0], llargs[1], call_debug_location),
                        "fsub_fast" => FSubFast(bcx, llargs[0], llargs[1], call_debug_location),
                        "fmul_fast" => FMulFast(bcx, llargs[0], llargs[1], call_debug_location),
                        "fdiv_fast" => FDivFast(bcx, llargs[0], llargs[1], call_debug_location),
                        "frem_fast" => FRemFast(bcx, llargs[0], llargs[1], call_debug_location),
                        _ => bug!(),
                    },
                None => {
                    span_invalid_monomorphization_error(
                        tcx.sess, span,
                        &format!("invalid monomorphization of `{}` intrinsic: \
                                  expected basic float type, found `{}`", name, sty));
                    C_nil(ccx)
                }
            }
        }
        (_, "return_address") => {
            if !fcx.fn_ty.ret.is_indirect() {
                span_err!(tcx.sess, span, E0510,
                          "invalid use of `return_address` intrinsic: function \
                           does not use out pointer");
                C_null(Type::i8p(ccx))
            } else {
                PointerCast(bcx, llvm::get_param(fcx.llfn, 0), Type::i8p(ccx))
            }
        }
        (_, "discriminant_value") => {
            let val_ty = substs.types.get(FnSpace, 0);
            match val_ty.sty {
                ty::TyEnum(..) => {
                    let repr = adt::represent_type(ccx, *val_ty);
                    adt::trans_get_discr(bcx, &repr, llargs[0],
                                         Some(llret_ty), true)
                }
                _ => C_null(llret_ty)
            }
        }
        (_, name) if name.starts_with("simd_") => {
            generic_simd_intrinsic(bcx, name,
                                   substs,
                                   callee_ty,
                                   expr_arguments,
                                   &llargs,
                                   ret_ty, llret_ty,
                                   call_debug_location,
                                   span)
        }
        // This requires that atomic intrinsics follow a specific naming pattern:
        // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
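        // For example (an illustration of the parsing below):
        // "atomic_cxchg_acqrel" splits into ["atomic", "cxchg", "acqrel"],
        // giving order = AcquireRelease and failorder = Acquire, while a bare
        // "atomic_xadd" defaults both orderings to SequentiallyConsistent.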
        (_, name) if name.starts_with("atomic_") => {
            let split: Vec<&str> = name.split('_').collect();

            let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
            let (order, failorder) = match split.len() {
                2 => (llvm::SequentiallyConsistent, llvm::SequentiallyConsistent),
                3 => match split[2] {
                    "unordered" => (llvm::Unordered, llvm::Unordered),
                    "relaxed" => (llvm::Monotonic, llvm::Monotonic),
                    "acq" => (llvm::Acquire, llvm::Acquire),
                    "rel" => (llvm::Release, llvm::Monotonic),
                    "acqrel" => (llvm::AcquireRelease, llvm::Acquire),
                    "failrelaxed" if is_cxchg =>
                        (llvm::SequentiallyConsistent, llvm::Monotonic),
                    "failacq" if is_cxchg =>
                        (llvm::SequentiallyConsistent, llvm::Acquire),
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                },
                4 => match (split[2], split[3]) {
                    ("acq", "failrelaxed") if is_cxchg =>
                        (llvm::Acquire, llvm::Monotonic),
                    ("acqrel", "failrelaxed") if is_cxchg =>
                        (llvm::AcquireRelease, llvm::Monotonic),
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                },
                _ => ccx.sess().fatal("Atomic intrinsic not in correct format"),
            };

            match split[1] {
                "cxchg" | "cxchgweak" => {
                    let sty = &substs.types.get(FnSpace, 0).sty;
                    if int_type_width_signed(sty, ccx).is_some() {
                        let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False };
                        let val = AtomicCmpXchg(bcx, llargs[0], llargs[1], llargs[2],
                                                order, failorder, weak);
                        let result = ExtractValue(bcx, val, 0);
                        let success = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
                        Store(bcx, result, StructGEP(bcx, llresult, 0));
                        Store(bcx, success, StructGEP(bcx, llresult, 1));
                    } else {
                        span_invalid_monomorphization_error(
                            tcx.sess, span,
                            &format!("invalid monomorphization of `{}` intrinsic: \
                                      expected basic integer type, found `{}`", name, sty));
                    }
                    C_nil(ccx)
                }
                "load" => {
                    let sty = &substs.types.get(FnSpace, 0).sty;
                    if int_type_width_signed(sty, ccx).is_some() {
                        AtomicLoad(bcx, llargs[0], order)
                    } else {
                        span_invalid_monomorphization_error(
                            tcx.sess, span,
                            &format!("invalid monomorphization of `{}` intrinsic: \
                                      expected basic integer type, found `{}`", name, sty));
                        C_nil(ccx)
                    }
                }

                "store" => {
                    let sty = &substs.types.get(FnSpace, 0).sty;
                    if int_type_width_signed(sty, ccx).is_some() {
                        AtomicStore(bcx, llargs[1], llargs[0], order);
                    } else {
                        span_invalid_monomorphization_error(
                            tcx.sess, span,
                            &format!("invalid monomorphization of `{}` intrinsic: \
                                      expected basic integer type, found `{}`", name, sty));
                    }
                    C_nil(ccx)
                }
                "fence" => {
                    AtomicFence(bcx, order, llvm::CrossThread);
                    C_nil(ccx)
                }

                "singlethreadfence" => {
                    AtomicFence(bcx, order, llvm::SingleThread);
                    C_nil(ccx)
                }
                // These are all AtomicRMW ops
                op => {
                    let atom_op = match op {
                        "xchg" => llvm::AtomicXchg,
                        "xadd" => llvm::AtomicAdd,
                        "xsub" => llvm::AtomicSub,
                        "and"  => llvm::AtomicAnd,
                        "nand" => llvm::AtomicNand,
                        "or"   => llvm::AtomicOr,
                        "xor"  => llvm::AtomicXor,
                        "max"  => llvm::AtomicMax,
                        "min"  => llvm::AtomicMin,
                        "umax" => llvm::AtomicUMax,
                        "umin" => llvm::AtomicUMin,
                        _ => ccx.sess().fatal("unknown atomic operation")
                    };

                    let sty = &substs.types.get(FnSpace, 0).sty;
                    if int_type_width_signed(sty, ccx).is_some() {
                        AtomicRMW(bcx, atom_op, llargs[0], llargs[1], order)
                    } else {
                        span_invalid_monomorphization_error(
                            tcx.sess, span,
                            &format!("invalid monomorphization of `{}` intrinsic: \
                                      expected basic integer type, found `{}`", name, sty));
                        C_nil(ccx)
                    }
                }
            }
        }
        (_, _) => {
            let intr = match Intrinsic::find(&name) {
                Some(intr) => intr,
                None => bug!("unknown intrinsic '{}'", name),
            };
            fn one<T>(x: Vec<T>) -> T {
                assert_eq!(x.len(), 1);
                x.into_iter().next().unwrap()
            }
            fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type,
                          any_changes_needed: &mut bool) -> Vec<Type> {
                use intrinsics::Type::*;
                match *t {
                    Void => vec![Type::void(ccx)],
                    Integer(_signed, width, llvm_width) => {
                        *any_changes_needed |= width != llvm_width;
                        vec![Type::ix(ccx, llvm_width as u64)]
                    }
                    Float(x) => {
                        match x {
                            32 => vec![Type::f32(ccx)],
                            64 => vec![Type::f64(ccx)],
                            _ => bug!()
                        }
                    }
                    Pointer(ref t, ref llvm_elem, _const) => {
                        *any_changes_needed |= llvm_elem.is_some();

                        let t = llvm_elem.as_ref().unwrap_or(t);
                        let elem = one(ty_to_type(ccx, t,
                                                  any_changes_needed));
                        vec![elem.ptr_to()]
                    }
                    Vector(ref t, ref llvm_elem, length) => {
                        *any_changes_needed |= llvm_elem.is_some();

                        let t = llvm_elem.as_ref().unwrap_or(t);
                        let elem = one(ty_to_type(ccx, t,
                                                  any_changes_needed));
                        vec![Type::vector(&elem,
                                          length as u64)]
                    }
                    Aggregate(false, ref contents) => {
                        let elems = contents.iter()
                                            .map(|t| one(ty_to_type(ccx, t, any_changes_needed)))
                                            .collect::<Vec<_>>();
                        vec![Type::struct_(ccx, &elems, false)]
                    }
                    Aggregate(true, ref contents) => {
                        *any_changes_needed = true;
                        contents.iter()
                                .flat_map(|t| ty_to_type(ccx, t, any_changes_needed))
                                .collect()
                    }
                }
            }
            // This allows an argument list like `foo, (bar, baz),
            // qux` to be converted into `foo, bar, baz, qux`, integer
            // arguments to be truncated as needed and pointers to be
            // cast.
            fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                            t: &intrinsics::Type,
                                            arg_type: Ty<'tcx>,
                                            llarg: ValueRef)
                                            -> Vec<ValueRef>
            {
                match *t {
                    intrinsics::Type::Aggregate(true, ref contents) => {
                        // We found a tuple that needs squishing! So
                        // run over the tuple and load each field.
                        //
                        // This assumes the type is "simple", i.e. no
                        // destructors, and the contents are SIMD
                        // etc.
                        assert!(!bcx.fcx.type_needs_drop(arg_type));

                        let repr = adt::represent_type(bcx.ccx(), arg_type);
                        let repr_ptr = &repr;
                        let arg = adt::MaybeSizedValue::sized(llarg);
                        (0..contents.len())
                            .map(|i| {
                                Load(bcx, adt::trans_field_ptr(bcx, repr_ptr, arg, Disr(0), i))
                            })
                            .collect()
                    }
                    intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
                        let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
                        vec![PointerCast(bcx, llarg,
                                         llvm_elem.ptr_to())]
                    }
                    intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
                        let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
                        vec![BitCast(bcx, llarg,
                                     Type::vector(&llvm_elem, length as u64))]
                    }
                    intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
                        // the LLVM intrinsic uses a smaller integer
                        // size than the C intrinsic's signature, so
                        // we have to trim it down here.
                        vec![Trunc(bcx, llarg, Type::ix(bcx.ccx(), llvm_width as u64))]
                    }
                    _ => vec![llarg],
                }
            }
            let mut any_changes_needed = false;
            let inputs = intr.inputs.iter()
                                    .flat_map(|t| ty_to_type(ccx, t, &mut any_changes_needed))
                                    .collect::<Vec<_>>();

            let mut out_changes = false;
            let outputs = one(ty_to_type(ccx, &intr.output, &mut out_changes));
            // outputting a flattened aggregate is nonsense
            assert!(!out_changes);

            let llargs = if !any_changes_needed {
                // no aggregates to flatten, so no change needed
                llargs
            } else {
                // there are some aggregates that need to be flattened
                // in the LLVM call, so we need to run over the types
                // again to find them and extract the arguments
                intr.inputs.iter()
                           .zip(&llargs)
                           .zip(&arg_tys)
                           .flat_map(|((t, llarg), ty)| modify_as_needed(bcx, t, ty, *llarg))
                           .collect()
            };

            assert_eq!(inputs.len(), llargs.len());
            let val = match intr.definition {
                intrinsics::IntrinsicDef::Named(name) => {
                    let f = declare::declare_cfn(ccx,
                                                 name,
                                                 Type::func(&inputs, &outputs));
                    Call(bcx, f, &llargs, call_debug_location)
                }
            };

            match *intr.output {
                intrinsics::Type::Aggregate(flatten, ref elems) => {
                    // the output is a tuple so we need to munge it properly
                    assert!(!flatten);

                    for i in 0..elems.len() {
                        let val = ExtractValue(bcx, val, i);
                        Store(bcx, val, StructGEP(bcx, llresult, i));
                    }
                    C_nil(ccx)
                }
                _ => val,
            }
        }
    };
    if val_ty(llval) != Type::void(ccx) &&
       machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
        if let Some(ty) = fn_ty.ret.cast {
            let ptr = PointerCast(bcx, llresult, ty.ptr_to());
            let store = Store(bcx, llval, ptr);
            unsafe {
                llvm::LLVMSetAlignment(store, type_of::align_of(ccx, ret_ty));
            }
        } else {
            store_ty(bcx, llval, llresult, ret_ty);
        }
    }
    // If we made a temporary stack slot, let's clean it up
    match dest {
        expr::Ignore => {
            bcx = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location);
            call_lifetime_end(bcx, llresult);
        }
        expr::SaveIn(_) => {}
    }

    fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

    Result::new(bcx, llresult)
}
fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              allow_overlap: bool,
                              volatile: bool,
                              tp_ty: Ty<'tcx>,
                              dst: ValueRef,
                              src: ValueRef,
                              count: ValueRef,
                              call_debug_location: DebugLoc)
                              -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());

    let operation = if allow_overlap {
        "memmove"
    } else {
        "memcpy"
    };
    let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, int_size);
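    // e.g. a non-overlapping copy on a 64-bit target selects
    // "llvm.memcpy.p0i8.p0i8.i64" (an illustration of the format! above).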
    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx,
         llfn,
         &[dst_ptr,
           src_ptr,
           Mul(bcx, size, count, DebugLoc::None),
           align,
           C_bool(ccx, volatile)],
         call_debug_location)
}
fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                volatile: bool,
                                tp_ty: Ty<'tcx>,
                                dst: ValueRef,
                                val: ValueRef,
                                count: ValueRef,
                                call_debug_location: DebugLoc)
                                -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());

    let name = format!("llvm.memset.p0i8.i{}", int_size);

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx,
         llfn,
         &[dst_ptr,
           val,
           Mul(bcx, size, count, DebugLoc::None),
           align,
           C_bool(ccx, volatile)],
         call_debug_location)
}
fn count_zeros_intrinsic(bcx: Block,
                         name: &str,
                         val: ValueRef,
                         call_debug_location: DebugLoc)
                         -> ValueRef {
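    // The second argument to llvm.ctlz/llvm.cttz is an `i1 is_zero_undef`
    // flag; passing false below asks LLVM for a defined result (the bit
    // width) when the input is zero.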
    let y = C_bool(bcx.ccx(), false);
    let llfn = bcx.ccx().get_intrinsic(&name);
    Call(bcx, llfn, &[val, y], call_debug_location)
}
fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                       name: &str,
                                       a: ValueRef,
                                       b: ValueRef,
                                       out: ValueRef,
                                       call_debug_location: DebugLoc)
                                       -> ValueRef {
    let llfn = bcx.ccx().get_intrinsic(&name);
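    // The llvm.*.with.overflow intrinsics return a `{ iN, i1 }` pair:
    // the wrapped result and an overflow bit.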
    // Convert `i1` to a `bool`, and write it to the out parameter
    let val = Call(bcx, llfn, &[a, b], call_debug_location);
    let result = ExtractValue(bcx, val, 0);
    let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
    Store(bcx, result, StructGEP(bcx, out, 0));
    Store(bcx, overflow, StructGEP(bcx, out, 1));

    C_nil(bcx.ccx())
}
fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                             func: ValueRef,
                             data: ValueRef,
                             local_ptr: ValueRef,
                             dest: ValueRef,
                             dloc: DebugLoc) -> Block<'blk, 'tcx> {
    if bcx.sess().no_landing_pads() {
        Call(bcx, func, &[data], dloc);
        Store(bcx, C_null(Type::i8p(bcx.ccx())), dest);
        bcx
    } else if wants_msvc_seh(bcx.sess()) {
        trans_msvc_try(bcx, func, data, local_ptr, dest, dloc)
    } else {
        trans_gnu_try(bcx, func, data, local_ptr, dest, dloc)
    }
}
// MSVC's definition of the `rust_try` function.
//
// This implementation uses the new exception handling instructions in LLVM
// which have support in LLVM for SEH on MSVC targets. Although these
// instructions are meant to work for all targets, as of this writing LLVM
// does not recommend using them, as the old ones are still better optimized.
fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              func: ValueRef,
                              data: ValueRef,
                              local_ptr: ValueRef,
                              dest: ValueRef,
                              dloc: DebugLoc) -> Block<'blk, 'tcx> {
    let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
        let ccx = bcx.ccx();
        let dloc = DebugLoc::None;

        SetPersonalityFn(bcx, bcx.fcx.eh_personality());

        let normal = bcx.fcx.new_temp_block("normal");
        let catchswitch = bcx.fcx.new_temp_block("catchswitch");
        let catchpad = bcx.fcx.new_temp_block("catchpad");
        let caught = bcx.fcx.new_temp_block("caught");

        let func = llvm::get_param(bcx.fcx.llfn, 0);
        let data = llvm::get_param(bcx.fcx.llfn, 1);
        let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);
        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%func, %data, %ptr) {
        //      %slot = alloca i8*
        //      call @llvm.localescape(%slot)
        //      store %ptr, %slot
        //      invoke %func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad] unwind to caller
        //
        //   catchpad:
        //      %tok = catchpad within %cs [%rust_try_filter]
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        // This structure follows the basic usage of the instructions in LLVM
        // (see their documentation/test cases for examples), but a
        // perhaps-surprising part here is the usage of the `localescape`
        // intrinsic. This is used to allow the filter function (also generated
        // here) to access variables on the stack of this intrinsic. This
        // ability enables us to transfer information about the exception being
        // thrown to this point, where we're catching the exception.
1124 let slot
= Alloca(bcx
, Type
::i8p(ccx
), "slot");
1125 let localescape
= ccx
.get_intrinsic(&"llvm.localescape");
1126 Call(bcx
, localescape
, &[slot
], dloc
);
1127 Store(bcx
, local_ptr
, slot
);
1128 Invoke(bcx
, func
, &[data
], normal
.llbb
, catchswitch
.llbb
, dloc
);
1130 Ret(normal
, C_i32(ccx
, 0), dloc
);
1132 let cs
= CatchSwitch(catchswitch
, None
, None
, 1);
1133 AddHandler(catchswitch
, cs
, catchpad
.llbb
);
1135 let filter
= generate_filter_fn(bcx
.fcx
, bcx
.fcx
.llfn
);
1136 let filter
= BitCast(catchpad
, filter
, Type
::i8p(ccx
));
1137 let tok
= CatchPad(catchpad
, cs
, &[filter
]);
1138 CatchRet(catchpad
, tok
, caught
.llbb
);
1140 Ret(caught
, C_i32(ccx
, 1), dloc
);
    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc);
    Store(bcx, ret, dest);
    return bcx;
}
// Definition of the standard "try" function for Rust using the GNU-like model
// of exceptions (e.g. the normal semantics of LLVM's landingpad and invoke
// instructions).
//
// This translation is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is done
// because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                             func: ValueRef,
                             data: ValueRef,
                             local_ptr: ValueRef,
                             dest: ValueRef,
                             dloc: DebugLoc) -> Block<'blk, 'tcx> {
    let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
        let ccx = bcx.ccx();
        let tcx = ccx.tcx();
        let dloc = DebugLoc::None;

        // Translates the shims described above:
        //
        //   bcx:
        //      invoke %func(%args...) normal %normal unwind %catch
        //
        //   normal:
        //      ret 0
        //
        //   catch:
        //      (ptr, _) = landingpad
        //      store ptr, %local_ptr
        //      ret 1
        //
        // Note that the `local_ptr` data passed into the `try` intrinsic is
        // expected to be `*mut *mut u8` for this to actually work, but that's
        // managed by the standard library.
        attributes::emit_uwtable(bcx.fcx.llfn, true);
        let catch_pers = match tcx.lang_items.eh_personality_catch() {
            Some(did) => {
                Callee::def(ccx, did, tcx.mk_substs(Substs::empty())).reify(ccx).val
            }
            None => bug!("eh_personality_catch not defined"),
        };

        let then = bcx.fcx.new_temp_block("then");
        let catch = bcx.fcx.new_temp_block("catch");

        let func = llvm::get_param(bcx.fcx.llfn, 0);
        let data = llvm::get_param(bcx.fcx.llfn, 1);
        let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);
        Invoke(bcx, func, &[data], then.llbb, catch.llbb, dloc);
        Ret(then, C_i32(ccx, 0), dloc);
        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        // rust_try ignores the selector.
        let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
                                    false);
        let vals = LandingPad(catch, lpad_ty, catch_pers, 1);
        AddClause(catch, vals, C_null(Type::i8p(ccx)));
        let ptr = ExtractValue(catch, vals, 0);
        Store(catch, ptr, BitCast(catch, local_ptr, Type::i8p(ccx).ptr_to()));
        Ret(catch, C_i32(ccx, 1), dloc);
    });
    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc);
    Store(bcx, ret, dest);
    return bcx;
}
// Helper function to give a Block to a closure to translate a shim function.
// This is currently primarily used for the `try` intrinsic functions above.
fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
                    name: &str,
                    inputs: Vec<Ty<'tcx>>,
                    output: ty::FnOutput<'tcx>,
                    trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
                    -> ValueRef {
    let ccx = fcx.ccx;
    let sig = ty::FnSig {
        inputs: inputs,
        output: output,
        variadic: false,
    };
    let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]);

    let rust_fn_ty = ccx.tcx().mk_fn_ptr(ty::BareFnTy {
        unsafety: hir::Unsafety::Unsafe,
        abi: Abi::Rust,
        sig: ty::Binder(sig)
    });
    let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty);
    let empty_substs = ccx.tcx().mk_substs(Substs::empty());
    let (fcx, block_arena);
    block_arena = TypedArena::new();
    fcx = FunctionContext::new(ccx, llfn, fn_ty, None, empty_substs, &block_arena);
    let bcx = fcx.init(true, None);
    trans(bcx);
    fcx.cleanup();
    llfn
}
// Helper function used to get a handle to the `__rust_try` function used to
// catch exceptions.
//
// This function is only generated once and is then cached.
fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
                             trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
                             -> ValueRef {
    let ccx = fcx.ccx;
    if let Some(llfn) = ccx.rust_try_fn().get() {
        return llfn;
    }

    // Define the type up front for the signature of the rust_try function.
    let tcx = ccx.tcx();
    let i8p = tcx.mk_mut_ptr(tcx.types.i8);
    let fn_ty = tcx.mk_fn_ptr(ty::BareFnTy {
        unsafety: hir::Unsafety::Unsafe,
        abi: Abi::Rust,
        sig: ty::Binder(ty::FnSig {
            inputs: vec![i8p],
            output: ty::FnOutput::FnConverging(tcx.mk_nil()),
            variadic: false,
        }),
    });
    let output = ty::FnOutput::FnConverging(tcx.types.i32);
    let rust_try = gen_fn(fcx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans);
    ccx.rust_try_fn().set(Some(rust_try));
    rust_try
}
// For MSVC-style exceptions (SEH), the compiler generates a filter function
// which is used to determine whether an exception is being caught (e.g. if it's
// a Rust exception or some other).
//
// This function is used to generate said filter function. The shim generated
// here is actually just a thin wrapper to call the real implementation in the
// standard library itself. For reasons as to why, see seh.rs in the standard
// library.
fn generate_filter_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
                                rust_try_fn: ValueRef)
                                -> ValueRef {
    let ccx = fcx.ccx;
    let tcx = ccx.tcx();
    let dloc = DebugLoc::None;

    let rust_try_filter = match tcx.lang_items.msvc_try_filter() {
        Some(did) => {
            Callee::def(ccx, did, tcx.mk_substs(Substs::empty())).reify(ccx).val
        }
        None => bug!("msvc_try_filter not defined"),
    };

    let output = ty::FnOutput::FnConverging(tcx.types.i32);
    let i8p = tcx.mk_mut_ptr(tcx.types.i8);

    let frameaddress = ccx.get_intrinsic(&"llvm.frameaddress");
    let recoverfp = ccx.get_intrinsic(&"llvm.x86.seh.recoverfp");
    let localrecover = ccx.get_intrinsic(&"llvm.localrecover");
    // On all platforms, once we have the EXCEPTION_POINTERS handle as well as
    // the base pointer, we follow the standard layout of:
    //
    //      block:
    //          %parentfp = call i8* llvm.x86.seh.recoverfp(@rust_try_fn, %bp)
    //          %arg = call i8* llvm.localrecover(@rust_try_fn, %parentfp, 0)
    //          %ret = call i32 @the_real_filter_function(%ehptrs, %arg)
    //          ret i32 %ret
    //
    // The recoverfp intrinsic is used to recover the frame pointer of the
    // `rust_try_fn` function, which is then in turn passed to the
    // `localrecover` intrinsic (pairing with the `localescape` intrinsic
    // mentioned above). Putting all this together means that we now have a
    // handle to the arguments passed into the `try` function, allowing writing
    // to the stack over there.
    //
    // For more info, see seh.rs in the standard library.
    let do_trans = |bcx: Block, ehptrs, base_pointer| {
        let rust_try_fn = BitCast(bcx, rust_try_fn, Type::i8p(ccx));
        let parentfp = Call(bcx, recoverfp, &[rust_try_fn, base_pointer], dloc);
        let arg = Call(bcx, localrecover,
                       &[rust_try_fn, parentfp, C_i32(ccx, 0)], dloc);
        let ret = Call(bcx, rust_try_filter, &[ehptrs, arg], dloc);
        Ret(bcx, ret, dloc);
    };
    if ccx.tcx().sess.target.target.arch == "x86" {
        // On x86 the filter function doesn't actually receive any arguments.
        // Instead the %ebp register contains some contextual information.
        //
        // Unfortunately I don't know of any great documentation as to what's
        // going on here, all I can say is that there are a few test cases in
        // LLVM's test suite which follow this pattern of instructions, so we
        // just do the same.
        gen_fn(fcx, "__rustc_try_filter", vec![], output, &mut |bcx| {
            let ebp = Call(bcx, frameaddress, &[C_i32(ccx, 1)], dloc);
            let exn = InBoundsGEP(bcx, ebp, &[C_i32(ccx, -20)]);
            let exn = Load(bcx, BitCast(bcx, exn, Type::i8p(ccx).ptr_to()));
            do_trans(bcx, exn, ebp);
        })
    } else if ccx.tcx().sess.target.target.arch == "x86_64" {
        // Conveniently on x86_64 the EXCEPTION_POINTERS handle and base pointer
        // are passed in as arguments to the filter function, so we just pass
        // those along.
        gen_fn(fcx, "__rustc_try_filter", vec![i8p, i8p], output, &mut |bcx| {
            let exn = llvm::get_param(bcx.fcx.llfn, 0);
            let rbp = llvm::get_param(bcx.fcx.llfn, 1);
            do_trans(bcx, exn, rbp);
        })
    } else {
        bug!("unknown target to generate a filter function")
    }
}
fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
    span_err!(a, b, E0511, "{}", c);
}
fn generic_simd_intrinsic<'blk, 'tcx, 'a>
    (bcx: Block<'blk, 'tcx>,
     name: &str,
     substs: &'tcx subst::Substs<'tcx>,
     callee_ty: Ty<'tcx>,
     args: Option<&[P<hir::Expr>]>,
     llargs: &[ValueRef],
     ret_ty: Ty<'tcx>,
     llret_ty: Type,
     call_debug_location: DebugLoc,
     span: Span) -> ValueRef
{
    // macros for error handling:
    macro_rules! emit_error {
        ($msg: tt) => {
            emit_error!($msg, )
        };
        ($msg: tt, $($fmt: tt)*) => {
            span_invalid_monomorphization_error(
                bcx.sess(), span,
                &format!(concat!("invalid monomorphization of `{}` intrinsic: ",
                                 $msg),
                         name, $($fmt)*));
        }
    }
    macro_rules! require {
        ($cond: expr, $($fmt: tt)*) => {
            if !$cond {
                emit_error!($($fmt)*);
                return C_nil(bcx.ccx())
            }
        }
    }
! require_simd
{
1411 ($ty
: expr
, $position
: expr
) => {
1412 require
!($ty
.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position
, $ty
)
    let tcx = bcx.tcx();
    let sig = tcx.erase_late_bound_regions(callee_ty.fn_sig());
    let sig = infer::normalize_associated_type(tcx, &sig);
    let arg_tys = sig.inputs;

    // every intrinsic takes a SIMD vector as its first argument
    require_simd!(arg_tys[0], "input");
    let in_ty = arg_tys[0];
    let in_elem = arg_tys[0].simd_type(tcx);
    let in_len = arg_tys[0].simd_size(tcx);
    let comparison = match name {
        "simd_eq" => Some(hir::BiEq),
        "simd_ne" => Some(hir::BiNe),
        "simd_lt" => Some(hir::BiLt),
        "simd_le" => Some(hir::BiLe),
        "simd_gt" => Some(hir::BiGt),
        "simd_ge" => Some(hir::BiGe),
        _ => None
    };
    if let Some(cmp_op) = comparison {
        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(in_len == out_len,
                 "expected return type with length {} (same as input type `{}`), \
                  found `{}` with length {}",
                 in_len, in_ty,
                 ret_ty, out_len);
        require!(llret_ty.element_type().kind() == llvm::Integer,
                 "expected return type with integer elements, found `{}` with non-integer `{}`",
                 ret_ty,
                 ret_ty.simd_type(tcx));

        return compare_simd_types(bcx,
                                  llargs[0],
                                  llargs[1],
                                  in_elem,
                                  llret_ty,
                                  cmp_op,
                                  call_debug_location)
    }
    if name.starts_with("simd_shuffle") {
        let n: usize = match name["simd_shuffle".len()..].parse() {
            Ok(n) => n,
            Err(_) => span_bug!(span,
                                "bad `simd_shuffle` instruction only caught in trans?")
        };

        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(out_len == n,
                 "expected return type of length {}, found `{}` with length {}",
                 n, ret_ty, out_len);
        require!(in_elem == ret_ty.simd_type(tcx),
                 "expected return element type `{}` (element of input `{}`), \
                  found `{}` with element type `{}`",
                 in_elem, in_ty,
                 ret_ty, ret_ty.simd_type(tcx));

        let total_len = in_len as u64 * 2;
1481 let total_len
= in_len
as u64 * 2;
1483 let (vector
, indirect
) = match args
{
1485 match consts
::const_expr(bcx
.ccx(), &args
[2], substs
, None
,
1486 // this should probably help simd error reporting
1487 consts
::TrueConst
::Yes
) {
1488 Ok((vector
, _
)) => (vector
, false),
1489 Err(err
) => bcx
.sess().span_fatal(span
, &err
.description()),
1492 None
=> (llargs
[2], !type_is_immediate(bcx
.ccx(), arg_tys
[2]))
1495 let indices
: Option
<Vec
<_
>> = (0..n
)
1498 let val
= if indirect
{
1499 Load(bcx
, StructGEP(bcx
, vector
, i
))
1501 const_get_elt(vector
, &[i
as libc
::c_uint
])
1503 let c
= const_to_opt_uint(val
);
1506 emit_error
!("shuffle index #{} is not a constant", arg_idx
);
1509 Some(idx
) if idx
>= total_len
=> {
1510 emit_error
!("shuffle index #{} is out of bounds (limit {})",
1511 arg_idx
, total_len
);
1514 Some(idx
) => Some(C_i32(bcx
.ccx(), idx
as i32)),
1518 let indices
= match indices
{
1520 None
=> return C_null(llret_ty
)
1523 return ShuffleVector(bcx
, llargs
[0], llargs
[1], C_vector(&indices
))
    if name == "simd_insert" {
        require!(in_elem == arg_tys[2],
                 "expected inserted type `{}` (element of input `{}`), found `{}`",
                 in_elem, in_ty, arg_tys[2]);
        return InsertElement(bcx, llargs[0], llargs[2], llargs[1])
    }
    if name == "simd_extract" {
        require!(ret_ty == in_elem,
                 "expected return type `{}` (element of input `{}`), found `{}`",
                 in_elem, in_ty, ret_ty);
        return ExtractElement(bcx, llargs[0], llargs[1])
    }
    if name == "simd_cast" {
        require_simd!(ret_ty, "return");
        let out_len = ret_ty.simd_size(tcx);
        require!(in_len == out_len,
                 "expected return type with length {} (same as input type `{}`), \
                  found `{}` with length {}",
                 in_len, in_ty,
                 ret_ty, out_len);
        // casting cares about nominal type, not just structural type
        let out_elem = ret_ty.simd_type(tcx);

        if in_elem == out_elem { return llargs[0]; }

        enum Style { Float, Int(/* is signed? */ bool), Unsupported }
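        // For example (an illustration of the dispatch below): a
        // `simd_cast::<i32x4, f32x4>` classifies as (Int(true), Float)
        // and lowers to an `SIToFP` instruction.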
        let (in_style, in_width) = match in_elem.sty {
            // vectors of pointer-sized integers should've been
            // disallowed before here, so this unwrap is safe.
            ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::TyFloat(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0)
        };
        let (out_style, out_width) = match out_elem.sty {
            ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::TyFloat(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0)
        };
        match (in_style, out_style) {
            (Style::Int(in_is_signed), Style::Int(_)) => {
                return match in_width.cmp(&out_width) {
                    Ordering::Greater => Trunc(bcx, llargs[0], llret_ty),
                    Ordering::Equal => llargs[0],
                    Ordering::Less => if in_is_signed {
                        SExt(bcx, llargs[0], llret_ty)
                    } else {
                        ZExt(bcx, llargs[0], llret_ty)
                    }
                }
            }
            (Style::Int(in_is_signed), Style::Float) => {
                return if in_is_signed {
                    SIToFP(bcx, llargs[0], llret_ty)
                } else {
                    UIToFP(bcx, llargs[0], llret_ty)
                }
            }
            (Style::Float, Style::Int(out_is_signed)) => {
                return if out_is_signed {
                    FPToSI(bcx, llargs[0], llret_ty)
                } else {
                    FPToUI(bcx, llargs[0], llret_ty)
                }
            }
            (Style::Float, Style::Float) => {
                return match in_width.cmp(&out_width) {
                    Ordering::Greater => FPTrunc(bcx, llargs[0], llret_ty),
                    Ordering::Equal => llargs[0],
                    Ordering::Less => FPExt(bcx, llargs[0], llret_ty)
                }
            }
            _ => {/* Unsupported. Fallthrough. */}
        }
        require!(false,
                 "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
                 in_ty, in_elem,
                 ret_ty, out_elem);
    }
    macro_rules! arith {
        ($($name: ident: $($($p: ident),* => $call: expr),*;)*) => {
            $(
                if name == stringify!($name) {
                    match in_elem.sty {
                        $(
                            $(ty::$p(_))|* => {
                                return $call(bcx, llargs[0], llargs[1], call_debug_location)
                            }
                        )*
                        _ => {},
                    };
                    require!(false,
                             "unsupported operation on `{}` with element `{}`",
                             in_ty,
                             in_elem)
                }
            )*
        }
    }
    arith! {
        simd_add: TyUint, TyInt => Add, TyFloat => FAdd;
        simd_sub: TyUint, TyInt => Sub, TyFloat => FSub;
        simd_mul: TyUint, TyInt => Mul, TyFloat => FMul;
        simd_div: TyFloat => FDiv;
        simd_shl: TyUint, TyInt => Shl;
        simd_shr: TyUint => LShr, TyInt => AShr;
        simd_and: TyUint, TyInt => And;
        simd_or: TyUint, TyInt => Or;
        simd_xor: TyUint, TyInt => Xor;
    }
    span_bug!(span, "unknown SIMD intrinsic");
}
// Returns the width of an int TypeVariant, and if it's signed or not.
// Returns None if the type is not an integer.
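// For example, `int_type_width_signed(&ty::TyInt(ast::IntTy::I32), ccx)`
// returns `Some((32, true))` (an illustrative call; `sty` is normally taken
// from an existing type).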
fn int_type_width_signed<'tcx>(sty: &ty::TypeVariants<'tcx>, ccx: &CrateContext)
        -> Option<(u64, bool)> {
    use rustc::ty::{TyInt, TyUint};
    match *sty {
        TyInt(t) => Some((match t {
            ast::IntTy::Is => {
                match &ccx.tcx().sess.target.target.target_pointer_width[..] {
                    "32" => 32,
                    "64" => 64,
                    tws => bug!("Unsupported target word size for isize: {}", tws),
                }
            },
            ast::IntTy::I8 => 8,
            ast::IntTy::I16 => 16,
            ast::IntTy::I32 => 32,
            ast::IntTy::I64 => 64,
        }, true)),
        TyUint(t) => Some((match t {
            ast::UintTy::Us => {
                match &ccx.tcx().sess.target.target.target_pointer_width[..] {
                    "32" => 32,
                    "64" => 64,
                    tws => bug!("Unsupported target word size for usize: {}", tws),
                }
            },
            ast::UintTy::U8 => 8,
            ast::UintTy::U16 => 16,
            ast::UintTy::U32 => 32,
            ast::UintTy::U64 => 64,
        }, false)),
        _ => None,
    }
}
// Returns the width of a float TypeVariant.
// Returns None if the type is not a float.
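// For example, `float_type_width(&ty::TyFloat(ast::FloatTy::F64))` returns
// `Some(64)` (illustrative call).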
fn float_type_width<'tcx>(sty: &ty::TypeVariants<'tcx>)
        -> Option<u64> {
    use rustc::ty::TyFloat;
    match *sty {
        TyFloat(t) => Some(match t {
            ast::FloatTy::F32 => 32,
            ast::FloatTy::F64 => 64,
        }),
        _ => None,
    }
}