// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_upper_case_globals)]
use arena::TypedArena;
use intrinsics::{self, Intrinsic};
use llvm::{ValueRef, TypeKind};
use middle::subst::FnSpace;
use trans::attributes;
use trans::cleanup::CleanupMethods;
use trans::debuginfo::DebugLoc;
use trans::type_::Type;
use middle::ty::{self, Ty, TypeFoldable};
use middle::subst::Substs;
use rustc::dep_graph::DepNode;
use syntax::parse::token;
use rustc::session::Session;
use syntax::codemap::Span;
use std::cmp::Ordering;
pub fn get_simple_intrinsic(ccx: &CrateContext, item: &hir::ForeignItem) -> Option<ValueRef> {
    let name = match &*item.name.as_str() {
        "sqrtf32" => "llvm.sqrt.f32",
        "sqrtf64" => "llvm.sqrt.f64",
        "powif32" => "llvm.powi.f32",
        "powif64" => "llvm.powi.f64",
        "sinf32" => "llvm.sin.f32",
        "sinf64" => "llvm.sin.f64",
        "cosf32" => "llvm.cos.f32",
        "cosf64" => "llvm.cos.f64",
        "powf32" => "llvm.pow.f32",
        "powf64" => "llvm.pow.f64",
        "expf32" => "llvm.exp.f32",
        "expf64" => "llvm.exp.f64",
        "exp2f32" => "llvm.exp2.f32",
        "exp2f64" => "llvm.exp2.f64",
        "logf32" => "llvm.log.f32",
        "logf64" => "llvm.log.f64",
        "log10f32" => "llvm.log10.f32",
        "log10f64" => "llvm.log10.f64",
        "log2f32" => "llvm.log2.f32",
        "log2f64" => "llvm.log2.f64",
        "fmaf32" => "llvm.fma.f32",
        "fmaf64" => "llvm.fma.f64",
        "fabsf32" => "llvm.fabs.f32",
        "fabsf64" => "llvm.fabs.f64",
        "copysignf32" => "llvm.copysign.f32",
        "copysignf64" => "llvm.copysign.f64",
        "floorf32" => "llvm.floor.f32",
        "floorf64" => "llvm.floor.f64",
        "ceilf32" => "llvm.ceil.f32",
        "ceilf64" => "llvm.ceil.f64",
        "truncf32" => "llvm.trunc.f32",
        "truncf64" => "llvm.trunc.f64",
        "rintf32" => "llvm.rint.f32",
        "rintf64" => "llvm.rint.f64",
        "nearbyintf32" => "llvm.nearbyint.f32",
        "nearbyintf64" => "llvm.nearbyint.f64",
        "roundf32" => "llvm.round.f32",
        "roundf64" => "llvm.round.f64",
        "assume" => "llvm.assume",
        _ => return None,
    };
    Some(ccx.get_intrinsic(&name))
}
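// For example, a libcore declaration such as
//
//     extern "rust-intrinsic" { fn sqrtf32(x: f32) -> f32; }
//
// maps through the table above to "llvm.sqrt.f32", so a call to it is
// emitted as a direct call to the LLVM intrinsic with no Rust-level shim.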
pub fn span_transmute_size_error(a: &Session, b: Span, msg: &str) {
    span_err!(a, b, E0512, "{}", msg);
}
/// Performs late verification that intrinsics are used correctly. At present,
/// the only intrinsic that needs such verification is `transmute`.
pub fn check_intrinsics(ccx: &CrateContext) {
    let _task = ccx.tcx().dep_graph.in_task(DepNode::IntrinsicUseCheck);
    let mut last_failing_id = None;
    for transmute_restriction in ccx.tcx().transmute_restrictions.borrow().iter() {
        // Sometimes, a single call to transmute will push multiple
        // type pairs to test in order to exhaustively test the
        // possibility around a type parameter. If one of those fails,
        // there is no sense reporting errors on the others.
        if last_failing_id == Some(transmute_restriction.id) {
            continue;
        }

        debug!("transmute_restriction: {:?}", transmute_restriction);

        assert!(!transmute_restriction.substituted_from.has_param_types());
        assert!(!transmute_restriction.substituted_to.has_param_types());

        let llfromtype = type_of::sizing_type_of(ccx,
                                                 transmute_restriction.substituted_from);
        let lltotype = type_of::sizing_type_of(ccx,
                                               transmute_restriction.substituted_to);
        let from_type_size = machine::llbitsize_of_real(ccx, llfromtype);
        let to_type_size = machine::llbitsize_of_real(ccx, lltotype);

        if from_type_size != to_type_size {
            last_failing_id = Some(transmute_restriction.id);

            if transmute_restriction.original_from != transmute_restriction.substituted_from {
                span_transmute_size_error(ccx.sess(), transmute_restriction.span,
                    &format!("transmute called with differently sized types: \
                              {} (could be {} bit{}) to {} (could be {} bit{})",
                             transmute_restriction.original_from,
                             from_type_size as usize,
                             if from_type_size == 1 {""} else {"s"},
                             transmute_restriction.original_to,
                             to_type_size as usize,
                             if to_type_size == 1 {""} else {"s"}));
            } else {
                span_transmute_size_error(ccx.sess(), transmute_restriction.span,
                    &format!("transmute called with differently sized types: \
                              {} ({} bit{}) to {} ({} bit{})",
                             transmute_restriction.original_from,
                             from_type_size as usize,
                             if from_type_size == 1 {""} else {"s"},
                             transmute_restriction.original_to,
                             to_type_size as usize,
                             if to_type_size == 1 {""} else {"s"}));
            }
        }
    }
    ccx.sess().abort_if_errors();
}
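// As a concrete example of the check above: monomorphizing a call to
// `transmute::<T, u64>(x)` with `T = u32` compares 32 bits against 64 bits
// and reports E0512, using the "could be" wording because the original
// source type mentioned a type parameter rather than a concrete type.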
/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
/// add them to librustc_trans/trans/context.rs
pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                                            node: ast::NodeId,
                                            callee_ty: Ty<'tcx>,
                                            cleanup_scope: cleanup::CustomScopeIndex,
                                            args: callee::CallArgs<'a, 'tcx>,
                                            dest: expr::Dest,
                                            substs: subst::Substs<'tcx>,
                                            call_info: NodeIdAndSpan)
                                            -> Result<'blk, 'tcx> {
    let fcx = bcx.fcx;
    let ccx = fcx.ccx;
    let tcx = bcx.tcx();

    let _icx = push_ctxt("trans_intrinsic_call");

    let sig = ccx.tcx().erase_late_bound_regions(callee_ty.fn_sig());
    let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
    let arg_tys = sig.inputs;
    let ret_ty = sig.output;
    let foreign_item = tcx.map.expect_foreign_item(node);
    let name = foreign_item.name.as_str();
    // For `transmute` we can just trans the input expr directly into dest
    if name == "transmute" {
        let llret_ty = type_of::type_of(ccx, ret_ty.unwrap());
        match args {
            callee::ArgExprs(arg_exprs) => {
                assert_eq!(arg_exprs.len(), 1);

                let (in_type, out_type) = (*substs.types.get(FnSpace, 0),
                                           *substs.types.get(FnSpace, 1));
                let llintype = type_of::type_of(ccx, in_type);
                let llouttype = type_of::type_of(ccx, out_type);

                let in_type_size = machine::llbitsize_of_real(ccx, llintype);
                let out_type_size = machine::llbitsize_of_real(ccx, llouttype);

                // This should be caught by the intrinsicck pass
                assert_eq!(in_type_size, out_type_size);

                let nonpointer_nonaggregate = |llkind: TypeKind| -> bool {
                    use llvm::TypeKind::*;
                    match llkind {
                        Half | Float | Double | X86_FP80 | FP128 |
                        PPC_FP128 | Integer | Vector | X86_MMX => true,
                        _ => false,
                    }
                };

                // An approximation to which types can be directly cast via
                // LLVM's bitcast. This doesn't cover pointer -> pointer casts,
                // but does, importantly, cover SIMD types.
                let in_kind = llintype.kind();
                let ret_kind = llret_ty.kind();
                let bitcast_compatible =
                    (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || {
                        in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer
                    };

                let dest = if bitcast_compatible {
                    // if we're here, the type is scalar-like (a primitive, a
                    // SIMD type or a pointer), and so can be handled as a
                    // by-value ValueRef and can also be directly bitcast to the
                    // target type. Doing this special case makes conversions
                    // like `u32x4` -> `u64x2` much nicer for LLVM and so more
                    // efficient (these are done efficiently implicitly in C
                    // with the `__m128i` type and so this means Rust doesn't
                    // lose out there).
                    let expr = &arg_exprs[0];
                    let datum = unpack_datum!(bcx, expr::trans(bcx, expr));
                    let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "transmute_temp"));
                    let val = if datum.kind.is_by_ref() {
                        load_ty(bcx, datum.val, datum.ty)
                    } else {
                        from_arg_ty(bcx, datum.val, datum.ty)
                    };

                    let cast_val = BitCast(bcx, val, llret_ty);

                    match dest {
                        expr::SaveIn(d) => {
                            // this often occurs in a sequence like `Store(val,
                            // d); val2 = Load(d)`, so disappears easily.
                            Store(bcx, cast_val, d);
                        }
                        expr::Ignore => {}
                    }
                    dest
                } else {
                    // The types are too complicated to do with a by-value
                    // bitcast, so pointer cast instead. We need to cast the
                    // dest so the types work out.
                    let dest = match dest {
                        expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
                        expr::Ignore => expr::Ignore,
                    };
                    bcx = expr::trans_into(bcx, &arg_exprs[0], dest);
                    dest
                };

                fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
                fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

                return match dest {
                    expr::SaveIn(d) => Result::new(bcx, d),
                    expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
                };
            }

            _ => {
                ccx.sess().bug("expected expr as argument for transmute");
            }
        }
    }
    // For `move_val_init` we can evaluate the destination address
    // (the first argument) and then trans the source value (the
    // second argument) directly into the resulting destination
    // address.
    if name == "move_val_init" {
        if let callee::ArgExprs(ref exprs) = args {
            let (dest_expr, source_expr) = if exprs.len() != 2 {
                ccx.sess().bug("expected two exprs as arguments for `move_val_init` intrinsic");
            } else {
                (&exprs[0], &exprs[1])
            };

            // evaluate destination address
            let dest_datum = unpack_datum!(bcx, expr::trans(bcx, dest_expr));
            let dest_datum = unpack_datum!(
                bcx, dest_datum.to_rvalue_datum(bcx, "arg"));
            let dest_datum = unpack_datum!(
                bcx, dest_datum.to_appropriate_datum(bcx));

            // `expr::trans_into(bcx, expr, dest)` is equiv to
            //
            // `trans(bcx, expr).store_to_dest(dest)`,
            //
            // which for `dest == expr::SaveIn(addr)`, is equivalent to:
            //
            // `trans(bcx, expr).store_to(bcx, addr)`.
            let lldest = expr::Dest::SaveIn(dest_datum.val);
            bcx = expr::trans_into(bcx, source_expr, lldest);

            let llresult = C_nil(ccx);
            fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

            return Result::new(bcx, llresult);
        } else {
            ccx.sess().bug("expected two exprs as arguments for `move_val_init` intrinsic");
        }
    }
    let call_debug_location = DebugLoc::At(call_info.id, call_info.span);
    // For `try` we need some custom control flow
    if &name[..] == "try" {
        if let callee::ArgExprs(ref exprs) = args {
            let (func, data, local_ptr) = if exprs.len() != 3 {
                ccx.sess().bug("expected three exprs as arguments for \
                                `try` intrinsic");
            } else {
                (&exprs[0], &exprs[1], &exprs[2])
            };

            // translate arguments
            let func = unpack_datum!(bcx, expr::trans(bcx, func));
            let func = unpack_datum!(bcx, func.to_rvalue_datum(bcx, "func"));
            let data = unpack_datum!(bcx, expr::trans(bcx, data));
            let data = unpack_datum!(bcx, data.to_rvalue_datum(bcx, "data"));
            let local_ptr = unpack_datum!(bcx, expr::trans(bcx, local_ptr));
            let local_ptr = local_ptr.to_rvalue_datum(bcx, "local_ptr");
            let local_ptr = unpack_datum!(bcx, local_ptr);

            let dest = match dest {
                expr::SaveIn(d) => d,
                expr::Ignore => alloc_ty(bcx, tcx.mk_mut_ptr(tcx.types.i8),
                                         "try_result"),
            };

            // do the invoke
            bcx = try_intrinsic(bcx, func.val, data.val, local_ptr.val, dest,
                                call_debug_location);

            fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
            return Result::new(bcx, dest);
        } else {
            ccx.sess().bug("expected three exprs as arguments for \
                            `try` intrinsic");
        }
    }
    // save the actual AST arguments for later (some places need to do
    // const-evaluation on them)
    let expr_arguments = match args {
        callee::ArgExprs(args) => Some(args),
        _ => None,
    };

    // Push the arguments.
    let mut llargs = Vec::new();
    bcx = callee::trans_args(bcx,
                             args,
                             callee_ty,
                             &mut llargs,
                             cleanup::CustomScope(cleanup_scope),
                             false,
                             RustIntrinsic);

    fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
    // These are the only intrinsic functions that diverge.
    if name == "abort" {
        let llfn = ccx.get_intrinsic(&("llvm.trap"));
        Call(bcx, llfn, &[], None, call_debug_location);
        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
        Unreachable(bcx);
        return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
    } else if &name[..] == "unreachable" {
        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
        Unreachable(bcx);
        return Result::new(bcx, C_nil(ccx));
    }
    let ret_ty = match ret_ty {
        ty::FnConverging(ret_ty) => ret_ty,
        ty::FnDiverging => unreachable!()
    };

    let llret_ty = type_of::type_of(ccx, ret_ty);

    // Get location to store the result. If the user does
    // not care about the result, just make a stack slot
    let llresult = match dest {
        expr::SaveIn(d) => d,
        expr::Ignore => {
            if !type_is_zero_size(ccx, ret_ty) {
                let llresult = alloc_ty(bcx, ret_ty, "intrinsic_result");
                call_lifetime_start(bcx, llresult);
                llresult
            } else {
                C_undef(llret_ty.ptr_to())
            }
        }
    };
    let simple = get_simple_intrinsic(ccx, &foreign_item);
    let llval = match (simple, &*name) {
        (Some(llfn), _) => {
            Call(bcx, llfn, &llargs, None, call_debug_location)
        }
        (_, "breakpoint") => {
            let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
            Call(bcx, llfn, &[], None, call_debug_location)
        }
        (_, "size_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
        }
        (_, "size_of_val") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_sized(tcx, tp_ty) {
                let (llsize, _) = glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]);
                llsize
            } else {
                let lltp_ty = type_of::type_of(ccx, tp_ty);
                C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
            }
        }
        (_, "min_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_uint(ccx, type_of::align_of(ccx, tp_ty))
        }
        (_, "min_align_of_val") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_sized(tcx, tp_ty) {
                let (_, llalign) = glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]);
                llalign
            } else {
                C_uint(ccx, type_of::align_of(ccx, tp_ty))
            }
        }
        (_, "pref_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
        }
        (_, "drop_in_place") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let ptr = if type_is_sized(tcx, tp_ty) {
                llargs[0]
            } else {
                let scratch = rvalue_scratch_datum(bcx, tp_ty, "tmp");
                Store(bcx, llargs[0], expr::get_dataptr(bcx, scratch.val));
                Store(bcx, llargs[1], expr::get_meta(bcx, scratch.val));
                fcx.schedule_lifetime_end(cleanup::CustomScope(cleanup_scope), scratch.val);
                scratch.val
            };
            glue::drop_ty(bcx, ptr, tp_ty, call_debug_location);
            C_nil(ccx)
        }
        (_, "type_name") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let ty_name = token::intern_and_get_ident(&tp_ty.to_string());
            C_str_slice(ccx, ty_name)
        }
        (_, "type_id") => {
            let hash = ccx.tcx().hash_crate_independent(*substs.types.get(FnSpace, 0),
                                                        &ccx.link_meta().crate_hash);
            C_u64(ccx, hash)
        }
        (_, "init_dropped") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !return_type_is_void(ccx, tp_ty) {
                drop_done_fill_mem(bcx, llresult, tp_ty);
            }
            C_nil(ccx)
        }
        (_, "init") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !return_type_is_void(ccx, tp_ty) {
                // Just zero out the stack slot. (See comment on base::memzero for explanation)
                init_zero_mem(bcx, llresult, tp_ty);
            }
            C_nil(ccx)
        }
        // Effectively no-ops
        (_, "uninit") | (_, "forget") => {
            C_nil(ccx)
        }
        (_, "needs_drop") => {
            let tp_ty = *substs.types.get(FnSpace, 0);

            C_bool(ccx, bcx.fcx.type_needs_drop(tp_ty))
        }
        (_, "offset") => {
            let ptr = llargs[0];
            let offset = llargs[1];
            InBoundsGEP(bcx, ptr, &[offset])
        }
        (_, "arith_offset") => {
            let ptr = llargs[0];
            let offset = llargs[1];
            GEP(bcx, ptr, &[offset])
        }
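        // E.g. `offset(p, n)` above becomes a `getelementptr inbounds`,
        // letting LLVM assume the result stays inside `p`'s allocation,
        // while `arith_offset` emits a plain `getelementptr` with no such
        // assumption, so wrapping arithmetic is permitted.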
        (_, "copy_nonoverlapping") => {
            copy_intrinsic(bcx,
                           false,
                           false,
                           *substs.types.get(FnSpace, 0),
                           llargs[1],
                           llargs[0],
                           llargs[2],
                           call_debug_location)
        }
        (_, "copy") => {
            copy_intrinsic(bcx,
                           true,
                           false,
                           *substs.types.get(FnSpace, 0),
                           llargs[1],
                           llargs[0],
                           llargs[2],
                           call_debug_location)
        }
        (_, "write_bytes") => {
            memset_intrinsic(bcx,
                             false,
                             *substs.types.get(FnSpace, 0),
                             llargs[0],
                             llargs[1],
                             llargs[2],
                             call_debug_location)
        }
        (_, "volatile_copy_nonoverlapping_memory") => {
            copy_intrinsic(bcx,
                           false,
                           true,
                           *substs.types.get(FnSpace, 0),
                           llargs[0],
                           llargs[1],
                           llargs[2],
                           call_debug_location)
        }
        (_, "volatile_copy_memory") => {
            copy_intrinsic(bcx,
                           true,
                           true,
                           *substs.types.get(FnSpace, 0),
                           llargs[0],
                           llargs[1],
                           llargs[2],
                           call_debug_location)
        }
        (_, "volatile_set_memory") => {
            memset_intrinsic(bcx,
                             true,
                             *substs.types.get(FnSpace, 0),
                             llargs[0],
                             llargs[1],
                             llargs[2],
                             call_debug_location)
        }
        (_, "volatile_load") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
            let load = VolatileLoad(bcx, ptr);
            unsafe {
                llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty));
            }
            to_arg_ty(bcx, load, tp_ty)
        }
        (_, "volatile_store") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
            let val = from_arg_ty(bcx, llargs[1], tp_ty);
            let store = VolatileStore(bcx, val, ptr);
            unsafe {
                llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty));
            }
            C_nil(ccx)
        }
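        // E.g. `volatile_load::<u32>(p)` above should lower to a
        // `load volatile i32` with the alignment of `u32` attached explicitly
        // via LLVMSetAlignment, so LLVM will not elide or merge the access.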
        (_, "ctlz") | (_, "cttz") | (_, "ctpop") | (_, "bswap") |
        (_, "add_with_overflow") | (_, "sub_with_overflow") | (_, "mul_with_overflow") |
        (_, "overflowing_add") | (_, "overflowing_sub") | (_, "overflowing_mul") |
        (_, "unchecked_div") | (_, "unchecked_rem") => {
            let sty = &arg_tys[0].sty;
            match int_type_width_signed(sty, ccx) {
                Some((width, signed)) =>
                    match &*name {
                        "ctlz" => count_zeros_intrinsic(bcx, &format!("llvm.ctlz.i{}", width),
                                                        llargs[0], call_debug_location),
                        "cttz" => count_zeros_intrinsic(bcx, &format!("llvm.cttz.i{}", width),
                                                        llargs[0], call_debug_location),
                        "ctpop" => Call(bcx, ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
                                        &llargs, None, call_debug_location),
                        "bswap" => {
                            if width == 8 {
                                llargs[0] // byte swap a u8/i8 is just a no-op
                            } else {
                                Call(bcx, ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
                                     &llargs, None, call_debug_location)
                            }
                        }
                        "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
                            let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
                                                    if signed { 's' } else { 'u' },
                                                    &name[..3], width);
                            with_overflow_intrinsic(bcx, &intrinsic, llargs[0], llargs[1], llresult,
                                                    call_debug_location)
                        }
                        "overflowing_add" => Add(bcx, llargs[0], llargs[1], call_debug_location),
                        "overflowing_sub" => Sub(bcx, llargs[0], llargs[1], call_debug_location),
                        "overflowing_mul" => Mul(bcx, llargs[0], llargs[1], call_debug_location),
                        "unchecked_div" =>
                            if signed {
                                SDiv(bcx, llargs[0], llargs[1], call_debug_location)
                            } else {
                                UDiv(bcx, llargs[0], llargs[1], call_debug_location)
                            },
                        "unchecked_rem" =>
                            if signed {
                                SRem(bcx, llargs[0], llargs[1], call_debug_location)
                            } else {
                                URem(bcx, llargs[0], llargs[1], call_debug_location)
                            },
                        _ => unreachable!(),
                    },
                None => {
                    span_invalid_monomorphization_error(
                        tcx.sess, call_info.span,
                        &format!("invalid monomorphization of `{}` intrinsic: \
                                  expected basic integer type, found `{}`", name, sty));
                    C_null(llret_ty)
                }
            }
        }
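        // For instance, `ctpop::<u32>` resolves to "llvm.ctpop.i32" above,
        // and `add_with_overflow::<i32>` to "llvm.sadd.with.overflow.i32":
        // the 's'/'u' prefix comes from the argument's signedness and the
        // width from int_type_width_signed at the bottom of this file.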
        (_, "return_address") => {
            if !fcx.caller_expects_out_pointer {
                span_err!(tcx.sess, call_info.span, E0510,
                          "invalid use of `return_address` intrinsic: function \
                           does not use out pointer");
                C_null(Type::i8p(ccx))
            } else {
                PointerCast(bcx, llvm::get_param(fcx.llfn, 0), Type::i8p(ccx))
            }
        }
        (_, "discriminant_value") => {
            let val_ty = substs.types.get(FnSpace, 0);
            match val_ty.sty {
                ty::TyEnum(..) => {
                    let repr = adt::represent_type(ccx, *val_ty);
                    adt::trans_get_discr(bcx, &repr, llargs[0],
                                         Some(llret_ty), true)
                }
                _ => C_null(llret_ty)
            }
        }
        (_, name) if name.starts_with("simd_") => {
            generic_simd_intrinsic(bcx, name,
                                   substs,
                                   callee_ty,
                                   expr_arguments,
                                   &llargs,
                                   ret_ty, llret_ty,
                                   call_debug_location,
                                   call_info)
        }
        // This requires that atomic intrinsics follow a specific naming pattern:
        // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
        (_, name) if name.starts_with("atomic_") => {
            let split: Vec<&str> = name.split('_').collect();

            let (order, failorder) = match split.len() {
                2 => (llvm::SequentiallyConsistent, llvm::SequentiallyConsistent),
                3 => match split[2] {
                    "unordered" => (llvm::Unordered, llvm::Unordered),
                    "relaxed" => (llvm::Monotonic, llvm::Monotonic),
                    "acq" => (llvm::Acquire, llvm::Acquire),
                    "rel" => (llvm::Release, llvm::Monotonic),
                    "acqrel" => (llvm::AcquireRelease, llvm::Acquire),
                    "failrelaxed" if split[1] == "cxchg" || split[1] == "cxchgweak" =>
                        (llvm::SequentiallyConsistent, llvm::Monotonic),
                    "failacq" if split[1] == "cxchg" || split[1] == "cxchgweak" =>
                        (llvm::SequentiallyConsistent, llvm::Acquire),
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                },
                4 => match (split[2], split[3]) {
                    ("acq", "failrelaxed") if split[1] == "cxchg" || split[1] == "cxchgweak" =>
                        (llvm::Acquire, llvm::Monotonic),
                    ("acqrel", "failrelaxed") if split[1] == "cxchg" || split[1] == "cxchgweak" =>
                        (llvm::AcquireRelease, llvm::Monotonic),
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                },
                _ => ccx.sess().fatal("Atomic intrinsic not in correct format"),
            };
            match split[1] {
                "cxchg" => {
                    let tp_ty = *substs.types.get(FnSpace, 0);
                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
                    let cmp = from_arg_ty(bcx, llargs[1], tp_ty);
                    let src = from_arg_ty(bcx, llargs[2], tp_ty);
                    let res = AtomicCmpXchg(bcx, ptr, cmp, src, order, failorder, llvm::False);
                    ExtractValue(bcx, res, 0)
                }

                "cxchgweak" => {
                    let tp_ty = *substs.types.get(FnSpace, 0);
                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
                    let cmp = from_arg_ty(bcx, llargs[1], tp_ty);
                    let src = from_arg_ty(bcx, llargs[2], tp_ty);
                    let val = AtomicCmpXchg(bcx, ptr, cmp, src, order, failorder, llvm::True);
                    let result = ExtractValue(bcx, val, 0);
                    let success = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
                    Store(bcx, result, StructGEP(bcx, llresult, 0));
                    Store(bcx, success, StructGEP(bcx, llresult, 1));
                    C_nil(ccx)
                }

                "load" => {
                    let tp_ty = *substs.types.get(FnSpace, 0);
                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
                    to_arg_ty(bcx, AtomicLoad(bcx, ptr, order), tp_ty)
                }

                "store" => {
                    let tp_ty = *substs.types.get(FnSpace, 0);
                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
                    let val = from_arg_ty(bcx, llargs[1], tp_ty);
                    AtomicStore(bcx, val, ptr, order);
                    C_nil(ccx)
                }

                "fence" => {
                    AtomicFence(bcx, order, llvm::CrossThread);
                    C_nil(ccx)
                }

                "singlethreadfence" => {
                    AtomicFence(bcx, order, llvm::SingleThread);
                    C_nil(ccx)
                }

                // These are all AtomicRMW ops
                op => {
                    let atom_op = match op {
                        "xchg" => llvm::AtomicXchg,
                        "xadd" => llvm::AtomicAdd,
                        "xsub" => llvm::AtomicSub,
                        "and"  => llvm::AtomicAnd,
                        "nand" => llvm::AtomicNand,
                        "or"   => llvm::AtomicOr,
                        "xor"  => llvm::AtomicXor,
                        "max"  => llvm::AtomicMax,
                        "min"  => llvm::AtomicMin,
                        "umax" => llvm::AtomicUMax,
                        "umin" => llvm::AtomicUMin,
                        _ => ccx.sess().fatal("unknown atomic operation")
                    };

                    let tp_ty = *substs.types.get(FnSpace, 0);
                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
                    let val = from_arg_ty(bcx, llargs[1], tp_ty);
                    AtomicRMW(bcx, atom_op, ptr, val, order)
                }
            }
        }
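        // E.g. `atomic_xadd(ptr, v)` in the catch-all arm above becomes an
        // `atomicrmw add` with the parsed ordering, yielding the value that
        // was stored at `ptr` before the addition.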
        (_, _) => {
            let intr = match Intrinsic::find(tcx, &name) {
                Some(intr) => intr,
                None => ccx.sess().span_bug(foreign_item.span,
                                            &format!("unknown intrinsic '{}'", name)),
            };
            fn one<T>(x: Vec<T>) -> T {
                assert_eq!(x.len(), 1);
                x.into_iter().next().unwrap()
            }
            fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type,
                          any_changes_needed: &mut bool) -> Vec<Type> {
                use intrinsics::Type::*;
                match *t {
                    Void => vec![Type::void(ccx)],
                    Integer(_signed, width, llvm_width) => {
                        *any_changes_needed |= width != llvm_width;
                        vec![Type::ix(ccx, llvm_width as u64)]
                    }
                    Float(x) => {
                        match x {
                            32 => vec![Type::f32(ccx)],
                            64 => vec![Type::f64(ccx)],
                            _ => unreachable!()
                        }
                    }
                    Pointer(ref t, ref llvm_elem, _const) => {
                        *any_changes_needed |= llvm_elem.is_some();

                        let t = llvm_elem.as_ref().unwrap_or(t);
                        let elem = one(ty_to_type(ccx, t,
                                                  any_changes_needed));
                        vec![elem.ptr_to()]
                    }
                    Vector(ref t, ref llvm_elem, length) => {
                        *any_changes_needed |= llvm_elem.is_some();

                        let t = llvm_elem.as_ref().unwrap_or(t);
                        let elem = one(ty_to_type(ccx, t,
                                                  any_changes_needed));
                        vec![Type::vector(&elem,
                                          length as u64)]
                    }
                    Aggregate(false, ref contents) => {
                        let elems = contents.iter()
                                            .map(|t| one(ty_to_type(ccx, t, any_changes_needed)))
                                            .collect::<Vec<_>>();
                        vec![Type::struct_(ccx, &elems, false)]
                    }
                    Aggregate(true, ref contents) => {
                        *any_changes_needed = true;
                        contents.iter()
                                .flat_map(|t| ty_to_type(ccx, t, any_changes_needed))
                                .collect()
                    }
                }
            }
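            // Example of the width handling above: a signature declaring
            // Integer(true, 32, 8) (Rust-side i32, LLVM-side i8) produces
            // `Type::ix(ccx, 8)` and sets `any_changes_needed`, so
            // `modify_as_needed` below truncates the argument to match.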
            // This allows an argument list like `foo, (bar, baz),
            // qux` to be converted into `foo, bar, baz, qux`, integer
            // arguments to be truncated as needed and pointers to be
            // cast.
            fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                            t: &intrinsics::Type,
                                            arg_type: Ty<'tcx>,
                                            llarg: ValueRef)
                                            -> Vec<ValueRef>
            {
                match *t {
                    intrinsics::Type::Aggregate(true, ref contents) => {
                        // We found a tuple that needs squishing! So
                        // run over the tuple and load each field.
                        //
                        // This assumes the type is "simple", i.e. no
                        // destructors, and the contents are SIMD
                        // etc.
                        assert!(!bcx.fcx.type_needs_drop(arg_type));

                        let repr = adt::represent_type(bcx.ccx(), arg_type);
                        let repr_ptr = &repr;
                        let arg = adt::MaybeSizedValue::sized(llarg);
                        (0..contents.len())
                            .map(|i| {
                                Load(bcx, adt::trans_field_ptr(bcx, repr_ptr, arg, Disr(0), i))
                            })
                            .collect()
                    }
                    intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
                        let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
                        vec![PointerCast(bcx, llarg,
                                         llvm_elem.ptr_to())]
                    }
                    intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
                        let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
                        vec![BitCast(bcx, llarg,
                                     Type::vector(&llvm_elem, length as u64))]
                    }
                    intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
                        // the LLVM intrinsic uses a smaller integer
                        // size than the C intrinsic's signature, so
                        // we have to trim it down here.
                        vec![Trunc(bcx, llarg, Type::ix(bcx.ccx(), llvm_width as u64))]
                    }
                    _ => vec![llarg],
                }
            }
            let mut any_changes_needed = false;
            let inputs = intr.inputs.iter()
                                    .flat_map(|t| ty_to_type(ccx, t, &mut any_changes_needed))
                                    .collect::<Vec<_>>();

            let mut out_changes = false;
            let outputs = one(ty_to_type(ccx, &intr.output, &mut out_changes));
            // outputting a flattened aggregate is nonsense
            assert!(!out_changes);

            let llargs = if !any_changes_needed {
                // no aggregates to flatten, so no change needed
                llargs
            } else {
                // there are some aggregates that need to be flattened
                // in the LLVM call, so we need to run over the types
                // again to find them and extract the arguments
                intr.inputs.iter()
                           .zip(&llargs)
                           .zip(&arg_tys)
                           .flat_map(|((t, llarg), ty)| modify_as_needed(bcx, t, ty, *llarg))
                           .collect()
            };
            assert_eq!(inputs.len(), llargs.len());

            let val = match intr.definition {
                intrinsics::IntrinsicDef::Named(name) => {
                    let f = declare::declare_cfn(ccx,
                                                 name,
                                                 Type::func(&inputs, &outputs),
                                                 tcx.mk_nil());
                    Call(bcx, f, &llargs, None, call_debug_location)
                }
            };

            match intr.output {
                intrinsics::Type::Aggregate(flatten, ref elems) => {
                    // the output is a tuple so we need to munge it properly
                    assert!(!flatten);

                    for i in 0..elems.len() {
                        let val = ExtractValue(bcx, val, i);
                        Store(bcx, val, StructGEP(bcx, llresult, i));
                    }
                    C_nil(ccx)
                }
                _ => val,
            }
        }
    };

    if val_ty(llval) != Type::void(ccx) &&
       machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
        store_ty(bcx, llval, llresult, ret_ty);
    }

    // If we made a temporary stack slot, let's clean it up
    match dest {
        expr::Ignore => {
            bcx = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location);
            call_lifetime_end(bcx, llresult);
        }
        expr::SaveIn(_) => {}
    }

    fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

    Result::new(bcx, llresult)
}
fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              allow_overlap: bool,
                              volatile: bool,
                              tp_ty: Ty<'tcx>,
                              dst: ValueRef,
                              src: ValueRef,
                              count: ValueRef,
                              call_debug_location: DebugLoc)
                              -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());

    let operation = if allow_overlap {
        "memmove"
    } else {
        "memcpy"
    };

    let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, int_size);

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx,
         llfn,
         &[dst_ptr,
           src_ptr,
           Mul(bcx, size, count, DebugLoc::None),
           align,
           C_bool(ccx, volatile)],
         None,
         call_debug_location)
}
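// On a 64-bit target a plain, nonoverlapping copy therefore calls
// "llvm.memcpy.p0i8.p0i8.i64" with both pointers cast to i8*, a byte count
// of `size * count`, and the element type's alignment.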
fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                volatile: bool,
                                tp_ty: Ty<'tcx>,
                                dst: ValueRef,
                                val: ValueRef,
                                count: ValueRef,
                                call_debug_location: DebugLoc)
                                -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());

    let name = format!("llvm.memset.p0i8.i{}", int_size);

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx,
         llfn,
         &[dst_ptr,
           val,
           Mul(bcx, size, count, DebugLoc::None),
           align,
           C_bool(ccx, volatile)],
         None,
         call_debug_location)
}
fn count_zeros_intrinsic(bcx: Block,
                         name: &str,
                         val: ValueRef,
                         call_debug_location: DebugLoc)
                         -> ValueRef {
    let y = C_bool(bcx.ccx(), false);
    let llfn = bcx.ccx().get_intrinsic(&name);
    Call(bcx, llfn, &[val, y], None, call_debug_location)
}
fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                       name: &str,
                                       a: ValueRef,
                                       b: ValueRef,
                                       out: ValueRef,
                                       call_debug_location: DebugLoc)
                                       -> ValueRef {
    let llfn = bcx.ccx().get_intrinsic(&name);

    // Convert `i1` to a `bool`, and write it to the out parameter
    let val = Call(bcx, llfn, &[a, b], None, call_debug_location);
    let result = ExtractValue(bcx, val, 0);
    let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
    Store(bcx, result, StructGEP(bcx, out, 0));
    Store(bcx, overflow, StructGEP(bcx, out, 1));

    C_nil(bcx.ccx())
}
fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                             func: ValueRef,
                             data: ValueRef,
                             local_ptr: ValueRef,
                             dest: ValueRef,
                             dloc: DebugLoc) -> Block<'blk, 'tcx> {
    if bcx.sess().no_landing_pads() {
        Call(bcx, func, &[data], None, dloc);
        Store(bcx, C_null(Type::i8p(bcx.ccx())), dest);
        bcx
    } else if wants_msvc_seh(bcx.sess()) {
        trans_msvc_try(bcx, func, data, local_ptr, dest, dloc)
    } else {
        trans_gnu_try(bcx, func, data, local_ptr, dest, dloc)
    }
}
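// In short: when the session has landing pads disabled the callee is simply
// called and the exception slot nulled out (nothing can unwind), otherwise we
// pick between the SEH-based MSVC lowering and the landingpad-based GNU
// lowering below.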
// MSVC's definition of the `rust_try` function.
//
// This implementation uses the new exception handling instructions in LLVM
// which have support in LLVM for SEH on MSVC targets. Although these
// instructions are meant to work for all targets, as of the time of this
// writing, however, LLVM does not recommend the usage of these new instructions
// as the old ones are still more optimized.
fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              func: ValueRef,
                              data: ValueRef,
                              local_ptr: ValueRef,
                              dest: ValueRef,
                              dloc: DebugLoc) -> Block<'blk, 'tcx> {
    let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
        let ccx = bcx.ccx();
        let dloc = DebugLoc::None;

        SetPersonalityFn(bcx, bcx.fcx.eh_personality());

        let normal = bcx.fcx.new_temp_block("normal");
        let catchswitch = bcx.fcx.new_temp_block("catchswitch");
        let catchpad = bcx.fcx.new_temp_block("catchpad");
        let caught = bcx.fcx.new_temp_block("caught");

        let func = llvm::get_param(bcx.fcx.llfn, 0);
        let data = llvm::get_param(bcx.fcx.llfn, 1);
        let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);

        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%func, %data, %ptr) {
        //      %slot = alloca i8*
        //      call @llvm.localescape(%slot)
        //      store %ptr, %slot
        //      invoke %func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad] unwind to caller
        //
        //   catchpad:
        //      %tok = catchpad within %cs [%rust_try_filter]
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        // This structure follows the basic usage of the instructions in LLVM
        // (see their documentation/test cases for examples), but a
        // perhaps-surprising part here is the usage of the `localescape`
        // intrinsic. This is used to allow the filter function (also generated
        // here) to access variables on the stack of this intrinsic. This
        // ability enables us to transfer information about the exception being
        // thrown to this point, where we're catching the exception.
        //
        // More information can be found in libstd's seh.rs implementation.
        let slot = Alloca(bcx, Type::i8p(ccx), "slot");
        let localescape = ccx.get_intrinsic(&"llvm.localescape");
        Call(bcx, localescape, &[slot], None, dloc);
        Store(bcx, local_ptr, slot);
        Invoke(bcx, func, &[data], normal.llbb, catchswitch.llbb, None, dloc);

        Ret(normal, C_i32(ccx, 0), dloc);

        let cs = CatchSwitch(catchswitch, None, None, 1);
        AddHandler(catchswitch, cs, catchpad.llbb);

        let filter = generate_filter_fn(bcx.fcx, bcx.fcx.llfn);
        let filter = BitCast(catchpad, filter, Type::i8p(ccx));
        let tok = CatchPad(catchpad, cs, &[filter]);
        CatchRet(catchpad, tok, caught.llbb);

        Ret(caught, C_i32(ccx, 1), dloc);
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = Call(bcx, llfn, &[func, data, local_ptr], None, dloc);
    Store(bcx, ret, dest);
    return bcx;
}
// Definition of the standard "try" function for Rust using the GNU-like model
// of exceptions (e.g. the normal semantics of LLVM's landingpad and invoke
// instructions).
//
// This translation is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is done
// because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                             func: ValueRef,
                             data: ValueRef,
                             local_ptr: ValueRef,
                             dest: ValueRef,
                             dloc: DebugLoc) -> Block<'blk, 'tcx> {
    let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
        let ccx = bcx.ccx();
        let dloc = DebugLoc::None;

        // Translates the shims described above:
        //
        //   bcx:
        //      invoke %func(%args...) normal %normal unwind %catch
        //
        //   normal:
        //      ret 0
        //
        //   catch:
        //      (ptr, _) = landingpad
        //      store ptr, %local_ptr
        //      ret 1
        //
        // Note that the `local_ptr` data passed into the `try` intrinsic is
        // expected to be `*mut *mut u8` for this to actually work, but that's
        // managed by the standard library.
        attributes::emit_uwtable(bcx.fcx.llfn, true);
        let catch_pers = match bcx.tcx().lang_items.eh_personality_catch() {
            Some(did) => callee::trans_fn_ref(ccx, did, ExprId(0),
                                              bcx.fcx.param_substs).val,
            None => bcx.tcx().sess.bug("eh_personality_catch not defined"),
        };

        let then = bcx.fcx.new_temp_block("then");
        let catch = bcx.fcx.new_temp_block("catch");

        let func = llvm::get_param(bcx.fcx.llfn, 0);
        let data = llvm::get_param(bcx.fcx.llfn, 1);
        let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);
        Invoke(bcx, func, &[data], then.llbb, catch.llbb, None, dloc);
        Ret(then, C_i32(ccx, 0), dloc);

        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        // rust_try ignores the selector.
        let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
                                    false);
        let vals = LandingPad(catch, lpad_ty, catch_pers, 1);
        AddClause(catch, vals, C_null(Type::i8p(ccx)));
        let ptr = ExtractValue(catch, vals, 0);
        Store(catch, ptr, BitCast(catch, local_ptr, Type::i8p(ccx).ptr_to()));
        Ret(catch, C_i32(ccx, 1), dloc);
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = Call(bcx, llfn, &[func, data, local_ptr], None, dloc);
    Store(bcx, ret, dest);
    return bcx;
}
// Helper function to give a Block to a closure to translate a shim function.
// This is currently primarily used for the `try` intrinsic functions above.
fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
                    name: &str,
                    ty: Ty<'tcx>,
                    output: ty::FnOutput<'tcx>,
                    trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
                    -> ValueRef {
    let ccx = fcx.ccx;
    let llfn = declare::define_internal_rust_fn(ccx, name, ty);
    let (fcx, block_arena);
    block_arena = TypedArena::new();
    fcx = new_fn_ctxt(ccx, llfn, ast::DUMMY_NODE_ID, false,
                      output, ccx.tcx().mk_substs(Substs::trans_empty()),
                      None, &block_arena);
    let bcx = init_function(&fcx, true, output);
    trans(bcx);
    fcx.cleanup();
    llfn
}
// Helper function used to get a handle to the `__rust_try` function used to
// catch exceptions.
//
// This function is only generated once and is then cached.
fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
                             trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
                             -> ValueRef {
    let ccx = fcx.ccx;
    if let Some(llfn) = *ccx.rust_try_fn().borrow() {
        return llfn;
    }

    // Define the type up front for the signature of the rust_try function.
    let tcx = ccx.tcx();
    let i8p = tcx.mk_mut_ptr(tcx.types.i8);
    let fn_ty = tcx.mk_bare_fn(ty::BareFnTy {
        unsafety: hir::Unsafety::Unsafe,
        abi: abi::Rust,
        sig: ty::Binder(ty::FnSig {
            inputs: vec![i8p],
            output: ty::FnOutput::FnConverging(tcx.mk_nil()),
            variadic: false,
        }),
    });
    let fn_ty = tcx.mk_fn(None, fn_ty);
    let output = ty::FnOutput::FnConverging(tcx.types.i32);
    let try_fn_ty = tcx.mk_bare_fn(ty::BareFnTy {
        unsafety: hir::Unsafety::Unsafe,
        abi: abi::Rust,
        sig: ty::Binder(ty::FnSig {
            inputs: vec![fn_ty, i8p, i8p],
            output: output,
            variadic: false,
        }),
    });
    let rust_try = gen_fn(fcx, "__rust_try", tcx.mk_fn(None, try_fn_ty), output,
                          trans);
    *ccx.rust_try_fn().borrow_mut() = Some(rust_try);
    return rust_try;
}
// For MSVC-style exceptions (SEH), the compiler generates a filter function
// which is used to determine whether an exception is being caught (e.g. if it's
// a Rust exception or some other).
//
// This function is used to generate said filter function. The shim generated
// here is actually just a thin wrapper to call the real implementation in the
// standard library itself. For reasons as to why, see seh.rs in the standard
// library.
fn generate_filter_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
                                rust_try_fn: ValueRef)
                                -> ValueRef {
    let ccx = fcx.ccx;
    let tcx = ccx.tcx();
    let dloc = DebugLoc::None;

    let rust_try_filter = match ccx.tcx().lang_items.msvc_try_filter() {
        Some(did) => callee::trans_fn_ref(ccx, did, ExprId(0),
                                          fcx.param_substs).val,
        None => ccx.sess().bug("msvc_try_filter not defined"),
    };

    let output = ty::FnOutput::FnConverging(tcx.types.i32);
    let i8p = tcx.mk_mut_ptr(tcx.types.i8);

    let frameaddress = ccx.get_intrinsic(&"llvm.frameaddress");
    let recoverfp = ccx.get_intrinsic(&"llvm.x86.seh.recoverfp");
    let localrecover = ccx.get_intrinsic(&"llvm.localrecover");

    // On all platforms, once we have the EXCEPTION_POINTERS handle as well as
    // the base pointer, we follow the standard layout of:
    //
    //      block:
    //          %parentfp = call i8* llvm.x86.seh.recoverfp(@rust_try_fn, %bp)
    //          %arg = call i8* llvm.localrecover(@rust_try_fn, %parentfp, 0)
    //          %ret = call i32 @the_real_filter_function(%ehptrs, %arg)
    //          ret i32 %ret
    //
    // The recoverfp intrinsic is used to recover the frame pointer of the
    // `rust_try_fn` function, which is then in turn passed to the
    // `localrecover` intrinsic (pairing with the `localescape` intrinsic
    // mentioned above). Putting all this together means that we now have a
    // handle to the arguments passed into the `try` function, allowing writing
    // to the stack over there.
    //
    // For more info, see seh.rs in the standard library.
    let do_trans = |bcx: Block, ehptrs, base_pointer| {
        let rust_try_fn = BitCast(bcx, rust_try_fn, Type::i8p(ccx));
        let parentfp = Call(bcx, recoverfp, &[rust_try_fn, base_pointer],
                            None, dloc);
        let arg = Call(bcx, localrecover,
                       &[rust_try_fn, parentfp, C_i32(ccx, 0)], None, dloc);
        let ret = Call(bcx, rust_try_filter, &[ehptrs, arg], None, dloc);
        Ret(bcx, ret, dloc);
    };

    if ccx.tcx().sess.target.target.arch == "x86" {
        // On x86 the filter function doesn't actually receive any arguments.
        // Instead the %ebp register contains some contextual information.
        //
        // Unfortunately I don't know of any great documentation as to what's
        // going on here, all I can say is that there's a few tests cases in
        // LLVM's test suite which follow this pattern of instructions, so we
        // just do the same.
        let filter_fn_ty = tcx.mk_bare_fn(ty::BareFnTy {
            unsafety: hir::Unsafety::Unsafe,
            abi: abi::Rust,
            sig: ty::Binder(ty::FnSig {
                inputs: vec![],
                output: output,
                variadic: false,
            }),
        });
        let filter_fn_ty = tcx.mk_fn(None, filter_fn_ty);
        gen_fn(fcx, "__rustc_try_filter", filter_fn_ty, output, &mut |bcx| {
            let ebp = Call(bcx, frameaddress, &[C_i32(ccx, 1)], None, dloc);
            let exn = InBoundsGEP(bcx, ebp, &[C_i32(ccx, -20)]);
            let exn = Load(bcx, BitCast(bcx, exn, Type::i8p(ccx).ptr_to()));
            do_trans(bcx, exn, ebp);
        })
    } else if ccx.tcx().sess.target.target.arch == "x86_64" {
        // Conveniently on x86_64 the EXCEPTION_POINTERS handle and base pointer
        // are passed in as arguments to the filter function, so we just pass
        // them along.
        let filter_fn_ty = tcx.mk_bare_fn(ty::BareFnTy {
            unsafety: hir::Unsafety::Unsafe,
            abi: abi::Rust,
            sig: ty::Binder(ty::FnSig {
                inputs: vec![i8p, i8p],
                output: output,
                variadic: false,
            }),
        });
        let filter_fn_ty = tcx.mk_fn(None, filter_fn_ty);
        gen_fn(fcx, "__rustc_try_filter", filter_fn_ty, output, &mut |bcx| {
            let exn = llvm::get_param(bcx.fcx.llfn, 0);
            let rbp = llvm::get_param(bcx.fcx.llfn, 1);
            do_trans(bcx, exn, rbp);
        })
    } else {
        panic!("unknown target to generate a filter function")
    }
}
fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
    span_err!(a, b, E0511, "{}", c);
}
fn generic_simd_intrinsic<'blk, 'tcx, 'a>
    (bcx: Block<'blk, 'tcx>,
     name: &str,
     substs: subst::Substs<'tcx>,
     callee_ty: Ty<'tcx>,
     args: Option<&[P<hir::Expr>]>,
     llargs: &[ValueRef],
     ret_ty: Ty<'tcx>,
     llret_ty: Type,
     call_debug_location: DebugLoc,
     call_info: NodeIdAndSpan) -> ValueRef
{
    // macros for error handling:
    macro_rules! emit_error {
        ($msg: tt) => {
            emit_error!($msg, )
        };
        ($msg: tt, $($fmt: tt)*) => {
            span_invalid_monomorphization_error(
                bcx.sess(), call_info.span,
                &format!(concat!("invalid monomorphization of `{}` intrinsic: ",
                                 $msg),
                         name, $($fmt)*));
        }
    }
    macro_rules! require {
        ($cond: expr, $($fmt: tt)*) => {
            if !$cond {
                emit_error!($($fmt)*);
                return C_null(llret_ty)
            }
        }
    }
    macro_rules! require_simd {
        ($ty: expr, $position: expr) => {
            require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
        }
    }

    let tcx = bcx.tcx();
    let sig = tcx.erase_late_bound_regions(callee_ty.fn_sig());
    let sig = infer::normalize_associated_type(tcx, &sig);
    let arg_tys = sig.inputs;

    // every intrinsic takes a SIMD vector as its first argument
    require_simd!(arg_tys[0], "input");
    let in_ty = arg_tys[0];
    let in_elem = arg_tys[0].simd_type(tcx);
    let in_len = arg_tys[0].simd_size(tcx);

    let comparison = match name {
        "simd_eq" => Some(hir::BiEq),
        "simd_ne" => Some(hir::BiNe),
        "simd_lt" => Some(hir::BiLt),
        "simd_le" => Some(hir::BiLe),
        "simd_gt" => Some(hir::BiGt),
        "simd_ge" => Some(hir::BiGe),
        _ => None
    };

    if let Some(cmp_op) = comparison {
        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(in_len == out_len,
                 "expected return type with length {} (same as input type `{}`), \
                  found `{}` with length {}",
                 in_len, in_ty,
                 ret_ty, out_len);
        require!(llret_ty.element_type().kind() == llvm::Integer,
                 "expected return type with integer elements, found `{}` with non-integer `{}`",
                 ret_ty,
                 ret_ty.simd_type(tcx));

        return compare_simd_types(bcx,
                                  llargs[0],
                                  llargs[1],
                                  in_elem,
                                  llret_ty,
                                  cmp_op,
                                  call_debug_location)
    }

    if name.starts_with("simd_shuffle") {
        let n: usize = match name["simd_shuffle".len()..].parse() {
            Ok(n) => n,
            Err(_) => tcx.sess.span_bug(call_info.span,
                                        "bad `simd_shuffle` instruction only caught in trans?")
        };

        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(out_len == n,
                 "expected return type of length {}, found `{}` with length {}",
                 n, ret_ty, out_len);
        require!(in_elem == ret_ty.simd_type(tcx),
                 "expected return element type `{}` (element of input `{}`), \
                  found `{}` with element type `{}`",
                 in_elem, in_ty,
                 ret_ty, ret_ty.simd_type(tcx));

        let total_len = in_len as u64 * 2;

        let vector = match args {
            Some(args) => &args[2],
            None => bcx.sess().span_bug(call_info.span,
                                        "intrinsic call with unexpected argument shape"),
        };
        let vector = match consts::const_expr(
            bcx.ccx(),
            vector,
            tcx.mk_substs(substs),
            None,
            consts::TrueConst::Yes, // this should probably help simd error reporting
        ) {
            Ok((vector, _)) => vector,
            Err(err) => bcx.sess().span_fatal(call_info.span, &err.description()),
        };

        let indices: Option<Vec<_>> = (0..n)
            .map(|i| {
                let arg_idx = i;
                let val = const_get_elt(bcx.ccx(), vector, &[i as libc::c_uint]);
                let c = const_to_opt_uint(val);
                match c {
                    None => {
                        emit_error!("shuffle index #{} is not a constant", arg_idx);
                        None
                    }
                    Some(idx) if idx >= total_len => {
                        emit_error!("shuffle index #{} is out of bounds (limit {})",
                                    arg_idx, total_len);
                        None
                    }
                    Some(idx) => Some(C_i32(bcx.ccx(), idx as i32)),
                }
            })
            .collect();
        let indices = match indices {
            Some(i) => i,
            None => return C_null(llret_ty)
        };

        return ShuffleVector(bcx, llargs[0], llargs[1], C_vector(&indices))
    }
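    // For example, `simd_shuffle4(a, b, [0, 4, 1, 5])` on two 4-element
    // vectors parses n = 4 out of the intrinsic name, const-evaluates the
    // index array (indices may pick from either input, hence the
    // `in_len * 2` bound), and emits a single `shufflevector` instruction.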
    if name == "simd_insert" {
        require!(in_elem == arg_tys[2],
                 "expected inserted type `{}` (element of input `{}`), found `{}`",
                 in_elem, in_ty, arg_tys[2]);
        return InsertElement(bcx, llargs[0], llargs[2], llargs[1])
    }
    if name == "simd_extract" {
        require!(ret_ty == in_elem,
                 "expected return type `{}` (element of input `{}`), found `{}`",
                 in_elem, in_ty, ret_ty);
        return ExtractElement(bcx, llargs[0], llargs[1])
    }

    if name == "simd_cast" {
        require_simd!(ret_ty, "return");
        let out_len = ret_ty.simd_size(tcx);
        require!(in_len == out_len,
                 "expected return type with length {} (same as input type `{}`), \
                  found `{}` with length {}",
                 in_len, in_ty,
                 ret_ty, out_len);
        // casting cares about nominal type, not just structural type
        let out_elem = ret_ty.simd_type(tcx);

        if in_elem == out_elem { return llargs[0]; }

        enum Style { Float, Int(/* is signed? */ bool), Unsupported }

        let (in_style, in_width) = match in_elem.sty {
            // vectors of pointer-sized integers should've been
            // disallowed before here, so this unwrap is safe.
            ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::TyFloat(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0)
        };
        let (out_style, out_width) = match out_elem.sty {
            ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::TyFloat(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0)
        };

        match (in_style, out_style) {
            (Style::Int(in_is_signed), Style::Int(_)) => {
                return match in_width.cmp(&out_width) {
                    Ordering::Greater => Trunc(bcx, llargs[0], llret_ty),
                    Ordering::Equal => llargs[0],
                    Ordering::Less => if in_is_signed {
                        SExt(bcx, llargs[0], llret_ty)
                    } else {
                        ZExt(bcx, llargs[0], llret_ty)
                    }
                }
            }
            (Style::Int(in_is_signed), Style::Float) => {
                return if in_is_signed {
                    SIToFP(bcx, llargs[0], llret_ty)
                } else {
                    UIToFP(bcx, llargs[0], llret_ty)
                }
            }
            (Style::Float, Style::Int(out_is_signed)) => {
                return if out_is_signed {
                    FPToSI(bcx, llargs[0], llret_ty)
                } else {
                    FPToUI(bcx, llargs[0], llret_ty)
                }
            }
            (Style::Float, Style::Float) => {
                return match in_width.cmp(&out_width) {
                    Ordering::Greater => FPTrunc(bcx, llargs[0], llret_ty),
                    Ordering::Equal => llargs[0],
                    Ordering::Less => FPExt(bcx, llargs[0], llret_ty)
                }
            }
            _ => {/* Unsupported. Fallthrough. */}
        }
        require!(false,
                 "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
                 in_ty, in_elem,
                 ret_ty, out_elem);
    }
    macro_rules! arith {
        ($($name: ident: $($($p: ident),* => $call: expr),*;)*) => {
            $(
                if name == stringify!($name) {
                    match in_elem.sty {
                        $(
                            $(ty::$p(_))|* => {
                                return $call(bcx, llargs[0], llargs[1], call_debug_location)
                            }
                        )*
                        _ => {},
                    }
                    require!(false,
                             "unsupported operation on `{}` with element `{}`",
                             in_ty,
                             in_elem)
                }
            )*
        }
    }
    arith! {
        simd_add: TyUint, TyInt => Add, TyFloat => FAdd;
        simd_sub: TyUint, TyInt => Sub, TyFloat => FSub;
        simd_mul: TyUint, TyInt => Mul, TyFloat => FMul;
        simd_div: TyFloat => FDiv;
        simd_shl: TyUint, TyInt => Shl;
        simd_shr: TyUint => LShr, TyInt => AShr;
        simd_and: TyUint, TyInt => And;
        simd_or: TyUint, TyInt => Or;
        simd_xor: TyUint, TyInt => Xor;
    }
    bcx.sess().span_bug(call_info.span, "unknown SIMD intrinsic");
}
// Returns the width of an int TypeVariant, and if it's signed or not
// Returns None if the type is not an integer
fn int_type_width_signed<'tcx>(sty: &ty::TypeVariants<'tcx>, ccx: &CrateContext)
        -> Option<(u64, bool)> {
    use rustc::middle::ty::{TyInt, TyUint};
    match *sty {
        TyInt(t) => Some((match t {
            ast::IntTy::Is => {
                match &ccx.tcx().sess.target.target.target_pointer_width[..] {
                    "32" => 32,
                    "64" => 64,
                    tws => panic!("Unsupported target word size for isize: {}", tws),
                }
            },
            ast::IntTy::I8 => 8,
            ast::IntTy::I16 => 16,
            ast::IntTy::I32 => 32,
            ast::IntTy::I64 => 64,
        }, true)),
        TyUint(t) => Some((match t {
            ast::UintTy::Us => {
                match &ccx.tcx().sess.target.target.target_pointer_width[..] {
                    "32" => 32,
                    "64" => 64,
                    tws => panic!("Unsupported target word size for usize: {}", tws),
                }
            },
            ast::UintTy::U8 => 8,
            ast::UintTy::U16 => 16,
            ast::UintTy::U32 => 32,
            ast::UintTy::U64 => 64,
        }, false)),
        _ => None,
    }
}
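// For the function above: `TyInt(ast::IntTy::I32)` yields `Some((32, true))`,
// `TyUint(ast::UintTy::U8)` yields `Some((8, false))`, isize/usize resolve
// through the target's pointer width, and any non-integer type yields `None`.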