1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 #![allow(non_upper_case_globals)]
12
13 use arena::TypedArena;
14 use intrinsics::{self, Intrinsic};
15 use libc;
16 use llvm;
17 use llvm::{ValueRef, TypeKind};
18 use rustc::infer;
19 use rustc::ty::subst;
20 use rustc::ty::subst::FnSpace;
21 use abi::{Abi, FnType};
22 use adt;
23 use attributes;
24 use base::*;
25 use build::*;
26 use callee::{self, Callee};
27 use cleanup;
28 use cleanup::CleanupMethods;
29 use common::*;
30 use consts;
31 use datum::*;
32 use debuginfo::DebugLoc;
33 use declare;
34 use expr;
35 use glue;
36 use type_of;
37 use machine;
38 use type_::Type;
39 use rustc::ty::{self, Ty};
40 use Disr;
41 use rustc::ty::subst::Substs;
42 use rustc::hir;
43 use syntax::ast;
44 use syntax::ptr::P;
45 use syntax::parse::token;
46
47 use rustc::session::Session;
48 use syntax::codemap::{Span, DUMMY_SP};
49
50 use std::cmp::Ordering;
51
52 fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
53 let llvm_name = match name {
54 "sqrtf32" => "llvm.sqrt.f32",
55 "sqrtf64" => "llvm.sqrt.f64",
56 "powif32" => "llvm.powi.f32",
57 "powif64" => "llvm.powi.f64",
58 "sinf32" => "llvm.sin.f32",
59 "sinf64" => "llvm.sin.f64",
60 "cosf32" => "llvm.cos.f32",
61 "cosf64" => "llvm.cos.f64",
62 "powf32" => "llvm.pow.f32",
63 "powf64" => "llvm.pow.f64",
64 "expf32" => "llvm.exp.f32",
65 "expf64" => "llvm.exp.f64",
66 "exp2f32" => "llvm.exp2.f32",
67 "exp2f64" => "llvm.exp2.f64",
68 "logf32" => "llvm.log.f32",
69 "logf64" => "llvm.log.f64",
70 "log10f32" => "llvm.log10.f32",
71 "log10f64" => "llvm.log10.f64",
72 "log2f32" => "llvm.log2.f32",
73 "log2f64" => "llvm.log2.f64",
74 "fmaf32" => "llvm.fma.f32",
75 "fmaf64" => "llvm.fma.f64",
76 "fabsf32" => "llvm.fabs.f32",
77 "fabsf64" => "llvm.fabs.f64",
78 "copysignf32" => "llvm.copysign.f32",
79 "copysignf64" => "llvm.copysign.f64",
80 "floorf32" => "llvm.floor.f32",
81 "floorf64" => "llvm.floor.f64",
82 "ceilf32" => "llvm.ceil.f32",
83 "ceilf64" => "llvm.ceil.f64",
84 "truncf32" => "llvm.trunc.f32",
85 "truncf64" => "llvm.trunc.f64",
86 "rintf32" => "llvm.rint.f32",
87 "rintf64" => "llvm.rint.f64",
88 "nearbyintf32" => "llvm.nearbyint.f32",
89 "nearbyintf64" => "llvm.nearbyint.f64",
90 "roundf32" => "llvm.round.f32",
91 "roundf64" => "llvm.round.f64",
92 "assume" => "llvm.assume",
93 _ => return None
94 };
95 Some(ccx.get_intrinsic(&llvm_name))
96 }
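// A rough sketch of how this table is consumed: `trans_intrinsic_call` below
// looks the intrinsic name up here first and, on a hit, emits a single call to
// the corresponding LLVM intrinsic on the already-translated arguments,
// roughly:
//
//     let simple = get_simple_intrinsic(ccx, &name);
//     if let Some(llfn) = simple {
//         Call(bcx, llfn, &llargs, call_debug_location)
//     }
//
// so e.g. a call to the `sqrtf64` intrinsic lowers to one `llvm.sqrt.f64` call.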
97
98 /// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
99 /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
100 /// add them to librustc_trans/trans/context.rs
101 pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
102 callee_ty: Ty<'tcx>,
103 fn_ty: &FnType,
104 args: callee::CallArgs<'a, 'tcx>,
105 dest: expr::Dest,
106 call_debug_location: DebugLoc)
107 -> Result<'blk, 'tcx> {
108 let fcx = bcx.fcx;
109 let ccx = fcx.ccx;
110 let tcx = bcx.tcx();
111
112 let _icx = push_ctxt("trans_intrinsic_call");
113
114 let (def_id, substs, sig) = match callee_ty.sty {
115 ty::TyFnDef(def_id, substs, fty) => {
116 let sig = tcx.erase_late_bound_regions(&fty.sig);
117 (def_id, substs, infer::normalize_associated_type(tcx, &sig))
118 }
119 _ => bug!("expected fn item type, found {}", callee_ty)
120 };
121 let arg_tys = sig.inputs;
122 let ret_ty = sig.output;
123 let name = tcx.item_name(def_id).as_str();
124
125 let span = match call_debug_location {
126 DebugLoc::At(_, span) => span,
127 DebugLoc::None => fcx.span.unwrap_or(DUMMY_SP)
128 };
129
130 let cleanup_scope = fcx.push_custom_cleanup_scope();
131
132 // For `transmute` we can just trans the input expr directly into dest
133 if name == "transmute" {
134 let llret_ty = type_of::type_of(ccx, ret_ty.unwrap());
135 match args {
136 callee::ArgExprs(arg_exprs) => {
137 assert_eq!(arg_exprs.len(), 1);
138
139 let (in_type, out_type) = (*substs.types.get(FnSpace, 0),
140 *substs.types.get(FnSpace, 1));
141 let llintype = type_of::type_of(ccx, in_type);
142 let llouttype = type_of::type_of(ccx, out_type);
143
144 let in_type_size = machine::llbitsize_of_real(ccx, llintype);
145 let out_type_size = machine::llbitsize_of_real(ccx, llouttype);
146
147 if let ty::TyFnDef(def_id, substs, _) = in_type.sty {
148 if out_type_size != 0 {
149 // FIXME #19925 Remove this hack after a release cycle.
150 let _ = unpack_datum!(bcx, expr::trans(bcx, &arg_exprs[0]));
151 let llfn = Callee::def(ccx, def_id, substs).reify(ccx).val;
152 let llfnty = val_ty(llfn);
153 let llresult = match dest {
154 expr::SaveIn(d) => d,
155 expr::Ignore => alloc_ty(bcx, out_type, "ret")
156 };
157 Store(bcx, llfn, PointerCast(bcx, llresult, llfnty.ptr_to()));
158 if dest == expr::Ignore {
159 bcx = glue::drop_ty(bcx, llresult, out_type,
160 call_debug_location);
161 }
162 fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
163 fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
164 return Result::new(bcx, llresult);
165 }
166 }
167
168 // This should be caught by the intrinsicck pass
169 assert_eq!(in_type_size, out_type_size);
170
171 let nonpointer_nonaggregate = |llkind: TypeKind| -> bool {
172 use llvm::TypeKind::*;
173 match llkind {
174 Half | Float | Double | X86_FP80 | FP128 |
175 PPC_FP128 | Integer | Vector | X86_MMX => true,
176 _ => false
177 }
178 };
179
180 // An approximation to which types can be directly cast via
181 // LLVM's bitcast. This doesn't cover pointer -> pointer casts,
182 // but does, importantly, cover SIMD types.
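                // (For example, a `u32x4` and a `u64x2` both lower to LLVM
                // vector types, so a transmute between them takes the by-value
                // bitcast path below rather than the pointer-cast fallback.)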
183 let in_kind = llintype.kind();
184 let ret_kind = llret_ty.kind();
185 let bitcast_compatible =
186 (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || {
187 in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer
188 };
189
190 let dest = if bitcast_compatible {
191 // if we're here, the type is scalar-like (a primitive, a
192 // SIMD type or a pointer), and so can be handled as a
193 // by-value ValueRef and can also be directly bitcast to the
194 // target type. Doing this special case makes conversions
195 // like `u32x4` -> `u64x2` much nicer for LLVM and so more
196 // efficient (these are done efficiently implicitly in C
197 // with the `__m128i` type and so this means Rust doesn't
198 // lose out there).
199 let expr = &arg_exprs[0];
200 let datum = unpack_datum!(bcx, expr::trans(bcx, expr));
201 let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "transmute_temp"));
202 let val = if datum.kind.is_by_ref() {
203 load_ty(bcx, datum.val, datum.ty)
204 } else {
205 from_immediate(bcx, datum.val)
206 };
207
208 let cast_val = BitCast(bcx, val, llret_ty);
209
210 match dest {
211 expr::SaveIn(d) => {
212 // this often occurs in a sequence like `Store(val,
213                             // d); val2 = Load(d)`, so it disappears easily.
214 Store(bcx, cast_val, d);
215 }
216 expr::Ignore => {}
217 }
218 dest
219 } else {
220 // The types are too complicated to do with a by-value
221 // bitcast, so pointer cast instead. We need to cast the
222 // dest so the types work out.
223 let dest = match dest {
224 expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
225 expr::Ignore => expr::Ignore
226 };
227 bcx = expr::trans_into(bcx, &arg_exprs[0], dest);
228 dest
229 };
230
231 fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
232 fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
233
234 return match dest {
235 expr::SaveIn(d) => Result::new(bcx, d),
236 expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
237 };
238
239 }
240
241 _ => {
242 bug!("expected expr as argument for transmute");
243 }
244 }
245 }
246
247 // For `move_val_init` we can evaluate the destination address
248 // (the first argument) and then trans the source value (the
249 // second argument) directly into the resulting destination
250 // address.
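// For instance, a call such as `move_val_init(&mut slot, make_value())`
// (illustrative names) evaluates the address of `slot` first and then
// translates `make_value()` straight into it, with no intermediate temporary.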
251 if name == "move_val_init" {
252 if let callee::ArgExprs(ref exprs) = args {
253 let (dest_expr, source_expr) = if exprs.len() != 2 {
254 bug!("expected two exprs as arguments for `move_val_init` intrinsic");
255 } else {
256 (&exprs[0], &exprs[1])
257 };
258
259 // evaluate destination address
260 let dest_datum = unpack_datum!(bcx, expr::trans(bcx, dest_expr));
261 let dest_datum = unpack_datum!(
262 bcx, dest_datum.to_rvalue_datum(bcx, "arg"));
263 let dest_datum = unpack_datum!(
264 bcx, dest_datum.to_appropriate_datum(bcx));
265
266 // `expr::trans_into(bcx, expr, dest)` is equiv to
267 //
268 // `trans(bcx, expr).store_to_dest(dest)`,
269 //
270 // which for `dest == expr::SaveIn(addr)`, is equivalent to:
271 //
272 // `trans(bcx, expr).store_to(bcx, addr)`.
273 let lldest = expr::Dest::SaveIn(dest_datum.val);
274 bcx = expr::trans_into(bcx, source_expr, lldest);
275
276 let llresult = C_nil(ccx);
277 fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
278
279 return Result::new(bcx, llresult);
280 } else {
281 bug!("expected two exprs as arguments for `move_val_init` intrinsic");
282 }
283 }
284
285 // save the actual AST arguments for later (some places need to do
286 // const-evaluation on them)
287 let expr_arguments = match args {
288 callee::ArgExprs(args) => Some(args),
289 _ => None,
290 };
291
292 // Push the arguments.
293 let mut llargs = Vec::new();
294 bcx = callee::trans_args(bcx,
295 Abi::RustIntrinsic,
296 fn_ty,
297 &mut callee::Intrinsic,
298 args,
299 &mut llargs,
300 cleanup::CustomScope(cleanup_scope));
301
302 fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
303
304 // These are the only intrinsic functions that diverge.
305 if name == "abort" {
306 let llfn = ccx.get_intrinsic(&("llvm.trap"));
307 Call(bcx, llfn, &[], call_debug_location);
308 fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
309 Unreachable(bcx);
310 return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
311 } else if &name[..] == "unreachable" {
312 fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
313 Unreachable(bcx);
314 return Result::new(bcx, C_nil(ccx));
315 }
316
317 let ret_ty = match ret_ty {
318 ty::FnConverging(ret_ty) => ret_ty,
319 ty::FnDiverging => bug!()
320 };
321
322 let llret_ty = type_of::type_of(ccx, ret_ty);
323
324 // Get location to store the result. If the user does
325 // not care about the result, just make a stack slot
326 let llresult = match dest {
327 expr::SaveIn(d) => d,
328 expr::Ignore => {
329 if !type_is_zero_size(ccx, ret_ty) {
330 let llresult = alloc_ty(bcx, ret_ty, "intrinsic_result");
331 call_lifetime_start(bcx, llresult);
332 llresult
333 } else {
334 C_undef(llret_ty.ptr_to())
335 }
336 }
337 };
338
339 let simple = get_simple_intrinsic(ccx, &name);
340 let llval = match (simple, &name[..]) {
341 (Some(llfn), _) => {
342 Call(bcx, llfn, &llargs, call_debug_location)
343 }
344 (_, "try") => {
345 bcx = try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult,
346 call_debug_location);
347 C_nil(ccx)
348 }
349 (_, "breakpoint") => {
350 let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
351 Call(bcx, llfn, &[], call_debug_location)
352 }
353 (_, "size_of") => {
354 let tp_ty = *substs.types.get(FnSpace, 0);
355 let lltp_ty = type_of::type_of(ccx, tp_ty);
356 C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
357 }
358 (_, "size_of_val") => {
359 let tp_ty = *substs.types.get(FnSpace, 0);
360 if !type_is_sized(tcx, tp_ty) {
361 let (llsize, _) =
362 glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]);
363 llsize
364 } else {
365 let lltp_ty = type_of::type_of(ccx, tp_ty);
366 C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
367 }
368 }
369 (_, "min_align_of") => {
370 let tp_ty = *substs.types.get(FnSpace, 0);
371 C_uint(ccx, type_of::align_of(ccx, tp_ty))
372 }
373 (_, "min_align_of_val") => {
374 let tp_ty = *substs.types.get(FnSpace, 0);
375 if !type_is_sized(tcx, tp_ty) {
376 let (_, llalign) =
377 glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]);
378 llalign
379 } else {
380 C_uint(ccx, type_of::align_of(ccx, tp_ty))
381 }
382 }
383 (_, "pref_align_of") => {
384 let tp_ty = *substs.types.get(FnSpace, 0);
385 let lltp_ty = type_of::type_of(ccx, tp_ty);
386 C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
387 }
388 (_, "drop_in_place") => {
389 let tp_ty = *substs.types.get(FnSpace, 0);
390 let ptr = if type_is_sized(tcx, tp_ty) {
391 llargs[0]
392 } else {
393 let scratch = rvalue_scratch_datum(bcx, tp_ty, "tmp");
394 Store(bcx, llargs[0], expr::get_dataptr(bcx, scratch.val));
395 Store(bcx, llargs[1], expr::get_meta(bcx, scratch.val));
396 fcx.schedule_lifetime_end(cleanup::CustomScope(cleanup_scope), scratch.val);
397 scratch.val
398 };
399 glue::drop_ty(bcx, ptr, tp_ty, call_debug_location);
400 C_nil(ccx)
401 }
402 (_, "type_name") => {
403 let tp_ty = *substs.types.get(FnSpace, 0);
404 let ty_name = token::intern_and_get_ident(&tp_ty.to_string());
405 C_str_slice(ccx, ty_name)
406 }
407 (_, "type_id") => {
408 let hash = ccx.tcx().hash_crate_independent(*substs.types.get(FnSpace, 0),
409 &ccx.link_meta().crate_hash);
410 C_u64(ccx, hash)
411 }
412 (_, "init_dropped") => {
413 let tp_ty = *substs.types.get(FnSpace, 0);
414 if !type_is_zero_size(ccx, tp_ty) {
415 drop_done_fill_mem(bcx, llresult, tp_ty);
416 }
417 C_nil(ccx)
418 }
419 (_, "init") => {
420 let tp_ty = *substs.types.get(FnSpace, 0);
421 if !type_is_zero_size(ccx, tp_ty) {
422 // Just zero out the stack slot. (See comment on base::memzero for explanation)
423 init_zero_mem(bcx, llresult, tp_ty);
424 }
425 C_nil(ccx)
426 }
427 // Effectively no-ops
428 (_, "uninit") | (_, "forget") => {
429 C_nil(ccx)
430 }
431 (_, "needs_drop") => {
432 let tp_ty = *substs.types.get(FnSpace, 0);
433
434 C_bool(ccx, bcx.fcx.type_needs_drop(tp_ty))
435 }
436 (_, "offset") => {
437 let ptr = llargs[0];
438 let offset = llargs[1];
439 InBoundsGEP(bcx, ptr, &[offset])
440 }
441 (_, "arith_offset") => {
442 let ptr = llargs[0];
443 let offset = llargs[1];
444 GEP(bcx, ptr, &[offset])
445 }
446
447 (_, "copy_nonoverlapping") => {
448 copy_intrinsic(bcx,
449 false,
450 false,
451 *substs.types.get(FnSpace, 0),
452 llargs[1],
453 llargs[0],
454 llargs[2],
455 call_debug_location)
456 }
457 (_, "copy") => {
458 copy_intrinsic(bcx,
459 true,
460 false,
461 *substs.types.get(FnSpace, 0),
462 llargs[1],
463 llargs[0],
464 llargs[2],
465 call_debug_location)
466 }
467 (_, "write_bytes") => {
468 memset_intrinsic(bcx,
469 false,
470 *substs.types.get(FnSpace, 0),
471 llargs[0],
472 llargs[1],
473 llargs[2],
474 call_debug_location)
475 }
476
477 (_, "volatile_copy_nonoverlapping_memory") => {
478 copy_intrinsic(bcx,
479 false,
480 true,
481 *substs.types.get(FnSpace, 0),
482 llargs[0],
483 llargs[1],
484 llargs[2],
485 call_debug_location)
486 }
487 (_, "volatile_copy_memory") => {
488 copy_intrinsic(bcx,
489 true,
490 true,
491 *substs.types.get(FnSpace, 0),
492 llargs[0],
493 llargs[1],
494 llargs[2],
495 call_debug_location)
496 }
497 (_, "volatile_set_memory") => {
498 memset_intrinsic(bcx,
499 true,
500 *substs.types.get(FnSpace, 0),
501 llargs[0],
502 llargs[1],
503 llargs[2],
504 call_debug_location)
505 }
506 (_, "volatile_load") => {
507 let tp_ty = *substs.types.get(FnSpace, 0);
508 let mut ptr = llargs[0];
509 if let Some(ty) = fn_ty.ret.cast {
510 ptr = PointerCast(bcx, ptr, ty.ptr_to());
511 }
512 let load = VolatileLoad(bcx, ptr);
513 unsafe {
514 llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty));
515 }
516 to_immediate(bcx, load, tp_ty)
517 },
518 (_, "volatile_store") => {
519 let tp_ty = *substs.types.get(FnSpace, 0);
520 if type_is_fat_ptr(bcx.tcx(), tp_ty) {
521 VolatileStore(bcx, llargs[1], expr::get_dataptr(bcx, llargs[0]));
522 VolatileStore(bcx, llargs[2], expr::get_meta(bcx, llargs[0]));
523 } else {
524 let val = if fn_ty.args[1].is_indirect() {
525 Load(bcx, llargs[1])
526 } else {
527 from_immediate(bcx, llargs[1])
528 };
529 let ptr = PointerCast(bcx, llargs[0], val_ty(val).ptr_to());
530 let store = VolatileStore(bcx, val, ptr);
531 unsafe {
532 llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty));
533 }
534 }
535 C_nil(ccx)
536 },
537
538 (_, "ctlz") | (_, "cttz") | (_, "ctpop") | (_, "bswap") |
539 (_, "add_with_overflow") | (_, "sub_with_overflow") | (_, "mul_with_overflow") |
540 (_, "overflowing_add") | (_, "overflowing_sub") | (_, "overflowing_mul") |
541 (_, "unchecked_div") | (_, "unchecked_rem") => {
542 let sty = &arg_tys[0].sty;
543 match int_type_width_signed(sty, ccx) {
544 Some((width, signed)) =>
545 match &*name {
546 "ctlz" => count_zeros_intrinsic(bcx, &format!("llvm.ctlz.i{}", width),
547 llargs[0], call_debug_location),
548 "cttz" => count_zeros_intrinsic(bcx, &format!("llvm.cttz.i{}", width),
549 llargs[0], call_debug_location),
550 "ctpop" => Call(bcx, ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
551 &llargs, call_debug_location),
552 "bswap" => {
553 if width == 8 {
554                             llargs[0] // byte-swapping a u8/i8 is just a no-op
555 } else {
556 Call(bcx, ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
557 &llargs, call_debug_location)
558 }
559 }
560 "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
561 let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
562 if signed { 's' } else { 'u' },
563 &name[..3], width);
564 with_overflow_intrinsic(bcx, &intrinsic, llargs[0], llargs[1], llresult,
565 call_debug_location)
566 },
567 "overflowing_add" => Add(bcx, llargs[0], llargs[1], call_debug_location),
568 "overflowing_sub" => Sub(bcx, llargs[0], llargs[1], call_debug_location),
569 "overflowing_mul" => Mul(bcx, llargs[0], llargs[1], call_debug_location),
570 "unchecked_div" =>
571 if signed {
572 SDiv(bcx, llargs[0], llargs[1], call_debug_location)
573 } else {
574 UDiv(bcx, llargs[0], llargs[1], call_debug_location)
575 },
576 "unchecked_rem" =>
577 if signed {
578 SRem(bcx, llargs[0], llargs[1], call_debug_location)
579 } else {
580 URem(bcx, llargs[0], llargs[1], call_debug_location)
581 },
582 _ => bug!(),
583 },
584 None => {
585 span_invalid_monomorphization_error(
586 tcx.sess, span,
587 &format!("invalid monomorphization of `{}` intrinsic: \
588 expected basic integer type, found `{}`", name, sty));
589 C_nil(ccx)
590 }
591 }
592
593 },
594 (_, "fadd_fast") | (_, "fsub_fast") | (_, "fmul_fast") | (_, "fdiv_fast") |
595 (_, "frem_fast") => {
596 let sty = &arg_tys[0].sty;
597 match float_type_width(sty) {
598 Some(_width) =>
599 match &*name {
600 "fadd_fast" => FAddFast(bcx, llargs[0], llargs[1], call_debug_location),
601 "fsub_fast" => FSubFast(bcx, llargs[0], llargs[1], call_debug_location),
602 "fmul_fast" => FMulFast(bcx, llargs[0], llargs[1], call_debug_location),
603 "fdiv_fast" => FDivFast(bcx, llargs[0], llargs[1], call_debug_location),
604 "frem_fast" => FRemFast(bcx, llargs[0], llargs[1], call_debug_location),
605 _ => bug!(),
606 },
607 None => {
608 span_invalid_monomorphization_error(
609 tcx.sess, span,
610 &format!("invalid monomorphization of `{}` intrinsic: \
611 expected basic float type, found `{}`", name, sty));
612 C_nil(ccx)
613 }
614 }
615
616 },
617
618
619 (_, "return_address") => {
620 if !fcx.fn_ty.ret.is_indirect() {
621 span_err!(tcx.sess, span, E0510,
622 "invalid use of `return_address` intrinsic: function \
623 does not use out pointer");
624 C_null(Type::i8p(ccx))
625 } else {
626 PointerCast(bcx, llvm::get_param(fcx.llfn, 0), Type::i8p(ccx))
627 }
628 }
629
630 (_, "discriminant_value") => {
631 let val_ty = substs.types.get(FnSpace, 0);
632 match val_ty.sty {
633 ty::TyEnum(..) => {
634 let repr = adt::represent_type(ccx, *val_ty);
635 adt::trans_get_discr(bcx, &repr, llargs[0],
636 Some(llret_ty), true)
637 }
638 _ => C_null(llret_ty)
639 }
640 }
641 (_, name) if name.starts_with("simd_") => {
642 generic_simd_intrinsic(bcx, name,
643 substs,
644 callee_ty,
645 expr_arguments,
646 &llargs,
647 ret_ty, llret_ty,
648 call_debug_location,
649 span)
650 }
651 // This requires that atomic intrinsics follow a specific naming pattern:
652 // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
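        // For example, "atomic_xadd" uses SeqCst for both orderings,
        // "atomic_load_acq" uses Acquire, and "atomic_cxchg_acqrel_failrelaxed"
        // pairs AcquireRelease with a Monotonic (relaxed) failure ordering,
        // per the match on the split name below.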
653 (_, name) if name.starts_with("atomic_") => {
654 let split: Vec<&str> = name.split('_').collect();
655
656 let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
657 let (order, failorder) = match split.len() {
658 2 => (llvm::SequentiallyConsistent, llvm::SequentiallyConsistent),
659 3 => match split[2] {
660 "unordered" => (llvm::Unordered, llvm::Unordered),
661 "relaxed" => (llvm::Monotonic, llvm::Monotonic),
662 "acq" => (llvm::Acquire, llvm::Acquire),
663 "rel" => (llvm::Release, llvm::Monotonic),
664 "acqrel" => (llvm::AcquireRelease, llvm::Acquire),
665 "failrelaxed" if is_cxchg =>
666 (llvm::SequentiallyConsistent, llvm::Monotonic),
667 "failacq" if is_cxchg =>
668 (llvm::SequentiallyConsistent, llvm::Acquire),
669 _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
670 },
671 4 => match (split[2], split[3]) {
672 ("acq", "failrelaxed") if is_cxchg =>
673 (llvm::Acquire, llvm::Monotonic),
674 ("acqrel", "failrelaxed") if is_cxchg =>
675 (llvm::AcquireRelease, llvm::Monotonic),
676 _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
677 },
678 _ => ccx.sess().fatal("Atomic intrinsic not in correct format"),
679 };
680
681 match split[1] {
682 "cxchg" | "cxchgweak" => {
683 let sty = &substs.types.get(FnSpace, 0).sty;
684 if int_type_width_signed(sty, ccx).is_some() {
685 let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False };
686 let val = AtomicCmpXchg(bcx, llargs[0], llargs[1], llargs[2],
687 order, failorder, weak);
688 let result = ExtractValue(bcx, val, 0);
689 let success = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
690 Store(bcx, result, StructGEP(bcx, llresult, 0));
691 Store(bcx, success, StructGEP(bcx, llresult, 1));
692 } else {
693 span_invalid_monomorphization_error(
694 tcx.sess, span,
695 &format!("invalid monomorphization of `{}` intrinsic: \
696 expected basic integer type, found `{}`", name, sty));
697 }
698 C_nil(ccx)
699 }
700
701 "load" => {
702 let sty = &substs.types.get(FnSpace, 0).sty;
703 if int_type_width_signed(sty, ccx).is_some() {
704 AtomicLoad(bcx, llargs[0], order)
705 } else {
706 span_invalid_monomorphization_error(
707 tcx.sess, span,
708 &format!("invalid monomorphization of `{}` intrinsic: \
709 expected basic integer type, found `{}`", name, sty));
710 C_nil(ccx)
711 }
712 }
713
714 "store" => {
715 let sty = &substs.types.get(FnSpace, 0).sty;
716 if int_type_width_signed(sty, ccx).is_some() {
717 AtomicStore(bcx, llargs[1], llargs[0], order);
718 } else {
719 span_invalid_monomorphization_error(
720 tcx.sess, span,
721 &format!("invalid monomorphization of `{}` intrinsic: \
722 expected basic integer type, found `{}`", name, sty));
723 }
724 C_nil(ccx)
725 }
726
727 "fence" => {
728 AtomicFence(bcx, order, llvm::CrossThread);
729 C_nil(ccx)
730 }
731
732 "singlethreadfence" => {
733 AtomicFence(bcx, order, llvm::SingleThread);
734 C_nil(ccx)
735 }
736
737 // These are all AtomicRMW ops
738 op => {
739 let atom_op = match op {
740 "xchg" => llvm::AtomicXchg,
741 "xadd" => llvm::AtomicAdd,
742 "xsub" => llvm::AtomicSub,
743 "and" => llvm::AtomicAnd,
744 "nand" => llvm::AtomicNand,
745 "or" => llvm::AtomicOr,
746 "xor" => llvm::AtomicXor,
747 "max" => llvm::AtomicMax,
748 "min" => llvm::AtomicMin,
749 "umax" => llvm::AtomicUMax,
750 "umin" => llvm::AtomicUMin,
751 _ => ccx.sess().fatal("unknown atomic operation")
752 };
753
754 let sty = &substs.types.get(FnSpace, 0).sty;
755 if int_type_width_signed(sty, ccx).is_some() {
756 AtomicRMW(bcx, atom_op, llargs[0], llargs[1], order)
757 } else {
758 span_invalid_monomorphization_error(
759 tcx.sess, span,
760 &format!("invalid monomorphization of `{}` intrinsic: \
761 expected basic integer type, found `{}`", name, sty));
762 C_nil(ccx)
763 }
764 }
765 }
766
767 }
768
769 (_, _) => {
770 let intr = match Intrinsic::find(&name) {
771 Some(intr) => intr,
772 None => bug!("unknown intrinsic '{}'", name),
773 };
774 fn one<T>(x: Vec<T>) -> T {
775 assert_eq!(x.len(), 1);
776 x.into_iter().next().unwrap()
777 }
778 fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type,
779 any_changes_needed: &mut bool) -> Vec<Type> {
780 use intrinsics::Type::*;
781 match *t {
782 Void => vec![Type::void(ccx)],
783 Integer(_signed, width, llvm_width) => {
784 *any_changes_needed |= width != llvm_width;
785 vec![Type::ix(ccx, llvm_width as u64)]
786 }
787 Float(x) => {
788 match x {
789 32 => vec![Type::f32(ccx)],
790 64 => vec![Type::f64(ccx)],
791 _ => bug!()
792 }
793 }
794 Pointer(ref t, ref llvm_elem, _const) => {
795 *any_changes_needed |= llvm_elem.is_some();
796
797 let t = llvm_elem.as_ref().unwrap_or(t);
798 let elem = one(ty_to_type(ccx, t,
799 any_changes_needed));
800 vec![elem.ptr_to()]
801 }
802 Vector(ref t, ref llvm_elem, length) => {
803 *any_changes_needed |= llvm_elem.is_some();
804
805 let t = llvm_elem.as_ref().unwrap_or(t);
806 let elem = one(ty_to_type(ccx, t,
807 any_changes_needed));
808 vec![Type::vector(&elem,
809 length as u64)]
810 }
811 Aggregate(false, ref contents) => {
812 let elems = contents.iter()
813 .map(|t| one(ty_to_type(ccx, t, any_changes_needed)))
814 .collect::<Vec<_>>();
815 vec![Type::struct_(ccx, &elems, false)]
816 }
817 Aggregate(true, ref contents) => {
818 *any_changes_needed = true;
819 contents.iter()
820 .flat_map(|t| ty_to_type(ccx, t, any_changes_needed))
821 .collect()
822 }
823 }
824 }
825
826 // This allows an argument list like `foo, (bar, baz),
827 // qux` to be converted into `foo, bar, baz, qux`, integer
828 // arguments to be truncated as needed and pointers to be
829 // cast.
830 fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
831 t: &intrinsics::Type,
832 arg_type: Ty<'tcx>,
833 llarg: ValueRef)
834 -> Vec<ValueRef>
835 {
836 match *t {
837 intrinsics::Type::Aggregate(true, ref contents) => {
838 // We found a tuple that needs squishing! So
839 // run over the tuple and load each field.
840 //
841 // This assumes the type is "simple", i.e. no
842 // destructors, and the contents are SIMD
843 // etc.
844 assert!(!bcx.fcx.type_needs_drop(arg_type));
845
846 let repr = adt::represent_type(bcx.ccx(), arg_type);
847 let repr_ptr = &repr;
848 let arg = adt::MaybeSizedValue::sized(llarg);
849 (0..contents.len())
850 .map(|i| {
851 Load(bcx, adt::trans_field_ptr(bcx, repr_ptr, arg, Disr(0), i))
852 })
853 .collect()
854 }
855 intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
856 let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
857 vec![PointerCast(bcx, llarg,
858 llvm_elem.ptr_to())]
859 }
860 intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
861 let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
862 vec![BitCast(bcx, llarg,
863 Type::vector(&llvm_elem, length as u64))]
864 }
865 intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
866 // the LLVM intrinsic uses a smaller integer
867 // size than the C intrinsic's signature, so
868 // we have to trim it down here.
869 vec![Trunc(bcx, llarg, Type::ix(bcx.ccx(), llvm_width as u64))]
870 }
871 _ => vec![llarg],
872 }
873 }
874
875
876 let mut any_changes_needed = false;
877 let inputs = intr.inputs.iter()
878 .flat_map(|t| ty_to_type(ccx, t, &mut any_changes_needed))
879 .collect::<Vec<_>>();
880
881 let mut out_changes = false;
882 let outputs = one(ty_to_type(ccx, &intr.output, &mut out_changes));
883 // outputting a flattened aggregate is nonsense
884 assert!(!out_changes);
885
886 let llargs = if !any_changes_needed {
887 // no aggregates to flatten, so no change needed
888 llargs
889 } else {
890 // there are some aggregates that need to be flattened
891 // in the LLVM call, so we need to run over the types
892 // again to find them and extract the arguments
893 intr.inputs.iter()
894 .zip(&llargs)
895 .zip(&arg_tys)
896 .flat_map(|((t, llarg), ty)| modify_as_needed(bcx, t, ty, *llarg))
897 .collect()
898 };
899 assert_eq!(inputs.len(), llargs.len());
900
901 let val = match intr.definition {
902 intrinsics::IntrinsicDef::Named(name) => {
903 let f = declare::declare_cfn(ccx,
904 name,
905 Type::func(&inputs, &outputs));
906 Call(bcx, f, &llargs, call_debug_location)
907 }
908 };
909
910 match *intr.output {
911 intrinsics::Type::Aggregate(flatten, ref elems) => {
912 // the output is a tuple so we need to munge it properly
913 assert!(!flatten);
914
915 for i in 0..elems.len() {
916 let val = ExtractValue(bcx, val, i);
917 Store(bcx, val, StructGEP(bcx, llresult, i));
918 }
919 C_nil(ccx)
920 }
921 _ => val,
922 }
923 }
924 };
925
926 if val_ty(llval) != Type::void(ccx) &&
927 machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
928 if let Some(ty) = fn_ty.ret.cast {
929 let ptr = PointerCast(bcx, llresult, ty.ptr_to());
930 let store = Store(bcx, llval, ptr);
931 unsafe {
932 llvm::LLVMSetAlignment(store, type_of::align_of(ccx, ret_ty));
933 }
934 } else {
935 store_ty(bcx, llval, llresult, ret_ty);
936 }
937 }
938
939 // If we made a temporary stack slot, let's clean it up
940 match dest {
941 expr::Ignore => {
942 bcx = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location);
943 call_lifetime_end(bcx, llresult);
944 }
945 expr::SaveIn(_) => {}
946 }
947
948 fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
949
950 Result::new(bcx, llresult)
951 }
952
953 fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
954 allow_overlap: bool,
955 volatile: bool,
956 tp_ty: Ty<'tcx>,
957 dst: ValueRef,
958 src: ValueRef,
959 count: ValueRef,
960 call_debug_location: DebugLoc)
961 -> ValueRef {
962 let ccx = bcx.ccx();
963 let lltp_ty = type_of::type_of(ccx, tp_ty);
964 let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
965 let size = machine::llsize_of(ccx, lltp_ty);
966 let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());
967
968 let operation = if allow_overlap {
969 "memmove"
970 } else {
971 "memcpy"
972 };
973
974 let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, int_size);
975
976 let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
977 let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
978 let llfn = ccx.get_intrinsic(&name);
979
980 Call(bcx,
981 llfn,
982 &[dst_ptr,
983 src_ptr,
984 Mul(bcx, size, count, DebugLoc::None),
985 align,
986 C_bool(ccx, volatile)],
987 call_debug_location)
988 }
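// On a 64-bit target, for example, the non-overlapping case above boils down to
// roughly
//
//     call llvm.memcpy.p0i8.p0i8.i64(dst_ptr, src_ptr, size * count, align, volatile)
//
// while `allow_overlap == true` selects the matching `llvm.memmove` intrinsic instead.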
989
990 fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
991 volatile: bool,
992 tp_ty: Ty<'tcx>,
993 dst: ValueRef,
994 val: ValueRef,
995 count: ValueRef,
996 call_debug_location: DebugLoc)
997 -> ValueRef {
998 let ccx = bcx.ccx();
999 let lltp_ty = type_of::type_of(ccx, tp_ty);
1000 let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
1001 let size = machine::llsize_of(ccx, lltp_ty);
1002 let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());
1003
1004 let name = format!("llvm.memset.p0i8.i{}", int_size);
1005
1006 let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
1007 let llfn = ccx.get_intrinsic(&name);
1008
1009 Call(bcx,
1010 llfn,
1011 &[dst_ptr,
1012 val,
1013 Mul(bcx, size, count, DebugLoc::None),
1014 align,
1015 C_bool(ccx, volatile)],
1016 call_debug_location)
1017 }
1018
1019 fn count_zeros_intrinsic(bcx: Block,
1020 name: &str,
1021 val: ValueRef,
1022 call_debug_location: DebugLoc)
1023 -> ValueRef {
1024 let y = C_bool(bcx.ccx(), false);
1025 let llfn = bcx.ccx().get_intrinsic(&name);
1026 Call(bcx, llfn, &[val, y], call_debug_location)
1027 }
1028
1029 fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1030 name: &str,
1031 a: ValueRef,
1032 b: ValueRef,
1033 out: ValueRef,
1034 call_debug_location: DebugLoc)
1035 -> ValueRef {
1036 let llfn = bcx.ccx().get_intrinsic(&name);
1037
1038 // Convert `i1` to a `bool`, and write it to the out parameter
1039 let val = Call(bcx, llfn, &[a, b], call_debug_location);
1040 let result = ExtractValue(bcx, val, 0);
1041 let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
1042 Store(bcx, result, StructGEP(bcx, out, 0));
1043 Store(bcx, overflow, StructGEP(bcx, out, 1));
1044
1045 C_nil(bcx.ccx())
1046 }
1047
1048 fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1049 func: ValueRef,
1050 data: ValueRef,
1051 local_ptr: ValueRef,
1052 dest: ValueRef,
1053 dloc: DebugLoc) -> Block<'blk, 'tcx> {
1054 if bcx.sess().no_landing_pads() {
1055 Call(bcx, func, &[data], dloc);
1056 Store(bcx, C_null(Type::i8p(bcx.ccx())), dest);
1057 bcx
1058 } else if wants_msvc_seh(bcx.sess()) {
1059 trans_msvc_try(bcx, func, data, local_ptr, dest, dloc)
1060 } else {
1061 trans_gnu_try(bcx, func, data, local_ptr, dest, dloc)
1062 }
1063 }
1064
1065 // MSVC's definition of the `rust_try` function.
1066 //
1067 // This implementation uses the new exception handling instructions in LLVM
1068 // that support SEH on MSVC targets. Although these instructions are meant
1069 // to work for all targets, as of this writing LLVM does not recommend
1070 // using them elsewhere, since the older instructions are still better
1071 // optimized.
1072 fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1073 func: ValueRef,
1074 data: ValueRef,
1075 local_ptr: ValueRef,
1076 dest: ValueRef,
1077 dloc: DebugLoc) -> Block<'blk, 'tcx> {
1078 let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
1079 let ccx = bcx.ccx();
1080 let dloc = DebugLoc::None;
1081
1082 SetPersonalityFn(bcx, bcx.fcx.eh_personality());
1083
1084 let normal = bcx.fcx.new_temp_block("normal");
1085 let catchswitch = bcx.fcx.new_temp_block("catchswitch");
1086 let catchpad = bcx.fcx.new_temp_block("catchpad");
1087 let caught = bcx.fcx.new_temp_block("caught");
1088
1089 let func = llvm::get_param(bcx.fcx.llfn, 0);
1090 let data = llvm::get_param(bcx.fcx.llfn, 1);
1091 let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);
1092
1093 // We're generating an IR snippet that looks like:
1094 //
1095 // declare i32 @rust_try(%func, %data, %ptr) {
1096 // %slot = alloca i8*
1097 // call @llvm.localescape(%slot)
1098 // store %ptr, %slot
1099 // invoke %func(%data) to label %normal unwind label %catchswitch
1100 //
1101 // normal:
1102 // ret i32 0
1103 //
1104 // catchswitch:
1105 // %cs = catchswitch within none [%catchpad] unwind to caller
1106 //
1107 // catchpad:
1108 // %tok = catchpad within %cs [%rust_try_filter]
1109 // catchret from %tok to label %caught
1110 //
1111 // caught:
1112 // ret i32 1
1113 // }
1114 //
1115 // This structure follows the basic usage of the instructions in LLVM
1116 // (see their documentation/test cases for examples), but a
1117 // perhaps-surprising part here is the usage of the `localescape`
1118 // intrinsic. This is used to allow the filter function (also generated
1119 // here) to access variables on the stack of this intrinsic. This
1120 // ability enables us to transfer information about the exception being
1121 // thrown to this point, where we're catching the exception.
1122 //
1123 // More information can be found in libstd's seh.rs implementation.
1124 let slot = Alloca(bcx, Type::i8p(ccx), "slot");
1125 let localescape = ccx.get_intrinsic(&"llvm.localescape");
1126 Call(bcx, localescape, &[slot], dloc);
1127 Store(bcx, local_ptr, slot);
1128 Invoke(bcx, func, &[data], normal.llbb, catchswitch.llbb, dloc);
1129
1130 Ret(normal, C_i32(ccx, 0), dloc);
1131
1132 let cs = CatchSwitch(catchswitch, None, None, 1);
1133 AddHandler(catchswitch, cs, catchpad.llbb);
1134
1135 let filter = generate_filter_fn(bcx.fcx, bcx.fcx.llfn);
1136 let filter = BitCast(catchpad, filter, Type::i8p(ccx));
1137 let tok = CatchPad(catchpad, cs, &[filter]);
1138 CatchRet(catchpad, tok, caught.llbb);
1139
1140 Ret(caught, C_i32(ccx, 1), dloc);
1141 });
1142
1143 // Note that no invoke is used here because by definition this function
1144 // can't panic (that's what it's catching).
1145 let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc);
1146 Store(bcx, ret, dest);
1147 return bcx
1148 }
1149
1150 // Definition of the standard "try" function for Rust using the GNU-like model
1151 // of exceptions (e.g. the normal semantics of LLVM's landingpad and invoke
1152 // instructions).
1153 //
1154 // This translation is a little surprising because we always call a shim
1155 // function instead of inlining the call to `invoke` manually here. This is done
1156 // because in LLVM we're only allowed to have one personality per function
1157 // definition. The call to the `try` intrinsic is being inlined into the
1158 // function calling it, and that function may already have other personality
1159 // functions in play. By calling a shim we're guaranteed that our shim will have
1160 // the right personality function.
1161 fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1162 func: ValueRef,
1163 data: ValueRef,
1164 local_ptr: ValueRef,
1165 dest: ValueRef,
1166 dloc: DebugLoc) -> Block<'blk, 'tcx> {
1167 let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
1168 let ccx = bcx.ccx();
1169 let tcx = ccx.tcx();
1170 let dloc = DebugLoc::None;
1171
1172 // Translates the shims described above:
1173 //
1174 // bcx:
1175 // invoke %func(%args...) normal %normal unwind %catch
1176 //
1177 // normal:
1178 // ret 0
1179 //
1180 // catch:
1181 // (ptr, _) = landingpad
1182 // store ptr, %local_ptr
1183 // ret 1
1184 //
1185 // Note that the `local_ptr` data passed into the `try` intrinsic is
1186 // expected to be `*mut *mut u8` for this to actually work, but that's
1187 // managed by the standard library.
1188
1189 attributes::emit_uwtable(bcx.fcx.llfn, true);
1190 let catch_pers = match tcx.lang_items.eh_personality_catch() {
1191 Some(did) => {
1192 Callee::def(ccx, did, tcx.mk_substs(Substs::empty())).reify(ccx).val
1193 }
1194 None => bug!("eh_personality_catch not defined"),
1195 };
1196
1197 let then = bcx.fcx.new_temp_block("then");
1198 let catch = bcx.fcx.new_temp_block("catch");
1199
1200 let func = llvm::get_param(bcx.fcx.llfn, 0);
1201 let data = llvm::get_param(bcx.fcx.llfn, 1);
1202 let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);
1203 Invoke(bcx, func, &[data], then.llbb, catch.llbb, dloc);
1204 Ret(then, C_i32(ccx, 0), dloc);
1205
1206 // Type indicator for the exception being thrown.
1207 //
1208 // The first value in this tuple is a pointer to the exception object
1209 // being thrown. The second value is a "selector" indicating which of
1210 // the landing pad clauses the exception's type had been matched to.
1211 // rust_try ignores the selector.
1212 let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
1213 false);
1214 let vals = LandingPad(catch, lpad_ty, catch_pers, 1);
1215 AddClause(catch, vals, C_null(Type::i8p(ccx)));
1216 let ptr = ExtractValue(catch, vals, 0);
1217 Store(catch, ptr, BitCast(catch, local_ptr, Type::i8p(ccx).ptr_to()));
1218 Ret(catch, C_i32(ccx, 1), dloc);
1219 });
1220
1221 // Note that no invoke is used here because by definition this function
1222 // can't panic (that's what it's catching).
1223 let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc);
1224 Store(bcx, ret, dest);
1225 return bcx;
1226 }
1227
1228 // Helper function to give a Block to a closure to translate a shim function.
1229 // This is currently primarily used for the `try` intrinsic functions above.
1230 fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
1231 name: &str,
1232 inputs: Vec<Ty<'tcx>>,
1233 output: ty::FnOutput<'tcx>,
1234 trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
1235 -> ValueRef {
1236 let ccx = fcx.ccx;
1237 let sig = ty::FnSig {
1238 inputs: inputs,
1239 output: output,
1240 variadic: false,
1241 };
1242 let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]);
1243
1244 let rust_fn_ty = ccx.tcx().mk_fn_ptr(ty::BareFnTy {
1245 unsafety: hir::Unsafety::Unsafe,
1246 abi: Abi::Rust,
1247 sig: ty::Binder(sig)
1248 });
1249 let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty);
1250 let empty_substs = ccx.tcx().mk_substs(Substs::empty());
1251 let (fcx, block_arena);
1252 block_arena = TypedArena::new();
1253 fcx = FunctionContext::new(ccx, llfn, fn_ty, None, empty_substs, &block_arena);
1254 let bcx = fcx.init(true, None);
1255 trans(bcx);
1256 fcx.cleanup();
1257 llfn
1258 }
1259
1260 // Helper function used to get a handle to the `__rust_try` function used to
1261 // catch exceptions.
1262 //
1263 // This function is only generated once and is then cached.
1264 fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
1265 trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
1266 -> ValueRef {
1267 let ccx = fcx.ccx;
1268 if let Some(llfn) = ccx.rust_try_fn().get() {
1269 return llfn;
1270 }
1271
1272 // Define the type up front for the signature of the rust_try function.
1273 let tcx = ccx.tcx();
1274 let i8p = tcx.mk_mut_ptr(tcx.types.i8);
1275 let fn_ty = tcx.mk_fn_ptr(ty::BareFnTy {
1276 unsafety: hir::Unsafety::Unsafe,
1277 abi: Abi::Rust,
1278 sig: ty::Binder(ty::FnSig {
1279 inputs: vec![i8p],
1280 output: ty::FnOutput::FnConverging(tcx.mk_nil()),
1281 variadic: false,
1282 }),
1283 });
1284 let output = ty::FnOutput::FnConverging(tcx.types.i32);
1285 let rust_try = gen_fn(fcx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans);
1286 ccx.rust_try_fn().set(Some(rust_try));
1287 return rust_try
1288 }
1289
1290 // For MSVC-style exceptions (SEH), the compiler generates a filter function
1291 // which is used to determine whether an exception is being caught (e.g. whether
1292 // it is a Rust exception or some other kind).
1293 //
1294 // This function is used to generate said filter function. The shim generated
1295 // here is actually just a thin wrapper to call the real implementation in the
1296 // standard library itself. For reasons as to why, see seh.rs in the standard
1297 // library.
1298 fn generate_filter_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
1299 rust_try_fn: ValueRef)
1300 -> ValueRef {
1301 let ccx = fcx.ccx;
1302 let tcx = ccx.tcx();
1303 let dloc = DebugLoc::None;
1304
1305 let rust_try_filter = match tcx.lang_items.msvc_try_filter() {
1306 Some(did) => {
1307 Callee::def(ccx, did, tcx.mk_substs(Substs::empty())).reify(ccx).val
1308 }
1309 None => bug!("msvc_try_filter not defined"),
1310 };
1311
1312 let output = ty::FnOutput::FnConverging(tcx.types.i32);
1313 let i8p = tcx.mk_mut_ptr(tcx.types.i8);
1314
1315 let frameaddress = ccx.get_intrinsic(&"llvm.frameaddress");
1316 let recoverfp = ccx.get_intrinsic(&"llvm.x86.seh.recoverfp");
1317 let localrecover = ccx.get_intrinsic(&"llvm.localrecover");
1318
1319 // On all platforms, once we have the EXCEPTION_POINTERS handle as well as
1320 // the base pointer, we follow the standard layout of:
1321 //
1322 // block:
1323 // %parentfp = call i8* llvm.x86.seh.recoverfp(@rust_try_fn, %bp)
1324 // %arg = call i8* llvm.localrecover(@rust_try_fn, %parentfp, 0)
1325 // %ret = call i32 @the_real_filter_function(%ehptrs, %arg)
1326 // ret i32 %ret
1327 //
1328 // The recoverfp intrinsic is used to recover the frame pointer of the
1329 // `rust_try_fn` function, which is then in turn passed to the
1330 // `localrecover` intrinsic (pairing with the `localescape` intrinsic
1331 // mentioned above). Putting all this together means that we now have a
1332 // handle to the arguments passed into the `try` function, allowing writing
1333 // to the stack over there.
1334 //
1335 // For more info, see seh.rs in the standard library.
1336 let do_trans = |bcx: Block, ehptrs, base_pointer| {
1337 let rust_try_fn = BitCast(bcx, rust_try_fn, Type::i8p(ccx));
1338 let parentfp = Call(bcx, recoverfp, &[rust_try_fn, base_pointer], dloc);
1339 let arg = Call(bcx, localrecover,
1340 &[rust_try_fn, parentfp, C_i32(ccx, 0)], dloc);
1341 let ret = Call(bcx, rust_try_filter, &[ehptrs, arg], dloc);
1342 Ret(bcx, ret, dloc);
1343 };
1344
1345 if ccx.tcx().sess.target.target.arch == "x86" {
1346 // On x86 the filter function doesn't actually receive any arguments.
1347 // Instead the %ebp register contains some contextual information.
1348 //
1349 // Unfortunately I don't know of any great documentation on what's going
1350 // on here; all I can say is that there are a few test cases in LLVM's
1351 // test suite which follow this pattern of instructions, so we just do
1352 // the same.
1353 gen_fn(fcx, "__rustc_try_filter", vec![], output, &mut |bcx| {
1354 let ebp = Call(bcx, frameaddress, &[C_i32(ccx, 1)], dloc);
1355 let exn = InBoundsGEP(bcx, ebp, &[C_i32(ccx, -20)]);
1356 let exn = Load(bcx, BitCast(bcx, exn, Type::i8p(ccx).ptr_to()));
1357 do_trans(bcx, exn, ebp);
1358 })
1359 } else if ccx.tcx().sess.target.target.arch == "x86_64" {
1360 // Conveniently on x86_64 the EXCEPTION_POINTERS handle and base pointer
1361 // are passed in as arguments to the filter function, so we just pass
1362 // those along.
1363 gen_fn(fcx, "__rustc_try_filter", vec![i8p, i8p], output, &mut |bcx| {
1364 let exn = llvm::get_param(bcx.fcx.llfn, 0);
1365 let rbp = llvm::get_param(bcx.fcx.llfn, 1);
1366 do_trans(bcx, exn, rbp);
1367 })
1368 } else {
1369 bug!("unknown target to generate a filter function")
1370 }
1371 }
1372
1373 fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
1374 span_err!(a, b, E0511, "{}", c);
1375 }
1376
1377 fn generic_simd_intrinsic<'blk, 'tcx, 'a>
1378 (bcx: Block<'blk, 'tcx>,
1379 name: &str,
1380 substs: &'tcx subst::Substs<'tcx>,
1381 callee_ty: Ty<'tcx>,
1382 args: Option<&[P<hir::Expr>]>,
1383 llargs: &[ValueRef],
1384 ret_ty: Ty<'tcx>,
1385 llret_ty: Type,
1386 call_debug_location: DebugLoc,
1387 span: Span) -> ValueRef
1388 {
1389 // macros for error handling:
1390 macro_rules! emit_error {
1391 ($msg: tt) => {
1392 emit_error!($msg, )
1393 };
1394 ($msg: tt, $($fmt: tt)*) => {
1395 span_invalid_monomorphization_error(
1396 bcx.sess(), span,
1397 &format!(concat!("invalid monomorphization of `{}` intrinsic: ",
1398 $msg),
1399 name, $($fmt)*));
1400 }
1401 }
1402 macro_rules! require {
1403 ($cond: expr, $($fmt: tt)*) => {
1404 if !$cond {
1405 emit_error!($($fmt)*);
1406 return C_nil(bcx.ccx())
1407 }
1408 }
1409 }
1410 macro_rules! require_simd {
1411 ($ty: expr, $position: expr) => {
1412 require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
1413 }
1414 }
1415
1416
1417
1418 let tcx = bcx.tcx();
1419 let sig = tcx.erase_late_bound_regions(callee_ty.fn_sig());
1420 let sig = infer::normalize_associated_type(tcx, &sig);
1421 let arg_tys = sig.inputs;
1422
1423 // every intrinsic takes a SIMD vector as its first argument
1424 require_simd!(arg_tys[0], "input");
1425 let in_ty = arg_tys[0];
1426 let in_elem = arg_tys[0].simd_type(tcx);
1427 let in_len = arg_tys[0].simd_size(tcx);
1428
1429 let comparison = match name {
1430 "simd_eq" => Some(hir::BiEq),
1431 "simd_ne" => Some(hir::BiNe),
1432 "simd_lt" => Some(hir::BiLt),
1433 "simd_le" => Some(hir::BiLe),
1434 "simd_gt" => Some(hir::BiGt),
1435 "simd_ge" => Some(hir::BiGe),
1436 _ => None
1437 };
1438
1439 if let Some(cmp_op) = comparison {
1440 require_simd!(ret_ty, "return");
1441
1442 let out_len = ret_ty.simd_size(tcx);
1443 require!(in_len == out_len,
1444 "expected return type with length {} (same as input type `{}`), \
1445 found `{}` with length {}",
1446 in_len, in_ty,
1447 ret_ty, out_len);
1448 require!(llret_ty.element_type().kind() == llvm::Integer,
1449 "expected return type with integer elements, found `{}` with non-integer `{}`",
1450 ret_ty,
1451 ret_ty.simd_type(tcx));
1452
1453 return compare_simd_types(bcx,
1454 llargs[0],
1455 llargs[1],
1456 in_elem,
1457 llret_ty,
1458 cmp_op,
1459 call_debug_location)
1460 }
1461
1462 if name.starts_with("simd_shuffle") {
1463 let n: usize = match name["simd_shuffle".len()..].parse() {
1464 Ok(n) => n,
1465 Err(_) => span_bug!(span,
1466 "bad `simd_shuffle` instruction only caught in trans?")
1467 };
1468
1469 require_simd!(ret_ty, "return");
1470
1471 let out_len = ret_ty.simd_size(tcx);
1472 require!(out_len == n,
1473 "expected return type of length {}, found `{}` with length {}",
1474 n, ret_ty, out_len);
1475 require!(in_elem == ret_ty.simd_type(tcx),
1476 "expected return element type `{}` (element of input `{}`), \
1477 found `{}` with element type `{}`",
1478 in_elem, in_ty,
1479 ret_ty, ret_ty.simd_type(tcx));
1480
1481 let total_len = in_len as u64 * 2;
1482
1483 let (vector, indirect) = match args {
1484 Some(args) => {
1485 match consts::const_expr(bcx.ccx(), &args[2], substs, None,
1486 // this should probably help simd error reporting
1487 consts::TrueConst::Yes) {
1488 Ok((vector, _)) => (vector, false),
1489 Err(err) => bcx.sess().span_fatal(span, &err.description()),
1490 }
1491 }
1492 None => (llargs[2], !type_is_immediate(bcx.ccx(), arg_tys[2]))
1493 };
1494
1495 let indices: Option<Vec<_>> = (0..n)
1496 .map(|i| {
1497 let arg_idx = i;
1498 let val = if indirect {
1499 Load(bcx, StructGEP(bcx, vector, i))
1500 } else {
1501 const_get_elt(vector, &[i as libc::c_uint])
1502 };
1503 let c = const_to_opt_uint(val);
1504 match c {
1505 None => {
1506 emit_error!("shuffle index #{} is not a constant", arg_idx);
1507 None
1508 }
1509 Some(idx) if idx >= total_len => {
1510 emit_error!("shuffle index #{} is out of bounds (limit {})",
1511 arg_idx, total_len);
1512 None
1513 }
1514 Some(idx) => Some(C_i32(bcx.ccx(), idx as i32)),
1515 }
1516 })
1517 .collect();
1518 let indices = match indices {
1519 Some(i) => i,
1520 None => return C_null(llret_ty)
1521 };
1522
1523 return ShuffleVector(bcx, llargs[0], llargs[1], C_vector(&indices))
1524 }
1525
1526 if name == "simd_insert" {
1527 require!(in_elem == arg_tys[2],
1528 "expected inserted type `{}` (element of input `{}`), found `{}`",
1529 in_elem, in_ty, arg_tys[2]);
1530 return InsertElement(bcx, llargs[0], llargs[2], llargs[1])
1531 }
1532 if name == "simd_extract" {
1533 require!(ret_ty == in_elem,
1534 "expected return type `{}` (element of input `{}`), found `{}`",
1535 in_elem, in_ty, ret_ty);
1536 return ExtractElement(bcx, llargs[0], llargs[1])
1537 }
1538
1539 if name == "simd_cast" {
1540 require_simd!(ret_ty, "return");
1541 let out_len = ret_ty.simd_size(tcx);
1542 require!(in_len == out_len,
1543 "expected return type with length {} (same as input type `{}`), \
1544 found `{}` with length {}",
1545 in_len, in_ty,
1546 ret_ty, out_len);
1547 // casting cares about nominal type, not just structural type
1548 let out_elem = ret_ty.simd_type(tcx);
1549
1550 if in_elem == out_elem { return llargs[0]; }
1551
1552 enum Style { Float, Int(/* is signed? */ bool), Unsupported }
1553
1554 let (in_style, in_width) = match in_elem.sty {
1555 // vectors of pointer-sized integers should've been
1556 // disallowed before here, so this unwrap is safe.
1557 ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
1558 ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
1559 ty::TyFloat(f) => (Style::Float, f.bit_width()),
1560 _ => (Style::Unsupported, 0)
1561 };
1562 let (out_style, out_width) = match out_elem.sty {
1563 ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
1564 ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
1565 ty::TyFloat(f) => (Style::Float, f.bit_width()),
1566 _ => (Style::Unsupported, 0)
1567 };
1568
1569 match (in_style, out_style) {
1570 (Style::Int(in_is_signed), Style::Int(_)) => {
1571 return match in_width.cmp(&out_width) {
1572 Ordering::Greater => Trunc(bcx, llargs[0], llret_ty),
1573 Ordering::Equal => llargs[0],
1574 Ordering::Less => if in_is_signed {
1575 SExt(bcx, llargs[0], llret_ty)
1576 } else {
1577 ZExt(bcx, llargs[0], llret_ty)
1578 }
1579 }
1580 }
1581 (Style::Int(in_is_signed), Style::Float) => {
1582 return if in_is_signed {
1583 SIToFP(bcx, llargs[0], llret_ty)
1584 } else {
1585 UIToFP(bcx, llargs[0], llret_ty)
1586 }
1587 }
1588 (Style::Float, Style::Int(out_is_signed)) => {
1589 return if out_is_signed {
1590 FPToSI(bcx, llargs[0], llret_ty)
1591 } else {
1592 FPToUI(bcx, llargs[0], llret_ty)
1593 }
1594 }
1595 (Style::Float, Style::Float) => {
1596 return match in_width.cmp(&out_width) {
1597 Ordering::Greater => FPTrunc(bcx, llargs[0], llret_ty),
1598 Ordering::Equal => llargs[0],
1599 Ordering::Less => FPExt(bcx, llargs[0], llret_ty)
1600 }
1601 }
1602 _ => {/* Unsupported. Fallthrough. */}
1603 }
1604 require!(false,
1605 "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
1606 in_ty, in_elem,
1607 ret_ty, out_elem);
1608 }
1609 macro_rules! arith {
1610 ($($name: ident: $($($p: ident),* => $call: expr),*;)*) => {
1611 $(
1612 if name == stringify!($name) {
1613 match in_elem.sty {
1614 $(
1615 $(ty::$p(_))|* => {
1616 return $call(bcx, llargs[0], llargs[1], call_debug_location)
1617 }
1618 )*
1619 _ => {},
1620 }
1621 require!(false,
1622 "unsupported operation on `{}` with element `{}`",
1623 in_ty,
1624 in_elem)
1625 })*
1626 }
1627 }
1628 arith! {
1629 simd_add: TyUint, TyInt => Add, TyFloat => FAdd;
1630 simd_sub: TyUint, TyInt => Sub, TyFloat => FSub;
1631 simd_mul: TyUint, TyInt => Mul, TyFloat => FMul;
1632 simd_div: TyFloat => FDiv;
1633 simd_shl: TyUint, TyInt => Shl;
1634 simd_shr: TyUint => LShr, TyInt => AShr;
1635 simd_and: TyUint, TyInt => And;
1636 simd_or: TyUint, TyInt => Or;
1637 simd_xor: TyUint, TyInt => Xor;
1638 }
1639 span_bug!(span, "unknown SIMD intrinsic");
1640 }
1641
1642 // Returns the width of an int TypeVariant, and if it's signed or not
1643 // Returns None if the type is not an integer
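// For instance, `i32` maps to Some((32, true)), `usize` on a 64-bit target to
// Some((64, false)), and any non-integer type to None.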
1644 fn int_type_width_signed<'tcx>(sty: &ty::TypeVariants<'tcx>, ccx: &CrateContext)
1645 -> Option<(u64, bool)> {
1646 use rustc::ty::{TyInt, TyUint};
1647 match *sty {
1648 TyInt(t) => Some((match t {
1649 ast::IntTy::Is => {
1650 match &ccx.tcx().sess.target.target.target_pointer_width[..] {
1651 "32" => 32,
1652 "64" => 64,
1653 tws => bug!("Unsupported target word size for isize: {}", tws),
1654 }
1655 },
1656 ast::IntTy::I8 => 8,
1657 ast::IntTy::I16 => 16,
1658 ast::IntTy::I32 => 32,
1659 ast::IntTy::I64 => 64,
1660 }, true)),
1661 TyUint(t) => Some((match t {
1662 ast::UintTy::Us => {
1663 match &ccx.tcx().sess.target.target.target_pointer_width[..] {
1664 "32" => 32,
1665 "64" => 64,
1666 tws => bug!("Unsupported target word size for usize: {}", tws),
1667 }
1668 },
1669 ast::UintTy::U8 => 8,
1670 ast::UintTy::U16 => 16,
1671 ast::UintTy::U32 => 32,
1672 ast::UintTy::U64 => 64,
1673 }, false)),
1674 _ => None,
1675 }
1676 }
1677
1678 // Returns the width of a float TypeVariant
1679 // Returns None if the type is not a float
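// For instance, `f32` maps to Some(32), `f64` to Some(64), and any other type to None.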
1680 fn float_type_width<'tcx>(sty: &ty::TypeVariants<'tcx>)
1681 -> Option<u64> {
1682 use rustc::ty::TyFloat;
1683 match *sty {
1684 TyFloat(t) => Some(match t {
1685 ast::FloatTy::F32 => 32,
1686 ast::FloatTy::F64 => 64,
1687 }),
1688 _ => None,
1689 }
1690 }