1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 #![allow(non_upper_case_globals)]
12
13 use arena::TypedArena;
14 use intrinsics::{self, Intrinsic};
15 use libc;
16 use llvm;
17 use llvm::{ValueRef, TypeKind};
18 use rustc::ty::subst;
19 use rustc::ty::subst::FnSpace;
20 use abi::{Abi, FnType};
21 use adt;
22 use base::*;
23 use build::*;
24 use callee::{self, Callee};
25 use cleanup;
26 use cleanup::CleanupMethods;
27 use common::*;
28 use consts;
29 use datum::*;
30 use debuginfo::DebugLoc;
31 use declare;
32 use expr;
33 use glue;
34 use type_of;
35 use machine;
36 use type_::Type;
37 use rustc::ty::{self, Ty};
38 use Disr;
39 use rustc::hir;
40 use syntax::ast;
41 use syntax::ptr::P;
42 use syntax::parse::token;
43
44 use rustc::session::Session;
45 use rustc_const_eval::fatal_const_eval_err;
46 use syntax_pos::{Span, DUMMY_SP};
47
48 use std::cmp::Ordering;
49
50 fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
51 let llvm_name = match name {
52 "sqrtf32" => "llvm.sqrt.f32",
53 "sqrtf64" => "llvm.sqrt.f64",
54 "powif32" => "llvm.powi.f32",
55 "powif64" => "llvm.powi.f64",
56 "sinf32" => "llvm.sin.f32",
57 "sinf64" => "llvm.sin.f64",
58 "cosf32" => "llvm.cos.f32",
59 "cosf64" => "llvm.cos.f64",
60 "powf32" => "llvm.pow.f32",
61 "powf64" => "llvm.pow.f64",
62 "expf32" => "llvm.exp.f32",
63 "expf64" => "llvm.exp.f64",
64 "exp2f32" => "llvm.exp2.f32",
65 "exp2f64" => "llvm.exp2.f64",
66 "logf32" => "llvm.log.f32",
67 "logf64" => "llvm.log.f64",
68 "log10f32" => "llvm.log10.f32",
69 "log10f64" => "llvm.log10.f64",
70 "log2f32" => "llvm.log2.f32",
71 "log2f64" => "llvm.log2.f64",
72 "fmaf32" => "llvm.fma.f32",
73 "fmaf64" => "llvm.fma.f64",
74 "fabsf32" => "llvm.fabs.f32",
75 "fabsf64" => "llvm.fabs.f64",
76 "copysignf32" => "llvm.copysign.f32",
77 "copysignf64" => "llvm.copysign.f64",
78 "floorf32" => "llvm.floor.f32",
79 "floorf64" => "llvm.floor.f64",
80 "ceilf32" => "llvm.ceil.f32",
81 "ceilf64" => "llvm.ceil.f64",
82 "truncf32" => "llvm.trunc.f32",
83 "truncf64" => "llvm.trunc.f64",
84 "rintf32" => "llvm.rint.f32",
85 "rintf64" => "llvm.rint.f64",
86 "nearbyintf32" => "llvm.nearbyint.f32",
87 "nearbyintf64" => "llvm.nearbyint.f64",
88 "roundf32" => "llvm.round.f32",
89 "roundf64" => "llvm.round.f64",
90 "assume" => "llvm.assume",
91 _ => return None
92 };
93 Some(ccx.get_intrinsic(&llvm_name))
94 }
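// For illustration (a sketch, assuming the usual libcore wrappers): a call such as
// `unsafe { core::intrinsics::sqrtf32(x) }` reaches trans_intrinsic_call below, which
// first asks this function for a one-to-one LLVM mapping:
//
//     get_simple_intrinsic(ccx, "sqrtf32")            // => Some(llvm.sqrt.f32)
//     get_simple_intrinsic(ccx, "add_with_overflow")  // => None; handled by the big match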
95
96 /// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
97 /// and in libcore/intrinsics.rs; if you need access to any LLVM intrinsics,
98 /// add them to librustc_trans/trans/context.rs.
99 pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
100 callee_ty: Ty<'tcx>,
101 fn_ty: &FnType,
102 args: callee::CallArgs<'a, 'tcx>,
103 dest: expr::Dest,
104 call_debug_location: DebugLoc)
105 -> Result<'blk, 'tcx> {
106 let fcx = bcx.fcx;
107 let ccx = fcx.ccx;
108 let tcx = bcx.tcx();
109
110 let _icx = push_ctxt("trans_intrinsic_call");
111
112 let (def_id, substs, sig) = match callee_ty.sty {
113 ty::TyFnDef(def_id, substs, fty) => {
114 let sig = tcx.erase_late_bound_regions(&fty.sig);
115 (def_id, substs, tcx.normalize_associated_type(&sig))
116 }
117 _ => bug!("expected fn item type, found {}", callee_ty)
118 };
119 let arg_tys = sig.inputs;
120 let ret_ty = sig.output;
121 let name = tcx.item_name(def_id).as_str();
122
123 let span = match call_debug_location {
124 DebugLoc::At(_, span) | DebugLoc::ScopeAt(_, span) => span,
125 DebugLoc::None => {
126 span_bug!(fcx.span.unwrap_or(DUMMY_SP),
127 "intrinsic `{}` called with missing span", name);
128 }
129 };
130
131 let cleanup_scope = fcx.push_custom_cleanup_scope();
132
133 // For `transmute` we can just trans the input expr directly into dest
134 if name == "transmute" {
135 let llret_ty = type_of::type_of(ccx, ret_ty);
136 match args {
137 callee::ArgExprs(arg_exprs) => {
138 assert_eq!(arg_exprs.len(), 1);
139
140 let (in_type, out_type) = (*substs.types.get(FnSpace, 0),
141 *substs.types.get(FnSpace, 1));
142 let llintype = type_of::type_of(ccx, in_type);
143 let llouttype = type_of::type_of(ccx, out_type);
144
145 let in_type_size = machine::llbitsize_of_real(ccx, llintype);
146 let out_type_size = machine::llbitsize_of_real(ccx, llouttype);
147
148 if let ty::TyFnDef(def_id, substs, _) = in_type.sty {
149 if out_type_size != 0 {
150 // FIXME #19925 Remove this hack after a release cycle.
151 let _ = unpack_datum!(bcx, expr::trans(bcx, &arg_exprs[0]));
152 let llfn = Callee::def(ccx, def_id, substs).reify(ccx).val;
153 let llfnty = val_ty(llfn);
154 let llresult = match dest {
155 expr::SaveIn(d) => d,
156 expr::Ignore => alloc_ty(bcx, out_type, "ret")
157 };
158 Store(bcx, llfn, PointerCast(bcx, llresult, llfnty.ptr_to()));
159 if dest == expr::Ignore {
160 bcx = glue::drop_ty(bcx, llresult, out_type,
161 call_debug_location);
162 }
163 fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
164 fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
165 return Result::new(bcx, llresult);
166 }
167 }
168
169 // This should be caught by the intrinsicck pass
170 assert_eq!(in_type_size, out_type_size);
171
172 let nonpointer_nonaggregate = |llkind: TypeKind| -> bool {
173 use llvm::TypeKind::*;
174 match llkind {
175 Half | Float | Double | X86_FP80 | FP128 |
176 PPC_FP128 | Integer | Vector | X86_MMX => true,
177 _ => false
178 }
179 };
180
181 // An approximation to which types can be directly cast via
182 // LLVM's bitcast. This doesn't cover pointer -> pointer casts,
183 // but does, importantly, cover SIMD types.
184 let in_kind = llintype.kind();
185 let ret_kind = llret_ty.kind();
186 let bitcast_compatible =
187 (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || {
188 in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer
189 };
190
191 let dest = if bitcast_compatible {
192                     // If we're here, the type is scalar-like (a primitive, a
193                     // SIMD type or a pointer), so it can be handled as a
194                     // by-value ValueRef and can be bitcast directly to the
195                     // target type. Special-casing this makes conversions
196                     // like `u32x4` -> `u64x2` much nicer for LLVM and therefore
197                     // more efficient (C does these implicitly and efficiently
198                     // with its `__m128i` type, so Rust doesn't lose out
199                     // there).
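                    //
                    // A minimal sketch of user code that takes this path (hypothetical
                    // SIMD type names, assuming both sides are 128-bit vectors):
                    //
                    //     let wide: u64x2 = unsafe { std::mem::transmute(narrow_u32x4) };
                    //
                    // which becomes a single by-value bitcast instead of a store/reload
                    // through a casted stack slot.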
200 let expr = &arg_exprs[0];
201 let datum = unpack_datum!(bcx, expr::trans(bcx, expr));
202 let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "transmute_temp"));
203 let val = if datum.kind.is_by_ref() {
204 load_ty(bcx, datum.val, datum.ty)
205 } else {
206 from_immediate(bcx, datum.val)
207 };
208
209 let cast_val = BitCast(bcx, val, llret_ty);
210
211 match dest {
212 expr::SaveIn(d) => {
213 // this often occurs in a sequence like `Store(val,
214 // d); val2 = Load(d)`, so disappears easily.
215 Store(bcx, cast_val, d);
216 }
217 expr::Ignore => {}
218 }
219 dest
220 } else {
221                     // The types are too complicated for a by-value bitcast,
222                     // so use a pointer cast instead. We need to cast the
223                     // dest so that the types work out.
224 let dest = match dest {
225 expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
226 expr::Ignore => expr::Ignore
227 };
228 bcx = expr::trans_into(bcx, &arg_exprs[0], dest);
229 dest
230 };
231
232 fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
233 fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
234
235 return match dest {
236 expr::SaveIn(d) => Result::new(bcx, d),
237 expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
238 };
239
240 }
241
242 _ => {
243 bug!("expected expr as argument for transmute");
244 }
245 }
246 }
247
248 // For `move_val_init` we can evaluate the destination address
249 // (the first argument) and then trans the source value (the
250 // second argument) directly into the resulting destination
251 // address.
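    //
    // A rough usage sketch (hypothetical caller, illustrative only):
    //
    //     // writes `value` into `*dest` without reading or dropping whatever
    //     // uninitialized bytes were there before
    //     unsafe { intrinsics::move_val_init(&mut *dest, value) };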
252 if name == "move_val_init" {
253 if let callee::ArgExprs(ref exprs) = args {
254 let (dest_expr, source_expr) = if exprs.len() != 2 {
255 bug!("expected two exprs as arguments for `move_val_init` intrinsic");
256 } else {
257 (&exprs[0], &exprs[1])
258 };
259
260 // evaluate destination address
261 let dest_datum = unpack_datum!(bcx, expr::trans(bcx, dest_expr));
262 let dest_datum = unpack_datum!(
263 bcx, dest_datum.to_rvalue_datum(bcx, "arg"));
264 let dest_datum = unpack_datum!(
265 bcx, dest_datum.to_appropriate_datum(bcx));
266
267             // `expr::trans_into(bcx, expr, dest)` is equivalent to
268 //
269 // `trans(bcx, expr).store_to_dest(dest)`,
270 //
271 // which for `dest == expr::SaveIn(addr)`, is equivalent to:
272 //
273 // `trans(bcx, expr).store_to(bcx, addr)`.
274 let lldest = expr::Dest::SaveIn(dest_datum.val);
275 bcx = expr::trans_into(bcx, source_expr, lldest);
276
277 let llresult = C_nil(ccx);
278 fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
279
280 return Result::new(bcx, llresult);
281 } else {
282 bug!("expected two exprs as arguments for `move_val_init` intrinsic");
283 }
284 }
285
286 // save the actual AST arguments for later (some places need to do
287 // const-evaluation on them)
288 let expr_arguments = match args {
289 callee::ArgExprs(args) => Some(args),
290 _ => None,
291 };
292
293 // Push the arguments.
294 let mut llargs = Vec::new();
295 bcx = callee::trans_args(bcx,
296 Abi::RustIntrinsic,
297 fn_ty,
298 &mut callee::Intrinsic,
299 args,
300 &mut llargs,
301 cleanup::CustomScope(cleanup_scope));
302
303 fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
304
305 // These are the only intrinsic functions that diverge.
306 if name == "abort" {
307 let llfn = ccx.get_intrinsic(&("llvm.trap"));
308 Call(bcx, llfn, &[], call_debug_location);
309 fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
310 Unreachable(bcx);
311 return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
312 } else if &name[..] == "unreachable" {
313 fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
314 Unreachable(bcx);
315 return Result::new(bcx, C_nil(ccx));
316 }
317
318 let llret_ty = type_of::type_of(ccx, ret_ty);
319
320 // Get location to store the result. If the user does
321 // not care about the result, just make a stack slot
322 let llresult = match dest {
323 expr::SaveIn(d) => d,
324 expr::Ignore => {
325 if !type_is_zero_size(ccx, ret_ty) {
326 let llresult = alloc_ty(bcx, ret_ty, "intrinsic_result");
327 call_lifetime_start(bcx, llresult);
328 llresult
329 } else {
330 C_undef(llret_ty.ptr_to())
331 }
332 }
333 };
334
335 let simple = get_simple_intrinsic(ccx, &name);
336 let llval = match (simple, &name[..]) {
337 (Some(llfn), _) => {
338 Call(bcx, llfn, &llargs, call_debug_location)
339 }
340 (_, "try") => {
341 bcx = try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult,
342 call_debug_location);
343 C_nil(ccx)
344 }
345 (_, "breakpoint") => {
346 let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
347 Call(bcx, llfn, &[], call_debug_location)
348 }
349 (_, "size_of") => {
350 let tp_ty = *substs.types.get(FnSpace, 0);
351 let lltp_ty = type_of::type_of(ccx, tp_ty);
352 C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
353 }
354 (_, "size_of_val") => {
355 let tp_ty = *substs.types.get(FnSpace, 0);
356 if !type_is_sized(tcx, tp_ty) {
357 let (llsize, _) =
358 glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]);
359 llsize
360 } else {
361 let lltp_ty = type_of::type_of(ccx, tp_ty);
362 C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
363 }
364 }
365 (_, "min_align_of") => {
366 let tp_ty = *substs.types.get(FnSpace, 0);
367 C_uint(ccx, type_of::align_of(ccx, tp_ty))
368 }
369 (_, "min_align_of_val") => {
370 let tp_ty = *substs.types.get(FnSpace, 0);
371 if !type_is_sized(tcx, tp_ty) {
372 let (_, llalign) =
373 glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]);
374 llalign
375 } else {
376 C_uint(ccx, type_of::align_of(ccx, tp_ty))
377 }
378 }
379 (_, "pref_align_of") => {
380 let tp_ty = *substs.types.get(FnSpace, 0);
381 let lltp_ty = type_of::type_of(ccx, tp_ty);
382 C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
383 }
384 (_, "drop_in_place") => {
385 let tp_ty = *substs.types.get(FnSpace, 0);
386 let ptr = if type_is_sized(tcx, tp_ty) {
387 llargs[0]
388 } else {
389 let scratch = rvalue_scratch_datum(bcx, tp_ty, "tmp");
390 Store(bcx, llargs[0], expr::get_dataptr(bcx, scratch.val));
391 Store(bcx, llargs[1], expr::get_meta(bcx, scratch.val));
392 fcx.schedule_lifetime_end(cleanup::CustomScope(cleanup_scope), scratch.val);
393 scratch.val
394 };
395 glue::drop_ty(bcx, ptr, tp_ty, call_debug_location);
396 C_nil(ccx)
397 }
398 (_, "type_name") => {
399 let tp_ty = *substs.types.get(FnSpace, 0);
400 let ty_name = token::intern_and_get_ident(&tp_ty.to_string());
401 C_str_slice(ccx, ty_name)
402 }
403 (_, "type_id") => {
404 C_u64(ccx, ccx.tcx().type_id_hash(*substs.types.get(FnSpace, 0)))
405 }
406 (_, "init_dropped") => {
407 let tp_ty = *substs.types.get(FnSpace, 0);
408 if !type_is_zero_size(ccx, tp_ty) {
409 drop_done_fill_mem(bcx, llresult, tp_ty);
410 }
411 C_nil(ccx)
412 }
413 (_, "init") => {
414 let tp_ty = *substs.types.get(FnSpace, 0);
415 if !type_is_zero_size(ccx, tp_ty) {
416 // Just zero out the stack slot. (See comment on base::memzero for explanation)
417 init_zero_mem(bcx, llresult, tp_ty);
418 }
419 C_nil(ccx)
420 }
421 // Effectively no-ops
422 (_, "uninit") | (_, "forget") => {
423 C_nil(ccx)
424 }
425 (_, "needs_drop") => {
426 let tp_ty = *substs.types.get(FnSpace, 0);
427
428 C_bool(ccx, bcx.fcx.type_needs_drop(tp_ty))
429 }
430 (_, "offset") => {
431 let ptr = llargs[0];
432 let offset = llargs[1];
433 InBoundsGEP(bcx, ptr, &[offset])
434 }
435 (_, "arith_offset") => {
436 let ptr = llargs[0];
437 let offset = llargs[1];
438 GEP(bcx, ptr, &[offset])
439 }
440
441 (_, "copy_nonoverlapping") => {
442 copy_intrinsic(bcx,
443 false,
444 false,
445 *substs.types.get(FnSpace, 0),
446 llargs[1],
447 llargs[0],
448 llargs[2],
449 call_debug_location)
450 }
451 (_, "copy") => {
452 copy_intrinsic(bcx,
453 true,
454 false,
455 *substs.types.get(FnSpace, 0),
456 llargs[1],
457 llargs[0],
458 llargs[2],
459 call_debug_location)
460 }
461 (_, "write_bytes") => {
462 memset_intrinsic(bcx,
463 false,
464 *substs.types.get(FnSpace, 0),
465 llargs[0],
466 llargs[1],
467 llargs[2],
468 call_debug_location)
469 }
470
471 (_, "volatile_copy_nonoverlapping_memory") => {
472 copy_intrinsic(bcx,
473 false,
474 true,
475 *substs.types.get(FnSpace, 0),
476 llargs[0],
477 llargs[1],
478 llargs[2],
479 call_debug_location)
480 }
481 (_, "volatile_copy_memory") => {
482 copy_intrinsic(bcx,
483 true,
484 true,
485 *substs.types.get(FnSpace, 0),
486 llargs[0],
487 llargs[1],
488 llargs[2],
489 call_debug_location)
490 }
491 (_, "volatile_set_memory") => {
492 memset_intrinsic(bcx,
493 true,
494 *substs.types.get(FnSpace, 0),
495 llargs[0],
496 llargs[1],
497 llargs[2],
498 call_debug_location)
499 }
500 (_, "volatile_load") => {
501 let tp_ty = *substs.types.get(FnSpace, 0);
502 let mut ptr = llargs[0];
503 if let Some(ty) = fn_ty.ret.cast {
504 ptr = PointerCast(bcx, ptr, ty.ptr_to());
505 }
506 let load = VolatileLoad(bcx, ptr);
507 unsafe {
508 llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty));
509 }
510 to_immediate(bcx, load, tp_ty)
511 },
512 (_, "volatile_store") => {
513 let tp_ty = *substs.types.get(FnSpace, 0);
514 if type_is_fat_ptr(bcx.tcx(), tp_ty) {
515 VolatileStore(bcx, llargs[1], expr::get_dataptr(bcx, llargs[0]));
516 VolatileStore(bcx, llargs[2], expr::get_meta(bcx, llargs[0]));
517 } else {
518 let val = if fn_ty.args[1].is_indirect() {
519 Load(bcx, llargs[1])
520 } else {
521 from_immediate(bcx, llargs[1])
522 };
523 let ptr = PointerCast(bcx, llargs[0], val_ty(val).ptr_to());
524 let store = VolatileStore(bcx, val, ptr);
525 unsafe {
526 llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty));
527 }
528 }
529 C_nil(ccx)
530 },
531
532 (_, "ctlz") | (_, "cttz") | (_, "ctpop") | (_, "bswap") |
533 (_, "add_with_overflow") | (_, "sub_with_overflow") | (_, "mul_with_overflow") |
534 (_, "overflowing_add") | (_, "overflowing_sub") | (_, "overflowing_mul") |
535 (_, "unchecked_div") | (_, "unchecked_rem") => {
536 let sty = &arg_tys[0].sty;
537 match int_type_width_signed(sty, ccx) {
538 Some((width, signed)) =>
539 match &*name {
540 "ctlz" => count_zeros_intrinsic(bcx, &format!("llvm.ctlz.i{}", width),
541 llargs[0], call_debug_location),
542 "cttz" => count_zeros_intrinsic(bcx, &format!("llvm.cttz.i{}", width),
543 llargs[0], call_debug_location),
544 "ctpop" => Call(bcx, ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
545 &llargs, call_debug_location),
546 "bswap" => {
547 if width == 8 {
548                         llargs[0] // byte-swapping a u8/i8 is just a no-op
549 } else {
550 Call(bcx, ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
551 &llargs, call_debug_location)
552 }
553 }
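                    // e.g. (illustrative) "add_with_overflow" on an i32 argument becomes a call
                    // to "llvm.sadd.with.overflow.i32", and on a u32 to "llvm.uadd.with.overflow.i32".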
554 "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
555 let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
556 if signed { 's' } else { 'u' },
557 &name[..3], width);
558 with_overflow_intrinsic(bcx, &intrinsic, llargs[0], llargs[1], llresult,
559 call_debug_location)
560 },
561 "overflowing_add" => Add(bcx, llargs[0], llargs[1], call_debug_location),
562 "overflowing_sub" => Sub(bcx, llargs[0], llargs[1], call_debug_location),
563 "overflowing_mul" => Mul(bcx, llargs[0], llargs[1], call_debug_location),
564 "unchecked_div" =>
565 if signed {
566 SDiv(bcx, llargs[0], llargs[1], call_debug_location)
567 } else {
568 UDiv(bcx, llargs[0], llargs[1], call_debug_location)
569 },
570 "unchecked_rem" =>
571 if signed {
572 SRem(bcx, llargs[0], llargs[1], call_debug_location)
573 } else {
574 URem(bcx, llargs[0], llargs[1], call_debug_location)
575 },
576 _ => bug!(),
577 },
578 None => {
579 span_invalid_monomorphization_error(
580 tcx.sess, span,
581 &format!("invalid monomorphization of `{}` intrinsic: \
582 expected basic integer type, found `{}`", name, sty));
583 C_nil(ccx)
584 }
585 }
586
587 },
588 (_, "fadd_fast") | (_, "fsub_fast") | (_, "fmul_fast") | (_, "fdiv_fast") |
589 (_, "frem_fast") => {
590 let sty = &arg_tys[0].sty;
591 match float_type_width(sty) {
592 Some(_width) =>
593 match &*name {
594 "fadd_fast" => FAddFast(bcx, llargs[0], llargs[1], call_debug_location),
595 "fsub_fast" => FSubFast(bcx, llargs[0], llargs[1], call_debug_location),
596 "fmul_fast" => FMulFast(bcx, llargs[0], llargs[1], call_debug_location),
597 "fdiv_fast" => FDivFast(bcx, llargs[0], llargs[1], call_debug_location),
598 "frem_fast" => FRemFast(bcx, llargs[0], llargs[1], call_debug_location),
599 _ => bug!(),
600 },
601 None => {
602 span_invalid_monomorphization_error(
603 tcx.sess, span,
604 &format!("invalid monomorphization of `{}` intrinsic: \
605 expected basic float type, found `{}`", name, sty));
606 C_nil(ccx)
607 }
608 }
609
610 },
611
612 (_, "discriminant_value") => {
613 let val_ty = substs.types.get(FnSpace, 0);
614 match val_ty.sty {
615 ty::TyEnum(..) => {
616 let repr = adt::represent_type(ccx, *val_ty);
617 adt::trans_get_discr(bcx, &repr, llargs[0],
618 Some(llret_ty), true)
619 }
620 _ => C_null(llret_ty)
621 }
622 }
623 (_, name) if name.starts_with("simd_") => {
624 generic_simd_intrinsic(bcx, name,
625 substs,
626 callee_ty,
627 expr_arguments,
628 &llargs,
629 ret_ty, llret_ty,
630 call_debug_location,
631 span)
632 }
633 // This requires that atomic intrinsics follow a specific naming pattern:
634 // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
635 (_, name) if name.starts_with("atomic_") => {
636 use llvm::AtomicOrdering::*;
637
638 let split: Vec<&str> = name.split('_').collect();
639
640 let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
641 let (order, failorder) = match split.len() {
642 2 => (SequentiallyConsistent, SequentiallyConsistent),
643 3 => match split[2] {
644 "unordered" => (Unordered, Unordered),
645 "relaxed" => (Monotonic, Monotonic),
646 "acq" => (Acquire, Acquire),
647 "rel" => (Release, Monotonic),
648 "acqrel" => (AcquireRelease, Acquire),
649 "failrelaxed" if is_cxchg =>
650 (SequentiallyConsistent, Monotonic),
651 "failacq" if is_cxchg =>
652 (SequentiallyConsistent, Acquire),
653 _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
654 },
655 4 => match (split[2], split[3]) {
656 ("acq", "failrelaxed") if is_cxchg =>
657 (Acquire, Monotonic),
658 ("acqrel", "failrelaxed") if is_cxchg =>
659 (AcquireRelease, Monotonic),
660 _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
661 },
662 _ => ccx.sess().fatal("Atomic intrinsic not in correct format"),
663 };
664
665 match split[1] {
666 "cxchg" | "cxchgweak" => {
667 let sty = &substs.types.get(FnSpace, 0).sty;
668 if int_type_width_signed(sty, ccx).is_some() {
669 let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False };
670 let val = AtomicCmpXchg(bcx, llargs[0], llargs[1], llargs[2],
671 order, failorder, weak);
672 let result = ExtractValue(bcx, val, 0);
673 let success = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
674 Store(bcx, result, StructGEP(bcx, llresult, 0));
675 Store(bcx, success, StructGEP(bcx, llresult, 1));
676 } else {
677 span_invalid_monomorphization_error(
678 tcx.sess, span,
679 &format!("invalid monomorphization of `{}` intrinsic: \
680 expected basic integer type, found `{}`", name, sty));
681 }
682 C_nil(ccx)
683 }
684
685 "load" => {
686 let sty = &substs.types.get(FnSpace, 0).sty;
687 if int_type_width_signed(sty, ccx).is_some() {
688 AtomicLoad(bcx, llargs[0], order)
689 } else {
690 span_invalid_monomorphization_error(
691 tcx.sess, span,
692 &format!("invalid monomorphization of `{}` intrinsic: \
693 expected basic integer type, found `{}`", name, sty));
694 C_nil(ccx)
695 }
696 }
697
698 "store" => {
699 let sty = &substs.types.get(FnSpace, 0).sty;
700 if int_type_width_signed(sty, ccx).is_some() {
701 AtomicStore(bcx, llargs[1], llargs[0], order);
702 } else {
703 span_invalid_monomorphization_error(
704 tcx.sess, span,
705 &format!("invalid monomorphization of `{}` intrinsic: \
706 expected basic integer type, found `{}`", name, sty));
707 }
708 C_nil(ccx)
709 }
710
711 "fence" => {
712 AtomicFence(bcx, order, llvm::SynchronizationScope::CrossThread);
713 C_nil(ccx)
714 }
715
716 "singlethreadfence" => {
717 AtomicFence(bcx, order, llvm::SynchronizationScope::SingleThread);
718 C_nil(ccx)
719 }
720
721 // These are all AtomicRMW ops
722 op => {
723 let atom_op = match op {
724 "xchg" => llvm::AtomicXchg,
725 "xadd" => llvm::AtomicAdd,
726 "xsub" => llvm::AtomicSub,
727 "and" => llvm::AtomicAnd,
728 "nand" => llvm::AtomicNand,
729 "or" => llvm::AtomicOr,
730 "xor" => llvm::AtomicXor,
731 "max" => llvm::AtomicMax,
732 "min" => llvm::AtomicMin,
733 "umax" => llvm::AtomicUMax,
734 "umin" => llvm::AtomicUMin,
735 _ => ccx.sess().fatal("unknown atomic operation")
736 };
737
738 let sty = &substs.types.get(FnSpace, 0).sty;
739 if int_type_width_signed(sty, ccx).is_some() {
740 AtomicRMW(bcx, atom_op, llargs[0], llargs[1], order)
741 } else {
742 span_invalid_monomorphization_error(
743 tcx.sess, span,
744 &format!("invalid monomorphization of `{}` intrinsic: \
745 expected basic integer type, found `{}`", name, sty));
746 C_nil(ccx)
747 }
748 }
749 }
750
751 }
752
753 (_, _) => {
754 let intr = match Intrinsic::find(&name) {
755 Some(intr) => intr,
756 None => bug!("unknown intrinsic '{}'", name),
757 };
758 fn one<T>(x: Vec<T>) -> T {
759 assert_eq!(x.len(), 1);
760 x.into_iter().next().unwrap()
761 }
762 fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type,
763 any_changes_needed: &mut bool) -> Vec<Type> {
764 use intrinsics::Type::*;
765 match *t {
766 Void => vec![Type::void(ccx)],
767 Integer(_signed, width, llvm_width) => {
768 *any_changes_needed |= width != llvm_width;
769 vec![Type::ix(ccx, llvm_width as u64)]
770 }
771 Float(x) => {
772 match x {
773 32 => vec![Type::f32(ccx)],
774 64 => vec![Type::f64(ccx)],
775 _ => bug!()
776 }
777 }
778 Pointer(ref t, ref llvm_elem, _const) => {
779 *any_changes_needed |= llvm_elem.is_some();
780
781 let t = llvm_elem.as_ref().unwrap_or(t);
782 let elem = one(ty_to_type(ccx, t,
783 any_changes_needed));
784 vec![elem.ptr_to()]
785 }
786 Vector(ref t, ref llvm_elem, length) => {
787 *any_changes_needed |= llvm_elem.is_some();
788
789 let t = llvm_elem.as_ref().unwrap_or(t);
790 let elem = one(ty_to_type(ccx, t,
791 any_changes_needed));
792 vec![Type::vector(&elem,
793 length as u64)]
794 }
795 Aggregate(false, ref contents) => {
796 let elems = contents.iter()
797 .map(|t| one(ty_to_type(ccx, t, any_changes_needed)))
798 .collect::<Vec<_>>();
799 vec![Type::struct_(ccx, &elems, false)]
800 }
801 Aggregate(true, ref contents) => {
802 *any_changes_needed = true;
803 contents.iter()
804 .flat_map(|t| ty_to_type(ccx, t, any_changes_needed))
805 .collect()
806 }
807 }
808 }
809
810             // This allows an argument list like `foo, (bar, baz),
811             // qux` to be flattened into `foo, bar, baz, qux`, integer
812             // arguments to be truncated as needed, and pointers to be
813             // cast.
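            // For example (illustrative): an intrinsic whose signature takes an aggregate
            // such as `(i32, i32)` is passed to LLVM as two scalar arguments, loaded field
            // by field in the Aggregate(true, ..) arm below; pointer arguments may be
            // pointer-cast and narrow integer arguments truncated to match the LLVM
            // signature.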
814 fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
815 t: &intrinsics::Type,
816 arg_type: Ty<'tcx>,
817 llarg: ValueRef)
818 -> Vec<ValueRef>
819 {
820 match *t {
821 intrinsics::Type::Aggregate(true, ref contents) => {
822 // We found a tuple that needs squishing! So
823 // run over the tuple and load each field.
824 //
825                         // This assumes the type is "simple", i.e. it has no
826                         // destructors and its contents are SIMD vectors or
827                         // other plain by-value data.
828 assert!(!bcx.fcx.type_needs_drop(arg_type));
829
830 let repr = adt::represent_type(bcx.ccx(), arg_type);
831 let repr_ptr = &repr;
832 let arg = adt::MaybeSizedValue::sized(llarg);
833 (0..contents.len())
834 .map(|i| {
835 Load(bcx, adt::trans_field_ptr(bcx, repr_ptr, arg, Disr(0), i))
836 })
837 .collect()
838 }
839 intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
840 let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
841 vec![PointerCast(bcx, llarg,
842 llvm_elem.ptr_to())]
843 }
844 intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
845 let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
846 vec![BitCast(bcx, llarg,
847 Type::vector(&llvm_elem, length as u64))]
848 }
849 intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
850 // the LLVM intrinsic uses a smaller integer
851 // size than the C intrinsic's signature, so
852 // we have to trim it down here.
853 vec![Trunc(bcx, llarg, Type::ix(bcx.ccx(), llvm_width as u64))]
854 }
855 _ => vec![llarg],
856 }
857 }
858
859
860 let mut any_changes_needed = false;
861 let inputs = intr.inputs.iter()
862 .flat_map(|t| ty_to_type(ccx, t, &mut any_changes_needed))
863 .collect::<Vec<_>>();
864
865 let mut out_changes = false;
866 let outputs = one(ty_to_type(ccx, &intr.output, &mut out_changes));
867 // outputting a flattened aggregate is nonsense
868 assert!(!out_changes);
869
870 let llargs = if !any_changes_needed {
871 // no aggregates to flatten, so no change needed
872 llargs
873 } else {
874 // there are some aggregates that need to be flattened
875 // in the LLVM call, so we need to run over the types
876 // again to find them and extract the arguments
877 intr.inputs.iter()
878 .zip(&llargs)
879 .zip(&arg_tys)
880 .flat_map(|((t, llarg), ty)| modify_as_needed(bcx, t, ty, *llarg))
881 .collect()
882 };
883 assert_eq!(inputs.len(), llargs.len());
884
885 let val = match intr.definition {
886 intrinsics::IntrinsicDef::Named(name) => {
887 let f = declare::declare_cfn(ccx,
888 name,
889 Type::func(&inputs, &outputs));
890 Call(bcx, f, &llargs, call_debug_location)
891 }
892 };
893
894 match *intr.output {
895 intrinsics::Type::Aggregate(flatten, ref elems) => {
896                     // the output is a tuple, so store each element into the result slot separately
897 assert!(!flatten);
898
899 for i in 0..elems.len() {
900 let val = ExtractValue(bcx, val, i);
901 Store(bcx, val, StructGEP(bcx, llresult, i));
902 }
903 C_nil(ccx)
904 }
905 _ => val,
906 }
907 }
908 };
909
910 if val_ty(llval) != Type::void(ccx) &&
911 machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
912 if let Some(ty) = fn_ty.ret.cast {
913 let ptr = PointerCast(bcx, llresult, ty.ptr_to());
914 let store = Store(bcx, llval, ptr);
915 unsafe {
916 llvm::LLVMSetAlignment(store, type_of::align_of(ccx, ret_ty));
917 }
918 } else {
919 store_ty(bcx, llval, llresult, ret_ty);
920 }
921 }
922
923 // If we made a temporary stack slot, let's clean it up
924 match dest {
925 expr::Ignore => {
926 bcx = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location);
927 call_lifetime_end(bcx, llresult);
928 }
929 expr::SaveIn(_) => {}
930 }
931
932 fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
933
934 Result::new(bcx, llresult)
935 }
936
937 fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
938 allow_overlap: bool,
939 volatile: bool,
940 tp_ty: Ty<'tcx>,
941 dst: ValueRef,
942 src: ValueRef,
943 count: ValueRef,
944 call_debug_location: DebugLoc)
945 -> ValueRef {
946 let ccx = bcx.ccx();
947 let lltp_ty = type_of::type_of(ccx, tp_ty);
948 let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
949 let size = machine::llsize_of(ccx, lltp_ty);
950 let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());
951
952 let operation = if allow_overlap {
953 "memmove"
954 } else {
955 "memcpy"
956 };
957
958 let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, int_size);
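    // e.g. (illustrative) "llvm.memcpy.p0i8.p0i8.i64" or "llvm.memmove.p0i8.p0i8.i64"
    // on a 64-bit target, since int_size is the width of the target's pointer-sized
    // integer type.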
959
960 let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
961 let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
962 let llfn = ccx.get_intrinsic(&name);
963
964 Call(bcx,
965 llfn,
966 &[dst_ptr,
967 src_ptr,
968 Mul(bcx, size, count, DebugLoc::None),
969 align,
970 C_bool(ccx, volatile)],
971 call_debug_location)
972 }
973
974 fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
975 volatile: bool,
976 tp_ty: Ty<'tcx>,
977 dst: ValueRef,
978 val: ValueRef,
979 count: ValueRef,
980 call_debug_location: DebugLoc)
981 -> ValueRef {
982 let ccx = bcx.ccx();
983 let lltp_ty = type_of::type_of(ccx, tp_ty);
984 let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
985 let size = machine::llsize_of(ccx, lltp_ty);
986 let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());
987
988 let name = format!("llvm.memset.p0i8.i{}", int_size);
989
990 let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
991 let llfn = ccx.get_intrinsic(&name);
992
993 Call(bcx,
994 llfn,
995 &[dst_ptr,
996 val,
997 Mul(bcx, size, count, DebugLoc::None),
998 align,
999 C_bool(ccx, volatile)],
1000 call_debug_location)
1001 }
1002
1003 fn count_zeros_intrinsic(bcx: Block,
1004 name: &str,
1005 val: ValueRef,
1006 call_debug_location: DebugLoc)
1007 -> ValueRef {
1008 let y = C_bool(bcx.ccx(), false);
1009 let llfn = bcx.ccx().get_intrinsic(&name);
1010 Call(bcx, llfn, &[val, y], call_debug_location)
1011 }
1012
1013 fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1014 name: &str,
1015 a: ValueRef,
1016 b: ValueRef,
1017 out: ValueRef,
1018 call_debug_location: DebugLoc)
1019 -> ValueRef {
1020 let llfn = bcx.ccx().get_intrinsic(&name);
1021
1022 // Convert `i1` to a `bool`, and write it to the out parameter
1023 let val = Call(bcx, llfn, &[a, b], call_debug_location);
1024 let result = ExtractValue(bcx, val, 0);
1025 let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
1026 Store(bcx, result, StructGEP(bcx, out, 0));
1027 Store(bcx, overflow, StructGEP(bcx, out, 1));
1028
1029 C_nil(bcx.ccx())
1030 }
1031
1032 fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1033 func: ValueRef,
1034 data: ValueRef,
1035 local_ptr: ValueRef,
1036 dest: ValueRef,
1037 dloc: DebugLoc) -> Block<'blk, 'tcx> {
1038 if bcx.sess().no_landing_pads() {
1039 Call(bcx, func, &[data], dloc);
1040 Store(bcx, C_null(Type::i8p(bcx.ccx())), dest);
1041 bcx
1042 } else if wants_msvc_seh(bcx.sess()) {
1043 trans_msvc_try(bcx, func, data, local_ptr, dest, dloc)
1044 } else {
1045 trans_gnu_try(bcx, func, data, local_ptr, dest, dloc)
1046 }
1047 }
1048
1049 // MSVC's definition of the `rust_try` function.
1050 //
1051 // This implementation uses the new exception-handling instructions in LLVM,
1052 // which LLVM supports for SEH on MSVC targets. Although these instructions
1053 // are meant to work for all targets, as of this writing LLVM does not
1054 // recommend using them elsewhere, since the old instructions are still
1055 // better optimized.
1056 fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1057 func: ValueRef,
1058 data: ValueRef,
1059 local_ptr: ValueRef,
1060 dest: ValueRef,
1061 dloc: DebugLoc) -> Block<'blk, 'tcx> {
1062 let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
1063 let ccx = bcx.ccx();
1064 let dloc = DebugLoc::None;
1065
1066 SetPersonalityFn(bcx, bcx.fcx.eh_personality());
1067
1068 let normal = bcx.fcx.new_temp_block("normal");
1069 let catchswitch = bcx.fcx.new_temp_block("catchswitch");
1070 let catchpad = bcx.fcx.new_temp_block("catchpad");
1071 let caught = bcx.fcx.new_temp_block("caught");
1072
1073 let func = llvm::get_param(bcx.fcx.llfn, 0);
1074 let data = llvm::get_param(bcx.fcx.llfn, 1);
1075 let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);
1076
1077 // We're generating an IR snippet that looks like:
1078 //
1079 // declare i32 @rust_try(%func, %data, %ptr) {
1080 // %slot = alloca i64*
1081 // invoke %func(%data) to label %normal unwind label %catchswitch
1082 //
1083 // normal:
1084 // ret i32 0
1085 //
1086 // catchswitch:
1087 // %cs = catchswitch within none [%catchpad] unwind to caller
1088 //
1089 // catchpad:
1090 // %tok = catchpad within %cs [%type_descriptor, 0, %slot]
1091 // %ptr[0] = %slot[0]
1092 // %ptr[1] = %slot[1]
1093 // catchret from %tok to label %caught
1094 //
1095 // caught:
1096 // ret i32 1
1097 // }
1098 //
1099 // This structure follows the basic usage of throw/try/catch in LLVM.
1100 // For example, compile this C++ snippet to see what LLVM generates:
1101 //
1102 // #include <stdint.h>
1103 //
1104 // int bar(void (*foo)(void), uint64_t *ret) {
1105 // try {
1106 // foo();
1107 // return 0;
1108 // } catch(uint64_t a[2]) {
1109 // ret[0] = a[0];
1110 // ret[1] = a[1];
1111 // return 1;
1112 // }
1113 // }
1114 //
1115 // More information can be found in libstd's seh.rs implementation.
1116 let i64p = Type::i64(ccx).ptr_to();
1117 let slot = Alloca(bcx, i64p, "slot");
1118 Invoke(bcx, func, &[data], normal.llbb, catchswitch.llbb, dloc);
1119
1120 Ret(normal, C_i32(ccx, 0), dloc);
1121
1122 let cs = CatchSwitch(catchswitch, None, None, 1);
1123 AddHandler(catchswitch, cs, catchpad.llbb);
1124
1125 let tcx = ccx.tcx();
1126 let tydesc = match tcx.lang_items.msvc_try_filter() {
1127 Some(did) => ::consts::get_static(ccx, did).to_llref(),
1128 None => bug!("msvc_try_filter not defined"),
1129 };
1130 let tok = CatchPad(catchpad, cs, &[tydesc, C_i32(ccx, 0), slot]);
1131 let addr = Load(catchpad, slot);
1132 let arg1 = Load(catchpad, addr);
1133 let val1 = C_i32(ccx, 1);
1134 let arg2 = Load(catchpad, InBoundsGEP(catchpad, addr, &[val1]));
1135 let local_ptr = BitCast(catchpad, local_ptr, i64p);
1136 Store(catchpad, arg1, local_ptr);
1137 Store(catchpad, arg2, InBoundsGEP(catchpad, local_ptr, &[val1]));
1138 CatchRet(catchpad, tok, caught.llbb);
1139
1140 Ret(caught, C_i32(ccx, 1), dloc);
1141 });
1142
1143 // Note that no invoke is used here because by definition this function
1144 // can't panic (that's what it's catching).
1145 let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc);
1146 Store(bcx, ret, dest);
1147 return bcx
1148 }
1149
1150 // Definition of the standard "try" function for Rust using the GNU-like model
1151 // of exceptions (i.e. the normal semantics of LLVM's landingpad and invoke
1152 // instructions).
1153 //
1154 // This translation is a little surprising because we always call a shim
1155 // function instead of inlining the call to `invoke` manually here. This is done
1156 // because in LLVM we're only allowed to have one personality per function
1157 // definition. The call to the `try` intrinsic is being inlined into the
1158 // function calling it, and that function may already have other personality
1159 // functions in play. By calling a shim we're guaranteed that our shim will have
1160 // the right personality function.
1161 fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1162 func: ValueRef,
1163 data: ValueRef,
1164 local_ptr: ValueRef,
1165 dest: ValueRef,
1166 dloc: DebugLoc) -> Block<'blk, 'tcx> {
1167 let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
1168 let ccx = bcx.ccx();
1169 let dloc = DebugLoc::None;
1170
1171 // Translates the shims described above:
1172 //
1173 // bcx:
1174 // invoke %func(%args...) normal %normal unwind %catch
1175 //
1176 // normal:
1177 // ret 0
1178 //
1179 // catch:
1180 // (ptr, _) = landingpad
1181 // store ptr, %local_ptr
1182 // ret 1
1183 //
1184 // Note that the `local_ptr` data passed into the `try` intrinsic is
1185 // expected to be `*mut *mut u8` for this to actually work, but that's
1186 // managed by the standard library.
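        //
        // A rough sketch of the caller side (hypothetical names, mirroring what
        // the standard library's panic machinery does):
        //
        //     let mut payload: *mut u8 = ptr::null_mut();
        //     let r = unsafe {
        //         intrinsics::try(call_closure, data_ptr,
        //                         &mut payload as *mut _ as *mut u8)
        //     };
        //     // r == 0 on a normal return, non-zero if an exception was caught,
        //     // in which case `payload` points at the exception object.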
1187
1188 let then = bcx.fcx.new_temp_block("then");
1189 let catch = bcx.fcx.new_temp_block("catch");
1190
1191 let func = llvm::get_param(bcx.fcx.llfn, 0);
1192 let data = llvm::get_param(bcx.fcx.llfn, 1);
1193 let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);
1194 Invoke(bcx, func, &[data], then.llbb, catch.llbb, dloc);
1195 Ret(then, C_i32(ccx, 0), dloc);
1196
1197 // Type indicator for the exception being thrown.
1198 //
1199 // The first value in this tuple is a pointer to the exception object
1200 // being thrown. The second value is a "selector" indicating which of
1201 // the landing pad clauses the exception's type had been matched to.
1202 // rust_try ignores the selector.
1203 let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
1204 false);
1205 let vals = LandingPad(catch, lpad_ty, bcx.fcx.eh_personality(), 1);
1206 AddClause(catch, vals, C_null(Type::i8p(ccx)));
1207 let ptr = ExtractValue(catch, vals, 0);
1208 Store(catch, ptr, BitCast(catch, local_ptr, Type::i8p(ccx).ptr_to()));
1209 Ret(catch, C_i32(ccx, 1), dloc);
1210 });
1211
1212 // Note that no invoke is used here because by definition this function
1213 // can't panic (that's what it's catching).
1214 let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc);
1215 Store(bcx, ret, dest);
1216 return bcx;
1217 }
1218
1219 // Helper function to give a Block to a closure to translate a shim function.
1220 // This is currently primarily used for the `try` intrinsic functions above.
1221 fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
1222 name: &str,
1223 inputs: Vec<Ty<'tcx>>,
1224 output: Ty<'tcx>,
1225 trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
1226 -> ValueRef {
1227 let ccx = fcx.ccx;
1228 let sig = ty::FnSig {
1229 inputs: inputs,
1230 output: output,
1231 variadic: false,
1232 };
1233 let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]);
1234
1235 let rust_fn_ty = ccx.tcx().mk_fn_ptr(ccx.tcx().mk_bare_fn(ty::BareFnTy {
1236 unsafety: hir::Unsafety::Unsafe,
1237 abi: Abi::Rust,
1238 sig: ty::Binder(sig)
1239 }));
1240 let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty);
1241 let (fcx, block_arena);
1242 block_arena = TypedArena::new();
1243 fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena);
1244 let bcx = fcx.init(true, None);
1245 trans(bcx);
1246 fcx.cleanup();
1247 llfn
1248 }
1249
1250 // Helper function used to get a handle to the `__rust_try` function used to
1251 // catch exceptions.
1252 //
1253 // This function is only generated once and is then cached.
1254 fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
1255 trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
1256 -> ValueRef {
1257 let ccx = fcx.ccx;
1258 if let Some(llfn) = ccx.rust_try_fn().get() {
1259 return llfn;
1260 }
1261
1262 // Define the type up front for the signature of the rust_try function.
1263 let tcx = ccx.tcx();
1264 let i8p = tcx.mk_mut_ptr(tcx.types.i8);
1265 let fn_ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy {
1266 unsafety: hir::Unsafety::Unsafe,
1267 abi: Abi::Rust,
1268 sig: ty::Binder(ty::FnSig {
1269 inputs: vec![i8p],
1270 output: tcx.mk_nil(),
1271 variadic: false,
1272 }),
1273 }));
1274 let output = tcx.types.i32;
1275 let rust_try = gen_fn(fcx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans);
1276 ccx.rust_try_fn().set(Some(rust_try));
1277 return rust_try
1278 }
1279
1280 fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
1281 span_err!(a, b, E0511, "{}", c);
1282 }
1283
1284 fn generic_simd_intrinsic<'blk, 'tcx, 'a>
1285 (bcx: Block<'blk, 'tcx>,
1286 name: &str,
1287 substs: &'tcx subst::Substs<'tcx>,
1288 callee_ty: Ty<'tcx>,
1289 args: Option<&[P<hir::Expr>]>,
1290 llargs: &[ValueRef],
1291 ret_ty: Ty<'tcx>,
1292 llret_ty: Type,
1293 call_debug_location: DebugLoc,
1294 span: Span) -> ValueRef
1295 {
1296 // macros for error handling:
1297 macro_rules! emit_error {
1298 ($msg: tt) => {
1299 emit_error!($msg, )
1300 };
1301 ($msg: tt, $($fmt: tt)*) => {
1302 span_invalid_monomorphization_error(
1303 bcx.sess(), span,
1304 &format!(concat!("invalid monomorphization of `{}` intrinsic: ",
1305 $msg),
1306 name, $($fmt)*));
1307 }
1308 }
1309 macro_rules! require {
1310 ($cond: expr, $($fmt: tt)*) => {
1311 if !$cond {
1312 emit_error!($($fmt)*);
1313 return C_nil(bcx.ccx())
1314 }
1315 }
1316 }
1317 macro_rules! require_simd {
1318 ($ty: expr, $position: expr) => {
1319 require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
1320 }
1321 }
1322
1323
1324
1325 let tcx = bcx.tcx();
1326 let sig = tcx.erase_late_bound_regions(callee_ty.fn_sig());
1327 let sig = tcx.normalize_associated_type(&sig);
1328 let arg_tys = sig.inputs;
1329
1330 // every intrinsic takes a SIMD vector as its first argument
1331 require_simd!(arg_tys[0], "input");
1332 let in_ty = arg_tys[0];
1333 let in_elem = arg_tys[0].simd_type(tcx);
1334 let in_len = arg_tys[0].simd_size(tcx);
1335
1336 let comparison = match name {
1337 "simd_eq" => Some(hir::BiEq),
1338 "simd_ne" => Some(hir::BiNe),
1339 "simd_lt" => Some(hir::BiLt),
1340 "simd_le" => Some(hir::BiLe),
1341 "simd_gt" => Some(hir::BiGt),
1342 "simd_ge" => Some(hir::BiGe),
1343 _ => None
1344 };
1345
1346 if let Some(cmp_op) = comparison {
1347 require_simd!(ret_ty, "return");
1348
1349 let out_len = ret_ty.simd_size(tcx);
1350 require!(in_len == out_len,
1351 "expected return type with length {} (same as input type `{}`), \
1352 found `{}` with length {}",
1353 in_len, in_ty,
1354 ret_ty, out_len);
1355 require!(llret_ty.element_type().kind() == llvm::Integer,
1356 "expected return type with integer elements, found `{}` with non-integer `{}`",
1357 ret_ty,
1358 ret_ty.simd_type(tcx));
1359
1360 return compare_simd_types(bcx,
1361 llargs[0],
1362 llargs[1],
1363 in_elem,
1364 llret_ty,
1365 cmp_op,
1366 call_debug_location)
1367 }
1368
1369 if name.starts_with("simd_shuffle") {
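        // The element count is encoded in the intrinsic name itself: e.g.
        // "simd_shuffle4" parses to n == 4 and "simd_shuffle16" to n == 16.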
1370 let n: usize = match name["simd_shuffle".len()..].parse() {
1371 Ok(n) => n,
1372 Err(_) => span_bug!(span,
1373 "bad `simd_shuffle` instruction only caught in trans?")
1374 };
1375
1376 require_simd!(ret_ty, "return");
1377
1378 let out_len = ret_ty.simd_size(tcx);
1379 require!(out_len == n,
1380 "expected return type of length {}, found `{}` with length {}",
1381 n, ret_ty, out_len);
1382 require!(in_elem == ret_ty.simd_type(tcx),
1383 "expected return element type `{}` (element of input `{}`), \
1384 found `{}` with element type `{}`",
1385 in_elem, in_ty,
1386 ret_ty, ret_ty.simd_type(tcx));
1387
1388 let total_len = in_len as u64 * 2;
1389
1390 let vector = match args {
1391 Some(args) => {
1392 match consts::const_expr(bcx.ccx(), &args[2], substs, None,
1393 // this should probably help simd error reporting
1394 consts::TrueConst::Yes) {
1395 Ok((vector, _)) => vector,
1396 Err(err) => {
1397 fatal_const_eval_err(bcx.tcx(), err.as_inner(), span,
1398 "shuffle indices");
1399 }
1400 }
1401 }
1402 None => llargs[2]
1403 };
1404
1405 let indices: Option<Vec<_>> = (0..n)
1406 .map(|i| {
1407 let arg_idx = i;
1408 let val = const_get_elt(vector, &[i as libc::c_uint]);
1409 match const_to_opt_uint(val) {
1410 None => {
1411 emit_error!("shuffle index #{} is not a constant", arg_idx);
1412 None
1413 }
1414 Some(idx) if idx >= total_len => {
1415 emit_error!("shuffle index #{} is out of bounds (limit {})",
1416 arg_idx, total_len);
1417 None
1418 }
1419 Some(idx) => Some(C_i32(bcx.ccx(), idx as i32)),
1420 }
1421 })
1422 .collect();
1423 let indices = match indices {
1424 Some(i) => i,
1425 None => return C_null(llret_ty)
1426 };
1427
1428 return ShuffleVector(bcx, llargs[0], llargs[1], C_vector(&indices))
1429 }
1430
1431 if name == "simd_insert" {
1432 require!(in_elem == arg_tys[2],
1433 "expected inserted type `{}` (element of input `{}`), found `{}`",
1434 in_elem, in_ty, arg_tys[2]);
1435 return InsertElement(bcx, llargs[0], llargs[2], llargs[1])
1436 }
1437 if name == "simd_extract" {
1438 require!(ret_ty == in_elem,
1439 "expected return type `{}` (element of input `{}`), found `{}`",
1440 in_elem, in_ty, ret_ty);
1441 return ExtractElement(bcx, llargs[0], llargs[1])
1442 }
1443
1444 if name == "simd_cast" {
1445 require_simd!(ret_ty, "return");
1446 let out_len = ret_ty.simd_size(tcx);
1447 require!(in_len == out_len,
1448 "expected return type with length {} (same as input type `{}`), \
1449 found `{}` with length {}",
1450 in_len, in_ty,
1451 ret_ty, out_len);
1452 // casting cares about nominal type, not just structural type
1453 let out_elem = ret_ty.simd_type(tcx);
1454
1455 if in_elem == out_elem { return llargs[0]; }
1456
1457 enum Style { Float, Int(/* is signed? */ bool), Unsupported }
1458
1459 let (in_style, in_width) = match in_elem.sty {
1460 // vectors of pointer-sized integers should've been
1461 // disallowed before here, so this unwrap is safe.
1462 ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
1463 ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
1464 ty::TyFloat(f) => (Style::Float, f.bit_width()),
1465 _ => (Style::Unsupported, 0)
1466 };
1467 let (out_style, out_width) = match out_elem.sty {
1468 ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
1469 ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
1470 ty::TyFloat(f) => (Style::Float, f.bit_width()),
1471 _ => (Style::Unsupported, 0)
1472 };
1473
1474 match (in_style, out_style) {
1475 (Style::Int(in_is_signed), Style::Int(_)) => {
1476 return match in_width.cmp(&out_width) {
1477 Ordering::Greater => Trunc(bcx, llargs[0], llret_ty),
1478 Ordering::Equal => llargs[0],
1479 Ordering::Less => if in_is_signed {
1480 SExt(bcx, llargs[0], llret_ty)
1481 } else {
1482 ZExt(bcx, llargs[0], llret_ty)
1483 }
1484 }
1485 }
1486 (Style::Int(in_is_signed), Style::Float) => {
1487 return if in_is_signed {
1488 SIToFP(bcx, llargs[0], llret_ty)
1489 } else {
1490 UIToFP(bcx, llargs[0], llret_ty)
1491 }
1492 }
1493 (Style::Float, Style::Int(out_is_signed)) => {
1494 return if out_is_signed {
1495 FPToSI(bcx, llargs[0], llret_ty)
1496 } else {
1497 FPToUI(bcx, llargs[0], llret_ty)
1498 }
1499 }
1500 (Style::Float, Style::Float) => {
1501 return match in_width.cmp(&out_width) {
1502 Ordering::Greater => FPTrunc(bcx, llargs[0], llret_ty),
1503 Ordering::Equal => llargs[0],
1504 Ordering::Less => FPExt(bcx, llargs[0], llret_ty)
1505 }
1506 }
1507 _ => {/* Unsupported. Fallthrough. */}
1508 }
1509 require!(false,
1510 "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
1511 in_ty, in_elem,
1512 ret_ty, out_elem);
1513 }
1514 macro_rules! arith {
1515 ($($name: ident: $($($p: ident),* => $call: expr),*;)*) => {
1516 $(
1517 if name == stringify!($name) {
1518 match in_elem.sty {
1519 $(
1520 $(ty::$p(_))|* => {
1521 return $call(bcx, llargs[0], llargs[1], call_debug_location)
1522 }
1523 )*
1524 _ => {},
1525 }
1526 require!(false,
1527 "unsupported operation on `{}` with element `{}`",
1528 in_ty,
1529 in_elem)
1530 })*
1531 }
1532 }
1533 arith! {
1534 simd_add: TyUint, TyInt => Add, TyFloat => FAdd;
1535 simd_sub: TyUint, TyInt => Sub, TyFloat => FSub;
1536 simd_mul: TyUint, TyInt => Mul, TyFloat => FMul;
1537 simd_div: TyFloat => FDiv;
1538 simd_shl: TyUint, TyInt => Shl;
1539 simd_shr: TyUint => LShr, TyInt => AShr;
1540 simd_and: TyUint, TyInt => And;
1541 simd_or: TyUint, TyInt => Or;
1542 simd_xor: TyUint, TyInt => Xor;
1543 }
1544 span_bug!(span, "unknown SIMD intrinsic");
1545 }
1546
1547 // Returns the width of an integer TypeVariant and whether it is signed.
1548 // Returns None if the type is not an integer.
1549 fn int_type_width_signed<'tcx>(sty: &ty::TypeVariants<'tcx>, ccx: &CrateContext)
1550 -> Option<(u64, bool)> {
1551 use rustc::ty::{TyInt, TyUint};
1552 match *sty {
1553 TyInt(t) => Some((match t {
1554 ast::IntTy::Is => {
1555 match &ccx.tcx().sess.target.target.target_pointer_width[..] {
1556 "16" => 16,
1557 "32" => 32,
1558 "64" => 64,
1559 tws => bug!("Unsupported target word size for isize: {}", tws),
1560 }
1561 },
1562 ast::IntTy::I8 => 8,
1563 ast::IntTy::I16 => 16,
1564 ast::IntTy::I32 => 32,
1565 ast::IntTy::I64 => 64,
1566 }, true)),
1567 TyUint(t) => Some((match t {
1568 ast::UintTy::Us => {
1569 match &ccx.tcx().sess.target.target.target_pointer_width[..] {
1570 "16" => 16,
1571 "32" => 32,
1572 "64" => 64,
1573 tws => bug!("Unsupported target word size for usize: {}", tws),
1574 }
1575 },
1576 ast::UintTy::U8 => 8,
1577 ast::UintTy::U16 => 16,
1578 ast::UintTy::U32 => 32,
1579 ast::UintTy::U64 => 64,
1580 }, false)),
1581 _ => None,
1582 }
1583 }
1584
1585 // Returns the width of a float TypeVariant
1586 // Returns None if the type is not a float
1587 fn float_type_width<'tcx>(sty: &ty::TypeVariants<'tcx>)
1588 -> Option<u64> {
1589 use rustc::ty::TyFloat;
1590 match *sty {
1591 TyFloat(t) => Some(match t {
1592 ast::FloatTy::F32 => 32,
1593 ast::FloatTy::F64 => 64,
1594 }),
1595 _ => None,
1596 }
1597 }