Imported Upstream version 1.8.0+dfsg1
[rustc.git] / src / librustc_trans / trans / intrinsic.rs
1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 #![allow(non_upper_case_globals)]
12
13 use arena::TypedArena;
14 use intrinsics::{self, Intrinsic};
15 use libc;
16 use llvm;
17 use llvm::{ValueRef, TypeKind};
18 use middle::infer;
19 use middle::subst;
20 use middle::subst::FnSpace;
21 use trans::adt;
22 use trans::attributes;
23 use trans::base::*;
24 use trans::build::*;
25 use trans::callee;
26 use trans::cleanup;
27 use trans::cleanup::CleanupMethods;
28 use trans::common::*;
29 use trans::consts;
30 use trans::datum::*;
31 use trans::debuginfo::DebugLoc;
32 use trans::declare;
33 use trans::expr;
34 use trans::glue;
35 use trans::type_of;
36 use trans::machine;
37 use trans::type_::Type;
38 use middle::ty::{self, Ty, TypeFoldable};
39 use trans::Disr;
40 use middle::subst::Substs;
41 use rustc::dep_graph::DepNode;
42 use rustc_front::hir;
43 use syntax::abi::Abi;
44 use syntax::ast;
45 use syntax::ptr::P;
46 use syntax::parse::token;
47
48 use rustc::session::Session;
49 use syntax::codemap::Span;
50
51 use std::cmp::Ordering;
52
53 pub fn get_simple_intrinsic(ccx: &CrateContext, item: &hir::ForeignItem) -> Option<ValueRef> {
54 let name = match &*item.name.as_str() {
55 "sqrtf32" => "llvm.sqrt.f32",
56 "sqrtf64" => "llvm.sqrt.f64",
57 "powif32" => "llvm.powi.f32",
58 "powif64" => "llvm.powi.f64",
59 "sinf32" => "llvm.sin.f32",
60 "sinf64" => "llvm.sin.f64",
61 "cosf32" => "llvm.cos.f32",
62 "cosf64" => "llvm.cos.f64",
63 "powf32" => "llvm.pow.f32",
64 "powf64" => "llvm.pow.f64",
65 "expf32" => "llvm.exp.f32",
66 "expf64" => "llvm.exp.f64",
67 "exp2f32" => "llvm.exp2.f32",
68 "exp2f64" => "llvm.exp2.f64",
69 "logf32" => "llvm.log.f32",
70 "logf64" => "llvm.log.f64",
71 "log10f32" => "llvm.log10.f32",
72 "log10f64" => "llvm.log10.f64",
73 "log2f32" => "llvm.log2.f32",
74 "log2f64" => "llvm.log2.f64",
75 "fmaf32" => "llvm.fma.f32",
76 "fmaf64" => "llvm.fma.f64",
77 "fabsf32" => "llvm.fabs.f32",
78 "fabsf64" => "llvm.fabs.f64",
79 "copysignf32" => "llvm.copysign.f32",
80 "copysignf64" => "llvm.copysign.f64",
81 "floorf32" => "llvm.floor.f32",
82 "floorf64" => "llvm.floor.f64",
83 "ceilf32" => "llvm.ceil.f32",
84 "ceilf64" => "llvm.ceil.f64",
85 "truncf32" => "llvm.trunc.f32",
86 "truncf64" => "llvm.trunc.f64",
87 "rintf32" => "llvm.rint.f32",
88 "rintf64" => "llvm.rint.f64",
89 "nearbyintf32" => "llvm.nearbyint.f32",
90 "nearbyintf64" => "llvm.nearbyint.f64",
91 "roundf32" => "llvm.round.f32",
92 "roundf64" => "llvm.round.f64",
93 "assume" => "llvm.assume",
94 _ => return None
95 };
96 Some(ccx.get_intrinsic(&name))
97 }
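// Usage sketch (not part of this file): a user-level call to one of these
// intrinsics, e.g. `unsafe { intrinsics::sqrtf32(x) }`, is lowered through
// the table above into a direct call to the corresponding LLVM intrinsic
// (`llvm.sqrt.f32` in this case), with no Rust-side wrapper in between.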
98
99 pub fn span_transmute_size_error(a: &Session, b: Span, msg: &str) {
100 span_err!(a, b, E0512, "{}", msg);
101 }
102
103 /// Performs late verification that intrinsics are used correctly. At present,
104 /// the only intrinsic that needs such verification is `transmute`.
105 pub fn check_intrinsics(ccx: &CrateContext) {
106 let _task = ccx.tcx().dep_graph.in_task(DepNode::IntrinsicUseCheck);
107 let mut last_failing_id = None;
108 for transmute_restriction in ccx.tcx().transmute_restrictions.borrow().iter() {
109 // Sometimes, a single call to transmute will push multiple
110 // type pairs to test, in order to exhaustively cover the
111 // possibilities introduced by a type parameter. If one of those
112 // fails, there is no sense in reporting errors on the others.
113 if last_failing_id == Some(transmute_restriction.id) {
114 continue;
115 }
116
117 debug!("transmute_restriction: {:?}", transmute_restriction);
118
119 assert!(!transmute_restriction.substituted_from.has_param_types());
120 assert!(!transmute_restriction.substituted_to.has_param_types());
121
122 let llfromtype = type_of::sizing_type_of(ccx,
123 transmute_restriction.substituted_from);
124 let lltotype = type_of::sizing_type_of(ccx,
125 transmute_restriction.substituted_to);
126 let from_type_size = machine::llbitsize_of_real(ccx, llfromtype);
127 let to_type_size = machine::llbitsize_of_real(ccx, lltotype);
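// For example, `transmute::<u32, u64>` gives from_type_size == 32 and
// to_type_size == 64, so it takes the branch below and is reported as
// E0512 ("transmute called with differently sized types").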
128 if from_type_size != to_type_size {
129 last_failing_id = Some(transmute_restriction.id);
130
131 if transmute_restriction.original_from != transmute_restriction.substituted_from {
132 span_transmute_size_error(ccx.sess(), transmute_restriction.span,
133 &format!("transmute called with differently sized types: \
134 {} (could be {} bit{}) to {} (could be {} bit{})",
135 transmute_restriction.original_from,
136 from_type_size as usize,
137 if from_type_size == 1 {""} else {"s"},
138 transmute_restriction.original_to,
139 to_type_size as usize,
140 if to_type_size == 1 {""} else {"s"}));
141 } else {
142 span_transmute_size_error(ccx.sess(), transmute_restriction.span,
143 &format!("transmute called with differently sized types: \
144 {} ({} bit{}) to {} ({} bit{})",
145 transmute_restriction.original_from,
146 from_type_size as usize,
147 if from_type_size == 1 {""} else {"s"},
148 transmute_restriction.original_to,
149 to_type_size as usize,
150 if to_type_size == 1 {""} else {"s"}));
151 }
152 }
153 }
154 ccx.sess().abort_if_errors();
155 }
156
157 /// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
158 /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
159 /// add them to librustc_trans/trans/context.rs
160 pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
161 node: ast::NodeId,
162 callee_ty: Ty<'tcx>,
163 cleanup_scope: cleanup::CustomScopeIndex,
164 args: callee::CallArgs<'a, 'tcx>,
165 dest: expr::Dest,
166 substs: subst::Substs<'tcx>,
167 call_info: NodeIdAndSpan)
168 -> Result<'blk, 'tcx> {
169 let fcx = bcx.fcx;
170 let ccx = fcx.ccx;
171 let tcx = bcx.tcx();
172
173 let _icx = push_ctxt("trans_intrinsic_call");
174
175 let sig = ccx.tcx().erase_late_bound_regions(callee_ty.fn_sig());
176 let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
177 let arg_tys = sig.inputs;
178 let ret_ty = sig.output;
179 let foreign_item = tcx.map.expect_foreign_item(node);
180 let name = foreign_item.name.as_str();
181
182 // For `transmute` we can just trans the input expr directly into dest
183 if name == "transmute" {
184 let llret_ty = type_of::type_of(ccx, ret_ty.unwrap());
185 match args {
186 callee::ArgExprs(arg_exprs) => {
187 assert_eq!(arg_exprs.len(), 1);
188
189 let (in_type, out_type) = (*substs.types.get(FnSpace, 0),
190 *substs.types.get(FnSpace, 1));
191 let llintype = type_of::type_of(ccx, in_type);
192 let llouttype = type_of::type_of(ccx, out_type);
193
194 let in_type_size = machine::llbitsize_of_real(ccx, llintype);
195 let out_type_size = machine::llbitsize_of_real(ccx, llouttype);
196
197 // This should be caught by the intrinsicck pass
198 assert_eq!(in_type_size, out_type_size);
199
200 let nonpointer_nonaggregate = |llkind: TypeKind| -> bool {
201 use llvm::TypeKind::*;
202 match llkind {
203 Half | Float | Double | X86_FP80 | FP128 |
204 PPC_FP128 | Integer | Vector | X86_MMX => true,
205 _ => false
206 }
207 };
208
209 // An approximation to which types can be directly cast via
210 // LLVM's bitcast. This doesn't cover pointer -> pointer casts,
211 // but does, importantly, cover SIMD types.
212 let in_kind = llintype.kind();
213 let ret_kind = llret_ty.kind();
214 let bitcast_compatible =
215 (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || {
216 in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer
217 };
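// For example, `u32x4` -> `u64x2` (vector kinds) and `f32` -> `u32`
// (float/integer kinds) are bitcast-compatible, while `&T` -> `usize`
// (pointer -> integer) is not and takes the in-memory path below.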
218
219 let dest = if bitcast_compatible {
220 // if we're here, the type is scalar-like (a primitive, a
221 // SIMD type or a pointer), and so can be handled as a
222 // by-value ValueRef and can also be directly bitcast to the
223 // target type. Doing this special case makes conversions
224 // like `u32x4` -> `u64x2` much nicer for LLVM and so more
225 // efficient (these are done efficiently implicitly in C
226 // with the `__m128i` type and so this means Rust doesn't
227 // lose out there).
228 let expr = &arg_exprs[0];
229 let datum = unpack_datum!(bcx, expr::trans(bcx, expr));
230 let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "transmute_temp"));
231 let val = if datum.kind.is_by_ref() {
232 load_ty(bcx, datum.val, datum.ty)
233 } else {
234 from_arg_ty(bcx, datum.val, datum.ty)
235 };
236
237 let cast_val = BitCast(bcx, val, llret_ty);
238
239 match dest {
240 expr::SaveIn(d) => {
241 // this often occurs in a sequence like `Store(val,
242 // d); val2 = Load(d)`, so it disappears easily.
243 Store(bcx, cast_val, d);
244 }
245 expr::Ignore => {}
246 }
247 dest
248 } else {
249 // The types are too complicated to do with a by-value
250 // bitcast, so pointer cast instead. We need to cast the
251 // dest so the types work out.
252 let dest = match dest {
253 expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
254 expr::Ignore => expr::Ignore
255 };
256 bcx = expr::trans_into(bcx, &arg_exprs[0], dest);
257 dest
258 };
259
260 fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
261 fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
262
263 return match dest {
264 expr::SaveIn(d) => Result::new(bcx, d),
265 expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
266 };
267
268 }
269
270 _ => {
271 ccx.sess().bug("expected expr as argument for transmute");
272 }
273 }
274 }
275
276 // For `move_val_init` we can evaluate the destination address
277 // (the first argument) and then trans the source value (the
278 // second argument) directly into the resulting destination
279 // address.
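// Roughly, `move_val_init(dst, src)` writes `src` to the memory `dst`
// points at without reading or dropping whatever was there, so the
// translation below evaluates the destination address first and then
// translates the source value directly into it.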
280 if name == "move_val_init" {
281 if let callee::ArgExprs(ref exprs) = args {
282 let (dest_expr, source_expr) = if exprs.len() != 2 {
283 ccx.sess().bug("expected two exprs as arguments for `move_val_init` intrinsic");
284 } else {
285 (&exprs[0], &exprs[1])
286 };
287
288 // evaluate destination address
289 let dest_datum = unpack_datum!(bcx, expr::trans(bcx, dest_expr));
290 let dest_datum = unpack_datum!(
291 bcx, dest_datum.to_rvalue_datum(bcx, "arg"));
292 let dest_datum = unpack_datum!(
293 bcx, dest_datum.to_appropriate_datum(bcx));
294
295 // `expr::trans_into(bcx, expr, dest)` is equiv to
296 //
297 // `trans(bcx, expr).store_to_dest(dest)`,
298 //
299 // which for `dest == expr::SaveIn(addr)`, is equivalent to:
300 //
301 // `trans(bcx, expr).store_to(bcx, addr)`.
302 let lldest = expr::Dest::SaveIn(dest_datum.val);
303 bcx = expr::trans_into(bcx, source_expr, lldest);
304
305 let llresult = C_nil(ccx);
306 fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
307
308 return Result::new(bcx, llresult);
309 } else {
310 ccx.sess().bug("expected two exprs as arguments for `move_val_init` intrinsic");
311 }
312 }
313
314 let call_debug_location = DebugLoc::At(call_info.id, call_info.span);
315
316 // For `try` we need some custom control flow
317 if &name[..] == "try" {
318 if let callee::ArgExprs(ref exprs) = args {
319 let (func, data, local_ptr) = if exprs.len() != 3 {
320 ccx.sess().bug("expected three exprs as arguments for \
321 `try` intrinsic");
322 } else {
323 (&exprs[0], &exprs[1], &exprs[2])
324 };
325
326 // translate arguments
327 let func = unpack_datum!(bcx, expr::trans(bcx, func));
328 let func = unpack_datum!(bcx, func.to_rvalue_datum(bcx, "func"));
329 let data = unpack_datum!(bcx, expr::trans(bcx, data));
330 let data = unpack_datum!(bcx, data.to_rvalue_datum(bcx, "data"));
331 let local_ptr = unpack_datum!(bcx, expr::trans(bcx, local_ptr));
332 let local_ptr = local_ptr.to_rvalue_datum(bcx, "local_ptr");
333 let local_ptr = unpack_datum!(bcx, local_ptr);
334
335 let dest = match dest {
336 expr::SaveIn(d) => d,
337 expr::Ignore => alloc_ty(bcx, tcx.mk_mut_ptr(tcx.types.i8),
338 "try_result"),
339 };
340
341 // do the invoke
342 bcx = try_intrinsic(bcx, func.val, data.val, local_ptr.val, dest,
343 call_debug_location);
344
345 fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
346 return Result::new(bcx, dest);
347 } else {
348 ccx.sess().bug("expected three exprs as arguments for \
349 `try` intrinsic");
350 }
351 }
352
353 // save the actual AST arguments for later (some places need to do
354 // const-evaluation on them)
355 let expr_arguments = match args {
356 callee::ArgExprs(args) => Some(args),
357 _ => None,
358 };
359
360 // Push the arguments.
361 let mut llargs = Vec::new();
362 bcx = callee::trans_args(bcx,
363 args,
364 callee_ty,
365 &mut llargs,
366 cleanup::CustomScope(cleanup_scope),
367 false,
368 Abi::RustIntrinsic);
369
370 fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
371
372 // These are the only intrinsic functions that diverge.
373 if name == "abort" {
374 let llfn = ccx.get_intrinsic(&("llvm.trap"));
375 Call(bcx, llfn, &[], None, call_debug_location);
376 fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
377 Unreachable(bcx);
378 return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
379 } else if &name[..] == "unreachable" {
380 fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
381 Unreachable(bcx);
382 return Result::new(bcx, C_nil(ccx));
383 }
384
385 let ret_ty = match ret_ty {
386 ty::FnConverging(ret_ty) => ret_ty,
387 ty::FnDiverging => unreachable!()
388 };
389
390 let llret_ty = type_of::type_of(ccx, ret_ty);
391
392 // Get location to store the result. If the user does
393 // not care about the result, just make a stack slot
394 let llresult = match dest {
395 expr::SaveIn(d) => d,
396 expr::Ignore => {
397 if !type_is_zero_size(ccx, ret_ty) {
398 let llresult = alloc_ty(bcx, ret_ty, "intrinsic_result");
399 call_lifetime_start(bcx, llresult);
400 llresult
401 } else {
402 C_undef(llret_ty.ptr_to())
403 }
404 }
405 };
406
407 let simple = get_simple_intrinsic(ccx, &foreign_item);
408 let llval = match (simple, &*name) {
409 (Some(llfn), _) => {
410 Call(bcx, llfn, &llargs, None, call_debug_location)
411 }
412 (_, "breakpoint") => {
413 let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
414 Call(bcx, llfn, &[], None, call_debug_location)
415 }
416 (_, "size_of") => {
417 let tp_ty = *substs.types.get(FnSpace, 0);
418 let lltp_ty = type_of::type_of(ccx, tp_ty);
419 C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
420 }
421 (_, "size_of_val") => {
422 let tp_ty = *substs.types.get(FnSpace, 0);
423 if !type_is_sized(tcx, tp_ty) {
424 let (llsize, _) = glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]);
425 llsize
426 } else {
427 let lltp_ty = type_of::type_of(ccx, tp_ty);
428 C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
429 }
430 }
431 (_, "min_align_of") => {
432 let tp_ty = *substs.types.get(FnSpace, 0);
433 C_uint(ccx, type_of::align_of(ccx, tp_ty))
434 }
435 (_, "min_align_of_val") => {
436 let tp_ty = *substs.types.get(FnSpace, 0);
437 if !type_is_sized(tcx, tp_ty) {
438 let (_, llalign) = glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]);
439 llalign
440 } else {
441 C_uint(ccx, type_of::align_of(ccx, tp_ty))
442 }
443 }
444 (_, "pref_align_of") => {
445 let tp_ty = *substs.types.get(FnSpace, 0);
446 let lltp_ty = type_of::type_of(ccx, tp_ty);
447 C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
448 }
449 (_, "drop_in_place") => {
450 let tp_ty = *substs.types.get(FnSpace, 0);
451 let ptr = if type_is_sized(tcx, tp_ty) {
452 llargs[0]
453 } else {
454 let scratch = rvalue_scratch_datum(bcx, tp_ty, "tmp");
455 Store(bcx, llargs[0], expr::get_dataptr(bcx, scratch.val));
456 Store(bcx, llargs[1], expr::get_meta(bcx, scratch.val));
457 fcx.schedule_lifetime_end(cleanup::CustomScope(cleanup_scope), scratch.val);
458 scratch.val
459 };
460 glue::drop_ty(bcx, ptr, tp_ty, call_debug_location);
461 C_nil(ccx)
462 }
463 (_, "type_name") => {
464 let tp_ty = *substs.types.get(FnSpace, 0);
465 let ty_name = token::intern_and_get_ident(&tp_ty.to_string());
466 C_str_slice(ccx, ty_name)
467 }
468 (_, "type_id") => {
469 let hash = ccx.tcx().hash_crate_independent(*substs.types.get(FnSpace, 0),
470 &ccx.link_meta().crate_hash);
471 C_u64(ccx, hash)
472 }
473 (_, "init_dropped") => {
474 let tp_ty = *substs.types.get(FnSpace, 0);
475 if !return_type_is_void(ccx, tp_ty) {
476 drop_done_fill_mem(bcx, llresult, tp_ty);
477 }
478 C_nil(ccx)
479 }
480 (_, "init") => {
481 let tp_ty = *substs.types.get(FnSpace, 0);
482 if !return_type_is_void(ccx, tp_ty) {
483 // Just zero out the stack slot. (See comment on base::memzero for explanation)
484 init_zero_mem(bcx, llresult, tp_ty);
485 }
486 C_nil(ccx)
487 }
488 // Effectively no-ops
489 (_, "uninit") | (_, "forget") => {
490 C_nil(ccx)
491 }
492 (_, "needs_drop") => {
493 let tp_ty = *substs.types.get(FnSpace, 0);
494
495 C_bool(ccx, bcx.fcx.type_needs_drop(tp_ty))
496 }
497 (_, "offset") => {
498 let ptr = llargs[0];
499 let offset = llargs[1];
500 InBoundsGEP(bcx, ptr, &[offset])
501 }
502 (_, "arith_offset") => {
503 let ptr = llargs[0];
504 let offset = llargs[1];
505 GEP(bcx, ptr, &[offset])
506 }
507
508 (_, "copy_nonoverlapping") => {
509 copy_intrinsic(bcx,
510 false,
511 false,
512 *substs.types.get(FnSpace, 0),
513 llargs[1],
514 llargs[0],
515 llargs[2],
516 call_debug_location)
517 }
518 (_, "copy") => {
519 copy_intrinsic(bcx,
520 true,
521 false,
522 *substs.types.get(FnSpace, 0),
523 llargs[1],
524 llargs[0],
525 llargs[2],
526 call_debug_location)
527 }
528 (_, "write_bytes") => {
529 memset_intrinsic(bcx,
530 false,
531 *substs.types.get(FnSpace, 0),
532 llargs[0],
533 llargs[1],
534 llargs[2],
535 call_debug_location)
536 }
537
538 (_, "volatile_copy_nonoverlapping_memory") => {
539 copy_intrinsic(bcx,
540 false,
541 true,
542 *substs.types.get(FnSpace, 0),
543 llargs[0],
544 llargs[1],
545 llargs[2],
546 call_debug_location)
547 }
548 (_, "volatile_copy_memory") => {
549 copy_intrinsic(bcx,
550 true,
551 true,
552 *substs.types.get(FnSpace, 0),
553 llargs[0],
554 llargs[1],
555 llargs[2],
556 call_debug_location)
557 }
558 (_, "volatile_set_memory") => {
559 memset_intrinsic(bcx,
560 true,
561 *substs.types.get(FnSpace, 0),
562 llargs[0],
563 llargs[1],
564 llargs[2],
565 call_debug_location)
566 }
567 (_, "volatile_load") => {
568 let tp_ty = *substs.types.get(FnSpace, 0);
569 let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
570 let load = VolatileLoad(bcx, ptr);
571 unsafe {
572 llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty));
573 }
574 to_arg_ty(bcx, load, tp_ty)
575 },
576 (_, "volatile_store") => {
577 let tp_ty = *substs.types.get(FnSpace, 0);
578 let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
579 let val = from_arg_ty(bcx, llargs[1], tp_ty);
580 let store = VolatileStore(bcx, val, ptr);
581 unsafe {
582 llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty));
583 }
584 C_nil(ccx)
585 },
586
587 (_, "ctlz") | (_, "cttz") | (_, "ctpop") | (_, "bswap") |
588 (_, "add_with_overflow") | (_, "sub_with_overflow") | (_, "mul_with_overflow") |
589 (_, "overflowing_add") | (_, "overflowing_sub") | (_, "overflowing_mul") |
590 (_, "unchecked_div") | (_, "unchecked_rem") => {
591 let sty = &arg_tys[0].sty;
592 match int_type_width_signed(sty, ccx) {
593 Some((width, signed)) =>
594 match &*name {
595 "ctlz" => count_zeros_intrinsic(bcx, &format!("llvm.ctlz.i{}", width),
596 llargs[0], call_debug_location),
597 "cttz" => count_zeros_intrinsic(bcx, &format!("llvm.cttz.i{}", width),
598 llargs[0], call_debug_location),
599 "ctpop" => Call(bcx, ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
600 &llargs, None, call_debug_location),
601 "bswap" => {
602 if width == 8 {
603 llargs[0] // byte swap a u8/i8 is just a no-op
604 } else {
605 Call(bcx, ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
606 &llargs, None, call_debug_location)
607 }
608 }
609 "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
610 let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
611 if signed { 's' } else { 'u' },
612 &name[..3], width);
613 with_overflow_intrinsic(bcx, &intrinsic, llargs[0], llargs[1], llresult,
614 call_debug_location)
615 },
616 "overflowing_add" => Add(bcx, llargs[0], llargs[1], call_debug_location),
617 "overflowing_sub" => Sub(bcx, llargs[0], llargs[1], call_debug_location),
618 "overflowing_mul" => Mul(bcx, llargs[0], llargs[1], call_debug_location),
619 "unchecked_div" =>
620 if signed {
621 SDiv(bcx, llargs[0], llargs[1], call_debug_location)
622 } else {
623 UDiv(bcx, llargs[0], llargs[1], call_debug_location)
624 },
625 "unchecked_rem" =>
626 if signed {
627 SRem(bcx, llargs[0], llargs[1], call_debug_location)
628 } else {
629 URem(bcx, llargs[0], llargs[1], call_debug_location)
630 },
631 _ => unreachable!(),
632 },
633 None => {
634 span_invalid_monomorphization_error(
635 tcx.sess, call_info.span,
636 &format!("invalid monomorphization of `{}` intrinsic: \
637 expected basic integer type, found `{}`", name, sty));
638 C_null(llret_ty)
639 }
640 }
641
642 },
643
644
645 (_, "return_address") => {
646 if !fcx.caller_expects_out_pointer {
647 span_err!(tcx.sess, call_info.span, E0510,
648 "invalid use of `return_address` intrinsic: function \
649 does not use out pointer");
650 C_null(Type::i8p(ccx))
651 } else {
652 PointerCast(bcx, llvm::get_param(fcx.llfn, 0), Type::i8p(ccx))
653 }
654 }
655
656 (_, "discriminant_value") => {
657 let val_ty = substs.types.get(FnSpace, 0);
658 match val_ty.sty {
659 ty::TyEnum(..) => {
660 let repr = adt::represent_type(ccx, *val_ty);
661 adt::trans_get_discr(bcx, &repr, llargs[0],
662 Some(llret_ty), true)
663 }
664 _ => C_null(llret_ty)
665 }
666 }
667 (_, name) if name.starts_with("simd_") => {
668 generic_simd_intrinsic(bcx, name,
669 substs,
670 callee_ty,
671 expr_arguments,
672 &llargs,
673 ret_ty, llret_ty,
674 call_debug_location,
675 call_info)
676 }
677 // This requires that atomic intrinsics follow a specific naming pattern:
678 // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
679 (_, name) if name.starts_with("atomic_") => {
680 let split: Vec<&str> = name.split('_').collect();
681
682 let (order, failorder) = match split.len() {
683 2 => (llvm::SequentiallyConsistent, llvm::SequentiallyConsistent),
684 3 => match split[2] {
685 "unordered" => (llvm::Unordered, llvm::Unordered),
686 "relaxed" => (llvm::Monotonic, llvm::Monotonic),
687 "acq" => (llvm::Acquire, llvm::Acquire),
688 "rel" => (llvm::Release, llvm::Monotonic),
689 "acqrel" => (llvm::AcquireRelease, llvm::Acquire),
690 "failrelaxed" if split[1] == "cxchg" || split[1] == "cxchgweak" =>
691 (llvm::SequentiallyConsistent, llvm::Monotonic),
692 "failacq" if split[1] == "cxchg" || split[1] == "cxchgweak" =>
693 (llvm::SequentiallyConsistent, llvm::Acquire),
694 _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
695 },
696 4 => match (split[2], split[3]) {
697 ("acq", "failrelaxed") if split[1] == "cxchg" || split[1] == "cxchgweak" =>
698 (llvm::Acquire, llvm::Monotonic),
699 ("acqrel", "failrelaxed") if split[1] == "cxchg" || split[1] == "cxchgweak" =>
700 (llvm::AcquireRelease, llvm::Monotonic),
701 _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
702 },
703 _ => ccx.sess().fatal("Atomic intrinsic not in correct format"),
704 };
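// Under this scheme, for example, `atomic_xadd` uses SeqCst for both
// orderings, `atomic_load_relaxed` uses Monotonic, and
// `atomic_cxchg_acq_failrelaxed` uses Acquire on success and Monotonic
// on failure.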
705
706 match split[1] {
707 "cxchg" => {
708 let tp_ty = *substs.types.get(FnSpace, 0);
709 let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
710 let cmp = from_arg_ty(bcx, llargs[1], tp_ty);
711 let src = from_arg_ty(bcx, llargs[2], tp_ty);
712 let res = AtomicCmpXchg(bcx, ptr, cmp, src, order, failorder, llvm::False);
713 ExtractValue(bcx, res, 0)
714 }
715
716 "cxchgweak" => {
717 let tp_ty = *substs.types.get(FnSpace, 0);
718 let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
719 let cmp = from_arg_ty(bcx, llargs[1], tp_ty);
720 let src = from_arg_ty(bcx, llargs[2], tp_ty);
721 let val = AtomicCmpXchg(bcx, ptr, cmp, src, order, failorder, llvm::True);
722 let result = ExtractValue(bcx, val, 0);
723 let success = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
724 Store(bcx, result, StructGEP(bcx, llresult, 0));
725 Store(bcx, success, StructGEP(bcx, llresult, 1));
726 C_nil(ccx)
727 }
728
729 "load" => {
730 let tp_ty = *substs.types.get(FnSpace, 0);
731 let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
732 to_arg_ty(bcx, AtomicLoad(bcx, ptr, order), tp_ty)
733 }
734 "store" => {
735 let tp_ty = *substs.types.get(FnSpace, 0);
736 let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
737 let val = from_arg_ty(bcx, llargs[1], tp_ty);
738 AtomicStore(bcx, val, ptr, order);
739 C_nil(ccx)
740 }
741
742 "fence" => {
743 AtomicFence(bcx, order, llvm::CrossThread);
744 C_nil(ccx)
745 }
746
747 "singlethreadfence" => {
748 AtomicFence(bcx, order, llvm::SingleThread);
749 C_nil(ccx)
750 }
751
752 // These are all AtomicRMW ops
753 op => {
754 let atom_op = match op {
755 "xchg" => llvm::AtomicXchg,
756 "xadd" => llvm::AtomicAdd,
757 "xsub" => llvm::AtomicSub,
758 "and" => llvm::AtomicAnd,
759 "nand" => llvm::AtomicNand,
760 "or" => llvm::AtomicOr,
761 "xor" => llvm::AtomicXor,
762 "max" => llvm::AtomicMax,
763 "min" => llvm::AtomicMin,
764 "umax" => llvm::AtomicUMax,
765 "umin" => llvm::AtomicUMin,
766 _ => ccx.sess().fatal("unknown atomic operation")
767 };
768
769 let tp_ty = *substs.types.get(FnSpace, 0);
770 let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
771 let val = from_arg_ty(bcx, llargs[1], tp_ty);
772 AtomicRMW(bcx, atom_op, ptr, val, order)
773 }
774 }
775
776 }
777
778 (_, _) => {
779 let intr = match Intrinsic::find(tcx, &name) {
780 Some(intr) => intr,
781 None => ccx.sess().span_bug(foreign_item.span,
782 &format!("unknown intrinsic '{}'", name)),
783 };
784 fn one<T>(x: Vec<T>) -> T {
785 assert_eq!(x.len(), 1);
786 x.into_iter().next().unwrap()
787 }
788 fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type,
789 any_changes_needed: &mut bool) -> Vec<Type> {
790 use intrinsics::Type::*;
791 match *t {
792 Void => vec![Type::void(ccx)],
793 Integer(_signed, width, llvm_width) => {
794 *any_changes_needed |= width != llvm_width;
795 vec![Type::ix(ccx, llvm_width as u64)]
796 }
797 Float(x) => {
798 match x {
799 32 => vec![Type::f32(ccx)],
800 64 => vec![Type::f64(ccx)],
801 _ => unreachable!()
802 }
803 }
804 Pointer(ref t, ref llvm_elem, _const) => {
805 *any_changes_needed |= llvm_elem.is_some();
806
807 let t = llvm_elem.as_ref().unwrap_or(t);
808 let elem = one(ty_to_type(ccx, t,
809 any_changes_needed));
810 vec![elem.ptr_to()]
811 }
812 Vector(ref t, ref llvm_elem, length) => {
813 *any_changes_needed |= llvm_elem.is_some();
814
815 let t = llvm_elem.as_ref().unwrap_or(t);
816 let elem = one(ty_to_type(ccx, t,
817 any_changes_needed));
818 vec![Type::vector(&elem,
819 length as u64)]
820 }
821 Aggregate(false, ref contents) => {
822 let elems = contents.iter()
823 .map(|t| one(ty_to_type(ccx, t, any_changes_needed)))
824 .collect::<Vec<_>>();
825 vec![Type::struct_(ccx, &elems, false)]
826 }
827 Aggregate(true, ref contents) => {
828 *any_changes_needed = true;
829 contents.iter()
830 .flat_map(|t| ty_to_type(ccx, t, any_changes_needed))
831 .collect()
832 }
833 }
834 }
835
836 // This allows an argument list like `foo, (bar, baz),
837 // qux` to be converted into `foo, bar, baz, qux`, integer
838 // arguments to be truncated as needed, and pointers to be
839 // cast.
840 fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
841 t: &intrinsics::Type,
842 arg_type: Ty<'tcx>,
843 llarg: ValueRef)
844 -> Vec<ValueRef>
845 {
846 match *t {
847 intrinsics::Type::Aggregate(true, ref contents) => {
848 // We found a tuple that needs squishing! So
849 // run over the tuple and load each field.
850 //
851 // This assumes the type is "simple", i.e. no
852 // destructors, and the contents are SIMD
853 // etc.
854 assert!(!bcx.fcx.type_needs_drop(arg_type));
855
856 let repr = adt::represent_type(bcx.ccx(), arg_type);
857 let repr_ptr = &repr;
858 let arg = adt::MaybeSizedValue::sized(llarg);
859 (0..contents.len())
860 .map(|i| {
861 Load(bcx, adt::trans_field_ptr(bcx, repr_ptr, arg, Disr(0), i))
862 })
863 .collect()
864 }
865 intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
866 let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
867 vec![PointerCast(bcx, llarg,
868 llvm_elem.ptr_to())]
869 }
870 intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
871 let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
872 vec![BitCast(bcx, llarg,
873 Type::vector(&llvm_elem, length as u64))]
874 }
875 intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
876 // the LLVM intrinsic uses a smaller integer
877 // size than the C intrinsic's signature, so
878 // we have to trim it down here.
879 vec![Trunc(bcx, llarg, Type::ix(bcx.ccx(), llvm_width as u64))]
880 }
881 _ => vec![llarg],
882 }
883 }
884
885
886 let mut any_changes_needed = false;
887 let inputs = intr.inputs.iter()
888 .flat_map(|t| ty_to_type(ccx, t, &mut any_changes_needed))
889 .collect::<Vec<_>>();
890
891 let mut out_changes = false;
892 let outputs = one(ty_to_type(ccx, &intr.output, &mut out_changes));
893 // outputting a flattened aggregate is nonsense
894 assert!(!out_changes);
895
896 let llargs = if !any_changes_needed {
897 // no aggregates to flatten, so no change needed
898 llargs
899 } else {
900 // there are some aggregates that need to be flattened
901 // in the LLVM call, so we need to run over the types
902 // again to find them and extract the arguments
903 intr.inputs.iter()
904 .zip(&llargs)
905 .zip(&arg_tys)
906 .flat_map(|((t, llarg), ty)| modify_as_needed(bcx, t, ty, *llarg))
907 .collect()
908 };
909 assert_eq!(inputs.len(), llargs.len());
910
911 let val = match intr.definition {
912 intrinsics::IntrinsicDef::Named(name) => {
913 let f = declare::declare_cfn(ccx,
914 name,
915 Type::func(&inputs, &outputs),
916 tcx.mk_nil());
917 Call(bcx, f, &llargs, None, call_debug_location)
918 }
919 };
920
921 match intr.output {
922 intrinsics::Type::Aggregate(flatten, ref elems) => {
923 // the output is a tuple so we need to munge it properly
924 assert!(!flatten);
925
926 for i in 0..elems.len() {
927 let val = ExtractValue(bcx, val, i);
928 Store(bcx, val, StructGEP(bcx, llresult, i));
929 }
930 C_nil(ccx)
931 }
932 _ => val,
933 }
934 }
935 };
936
937 if val_ty(llval) != Type::void(ccx) &&
938 machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
939 store_ty(bcx, llval, llresult, ret_ty);
940 }
941
942 // If we made a temporary stack slot, let's clean it up
943 match dest {
944 expr::Ignore => {
945 bcx = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location);
946 call_lifetime_end(bcx, llresult);
947 }
948 expr::SaveIn(_) => {}
949 }
950
951 fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
952
953 Result::new(bcx, llresult)
954 }
955
956 fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
957 allow_overlap: bool,
958 volatile: bool,
959 tp_ty: Ty<'tcx>,
960 dst: ValueRef,
961 src: ValueRef,
962 count: ValueRef,
963 call_debug_location: DebugLoc)
964 -> ValueRef {
965 let ccx = bcx.ccx();
966 let lltp_ty = type_of::type_of(ccx, tp_ty);
967 let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
968 let size = machine::llsize_of(ccx, lltp_ty);
969 let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());
970
971 let operation = if allow_overlap {
972 "memmove"
973 } else {
974 "memcpy"
975 };
976
977 let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, int_size);
978
979 let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
980 let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
981 let llfn = ccx.get_intrinsic(&name);
982
983 Call(bcx,
984 llfn,
985 &[dst_ptr,
986 src_ptr,
987 Mul(bcx, size, count, DebugLoc::None),
988 align,
989 C_bool(ccx, volatile)],
990 None,
991 call_debug_location)
992 }
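// For example, on a 64-bit target `copy_nonoverlapping::<u64>(src, dst, count)`
// becomes a call to `llvm.memcpy.p0i8.p0i8.i64` copying
// `count * size_of::<u64>()` bytes, while `copy` selects `llvm.memmove` instead.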
993
994 fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
995 volatile: bool,
996 tp_ty: Ty<'tcx>,
997 dst: ValueRef,
998 val: ValueRef,
999 count: ValueRef,
1000 call_debug_location: DebugLoc)
1001 -> ValueRef {
1002 let ccx = bcx.ccx();
1003 let lltp_ty = type_of::type_of(ccx, tp_ty);
1004 let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
1005 let size = machine::llsize_of(ccx, lltp_ty);
1006 let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());
1007
1008 let name = format!("llvm.memset.p0i8.i{}", int_size);
1009
1010 let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
1011 let llfn = ccx.get_intrinsic(&name);
1012
1013 Call(bcx,
1014 llfn,
1015 &[dst_ptr,
1016 val,
1017 Mul(bcx, size, count, DebugLoc::None),
1018 align,
1019 C_bool(ccx, volatile)],
1020 None,
1021 call_debug_location)
1022 }
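// For example, `write_bytes::<u32>(dst, 0xAB, count)` becomes a call to
// `llvm.memset.p0i8.i64` (on a 64-bit target) filling `count * 4` bytes
// with the value 0xAB.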
1023
1024 fn count_zeros_intrinsic(bcx: Block,
1025 name: &str,
1026 val: ValueRef,
1027 call_debug_location: DebugLoc)
1028 -> ValueRef {
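// llvm.ctlz.* and llvm.cttz.* take a second i1 operand, `is_zero_undef`;
// passing `false` here asks for a defined result (the full bit width)
// when the input is zero.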
1029 let y = C_bool(bcx.ccx(), false);
1030 let llfn = bcx.ccx().get_intrinsic(&name);
1031 Call(bcx, llfn, &[val, y], None, call_debug_location)
1032 }
1033
1034 fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1035 name: &str,
1036 a: ValueRef,
1037 b: ValueRef,
1038 out: ValueRef,
1039 call_debug_location: DebugLoc)
1040 -> ValueRef {
1041 let llfn = bcx.ccx().get_intrinsic(&name);
1042
1043 // Convert `i1` to a `bool`, and write it to the out parameter
1044 let val = Call(bcx, llfn, &[a, b], None, call_debug_location);
1045 let result = ExtractValue(bcx, val, 0);
1046 let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
1047 Store(bcx, result, StructGEP(bcx, out, 0));
1048 Store(bcx, overflow, StructGEP(bcx, out, 1));
1049
1050 C_nil(bcx.ccx())
1051 }
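// For example, `add_with_overflow::<u32>(a, b)` is lowered to
// `llvm.uadd.with.overflow.i32`, whose `{ i32, i1 }` result is unpacked
// here into the `(u32, bool)` pair written through `out`.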
1052
1053 fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1054 func: ValueRef,
1055 data: ValueRef,
1056 local_ptr: ValueRef,
1057 dest: ValueRef,
1058 dloc: DebugLoc) -> Block<'blk, 'tcx> {
1059 if bcx.sess().no_landing_pads() {
1060 Call(bcx, func, &[data], None, dloc);
1061 Store(bcx, C_null(Type::i8p(bcx.ccx())), dest);
1062 bcx
1063 } else if wants_msvc_seh(bcx.sess()) {
1064 trans_msvc_try(bcx, func, data, local_ptr, dest, dloc)
1065 } else {
1066 trans_gnu_try(bcx, func, data, local_ptr, dest, dloc)
1067 }
1068 }
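// The `try` intrinsic takes three arguments: a function, a data pointer to
// hand to it, and an out-pointer for panic information. The shims below call
// `func(data)` under an exception handler and return 0 if it completed
// normally or 1 if a panic was caught, writing the exception pointer through
// `local_ptr` (the exact Rust-level contract lives in libstd's seh/unwind code).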
1069
1070 // MSVC's definition of the `rust_try` function.
1071 //
1072 // This implementation uses the new exception handling instructions in LLVM
1073 // which have support in LLVM for SEH on MSVC targets. Although these
1074 // instructions are meant to work for all targets, as of the time of this
1075 // writing, however, LLVM does not recommend the usage of these new instructions
1076 // as the old ones are still more optimized.
1077 fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1078 func: ValueRef,
1079 data: ValueRef,
1080 local_ptr: ValueRef,
1081 dest: ValueRef,
1082 dloc: DebugLoc) -> Block<'blk, 'tcx> {
1083 let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
1084 let ccx = bcx.ccx();
1085 let dloc = DebugLoc::None;
1086
1087 SetPersonalityFn(bcx, bcx.fcx.eh_personality());
1088
1089 let normal = bcx.fcx.new_temp_block("normal");
1090 let catchswitch = bcx.fcx.new_temp_block("catchswitch");
1091 let catchpad = bcx.fcx.new_temp_block("catchpad");
1092 let caught = bcx.fcx.new_temp_block("caught");
1093
1094 let func = llvm::get_param(bcx.fcx.llfn, 0);
1095 let data = llvm::get_param(bcx.fcx.llfn, 1);
1096 let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);
1097
1098 // We're generating an IR snippet that looks like:
1099 //
1100 // declare i32 @rust_try(%func, %data, %ptr) {
1101 // %slot = alloca i8*
1102 // call @llvm.localescape(%slot)
1103 // store %ptr, %slot
1104 // invoke %func(%data) to label %normal unwind label %catchswitch
1105 //
1106 // normal:
1107 // ret i32 0
1108 //
1109 // catchswitch:
1110 // %cs = catchswitch within none [%catchpad] unwind to caller
1111 //
1112 // catchpad:
1113 // %tok = catchpad within %cs [%rust_try_filter]
1114 // catchret from %tok to label %caught
1115 //
1116 // caught:
1117 // ret i32 1
1118 // }
1119 //
1120 // This structure follows the basic usage of the instructions in LLVM
1121 // (see their documentation/test cases for examples), but a
1122 // perhaps-surprising part here is the usage of the `localescape`
1123 // intrinsic. This is used to allow the filter function (also generated
1124 // here) to access variables on the stack of this intrinsic. This
1125 // ability enables us to transfer information about the exception being
1126 // thrown to this point, where we're catching the exception.
1127 //
1128 // More information can be found in libstd's seh.rs implementation.
1129 let slot = Alloca(bcx, Type::i8p(ccx), "slot");
1130 let localescape = ccx.get_intrinsic(&"llvm.localescape");
1131 Call(bcx, localescape, &[slot], None, dloc);
1132 Store(bcx, local_ptr, slot);
1133 Invoke(bcx, func, &[data], normal.llbb, catchswitch.llbb, None, dloc);
1134
1135 Ret(normal, C_i32(ccx, 0), dloc);
1136
1137 let cs = CatchSwitch(catchswitch, None, None, 1);
1138 AddHandler(catchswitch, cs, catchpad.llbb);
1139
1140 let filter = generate_filter_fn(bcx.fcx, bcx.fcx.llfn);
1141 let filter = BitCast(catchpad, filter, Type::i8p(ccx));
1142 let tok = CatchPad(catchpad, cs, &[filter]);
1143 CatchRet(catchpad, tok, caught.llbb);
1144
1145 Ret(caught, C_i32(ccx, 1), dloc);
1146 });
1147
1148 // Note that no invoke is used here because by definition this function
1149 // can't panic (that's what it's catching).
1150 let ret = Call(bcx, llfn, &[func, data, local_ptr], None, dloc);
1151 Store(bcx, ret, dest);
1152 return bcx
1153 }
1154
1155 // Definition of the standard "try" function for Rust using the GNU-like model
1156 // of exceptions (e.g. the normal semantics of LLVM's landingpad and invoke
1157 // instructions).
1158 //
1159 // This translation is a little surprising because we always call a shim
1160 // function instead of inlining the call to `invoke` manually here. This is done
1161 // because in LLVM we're only allowed to have one personality per function
1162 // definition. The call to the `try` intrinsic is being inlined into the
1163 // function calling it, and that function may already have other personality
1164 // functions in play. By calling a shim we're guaranteed that our shim will have
1165 // the right personality function.
1166 fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1167 func: ValueRef,
1168 data: ValueRef,
1169 local_ptr: ValueRef,
1170 dest: ValueRef,
1171 dloc: DebugLoc) -> Block<'blk, 'tcx> {
1172 let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
1173 let ccx = bcx.ccx();
1174 let dloc = DebugLoc::None;
1175
1176 // Translates the shims described above:
1177 //
1178 // bcx:
1179 // invoke %func(%args...) normal %normal unwind %catch
1180 //
1181 // normal:
1182 // ret 0
1183 //
1184 // catch:
1185 // (ptr, _) = landingpad
1186 // store ptr, %local_ptr
1187 // ret 1
1188 //
1189 // Note that the `local_ptr` data passed into the `try` intrinsic is
1190 // expected to be `*mut *mut u8` for this to actually work, but that's
1191 // managed by the standard library.
1192
1193 attributes::emit_uwtable(bcx.fcx.llfn, true);
1194 let catch_pers = match bcx.tcx().lang_items.eh_personality_catch() {
1195 Some(did) => callee::trans_fn_ref(ccx, did, ExprId(0),
1196 bcx.fcx.param_substs).val,
1197 None => bcx.tcx().sess.bug("eh_personality_catch not defined"),
1198 };
1199
1200 let then = bcx.fcx.new_temp_block("then");
1201 let catch = bcx.fcx.new_temp_block("catch");
1202
1203 let func = llvm::get_param(bcx.fcx.llfn, 0);
1204 let data = llvm::get_param(bcx.fcx.llfn, 1);
1205 let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);
1206 Invoke(bcx, func, &[data], then.llbb, catch.llbb, None, dloc);
1207 Ret(then, C_i32(ccx, 0), dloc);
1208
1209 // Type indicator for the exception being thrown.
1210 //
1211 // The first value in this tuple is a pointer to the exception object
1212 // being thrown. The second value is a "selector" indicating which of
1213 // the landing pad clauses the exception's type had been matched to.
1214 // rust_try ignores the selector.
1215 let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
1216 false);
1217 let vals = LandingPad(catch, lpad_ty, catch_pers, 1);
1218 AddClause(catch, vals, C_null(Type::i8p(ccx)));
1219 let ptr = ExtractValue(catch, vals, 0);
1220 Store(catch, ptr, BitCast(catch, local_ptr, Type::i8p(ccx).ptr_to()));
1221 Ret(catch, C_i32(ccx, 1), dloc);
1222 });
1223
1224 // Note that no invoke is used here because by definition this function
1225 // can't panic (that's what it's catching).
1226 let ret = Call(bcx, llfn, &[func, data, local_ptr], None, dloc);
1227 Store(bcx, ret, dest);
1228 return bcx;
1229 }
1230
1231 // Helper function to give a Block to a closure to translate a shim function.
1232 // This is currently primarily used for the `try` intrinsic functions above.
1233 fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
1234 name: &str,
1235 ty: Ty<'tcx>,
1236 output: ty::FnOutput<'tcx>,
1237 trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
1238 -> ValueRef {
1239 let ccx = fcx.ccx;
1240 let llfn = declare::define_internal_rust_fn(ccx, name, ty);
1241 let (fcx, block_arena);
1242 block_arena = TypedArena::new();
1243 fcx = new_fn_ctxt(ccx, llfn, ast::DUMMY_NODE_ID, false,
1244 output, ccx.tcx().mk_substs(Substs::trans_empty()),
1245 None, &block_arena);
1246 let bcx = init_function(&fcx, true, output);
1247 trans(bcx);
1248 fcx.cleanup();
1249 return llfn
1250 }
1251
1252 // Helper function used to get a handle to the `__rust_try` function used to
1253 // catch exceptions.
1254 //
1255 // This function is only generated once and is then cached.
1256 fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
1257 trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
1258 -> ValueRef {
1259 let ccx = fcx.ccx;
1260 if let Some(llfn) = *ccx.rust_try_fn().borrow() {
1261 return llfn
1262 }
1263
1264 // Define the type up front for the signature of the rust_try function.
1265 let tcx = ccx.tcx();
1266 let i8p = tcx.mk_mut_ptr(tcx.types.i8);
1267 let fn_ty = tcx.mk_bare_fn(ty::BareFnTy {
1268 unsafety: hir::Unsafety::Unsafe,
1269 abi: Abi::Rust,
1270 sig: ty::Binder(ty::FnSig {
1271 inputs: vec![i8p],
1272 output: ty::FnOutput::FnConverging(tcx.mk_nil()),
1273 variadic: false,
1274 }),
1275 });
1276 let fn_ty = tcx.mk_fn(None, fn_ty);
1277 let output = ty::FnOutput::FnConverging(tcx.types.i32);
1278 let try_fn_ty = tcx.mk_bare_fn(ty::BareFnTy {
1279 unsafety: hir::Unsafety::Unsafe,
1280 abi: Abi::Rust,
1281 sig: ty::Binder(ty::FnSig {
1282 inputs: vec![fn_ty, i8p, i8p],
1283 output: output,
1284 variadic: false,
1285 }),
1286 });
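// In Rust terms, __rust_try is declared here roughly as
// `unsafe fn(unsafe fn(*mut u8), *mut u8, *mut u8) -> i32`.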
1287 let rust_try = gen_fn(fcx, "__rust_try", tcx.mk_fn(None, try_fn_ty), output,
1288 trans);
1289 *ccx.rust_try_fn().borrow_mut() = Some(rust_try);
1290 return rust_try
1291 }
1292
1293 // For MSVC-style exceptions (SEH), the compiler generates a filter function
1294 // which is used to determine whether an exception is being caught (e.g. if it's
1295 // a Rust exception or some other kind).
1296 //
1297 // This function is used to generate said filter function. The shim generated
1298 // here is actually just a thin wrapper to call the real implementation in the
1299 // standard library itself. For reasons as to why, see seh.rs in the standard
1300 // library.
1301 fn generate_filter_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
1302 rust_try_fn: ValueRef)
1303 -> ValueRef {
1304 let ccx = fcx.ccx;
1305 let tcx = ccx.tcx();
1306 let dloc = DebugLoc::None;
1307
1308 let rust_try_filter = match ccx.tcx().lang_items.msvc_try_filter() {
1309 Some(did) => callee::trans_fn_ref(ccx, did, ExprId(0),
1310 fcx.param_substs).val,
1311 None => ccx.sess().bug("msvc_try_filter not defined"),
1312 };
1313
1314 let output = ty::FnOutput::FnConverging(tcx.types.i32);
1315 let i8p = tcx.mk_mut_ptr(tcx.types.i8);
1316
1317 let frameaddress = ccx.get_intrinsic(&"llvm.frameaddress");
1318 let recoverfp = ccx.get_intrinsic(&"llvm.x86.seh.recoverfp");
1319 let localrecover = ccx.get_intrinsic(&"llvm.localrecover");
1320
1321 // On all platforms, once we have the EXCEPTION_POINTERS handle as well as
1322 // the base pointer, we follow the standard layout of:
1323 //
1324 // block:
1325 // %parentfp = call i8* llvm.x86.seh.recoverfp(@rust_try_fn, %bp)
1326 // %arg = call i8* llvm.localrecover(@rust_try_fn, %parentfp, 0)
1327 // %ret = call i32 @the_real_filter_function(%ehptrs, %arg)
1328 // ret i32 %ret
1329 //
1330 // The recoverfp intrinsic is used to recover the frame pointer of the
1331 // `rust_try_fn` function, which is then in turn passed to the
1332 // `localrecover` intrinsic (pairing with the `localescape` intrinsic
1333 // mentioned above). Putting all this together means that we now have a
1334 // handle to the arguments passed into the `try` function, allowing writing
1335 // to the stack over there.
1336 //
1337 // For more info, see seh.rs in the standard library.
1338 let do_trans = |bcx: Block, ehptrs, base_pointer| {
1339 let rust_try_fn = BitCast(bcx, rust_try_fn, Type::i8p(ccx));
1340 let parentfp = Call(bcx, recoverfp, &[rust_try_fn, base_pointer],
1341 None, dloc);
1342 let arg = Call(bcx, localrecover,
1343 &[rust_try_fn, parentfp, C_i32(ccx, 0)], None, dloc);
1344 let ret = Call(bcx, rust_try_filter, &[ehptrs, arg], None, dloc);
1345 Ret(bcx, ret, dloc);
1346 };
1347
1348 if ccx.tcx().sess.target.target.arch == "x86" {
1349 // On x86 the filter function doesn't actually receive any arguments.
1350 // Instead the %ebp register contains some contextual information.
1351 //
1352 // Unfortunately I don't know of any great documentation as to what's
1353 // going on here; all I can say is that there are a few test cases in
1354 // LLVM's test suite which follow this pattern of instructions, so we
1355 // just do the same.
1356 let filter_fn_ty = tcx.mk_bare_fn(ty::BareFnTy {
1357 unsafety: hir::Unsafety::Unsafe,
1358 abi: Abi::Rust,
1359 sig: ty::Binder(ty::FnSig {
1360 inputs: vec![],
1361 output: output,
1362 variadic: false,
1363 }),
1364 });
1365 let filter_fn_ty = tcx.mk_fn(None, filter_fn_ty);
1366 gen_fn(fcx, "__rustc_try_filter", filter_fn_ty, output, &mut |bcx| {
1367 let ebp = Call(bcx, frameaddress, &[C_i32(ccx, 1)], None, dloc);
1368 let exn = InBoundsGEP(bcx, ebp, &[C_i32(ccx, -20)]);
1369 let exn = Load(bcx, BitCast(bcx, exn, Type::i8p(ccx).ptr_to()));
1370 do_trans(bcx, exn, ebp);
1371 })
1372 } else if ccx.tcx().sess.target.target.arch == "x86_64" {
1373 // Conveniently on x86_64 the EXCEPTION_POINTERS handle and base pointer
1374 // are passed in as arguments to the filter function, so we just pass
1375 // those along.
1376 let filter_fn_ty = tcx.mk_bare_fn(ty::BareFnTy {
1377 unsafety: hir::Unsafety::Unsafe,
1378 abi: Abi::Rust,
1379 sig: ty::Binder(ty::FnSig {
1380 inputs: vec![i8p, i8p],
1381 output: output,
1382 variadic: false,
1383 }),
1384 });
1385 let filter_fn_ty = tcx.mk_fn(None, filter_fn_ty);
1386 gen_fn(fcx, "__rustc_try_filter", filter_fn_ty, output, &mut |bcx| {
1387 let exn = llvm::get_param(bcx.fcx.llfn, 0);
1388 let rbp = llvm::get_param(bcx.fcx.llfn, 1);
1389 do_trans(bcx, exn, rbp);
1390 })
1391 } else {
1392 panic!("unknown target to generate a filter function")
1393 }
1394 }
1395
1396 fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
1397 span_err!(a, b, E0511, "{}", c);
1398 }
1399
1400 fn generic_simd_intrinsic<'blk, 'tcx, 'a>
1401 (bcx: Block<'blk, 'tcx>,
1402 name: &str,
1403 substs: subst::Substs<'tcx>,
1404 callee_ty: Ty<'tcx>,
1405 args: Option<&[P<hir::Expr>]>,
1406 llargs: &[ValueRef],
1407 ret_ty: Ty<'tcx>,
1408 llret_ty: Type,
1409 call_debug_location: DebugLoc,
1410 call_info: NodeIdAndSpan) -> ValueRef
1411 {
1412 // macros for error handling:
1413 macro_rules! emit_error {
1414 ($msg: tt) => {
1415 emit_error!($msg, )
1416 };
1417 ($msg: tt, $($fmt: tt)*) => {
1418 span_invalid_monomorphization_error(
1419 bcx.sess(), call_info.span,
1420 &format!(concat!("invalid monomorphization of `{}` intrinsic: ",
1421 $msg),
1422 name, $($fmt)*));
1423 }
1424 }
1425 macro_rules! require {
1426 ($cond: expr, $($fmt: tt)*) => {
1427 if !$cond {
1428 emit_error!($($fmt)*);
1429 return C_null(llret_ty)
1430 }
1431 }
1432 }
1433 macro_rules! require_simd {
1434 ($ty: expr, $position: expr) => {
1435 require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
1436 }
1437 }
1438
1439
1440
1441 let tcx = bcx.tcx();
1442 let sig = tcx.erase_late_bound_regions(callee_ty.fn_sig());
1443 let sig = infer::normalize_associated_type(tcx, &sig);
1444 let arg_tys = sig.inputs;
1445
1446 // every intrinsic takes a SIMD vector as its first argument
1447 require_simd!(arg_tys[0], "input");
1448 let in_ty = arg_tys[0];
1449 let in_elem = arg_tys[0].simd_type(tcx);
1450 let in_len = arg_tys[0].simd_size(tcx);
1451
1452 let comparison = match name {
1453 "simd_eq" => Some(hir::BiEq),
1454 "simd_ne" => Some(hir::BiNe),
1455 "simd_lt" => Some(hir::BiLt),
1456 "simd_le" => Some(hir::BiLe),
1457 "simd_gt" => Some(hir::BiGt),
1458 "simd_ge" => Some(hir::BiGe),
1459 _ => None
1460 };
1461
1462 if let Some(cmp_op) = comparison {
1463 require_simd!(ret_ty, "return");
1464
1465 let out_len = ret_ty.simd_size(tcx);
1466 require!(in_len == out_len,
1467 "expected return type with length {} (same as input type `{}`), \
1468 found `{}` with length {}",
1469 in_len, in_ty,
1470 ret_ty, out_len);
1471 require!(llret_ty.element_type().kind() == llvm::Integer,
1472 "expected return type with integer elements, found `{}` with non-integer `{}`",
1473 ret_ty,
1474 ret_ty.simd_type(tcx));
1475
1476 return compare_simd_types(bcx,
1477 llargs[0],
1478 llargs[1],
1479 in_elem,
1480 llret_ty,
1481 cmp_op,
1482 call_debug_location)
1483 }
1484
1485 if name.starts_with("simd_shuffle") {
1486 let n: usize = match name["simd_shuffle".len()..].parse() {
1487 Ok(n) => n,
1488 Err(_) => tcx.sess.span_bug(call_info.span,
1489 "bad `simd_shuffle` instruction only caught in trans?")
1490 };
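// For example, `simd_shuffle4` gives n == 4: the return vector must have
// four lanes, and each shuffle index must be a constant smaller than
// 2 * in_len (the two input vectors are treated as one concatenated vector).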
1491
1492 require_simd!(ret_ty, "return");
1493
1494 let out_len = ret_ty.simd_size(tcx);
1495 require!(out_len == n,
1496 "expected return type of length {}, found `{}` with length {}",
1497 n, ret_ty, out_len);
1498 require!(in_elem == ret_ty.simd_type(tcx),
1499 "expected return element type `{}` (element of input `{}`), \
1500 found `{}` with element type `{}`",
1501 in_elem, in_ty,
1502 ret_ty, ret_ty.simd_type(tcx));
1503
1504 let total_len = in_len as u64 * 2;
1505
1506 let vector = match args {
1507 Some(args) => &args[2],
1508 None => bcx.sess().span_bug(call_info.span,
1509 "intrinsic call with unexpected argument shape"),
1510 };
1511 let vector = match consts::const_expr(
1512 bcx.ccx(),
1513 vector,
1514 tcx.mk_substs(substs),
1515 None,
1516 consts::TrueConst::Yes, // this should probably help simd error reporting
1517 ) {
1518 Ok((vector, _)) => vector,
1519 Err(err) => bcx.sess().span_fatal(call_info.span, &err.description()),
1520 };
1521
1522 let indices: Option<Vec<_>> = (0..n)
1523 .map(|i| {
1524 let arg_idx = i;
1525 let val = const_get_elt(bcx.ccx(), vector, &[i as libc::c_uint]);
1526 let c = const_to_opt_uint(val);
1527 match c {
1528 None => {
1529 emit_error!("shuffle index #{} is not a constant", arg_idx);
1530 None
1531 }
1532 Some(idx) if idx >= total_len => {
1533 emit_error!("shuffle index #{} is out of bounds (limit {})",
1534 arg_idx, total_len);
1535 None
1536 }
1537 Some(idx) => Some(C_i32(bcx.ccx(), idx as i32)),
1538 }
1539 })
1540 .collect();
1541 let indices = match indices {
1542 Some(i) => i,
1543 None => return C_null(llret_ty)
1544 };
1545
1546 return ShuffleVector(bcx, llargs[0], llargs[1], C_vector(&indices))
1547 }
1548
1549 if name == "simd_insert" {
1550 require!(in_elem == arg_tys[2],
1551 "expected inserted type `{}` (element of input `{}`), found `{}`",
1552 in_elem, in_ty, arg_tys[2]);
1553 return InsertElement(bcx, llargs[0], llargs[2], llargs[1])
1554 }
1555 if name == "simd_extract" {
1556 require!(ret_ty == in_elem,
1557 "expected return type `{}` (element of input `{}`), found `{}`",
1558 in_elem, in_ty, ret_ty);
1559 return ExtractElement(bcx, llargs[0], llargs[1])
1560 }
1561
1562 if name == "simd_cast" {
1563 require_simd!(ret_ty, "return");
1564 let out_len = ret_ty.simd_size(tcx);
1565 require!(in_len == out_len,
1566 "expected return type with length {} (same as input type `{}`), \
1567 found `{}` with length {}",
1568 in_len, in_ty,
1569 ret_ty, out_len);
1570 // casting cares about nominal type, not just structural type
1571 let out_elem = ret_ty.simd_type(tcx);
1572
1573 if in_elem == out_elem { return llargs[0]; }
1574
1575 enum Style { Float, Int(/* is signed? */ bool), Unsupported }
1576
1577 let (in_style, in_width) = match in_elem.sty {
1578 // vectors of pointer-sized integers should've been
1579 // disallowed before here, so this unwrap is safe.
1580 ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
1581 ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
1582 ty::TyFloat(f) => (Style::Float, f.bit_width()),
1583 _ => (Style::Unsupported, 0)
1584 };
1585 let (out_style, out_width) = match out_elem.sty {
1586 ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
1587 ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
1588 ty::TyFloat(f) => (Style::Float, f.bit_width()),
1589 _ => (Style::Unsupported, 0)
1590 };
1591
1592 match (in_style, out_style) {
1593 (Style::Int(in_is_signed), Style::Int(_)) => {
1594 return match in_width.cmp(&out_width) {
1595 Ordering::Greater => Trunc(bcx, llargs[0], llret_ty),
1596 Ordering::Equal => llargs[0],
1597 Ordering::Less => if in_is_signed {
1598 SExt(bcx, llargs[0], llret_ty)
1599 } else {
1600 ZExt(bcx, llargs[0], llret_ty)
1601 }
1602 }
1603 }
1604 (Style::Int(in_is_signed), Style::Float) => {
1605 return if in_is_signed {
1606 SIToFP(bcx, llargs[0], llret_ty)
1607 } else {
1608 UIToFP(bcx, llargs[0], llret_ty)
1609 }
1610 }
1611 (Style::Float, Style::Int(out_is_signed)) => {
1612 return if out_is_signed {
1613 FPToSI(bcx, llargs[0], llret_ty)
1614 } else {
1615 FPToUI(bcx, llargs[0], llret_ty)
1616 }
1617 }
1618 (Style::Float, Style::Float) => {
1619 return match in_width.cmp(&out_width) {
1620 Ordering::Greater => FPTrunc(bcx, llargs[0], llret_ty),
1621 Ordering::Equal => llargs[0],
1622 Ordering::Less => FPExt(bcx, llargs[0], llret_ty)
1623 }
1624 }
1625 _ => {/* Unsupported. Fallthrough. */}
1626 }
1627 require!(false,
1628 "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
1629 in_ty, in_elem,
1630 ret_ty, out_elem);
1631 }
1632 macro_rules! arith {
1633 ($($name: ident: $($($p: ident),* => $call: expr),*;)*) => {
1634 $(
1635 if name == stringify!($name) {
1636 match in_elem.sty {
1637 $(
1638 $(ty::$p(_))|* => {
1639 return $call(bcx, llargs[0], llargs[1], call_debug_location)
1640 }
1641 )*
1642 _ => {},
1643 }
1644 require!(false,
1645 "unsupported operation on `{}` with element `{}`",
1646 in_ty,
1647 in_elem)
1648 })*
1649 }
1650 }
1651 arith! {
1652 simd_add: TyUint, TyInt => Add, TyFloat => FAdd;
1653 simd_sub: TyUint, TyInt => Sub, TyFloat => FSub;
1654 simd_mul: TyUint, TyInt => Mul, TyFloat => FMul;
1655 simd_div: TyFloat => FDiv;
1656 simd_shl: TyUint, TyInt => Shl;
1657 simd_shr: TyUint => LShr, TyInt => AShr;
1658 simd_and: TyUint, TyInt => And;
1659 simd_or: TyUint, TyInt => Or;
1660 simd_xor: TyUint, TyInt => Xor;
1661 }
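// So, for example, `simd_add` on a float vector lowers to a vector `fadd`,
// while `simd_shr` picks `lshr` for unsigned and `ashr` for signed element
// types; unsupported element types hit the "unsupported operation" error
// generated by the macro.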
1662 bcx.sess().span_bug(call_info.span, "unknown SIMD intrinsic");
1663 }
1664
1665 // Returns the width of an integer TypeVariant and whether it is signed.
1666 // Returns None if the type is not an integer.
1667 fn int_type_width_signed<'tcx>(sty: &ty::TypeVariants<'tcx>, ccx: &CrateContext)
1668 -> Option<(u64, bool)> {
1669 use rustc::middle::ty::{TyInt, TyUint};
1670 match *sty {
1671 TyInt(t) => Some((match t {
1672 ast::IntTy::Is => {
1673 match &ccx.tcx().sess.target.target.target_pointer_width[..] {
1674 "32" => 32,
1675 "64" => 64,
1676 tws => panic!("Unsupported target word size for isize: {}", tws),
1677 }
1678 },
1679 ast::IntTy::I8 => 8,
1680 ast::IntTy::I16 => 16,
1681 ast::IntTy::I32 => 32,
1682 ast::IntTy::I64 => 64,
1683 }, true)),
1684 TyUint(t) => Some((match t {
1685 ast::UintTy::Us => {
1686 match &ccx.tcx().sess.target.target.target_pointer_width[..] {
1687 "32" => 32,
1688 "64" => 64,
1689 tws => panic!("Unsupported target word size for usize: {}", tws),
1690 }
1691 },
1692 ast::UintTy::U8 => 8,
1693 ast::UintTy::U16 => 16,
1694 ast::UintTy::U32 => 32,
1695 ast::UintTy::U64 => 64,
1696 }, false)),
1697 _ => None,
1698 }
1699 }