// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![allow(non_upper_case_globals)]

use llvm;
use llvm::{SequentiallyConsistent, Acquire, Release, AtomicXchg, ValueRef, TypeKind};
use middle::subst;
use middle::subst::FnSpace;
use trans::adt;
use trans::base::*;
use trans::build::*;
use trans::callee;
use trans::cleanup;
use trans::cleanup::CleanupMethods;
use trans::common::*;
use trans::datum::*;
use trans::debuginfo::DebugLoc;
use trans::expr;
use trans::glue;
use trans::type_of::*;
use trans::type_of;
use trans::machine;
use trans::machine::llsize_of;
use trans::type_::Type;
use middle::ty::{self, Ty};
use syntax::abi::RustIntrinsic;
use syntax::ast;
use syntax::parse::token;
use util::ppaux::{Repr, ty_to_string};

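/// Maps the name of a Rust intrinsic to the LLVM intrinsic it lowers to
/// directly, if such a one-to-one mapping exists.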
pub fn get_simple_intrinsic(ccx: &CrateContext, item: &ast::ForeignItem) -> Option<ValueRef> {
    let name = match &token::get_ident(item.ident)[..] {
        "sqrtf32" => "llvm.sqrt.f32",
        "sqrtf64" => "llvm.sqrt.f64",
        "powif32" => "llvm.powi.f32",
        "powif64" => "llvm.powi.f64",
        "sinf32" => "llvm.sin.f32",
        "sinf64" => "llvm.sin.f64",
        "cosf32" => "llvm.cos.f32",
        "cosf64" => "llvm.cos.f64",
        "powf32" => "llvm.pow.f32",
        "powf64" => "llvm.pow.f64",
        "expf32" => "llvm.exp.f32",
        "expf64" => "llvm.exp.f64",
        "exp2f32" => "llvm.exp2.f32",
        "exp2f64" => "llvm.exp2.f64",
        "logf32" => "llvm.log.f32",
        "logf64" => "llvm.log.f64",
        "log10f32" => "llvm.log10.f32",
        "log10f64" => "llvm.log10.f64",
        "log2f32" => "llvm.log2.f32",
        "log2f64" => "llvm.log2.f64",
        "fmaf32" => "llvm.fma.f32",
        "fmaf64" => "llvm.fma.f64",
        "fabsf32" => "llvm.fabs.f32",
        "fabsf64" => "llvm.fabs.f64",
        "copysignf32" => "llvm.copysign.f32",
        "copysignf64" => "llvm.copysign.f64",
        "floorf32" => "llvm.floor.f32",
        "floorf64" => "llvm.floor.f64",
        "ceilf32" => "llvm.ceil.f32",
        "ceilf64" => "llvm.ceil.f64",
        "truncf32" => "llvm.trunc.f32",
        "truncf64" => "llvm.trunc.f64",
        "rintf32" => "llvm.rint.f32",
        "rintf64" => "llvm.rint.f64",
        "nearbyintf32" => "llvm.nearbyint.f32",
        "nearbyintf64" => "llvm.nearbyint.f64",
        "roundf32" => "llvm.round.f32",
        "roundf64" => "llvm.round.f64",
        "ctpop8" => "llvm.ctpop.i8",
        "ctpop16" => "llvm.ctpop.i16",
        "ctpop32" => "llvm.ctpop.i32",
        "ctpop64" => "llvm.ctpop.i64",
        "bswap16" => "llvm.bswap.i16",
        "bswap32" => "llvm.bswap.i32",
        "bswap64" => "llvm.bswap.i64",
        "assume" => "llvm.assume",
        _ => return None
    };
    Some(ccx.get_intrinsic(&name))
}

/// Performs late verification that intrinsics are used correctly. At present,
/// the only intrinsic that needs such verification is `transmute`.
pub fn check_intrinsics(ccx: &CrateContext) {
    let mut last_failing_id = None;
    for transmute_restriction in &*ccx.tcx().transmute_restrictions.borrow() {
        // Sometimes, a single call to transmute will push multiple
        // type pairs to test in order to exhaustively test the
        // possibility around a type parameter. If one of those fails,
        // there is no sense reporting errors on the others.
        if last_failing_id == Some(transmute_restriction.id) {
            continue;
        }

        debug!("transmute_restriction: {}", transmute_restriction.repr(ccx.tcx()));

        assert!(!ty::type_has_params(transmute_restriction.substituted_from));
        assert!(!ty::type_has_params(transmute_restriction.substituted_to));

        let llfromtype = type_of::sizing_type_of(ccx,
                                                 transmute_restriction.substituted_from);
        let lltotype = type_of::sizing_type_of(ccx,
                                               transmute_restriction.substituted_to);
        let from_type_size = machine::llbitsize_of_real(ccx, llfromtype);
        let to_type_size = machine::llbitsize_of_real(ccx, lltotype);
        if from_type_size != to_type_size {
            last_failing_id = Some(transmute_restriction.id);

            if transmute_restriction.original_from != transmute_restriction.substituted_from {
                ccx.sess().span_err(
                    transmute_restriction.span,
                    &format!("transmute called on types with potentially different sizes: \
                              {} (could be {} bit{}) to {} (could be {} bit{})",
                             ty_to_string(ccx.tcx(), transmute_restriction.original_from),
                             from_type_size as usize,
                             if from_type_size == 1 {""} else {"s"},
                             ty_to_string(ccx.tcx(), transmute_restriction.original_to),
                             to_type_size as usize,
                             if to_type_size == 1 {""} else {"s"}));
            } else {
                ccx.sess().span_err(
                    transmute_restriction.span,
                    &format!("transmute called on types with different sizes: \
                              {} ({} bit{}) to {} ({} bit{})",
                             ty_to_string(ccx.tcx(), transmute_restriction.original_from),
                             from_type_size as usize,
                             if from_type_size == 1 {""} else {"s"},
                             ty_to_string(ccx.tcx(), transmute_restriction.original_to),
                             to_type_size as usize,
                             if to_type_size == 1 {""} else {"s"}));
            }
        }
    }
    ccx.sess().abort_if_errors();
}

/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
/// add them to librustc_trans/trans/context.rs
pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                                            node: ast::NodeId,
                                            callee_ty: Ty<'tcx>,
                                            cleanup_scope: cleanup::CustomScopeIndex,
                                            args: callee::CallArgs<'a, 'tcx>,
                                            dest: expr::Dest,
                                            substs: subst::Substs<'tcx>,
                                            call_info: NodeIdAndSpan)
                                            -> Result<'blk, 'tcx> {
    let fcx = bcx.fcx;
    let ccx = fcx.ccx;
    let tcx = bcx.tcx();

    let _icx = push_ctxt("trans_intrinsic_call");

    let ret_ty = match callee_ty.sty {
        ty::ty_bare_fn(_, ref f) => {
            ty::erase_late_bound_regions(bcx.tcx(), &f.sig.output())
        }
        _ => panic!("expected bare_fn in trans_intrinsic_call")
    };
    let foreign_item = tcx.map.expect_foreign_item(node);
    let name = token::get_ident(foreign_item.ident);

    // For `transmute` we can just trans the input expr directly into dest
    if &name[..] == "transmute" {
        let llret_ty = type_of::type_of(ccx, ret_ty.unwrap());
        match args {
            callee::ArgExprs(arg_exprs) => {
                assert_eq!(arg_exprs.len(), 1);

                let (in_type, out_type) = (*substs.types.get(FnSpace, 0),
                                           *substs.types.get(FnSpace, 1));
                let llintype = type_of::type_of(ccx, in_type);
                let llouttype = type_of::type_of(ccx, out_type);

                let in_type_size = machine::llbitsize_of_real(ccx, llintype);
                let out_type_size = machine::llbitsize_of_real(ccx, llouttype);

                // This should be caught by the intrinsicck pass
                assert_eq!(in_type_size, out_type_size);

                let nonpointer_nonaggregate = |llkind: TypeKind| -> bool {
                    use llvm::TypeKind::*;
                    match llkind {
                        Half | Float | Double | X86_FP80 | FP128 |
                        PPC_FP128 | Integer | Vector | X86_MMX => true,
                        _ => false
                    }
                };

                // An approximation to which types can be directly cast via
                // LLVM's bitcast. This doesn't cover pointer -> pointer casts,
                // but does, importantly, cover SIMD types.
                let in_kind = llintype.kind();
                let ret_kind = llret_ty.kind();
                let bitcast_compatible =
                    (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || {
                        in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer
                    };

                let dest = if bitcast_compatible {
                    // if we're here, the type is scalar-like (a primitive, a
                    // SIMD type or a pointer), and so can be handled as a
                    // by-value ValueRef and can also be directly bitcast to the
                    // target type. Doing this special case makes conversions
                    // like `u32x4` -> `u64x2` much nicer for LLVM and so more
                    // efficient (these are done efficiently implicitly in C
                    // with the `__m128i` type and so this means Rust doesn't
                    // lose out there).
                    let expr = &*arg_exprs[0];
                    let datum = unpack_datum!(bcx, expr::trans(bcx, expr));
                    let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "transmute_temp"));
                    let val = if datum.kind.is_by_ref() {
                        load_ty(bcx, datum.val, datum.ty)
                    } else {
                        datum.val
                    };

                    let cast_val = BitCast(bcx, val, llret_ty);

                    match dest {
                        expr::SaveIn(d) => {
                            // this often occurs in a sequence like `Store(val,
                            // d); val2 = Load(d)`, so disappears easily.
                            Store(bcx, cast_val, d);
                        }
                        expr::Ignore => {}
                    }
                    dest
                } else {
                    // The types are too complicated to do with a by-value
                    // bitcast, so pointer cast instead. We need to cast the
                    // dest so the types work out.
                    let dest = match dest {
                        expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
                        expr::Ignore => expr::Ignore
                    };
                    bcx = expr::trans_into(bcx, &*arg_exprs[0], dest);
                    dest
                };

                fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
                fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

                return match dest {
                    expr::SaveIn(d) => Result::new(bcx, d),
                    expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
                };
            }

            _ => {
                ccx.sess().bug("expected expr as argument for transmute");
            }
        }
    }

    // Push the arguments.
    let mut llargs = Vec::new();
    bcx = callee::trans_args(bcx,
                             args,
                             callee_ty,
                             &mut llargs,
                             cleanup::CustomScope(cleanup_scope),
                             false,
                             RustIntrinsic);

    fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();

    let call_debug_location = DebugLoc::At(call_info.id, call_info.span);

    // These are the only intrinsic functions that diverge.
    if &name[..] == "abort" {
        let llfn = ccx.get_intrinsic(&("llvm.trap"));
        Call(bcx, llfn, &[], None, call_debug_location);
        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
        Unreachable(bcx);
        return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
    } else if &name[..] == "unreachable" {
        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
        Unreachable(bcx);
        return Result::new(bcx, C_nil(ccx));
    }

    let ret_ty = match ret_ty {
        ty::FnConverging(ret_ty) => ret_ty,
        ty::FnDiverging => unreachable!()
    };

    let llret_ty = type_of::type_of(ccx, ret_ty);

    // Get location to store the result. If the user does
    // not care about the result, just make a stack slot
    let llresult = match dest {
        expr::SaveIn(d) => d,
        expr::Ignore => {
            if !type_is_zero_size(ccx, ret_ty) {
                alloc_ty(bcx, ret_ty, "intrinsic_result")
            } else {
                C_undef(llret_ty.ptr_to())
            }
        }
    };

    let simple = get_simple_intrinsic(ccx, &*foreign_item);
    let llval = match (simple, &name[..]) {
        (Some(llfn), _) => {
            Call(bcx, llfn, &llargs, None, call_debug_location)
        }
        (_, "breakpoint") => {
            let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
            Call(bcx, llfn, &[], None, call_debug_location)
        }
        (_, "size_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
        }
        (_, "size_of_val") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_sized(tcx, tp_ty) {
                let info = Load(bcx, expr::get_len(bcx, llargs[0]));
                let (llsize, _) = glue::size_and_align_of_dst(bcx, tp_ty, info);
                llsize
            } else {
                let lltp_ty = type_of::type_of(ccx, tp_ty);
                C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
            }
        }
        (_, "min_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_uint(ccx, type_of::align_of(ccx, tp_ty))
        }
        (_, "min_align_of_val") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !type_is_sized(tcx, tp_ty) {
                let info = Load(bcx, expr::get_len(bcx, llargs[0]));
                let (_, llalign) = glue::size_and_align_of_dst(bcx, tp_ty, info);
                llalign
            } else {
                C_uint(ccx, type_of::align_of(ccx, tp_ty))
            }
        }
        (_, "pref_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
        }
        (_, "move_val_init") => {
            // Create a datum reflecting the value being moved.
            // Use `appropriate_mode` so that the datum is by ref
            // if the value is non-immediate. Note that, with
            // intrinsics, there are no argument cleanups to
            // concern ourselves with, so we can use an rvalue datum.
            let tp_ty = *substs.types.get(FnSpace, 0);
            let mode = appropriate_rvalue_mode(ccx, tp_ty);
            let src = Datum {
                val: llargs[1],
                ty: tp_ty,
                kind: Rvalue::new(mode)
            };
            bcx = src.store_to(bcx, llargs[0]);
            C_nil(ccx)
        }
        (_, "drop_in_place") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            glue::drop_ty(bcx, llargs[0], tp_ty, call_debug_location);
            C_nil(ccx)
        }
        (_, "type_name") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let ty_name = token::intern_and_get_ident(&ty_to_string(ccx.tcx(), tp_ty));
            C_str_slice(ccx, ty_name)
        }
        (_, "type_id") => {
            let hash = ty::hash_crate_independent(
                ccx.tcx(),
                *substs.types.get(FnSpace, 0),
                &ccx.link_meta().crate_hash);
            C_u64(ccx, hash)
        }
        (_, "init_dropped") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !return_type_is_void(ccx, tp_ty) {
                drop_done_fill_mem(bcx, llresult, tp_ty);
            }
            C_nil(ccx)
        }
        (_, "init") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !return_type_is_void(ccx, tp_ty) {
                // Just zero out the stack slot. (See comment on base::memzero for explanation)
                init_zero_mem(bcx, llresult, tp_ty);
            }
            C_nil(ccx)
        }
        // Effectively no-ops
        (_, "uninit") | (_, "forget") => {
            C_nil(ccx)
        }
        (_, "needs_drop") => {
            let tp_ty = *substs.types.get(FnSpace, 0);

            C_bool(ccx, bcx.fcx.type_needs_drop(tp_ty))
        }
        (_, "offset") => {
            let ptr = llargs[0];
            let offset = llargs[1];
            InBoundsGEP(bcx, ptr, &[offset])
        }

        (_, "copy_nonoverlapping") => {
            copy_intrinsic(bcx,
                           false,
                           false,
                           *substs.types.get(FnSpace, 0),
                           llargs[1],
                           llargs[0],
                           llargs[2],
                           call_debug_location)
        }
        (_, "copy") => {
            copy_intrinsic(bcx,
                           true,
                           false,
                           *substs.types.get(FnSpace, 0),
                           llargs[1],
                           llargs[0],
                           llargs[2],
                           call_debug_location)
        }
        (_, "write_bytes") => {
            memset_intrinsic(bcx,
                             false,
                             *substs.types.get(FnSpace, 0),
                             llargs[0],
                             llargs[1],
                             llargs[2],
                             call_debug_location)
        }

        (_, "volatile_copy_nonoverlapping_memory") => {
            copy_intrinsic(bcx,
                           false,
                           true,
                           *substs.types.get(FnSpace, 0),
                           llargs[0],
                           llargs[1],
                           llargs[2],
                           call_debug_location)
        }
        (_, "volatile_copy_memory") => {
            copy_intrinsic(bcx,
                           true,
                           true,
                           *substs.types.get(FnSpace, 0),
                           llargs[0],
                           llargs[1],
                           llargs[2],
                           call_debug_location)
        }
        (_, "volatile_set_memory") => {
            memset_intrinsic(bcx,
                             true,
                             *substs.types.get(FnSpace, 0),
                             llargs[0],
                             llargs[1],
                             llargs[2],
                             call_debug_location)
        }
        (_, "volatile_load") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
            let load = VolatileLoad(bcx, ptr);
            unsafe {
                llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty));
            }
            from_arg_ty(bcx, load, tp_ty)
        },
        (_, "volatile_store") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
            let val = to_arg_ty(bcx, llargs[1], tp_ty);
            let store = VolatileStore(bcx, val, ptr);
            unsafe {
                llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty));
            }
            C_nil(ccx)
        },

        (_, "ctlz8") => count_zeros_intrinsic(bcx,
                                              "llvm.ctlz.i8",
                                              llargs[0],
                                              call_debug_location),
        (_, "ctlz16") => count_zeros_intrinsic(bcx,
                                               "llvm.ctlz.i16",
                                               llargs[0],
                                               call_debug_location),
        (_, "ctlz32") => count_zeros_intrinsic(bcx,
                                               "llvm.ctlz.i32",
                                               llargs[0],
                                               call_debug_location),
        (_, "ctlz64") => count_zeros_intrinsic(bcx,
                                               "llvm.ctlz.i64",
                                               llargs[0],
                                               call_debug_location),
        (_, "cttz8") => count_zeros_intrinsic(bcx,
                                              "llvm.cttz.i8",
                                              llargs[0],
                                              call_debug_location),
        (_, "cttz16") => count_zeros_intrinsic(bcx,
                                               "llvm.cttz.i16",
                                               llargs[0],
                                               call_debug_location),
        (_, "cttz32") => count_zeros_intrinsic(bcx,
                                               "llvm.cttz.i32",
                                               llargs[0],
                                               call_debug_location),
        (_, "cttz64") => count_zeros_intrinsic(bcx,
                                               "llvm.cttz.i64",
                                               llargs[0],
                                               call_debug_location),

        (_, "i8_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i8", ret_ty,
                                    llargs[0], llargs[1], call_debug_location),
        (_, "i16_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i16", ret_ty,
                                    llargs[0], llargs[1], call_debug_location),
        (_, "i32_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i32", ret_ty,
                                    llargs[0], llargs[1], call_debug_location),
        (_, "i64_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i64", ret_ty,
                                    llargs[0], llargs[1], call_debug_location),

        (_, "u8_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i8", ret_ty,
                                    llargs[0], llargs[1], call_debug_location),
        (_, "u16_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i16", ret_ty,
                                    llargs[0], llargs[1], call_debug_location),
        (_, "u32_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i32", ret_ty,
                                    llargs[0], llargs[1], call_debug_location),
        (_, "u64_add_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i64", ret_ty,
                                    llargs[0], llargs[1], call_debug_location),

        (_, "i8_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i8", ret_ty,
                                    llargs[0], llargs[1], call_debug_location),
        (_, "i16_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i16", ret_ty,
                                    llargs[0], llargs[1], call_debug_location),
        (_, "i32_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i32", ret_ty,
                                    llargs[0], llargs[1], call_debug_location),
        (_, "i64_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i64", ret_ty,
                                    llargs[0], llargs[1], call_debug_location),

        (_, "u8_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i8", ret_ty,
                                    llargs[0], llargs[1], call_debug_location),
        (_, "u16_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i16", ret_ty,
                                    llargs[0], llargs[1], call_debug_location),
        (_, "u32_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i32", ret_ty,
                                    llargs[0], llargs[1], call_debug_location),
        (_, "u64_sub_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i64", ret_ty,
                                    llargs[0], llargs[1], call_debug_location),

        (_, "i8_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i8", ret_ty,
                                    llargs[0], llargs[1], call_debug_location),
        (_, "i16_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i16", ret_ty,
                                    llargs[0], llargs[1], call_debug_location),
        (_, "i32_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i32", ret_ty,
                                    llargs[0], llargs[1], call_debug_location),
        (_, "i64_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i64", ret_ty,
                                    llargs[0], llargs[1], call_debug_location),

        (_, "u8_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i8", ret_ty,
                                    llargs[0], llargs[1], call_debug_location),
        (_, "u16_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i16", ret_ty,
                                    llargs[0], llargs[1], call_debug_location),
        (_, "u32_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i32", ret_ty,
                                    llargs[0], llargs[1], call_debug_location),
        (_, "u64_mul_with_overflow") =>
            with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i64", ret_ty,
                                    llargs[0], llargs[1], call_debug_location),

        (_, "unchecked_udiv") => UDiv(bcx, llargs[0], llargs[1], call_debug_location),
        (_, "unchecked_sdiv") => SDiv(bcx, llargs[0], llargs[1], call_debug_location),
        (_, "unchecked_urem") => URem(bcx, llargs[0], llargs[1], call_debug_location),
        (_, "unchecked_srem") => SRem(bcx, llargs[0], llargs[1], call_debug_location),

        (_, "overflowing_add") => Add(bcx, llargs[0], llargs[1], call_debug_location),
        (_, "overflowing_sub") => Sub(bcx, llargs[0], llargs[1], call_debug_location),
        (_, "overflowing_mul") => Mul(bcx, llargs[0], llargs[1], call_debug_location),

        (_, "return_address") => {
            if !fcx.caller_expects_out_pointer {
                tcx.sess.span_err(call_info.span,
                                  "invalid use of `return_address` intrinsic: function \
                                   does not use out pointer");
                C_null(Type::i8p(ccx))
            } else {
                PointerCast(bcx, llvm::get_param(fcx.llfn, 0), Type::i8p(ccx))
            }
        }

        (_, "discriminant_value") => {
            let val_ty = substs.types.get(FnSpace, 0);
            match val_ty.sty {
                ty::ty_enum(..) => {
                    let repr = adt::represent_type(ccx, *val_ty);
                    adt::trans_get_discr(bcx, &*repr, llargs[0], Some(llret_ty))
                }
                _ => C_null(llret_ty)
            }
        }

        // This requires that atomic intrinsics follow a specific naming pattern:
        // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
        (_, name) if name.starts_with("atomic_") => {
            let split: Vec<&str> = name.split('_').collect();
            assert!(split.len() >= 2, "Atomic intrinsic not correct format");

            let order = if split.len() == 2 {
                llvm::SequentiallyConsistent
            } else {
                match split[2] {
                    "unordered" => llvm::Unordered,
                    "relaxed" => llvm::Monotonic,
                    "acq" => llvm::Acquire,
                    "rel" => llvm::Release,
                    "acqrel" => llvm::AcquireRelease,
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                }
            };

            match split[1] {
                "cxchg" => {
                    // See include/llvm/IR/Instructions.h for their implementation
                    // of this, I assume that it's good enough for us to use for
                    // now.
                    let strongest_failure_ordering = match order {
                        llvm::NotAtomic | llvm::Unordered =>
                            ccx.sess().fatal("cmpxchg must be atomic"),

                        llvm::Monotonic | llvm::Release =>
                            llvm::Monotonic,

                        llvm::Acquire | llvm::AcquireRelease =>
                            llvm::Acquire,

                        llvm::SequentiallyConsistent =>
                            llvm::SequentiallyConsistent
                    };

                    let tp_ty = *substs.types.get(FnSpace, 0);
                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
                    let cmp = to_arg_ty(bcx, llargs[1], tp_ty);
                    let src = to_arg_ty(bcx, llargs[2], tp_ty);
                    let res = AtomicCmpXchg(bcx, ptr, cmp, src, order,
                                            strongest_failure_ordering);
                    ExtractValue(bcx, res, 0)
                }

                "load" => {
                    let tp_ty = *substs.types.get(FnSpace, 0);
                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
                    from_arg_ty(bcx, AtomicLoad(bcx, ptr, order), tp_ty)
                }
                "store" => {
                    let tp_ty = *substs.types.get(FnSpace, 0);
                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
                    let val = to_arg_ty(bcx, llargs[1], tp_ty);
                    AtomicStore(bcx, val, ptr, order);
                    C_nil(ccx)
                }

                "fence" => {
                    AtomicFence(bcx, order, llvm::CrossThread);
                    C_nil(ccx)
                }

                "singlethreadfence" => {
                    AtomicFence(bcx, order, llvm::SingleThread);
                    C_nil(ccx)
                }

                // These are all AtomicRMW ops
                op => {
                    let atom_op = match op {
                        "xchg" => llvm::AtomicXchg,
                        "xadd" => llvm::AtomicAdd,
                        "xsub" => llvm::AtomicSub,
                        "and" => llvm::AtomicAnd,
                        "nand" => llvm::AtomicNand,
                        "or" => llvm::AtomicOr,
                        "xor" => llvm::AtomicXor,
                        "max" => llvm::AtomicMax,
                        "min" => llvm::AtomicMin,
                        "umax" => llvm::AtomicUMax,
                        "umin" => llvm::AtomicUMin,
                        _ => ccx.sess().fatal("unknown atomic operation")
                    };

                    let tp_ty = *substs.types.get(FnSpace, 0);
                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
                    let val = to_arg_ty(bcx, llargs[1], tp_ty);
                    AtomicRMW(bcx, atom_op, ptr, val, order)
                }
            }
        }

        (_, _) => ccx.sess().span_bug(foreign_item.span, "unknown intrinsic")
    };

    if val_ty(llval) != Type::void(ccx) &&
       machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
        store_ty(bcx, llval, llresult, ret_ty);
    }

    // If we made a temporary stack slot, let's clean it up
    match dest {
        expr::Ignore => {
            bcx = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location);
        }
        expr::SaveIn(_) => {}
    }

    fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

    Result::new(bcx, llresult)
}

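/// Emits a call to `llvm.memcpy` (or `llvm.memmove` when `allow_overlap` is
/// true), copying `count` values of type `tp_ty` from `src` to `dst`.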
fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              allow_overlap: bool,
                              volatile: bool,
                              tp_ty: Ty<'tcx>,
                              dst: ValueRef,
                              src: ValueRef,
                              count: ValueRef,
                              call_debug_location: DebugLoc)
                              -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());
    let name = if allow_overlap {
        if int_size == 32 {
            "llvm.memmove.p0i8.p0i8.i32"
        } else {
            "llvm.memmove.p0i8.p0i8.i64"
        }
    } else {
        if int_size == 32 {
            "llvm.memcpy.p0i8.p0i8.i32"
        } else {
            "llvm.memcpy.p0i8.p0i8.i64"
        }
    };

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx,
         llfn,
         &[dst_ptr,
           src_ptr,
           Mul(bcx, size, count, DebugLoc::None),
           align,
           C_bool(ccx, volatile)],
         None,
         call_debug_location)
}

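/// Emits a call to `llvm.memset`, filling `count` values of type `tp_ty`
/// starting at `dst` with the byte `val`.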
fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                volatile: bool,
                                tp_ty: Ty<'tcx>,
                                dst: ValueRef,
                                val: ValueRef,
                                count: ValueRef,
                                call_debug_location: DebugLoc)
                                -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let name = if machine::llbitsize_of_real(ccx, ccx.int_type()) == 32 {
        "llvm.memset.p0i8.i32"
    } else {
        "llvm.memset.p0i8.i64"
    };

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx,
         llfn,
         &[dst_ptr,
           val,
           Mul(bcx, size, count, DebugLoc::None),
           align,
           C_bool(ccx, volatile)],
         None,
         call_debug_location)
}

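/// Emits a call to an `llvm.ctlz.*` or `llvm.cttz.*` intrinsic. The second
/// argument (`is_zero_undef`) is `false`, so a zero input yields the bit
/// width of the type rather than an undefined value.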
fn count_zeros_intrinsic(bcx: Block,
                         name: &'static str,
                         val: ValueRef,
                         call_debug_location: DebugLoc)
                         -> ValueRef {
    let y = C_bool(bcx.ccx(), false);
    let llfn = bcx.ccx().get_intrinsic(&name);
    Call(bcx, llfn, &[val, y], None, call_debug_location)
}

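/// Emits a call to an `llvm.*.with.overflow.*` intrinsic and repackages the
/// `{ result, i1 }` aggregate it returns as a value of type `t`, the
/// `(result, bool)` pair the caller expects.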
fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                       name: &'static str,
                                       t: Ty<'tcx>,
                                       a: ValueRef,
                                       b: ValueRef,
                                       call_debug_location: DebugLoc)
                                       -> ValueRef {
    let llfn = bcx.ccx().get_intrinsic(&name);

    // Convert `i1` to a `bool`, and write it to the out parameter
    let val = Call(bcx, llfn, &[a, b], None, call_debug_location);
    let result = ExtractValue(bcx, val, 0);
    let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
    let ret = C_undef(type_of::type_of(bcx.ccx(), t));
    let ret = InsertValue(bcx, ret, result, 0);
    let ret = InsertValue(bcx, ret, overflow, 1);
    if type_is_immediate(bcx.ccx(), t) {
        let tmp = alloc_ty(bcx, t, "tmp");
        Store(bcx, ret, tmp);
        load_ty(bcx, tmp, t)
    } else {
        ret
    }
}