// compiler/rustc_codegen_ssa/src/mir/intrinsic.rs (upstream rustc 1.48.0-beta.8)
use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::FunctionCx;
use crate::common::{span_invalid_monomorphization_error, IntPredicate};
use crate::glue;
use crate::traits::*;
use crate::MemFlags;

use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::{sym, Span};
use rustc_target::abi::call::{FnAbi, PassMode};

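/// Lowers the `copy`/`copy_nonoverlapping` family of intrinsics to a single
/// memory move: the total byte count is `size_of::<T>() * count`, and
/// `allow_overlap` selects `memmove` over `memcpy`.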
fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    allow_overlap: bool,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    src: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    if allow_overlap {
        bx.memmove(dst, align, src, align, size, flags);
    } else {
        bx.memcpy(dst, align, src, align, size, flags);
    }
}

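/// Lowers `write_bytes`-style intrinsics to a `memset` of
/// `size_of::<T>() * count` bytes, optionally marked volatile.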
fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    val: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    bx.memset(dst, val, size, align, flags);
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
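    /// Lowers a call to a Rust intrinsic. The arms below cover everything
    /// that can be handled in a backend-independent way; any intrinsic that
    /// needs backend-specific machinery falls through to the wildcard arm,
    /// which delegates to the backend's own `codegen_intrinsic_call`.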
    pub fn codegen_intrinsic_call(
        bx: &mut Bx,
        instance: ty::Instance<'tcx>,
        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[OperandRef<'tcx, Bx::Value>],
        llresult: Bx::Value,
        span: Span,
    ) {
        let callee_ty = instance.ty(bx.tcx(), ty::ParamEnv::reveal_all());

        let (def_id, substs) = match *callee_ty.kind() {
            ty::FnDef(def_id, substs) => (def_id, substs),
            _ => bug!("expected fn item type, found {}", callee_ty),
        };

        let sig = callee_ty.fn_sig(bx.tcx());
        let sig = bx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
        let arg_tys = sig.inputs();
        let ret_ty = sig.output();
        let name = bx.tcx().item_name(def_id);
        let name_str = &*name.as_str();

        let llret_ty = bx.backend_type(bx.layout_of(ret_ty));
        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);

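        // Each arm either performs its own stores and returns early, or
        // evaluates to an immediate `llval` that is written back into
        // `llresult` after the match.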
        let llval = match name {
            sym::assume => {
                bx.assume(args[0].immediate());
                return;
            }
            sym::abort => {
                bx.abort();
                return;
            }

            sym::unreachable => {
                return;
            }
            sym::va_start => bx.va_start(args[0].immediate()),
            sym::va_end => bx.va_end(args[0].immediate()),
            sym::size_of_val => {
                let tp_ty = substs.type_at(0);
                if let OperandValue::Pair(_, meta) = args[0].val {
                    let (llsize, _) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
                    llsize
                } else {
                    bx.const_usize(bx.layout_of(tp_ty).size.bytes())
                }
            }
            sym::min_align_of_val => {
                let tp_ty = substs.type_at(0);
                if let OperandValue::Pair(_, meta) = args[0].val {
                    let (_, llalign) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
                    llalign
                } else {
                    bx.const_usize(bx.layout_of(tp_ty).align.abi.bytes())
                }
            }
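            // These intrinsics are all compile-time constants of the
            // monomorphized type, so they can simply be const-evaluated.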
            sym::size_of
            | sym::pref_align_of
            | sym::min_align_of
            | sym::needs_drop
            | sym::type_id
            | sym::type_name
            | sym::variant_count => {
                let value = bx
                    .tcx()
                    .const_eval_instance(ty::ParamEnv::reveal_all(), instance, None)
                    .unwrap();
                OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
            }
            // Effectively no-op
            sym::forget => {
                return;
            }
            sym::offset => {
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                bx.inbounds_gep(ptr, &[offset])
            }
            sym::arith_offset => {
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                bx.gep(ptr, &[offset])
            }

            sym::copy_nonoverlapping => {
                copy_intrinsic(
                    bx,
                    false,
                    false,
                    substs.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::copy => {
                copy_intrinsic(
                    bx,
                    true,
                    false,
                    substs.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::write_bytes => {
                memset_intrinsic(
                    bx,
                    false,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }

            sym::volatile_copy_nonoverlapping_memory => {
                copy_intrinsic(
                    bx,
                    false,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_copy_memory => {
                copy_intrinsic(
                    bx,
                    true,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_set_memory => {
                memset_intrinsic(
                    bx,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.volatile_store(bx, dst);
                return;
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.unaligned_volatile_store(bx, dst);
                return;
            }
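            // Integer arithmetic intrinsics. All of them require a basic
            // integer type; any other monomorphization is reported as an
            // error below rather than causing an ICE.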
            sym::add_with_overflow
            | sym::sub_with_overflow
            | sym::mul_with_overflow
            | sym::wrapping_add
            | sym::wrapping_sub
            | sym::wrapping_mul
            | sym::unchecked_div
            | sym::unchecked_rem
            | sym::unchecked_shl
            | sym::unchecked_shr
            | sym::unchecked_add
            | sym::unchecked_sub
            | sym::unchecked_mul
            | sym::exact_div => {
                let ty = arg_tys[0];
                match int_type_width_signed(ty, bx.tcx()) {
                    Some((_width, signed)) => match name {
                        sym::add_with_overflow
                        | sym::sub_with_overflow
                        | sym::mul_with_overflow => {
                            let op = match name {
                                sym::add_with_overflow => OverflowOp::Add,
                                sym::sub_with_overflow => OverflowOp::Sub,
                                sym::mul_with_overflow => OverflowOp::Mul,
                                _ => bug!(),
                            };
                            let (val, overflow) =
                                bx.checked_binop(op, ty, args[0].immediate(), args[1].immediate());
                            // Convert `i1` to a `bool`, and write it to the out parameter
                            let val = bx.from_immediate(val);
                            let overflow = bx.from_immediate(overflow);

                            let dest = result.project_field(bx, 0);
                            bx.store(val, dest.llval, dest.align);
                            let dest = result.project_field(bx, 1);
                            bx.store(overflow, dest.llval, dest.align);

                            return;
                        }
                        sym::wrapping_add => bx.add(args[0].immediate(), args[1].immediate()),
                        sym::wrapping_sub => bx.sub(args[0].immediate(), args[1].immediate()),
                        sym::wrapping_mul => bx.mul(args[0].immediate(), args[1].immediate()),
                        sym::exact_div => {
                            if signed {
                                bx.exactsdiv(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.exactudiv(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_div => {
                            if signed {
                                bx.sdiv(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.udiv(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_rem => {
                            if signed {
                                bx.srem(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.urem(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_shl => bx.shl(args[0].immediate(), args[1].immediate()),
                        sym::unchecked_shr => {
                            if signed {
                                bx.ashr(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.lshr(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_add => {
                            if signed {
                                bx.unchecked_sadd(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.unchecked_uadd(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_sub => {
                            if signed {
                                bx.unchecked_ssub(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.unchecked_usub(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_mul => {
                            if signed {
                                bx.unchecked_smul(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.unchecked_umul(args[0].immediate(), args[1].immediate())
                            }
                        }
                        _ => bug!(),
                    },
                    None => {
                        span_invalid_monomorphization_error(
                            bx.tcx().sess,
                            span,
                            &format!(
                                "invalid monomorphization of `{}` intrinsic: \
                                 expected basic integer type, found `{}`",
                                name, ty
                            ),
                        );
                        return;
                    }
                }
            }
            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
                match float_type_width(arg_tys[0]) {
                    Some(_width) => match name {
                        sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
                        sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
                        sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
                        sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                        _ => bug!(),
                    },
                    None => {
                        span_invalid_monomorphization_error(
                            bx.tcx().sess,
                            span,
                            &format!(
                                "invalid monomorphization of `{}` intrinsic: \
                                 expected basic float type, found `{}`",
                                name, arg_tys[0]
                            ),
                        );
                        return;
                    }
                }
            }

            sym::float_to_int_unchecked => {
                if float_type_width(arg_tys[0]).is_none() {
                    span_invalid_monomorphization_error(
                        bx.tcx().sess,
                        span,
                        &format!(
                            "invalid monomorphization of `float_to_int_unchecked` \
                             intrinsic: expected basic float type, \
                             found `{}`",
                            arg_tys[0]
                        ),
                    );
                    return;
                }
                let (_width, signed) = match int_type_width_signed(ret_ty, bx.tcx()) {
                    Some(pair) => pair,
                    None => {
                        span_invalid_monomorphization_error(
                            bx.tcx().sess,
                            span,
                            &format!(
                                "invalid monomorphization of `float_to_int_unchecked` \
                                 intrinsic: expected basic integer type, \
                                 found `{}`",
                                ret_ty
                            ),
                        );
                        return;
                    }
                };
                if signed {
                    bx.fptosi(args[0].immediate(), llret_ty)
                } else {
                    bx.fptoui(args[0].immediate(), llret_ty)
                }
            }

            sym::discriminant_value => {
                if ret_ty.is_integral() {
                    args[0].deref(bx.cx()).codegen_get_discr(bx, ret_ty)
                } else {
                    span_bug!(span, "Invalid discriminant type for `{:?}`", arg_tys[0])
                }
            }

            // This requires that atomic intrinsics follow a specific naming pattern:
            // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
            name if name_str.starts_with("atomic_") => {
                use crate::common::AtomicOrdering::*;
                use crate::common::{AtomicRmwBinOp, SynchronizationScope};

                let split: Vec<&str> = name_str.split('_').collect();

                let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
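                // Parse the ordering suffix into (success, failure) orderings.
                // Only the compare-exchange intrinsics take a distinct failure
                // ordering; "relaxed" maps to LLVM's `Monotonic`.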
                let (order, failorder) = match split.len() {
                    2 => (SequentiallyConsistent, SequentiallyConsistent),
                    3 => match split[2] {
                        "unordered" => (Unordered, Unordered),
                        "relaxed" => (Monotonic, Monotonic),
                        "acq" => (Acquire, Acquire),
                        "rel" => (Release, Monotonic),
                        "acqrel" => (AcquireRelease, Acquire),
                        "failrelaxed" if is_cxchg => (SequentiallyConsistent, Monotonic),
                        "failacq" if is_cxchg => (SequentiallyConsistent, Acquire),
                        _ => bx.sess().fatal("unknown ordering in atomic intrinsic"),
                    },
                    4 => match (split[2], split[3]) {
                        ("acq", "failrelaxed") if is_cxchg => (Acquire, Monotonic),
                        ("acqrel", "failrelaxed") if is_cxchg => (AcquireRelease, Monotonic),
                        _ => bx.sess().fatal("unknown ordering in atomic intrinsic"),
                    },
                    _ => bx.sess().fatal("Atomic intrinsic not in correct format"),
                };

                let invalid_monomorphization = |ty| {
                    span_invalid_monomorphization_error(
                        bx.tcx().sess,
                        span,
                        &format!(
                            "invalid monomorphization of `{}` intrinsic: \
                             expected basic integer type, found `{}`",
                            name, ty
                        ),
                    );
                };

                match split[1] {
                    "cxchg" | "cxchgweak" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() {
                            let weak = split[1] == "cxchgweak";
                            let pair = bx.atomic_cmpxchg(
                                args[0].immediate(),
                                args[1].immediate(),
                                args[2].immediate(),
                                order,
                                failorder,
                                weak,
                            );
                            let val = bx.extract_value(pair, 0);
                            let success = bx.extract_value(pair, 1);
                            let val = bx.from_immediate(val);
                            let success = bx.from_immediate(success);

                            let dest = result.project_field(bx, 0);
                            bx.store(val, dest.llval, dest.align);
                            let dest = result.project_field(bx, 1);
                            bx.store(success, dest.llval, dest.align);
                            return;
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "load" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() {
                            let size = bx.layout_of(ty).size;
                            bx.atomic_load(args[0].immediate(), order, size)
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "store" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() {
                            let size = bx.layout_of(ty).size;
                            bx.atomic_store(args[1].immediate(), args[0].immediate(), order, size);
                            return;
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "fence" => {
                        bx.atomic_fence(order, SynchronizationScope::CrossThread);
                        return;
                    }

                    "singlethreadfence" => {
                        bx.atomic_fence(order, SynchronizationScope::SingleThread);
                        return;
                    }

                    // These are all AtomicRMW ops
                    op => {
                        let atom_op = match op {
                            "xchg" => AtomicRmwBinOp::AtomicXchg,
                            "xadd" => AtomicRmwBinOp::AtomicAdd,
                            "xsub" => AtomicRmwBinOp::AtomicSub,
                            "and" => AtomicRmwBinOp::AtomicAnd,
                            "nand" => AtomicRmwBinOp::AtomicNand,
                            "or" => AtomicRmwBinOp::AtomicOr,
                            "xor" => AtomicRmwBinOp::AtomicXor,
                            "max" => AtomicRmwBinOp::AtomicMax,
                            "min" => AtomicRmwBinOp::AtomicMin,
                            "umax" => AtomicRmwBinOp::AtomicUMax,
                            "umin" => AtomicRmwBinOp::AtomicUMin,
                            _ => bx.sess().fatal("unknown atomic operation"),
                        };

                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() {
                            bx.atomic_rmw(atom_op, args[0].immediate(), args[1].immediate(), order)
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }
                }
            }

            sym::nontemporal_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.nontemporal_store(bx, dst);
                return;
            }

            sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
                let a = args[0].immediate();
                let b = args[1].immediate();
                if name == sym::ptr_guaranteed_eq {
                    bx.icmp(IntPredicate::IntEQ, a, b)
                } else {
                    bx.icmp(IntPredicate::IntNE, a, b)
                }
            }

            sym::ptr_offset_from => {
                let ty = substs.type_at(0);
                let pointee_size = bx.layout_of(ty).size;

                // This is the same sequence that Clang emits for pointer subtraction.
                // It can be neither `nsw` nor `nuw` because the input is treated as
                // unsigned but then the output is treated as signed, so neither works.
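                // For example, for `T = u32` (size 4), pointers 12 bytes apart
                // yield `d = 12` and a result of 3 elements. The division must
                // be exact, which is what the `exact` in `exactsdiv` asserts.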
                let a = args[0].immediate();
                let b = args[1].immediate();
                let a = bx.ptrtoint(a, bx.type_isize());
                let b = bx.ptrtoint(b, bx.type_isize());
                let d = bx.sub(a, b);
                let pointee_size = bx.const_usize(pointee_size.bytes());
                // This is where the signed magic happens (note the `s` in `exactsdiv`).
                bx.exactsdiv(d, pointee_size)
            }

            _ => {
                // Everything else requires backend-specific handling, so delegate
                // to the backend's own intrinsic lowering.
                bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
                return;
            }
        };

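        // Write the computed value back into the return place. `PassMode::Cast`
        // returns are stored through a pointer cast to the cast type; everything
        // else goes through the generic `OperandRef` store path.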
        if !fn_abi.ret.is_ignore() {
            if let PassMode::Cast(ty) = fn_abi.ret.mode {
                let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(&ty));
                let ptr = bx.pointercast(result.llval, ptr_llty);
                bx.store(llval, ptr, result.align);
            } else {
                OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
                    .val
                    .store(bx, result);
            }
        }
    }
}

// Returns the width of an int `Ty`, and whether it is signed.
// Returns `None` if the type is not an integer.
// FIXME: there are multiple versions of this function; investigate reusing one
// of the existing implementations.
fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
    match ty.kind() {
        ty::Int(t) => Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.ptr_width)), true)),
        ty::Uint(t) => Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.ptr_width)), false)),
        _ => None,
    }
}

588
589 // Returns the width of a float Ty
590 // Returns None if the type is not a float
591 fn float_type_width(ty: Ty<'_>) -> Option<u64> {
592 match ty.kind() {
593 ty::Float(t) => Some(t.bit_width()),
594 _ => None,
595 }
596 }