]> git.proxmox.com Git - rustc.git/blame - compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
New upstream version 1.57.0+dfsg1
[rustc.git] / compiler / rustc_codegen_ssa / src / mir / intrinsic.rs
CommitLineData
1b1a35ee
XL
1use super::operand::{OperandRef, OperandValue};
2use super::place::PlaceRef;
3use super::FunctionCx;
4use crate::common::{span_invalid_monomorphization_error, IntPredicate};
5use crate::glue;
6use crate::traits::*;
7use crate::MemFlags;
8
9use rustc_middle::ty::{self, Ty, TyCtxt};
10use rustc_span::{sym, Span};
11use rustc_target::abi::call::{FnAbi, PassMode};
12
13fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
14 bx: &mut Bx,
15 allow_overlap: bool,
16 volatile: bool,
17 ty: Ty<'tcx>,
18 dst: Bx::Value,
19 src: Bx::Value,
20 count: Bx::Value,
21) {
22 let layout = bx.layout_of(ty);
23 let size = layout.size;
24 let align = layout.align.abi;
25 let size = bx.mul(bx.const_usize(size.bytes()), count);
26 let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
27 if allow_overlap {
28 bx.memmove(dst, align, src, align, size, flags);
29 } else {
30 bx.memcpy(dst, align, src, align, size, flags);
31 }
32}
33
34fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
35 bx: &mut Bx,
36 volatile: bool,
37 ty: Ty<'tcx>,
38 dst: Bx::Value,
39 val: Bx::Value,
40 count: Bx::Value,
41) {
42 let layout = bx.layout_of(ty);
43 let size = layout.size;
44 let align = layout.align.abi;
45 let size = bx.mul(bx.const_usize(size.bytes()), count);
46 let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
47 bx.memset(dst, val, size, align, flags);
48}
49
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    /// Generates backend code for a call to a Rust intrinsic.
    ///
    /// Intrinsics with a backend-independent lowering are handled directly in
    /// the big `match` below; any name not matched falls through to the
    /// backend-specific `bx.codegen_intrinsic_call`. When the matched arm
    /// yields a value, it is stored into `llresult` (shaped according to
    /// `fn_abi.ret`) in the epilogue at the bottom; arms that write their
    /// own result (or produce none) `return` early instead.
    ///
    /// * `instance` - the monomorphized intrinsic being called.
    /// * `fn_abi`   - ABI of the call, used for the return slot layout.
    /// * `args`     - already-codegenned operands for the call.
    /// * `llresult` - pointer to the caller-provided return slot.
    /// * `span`     - source span, used for monomorphization error reporting.
    pub fn codegen_intrinsic_call(
        bx: &mut Bx,
        instance: ty::Instance<'tcx>,
        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[OperandRef<'tcx, Bx::Value>],
        llresult: Bx::Value,
        span: Span,
    ) {
        let callee_ty = instance.ty(bx.tcx(), ty::ParamEnv::reveal_all());

        // Intrinsics are always `FnDef` items; anything else is a compiler bug.
        let (def_id, substs) = match *callee_ty.kind() {
            ty::FnDef(def_id, substs) => (def_id, substs),
            _ => bug!("expected fn item type, found {}", callee_ty),
        };

        let sig = callee_ty.fn_sig(bx.tcx());
        let sig = bx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
        let arg_tys = sig.inputs();
        let ret_ty = sig.output();
        let name = bx.tcx().item_name(def_id);
        // String form of the name, used for the `atomic_*` prefix dispatch below.
        let name_str = &*name.as_str();

        let llret_ty = bx.backend_type(bx.layout_of(ret_ty));
        // View of the return slot as a place we can store into.
        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);

        let llval = match name {
            sym::assume => {
                bx.assume(args[0].immediate());
                return;
            }
            sym::abort => {
                bx.abort();
                return;
            }

            sym::va_start => bx.va_start(args[0].immediate()),
            sym::va_end => bx.va_end(args[0].immediate()),
            sym::size_of_val => {
                let tp_ty = substs.type_at(0);
                if let OperandValue::Pair(_, meta) = args[0].val {
                    // Fat pointer: the size must be computed at runtime from
                    // the pointer metadata.
                    let (llsize, _) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
                    llsize
                } else {
                    // Sized type: the size is a compile-time constant.
                    bx.const_usize(bx.layout_of(tp_ty).size.bytes())
                }
            }
            sym::min_align_of_val => {
                let tp_ty = substs.type_at(0);
                if let OperandValue::Pair(_, meta) = args[0].val {
                    // Fat pointer: alignment comes from the runtime metadata.
                    let (_, llalign) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
                    llalign
                } else {
                    bx.const_usize(bx.layout_of(tp_ty).align.abi.bytes())
                }
            }
            sym::pref_align_of
            | sym::needs_drop
            | sym::type_id
            | sym::type_name
            | sym::variant_count => {
                // These intrinsics are compile-time constants: evaluate the
                // instance and embed the resulting value directly.
                let value = bx
                    .tcx()
                    .const_eval_instance(ty::ParamEnv::reveal_all(), instance, None)
                    .unwrap();
                OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
            }
            sym::offset => {
                let ty = substs.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                // `offset` requires the result to stay within the same
                // allocation, hence the inbounds GEP.
                bx.inbounds_gep(bx.backend_type(layout), ptr, &[offset])
            }
            sym::arith_offset => {
                let ty = substs.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                // Wrapping offset: a plain (non-inbounds) GEP.
                bx.gep(bx.backend_type(layout), ptr, &[offset])
            }
            sym::copy => {
                // Note the argument order: the `copy` intrinsic is
                // (src, dst, count) while `copy_intrinsic` takes dst first.
                copy_intrinsic(
                    bx,
                    true,
                    false,
                    substs.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::write_bytes => {
                memset_intrinsic(
                    bx,
                    false,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }

            sym::volatile_copy_nonoverlapping_memory => {
                copy_intrinsic(
                    bx,
                    false,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_copy_memory => {
                copy_intrinsic(
                    bx,
                    true,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_set_memory => {
                memset_intrinsic(
                    bx,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.volatile_store(bx, dst);
                return;
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.unaligned_volatile_store(bx, dst);
                return;
            }
            // Integer arithmetic intrinsics; all require a basic integer type.
            sym::add_with_overflow
            | sym::sub_with_overflow
            | sym::mul_with_overflow
            | sym::unchecked_div
            | sym::unchecked_rem
            | sym::unchecked_shl
            | sym::unchecked_shr
            | sym::unchecked_add
            | sym::unchecked_sub
            | sym::unchecked_mul
            | sym::exact_div => {
                let ty = arg_tys[0];
                match int_type_width_signed(ty, bx.tcx()) {
                    Some((_width, signed)) => match name {
                        sym::add_with_overflow
                        | sym::sub_with_overflow
                        | sym::mul_with_overflow => {
                            let op = match name {
                                sym::add_with_overflow => OverflowOp::Add,
                                sym::sub_with_overflow => OverflowOp::Sub,
                                sym::mul_with_overflow => OverflowOp::Mul,
                                _ => bug!(),
                            };
                            let (val, overflow) =
                                bx.checked_binop(op, ty, args[0].immediate(), args[1].immediate());
                            // Convert `i1` to a `bool`, and write it to the out parameter
                            let val = bx.from_immediate(val);
                            let overflow = bx.from_immediate(overflow);

                            // The result is a (value, overflowed) pair: store
                            // each component into its field of the return slot.
                            let dest = result.project_field(bx, 0);
                            bx.store(val, dest.llval, dest.align);
                            let dest = result.project_field(bx, 1);
                            bx.store(overflow, dest.llval, dest.align);

                            return;
                        }
                        sym::exact_div => {
                            if signed {
                                bx.exactsdiv(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.exactudiv(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_div => {
                            if signed {
                                bx.sdiv(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.udiv(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_rem => {
                            if signed {
                                bx.srem(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.urem(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_shl => bx.shl(args[0].immediate(), args[1].immediate()),
                        sym::unchecked_shr => {
                            // Right shift is arithmetic for signed, logical
                            // for unsigned operands.
                            if signed {
                                bx.ashr(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.lshr(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_add => {
                            if signed {
                                bx.unchecked_sadd(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.unchecked_uadd(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_sub => {
                            if signed {
                                bx.unchecked_ssub(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.unchecked_usub(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_mul => {
                            if signed {
                                bx.unchecked_smul(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.unchecked_umul(args[0].immediate(), args[1].immediate())
                            }
                        }
                        _ => bug!(),
                    },
                    None => {
                        span_invalid_monomorphization_error(
                            bx.tcx().sess,
                            span,
                            &format!(
                                "invalid monomorphization of `{}` intrinsic: \
                                 expected basic integer type, found `{}`",
                                name, ty
                            ),
                        );
                        return;
                    }
                }
            }
            // Fast-math float arithmetic; all require a basic float type.
            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
                match float_type_width(arg_tys[0]) {
                    Some(_width) => match name {
                        sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
                        sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
                        sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
                        sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                        _ => bug!(),
                    },
                    None => {
                        span_invalid_monomorphization_error(
                            bx.tcx().sess,
                            span,
                            &format!(
                                "invalid monomorphization of `{}` intrinsic: \
                                 expected basic float type, found `{}`",
                                name, arg_tys[0]
                            ),
                        );
                        return;
                    }
                }
            }

            sym::float_to_int_unchecked => {
                // Validate both sides: the argument must be a float and the
                // return type an integer.
                if float_type_width(arg_tys[0]).is_none() {
                    span_invalid_monomorphization_error(
                        bx.tcx().sess,
                        span,
                        &format!(
                            "invalid monomorphization of `float_to_int_unchecked` \
                             intrinsic: expected basic float type, \
                             found `{}`",
                            arg_tys[0]
                        ),
                    );
                    return;
                }
                let (_width, signed) = match int_type_width_signed(ret_ty, bx.tcx()) {
                    Some(pair) => pair,
                    None => {
                        span_invalid_monomorphization_error(
                            bx.tcx().sess,
                            span,
                            &format!(
                                "invalid monomorphization of `float_to_int_unchecked` \
                                 intrinsic: expected basic integer type, \
                                 found `{}`",
                                ret_ty
                            ),
                        );
                        return;
                    }
                };
                if signed {
                    bx.fptosi(args[0].immediate(), llret_ty)
                } else {
                    bx.fptoui(args[0].immediate(), llret_ty)
                }
            }

            sym::discriminant_value => {
                if ret_ty.is_integral() {
                    args[0].deref(bx.cx()).codegen_get_discr(bx, ret_ty)
                } else {
                    span_bug!(span, "Invalid discriminant type for `{:?}`", arg_tys[0])
                }
            }

            // This requires that atomic intrinsics follow a specific naming pattern:
            // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
            name if name_str.starts_with("atomic_") => {
                use crate::common::AtomicOrdering::*;
                use crate::common::{AtomicRmwBinOp, SynchronizationScope};

                // split[1] is the operation, split[2..] the ordering suffix.
                let split: Vec<&str> = name_str.split('_').collect();

                let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
                // (success ordering, failure ordering); the failure ordering
                // is only meaningful for the compare-exchange family.
                let (order, failorder) = match split.len() {
                    2 => (SequentiallyConsistent, SequentiallyConsistent),
                    3 => match split[2] {
                        "unordered" => (Unordered, Unordered),
                        "relaxed" => (Monotonic, Monotonic),
                        "acq" => (Acquire, Acquire),
                        "rel" => (Release, Monotonic),
                        "acqrel" => (AcquireRelease, Acquire),
                        "failrelaxed" if is_cxchg => (SequentiallyConsistent, Monotonic),
                        "failacq" if is_cxchg => (SequentiallyConsistent, Acquire),
                        _ => bx.sess().fatal("unknown ordering in atomic intrinsic"),
                    },
                    4 => match (split[2], split[3]) {
                        ("acq", "failrelaxed") if is_cxchg => (Acquire, Monotonic),
                        ("acqrel", "failrelaxed") if is_cxchg => (AcquireRelease, Monotonic),
                        _ => bx.sess().fatal("unknown ordering in atomic intrinsic"),
                    },
                    _ => bx.sess().fatal("Atomic intrinsic not in correct format"),
                };

                let invalid_monomorphization = |ty| {
                    span_invalid_monomorphization_error(
                        bx.tcx().sess,
                        span,
                        &format!(
                            "invalid monomorphization of `{}` intrinsic: \
                             expected basic integer type, found `{}`",
                            name, ty
                        ),
                    );
                };

                match split[1] {
                    "cxchg" | "cxchgweak" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let weak = split[1] == "cxchgweak";
                            let mut dst = args[0].immediate();
                            let mut cmp = args[1].immediate();
                            let mut src = args[2].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
                                dst = bx.pointercast(dst, ptr_llty);
                                cmp = bx.ptrtoint(cmp, bx.type_isize());
                                src = bx.ptrtoint(src, bx.type_isize());
                            }
                            let pair = bx.atomic_cmpxchg(dst, cmp, src, order, failorder, weak);
                            // The result is a (value, succeeded) pair; unpack
                            // it and store both fields into the return slot.
                            let val = bx.extract_value(pair, 0);
                            let success = bx.extract_value(pair, 1);
                            let val = bx.from_immediate(val);
                            let success = bx.from_immediate(success);

                            let dest = result.project_field(bx, 0);
                            bx.store(val, dest.llval, dest.align);
                            let dest = result.project_field(bx, 1);
                            bx.store(success, dest.llval, dest.align);
                            return;
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "load" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let layout = bx.layout_of(ty);
                            let size = layout.size;
                            let mut source = args[0].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first...
                                let llty = bx.type_isize();
                                let ptr_llty = bx.type_ptr_to(llty);
                                source = bx.pointercast(source, ptr_llty);
                                let result = bx.atomic_load(llty, source, order, size);
                                // ... and then cast the result back to a pointer
                                bx.inttoptr(result, bx.backend_type(layout))
                            } else {
                                bx.atomic_load(bx.backend_type(layout), source, order, size)
                            }
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "store" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let size = bx.layout_of(ty).size;
                            let mut val = args[1].immediate();
                            let mut ptr = args[0].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
                                ptr = bx.pointercast(ptr, ptr_llty);
                                val = bx.ptrtoint(val, bx.type_isize());
                            }
                            bx.atomic_store(val, ptr, order, size);
                            return;
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "fence" => {
                        bx.atomic_fence(order, SynchronizationScope::CrossThread);
                        return;
                    }

                    "singlethreadfence" => {
                        bx.atomic_fence(order, SynchronizationScope::SingleThread);
                        return;
                    }

                    // These are all AtomicRMW ops
                    op => {
                        let atom_op = match op {
                            "xchg" => AtomicRmwBinOp::AtomicXchg,
                            "xadd" => AtomicRmwBinOp::AtomicAdd,
                            "xsub" => AtomicRmwBinOp::AtomicSub,
                            "and" => AtomicRmwBinOp::AtomicAnd,
                            "nand" => AtomicRmwBinOp::AtomicNand,
                            "or" => AtomicRmwBinOp::AtomicOr,
                            "xor" => AtomicRmwBinOp::AtomicXor,
                            "max" => AtomicRmwBinOp::AtomicMax,
                            "min" => AtomicRmwBinOp::AtomicMin,
                            "umax" => AtomicRmwBinOp::AtomicUMax,
                            "umin" => AtomicRmwBinOp::AtomicUMin,
                            _ => bx.sess().fatal("unknown atomic operation"),
                        };

                        let ty = substs.type_at(0);
                        // Pointers are only accepted for `xchg`, per the
                        // condition below.
                        if int_type_width_signed(ty, bx.tcx()).is_some()
                            || (ty.is_unsafe_ptr() && op == "xchg")
                        {
                            let mut ptr = args[0].immediate();
                            let mut val = args[1].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
                                ptr = bx.pointercast(ptr, ptr_llty);
                                val = bx.ptrtoint(val, bx.type_isize());
                            }
                            bx.atomic_rmw(atom_op, ptr, val, order)
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }
                }
            }

            sym::nontemporal_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.nontemporal_store(bx, dst);
                return;
            }

            sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
                let a = args[0].immediate();
                let b = args[1].immediate();
                if name == sym::ptr_guaranteed_eq {
                    bx.icmp(IntPredicate::IntEQ, a, b)
                } else {
                    bx.icmp(IntPredicate::IntNE, a, b)
                }
            }

            sym::ptr_offset_from => {
                let ty = substs.type_at(0);
                let pointee_size = bx.layout_of(ty).size;

                // This is the same sequence that Clang emits for pointer subtraction.
                // It can be neither `nsw` nor `nuw` because the input is treated as
                // unsigned but then the output is treated as signed, so neither works.
                let a = args[0].immediate();
                let b = args[1].immediate();
                let a = bx.ptrtoint(a, bx.type_isize());
                let b = bx.ptrtoint(b, bx.type_isize());
                let d = bx.sub(a, b);
                let pointee_size = bx.const_usize(pointee_size.bytes());
                // this is where the signed magic happens (notice the `s` in `exactsdiv`)
                bx.exactsdiv(d, pointee_size)
            }

            _ => {
                // Need to use backend-specific things in the implementation.
                bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
                return;
            }
        };

        // Epilogue: store the immediate produced by the matched arm into the
        // return slot, honoring a `PassMode::Cast` return ABI if present.
        if !fn_abi.ret.is_ignore() {
            if let PassMode::Cast(ty) = fn_abi.ret.mode {
                let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(&ty));
                let ptr = bx.pointercast(result.llval, ptr_llty);
                bx.store(llval, ptr, result.align);
            } else {
                OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
                    .val
                    .store(bx, result);
            }
        }
    }
}
589
590// Returns the width of an int Ty, and if it's signed or not
591// Returns None if the type is not an integer
592// FIXME: there’s multiple of this functions, investigate using some of the already existing
593// stuffs.
594fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
595 match ty.kind() {
29967ef6
XL
596 ty::Int(t) => {
597 Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
598 }
599 ty::Uint(t) => {
600 Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
601 }
1b1a35ee
XL
602 _ => None,
603 }
604}
605
606// Returns the width of a float Ty
607// Returns None if the type is not a float
608fn float_type_width(ty: Ty<'_>) -> Option<u64> {
609 match ty.kind() {
610 ty::Float(t) => Some(t.bit_width()),
611 _ => None,
612 }
613}