]> git.proxmox.com Git - rustc.git/blame - compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
New upstream version 1.61.0+dfsg1
[rustc.git] / compiler / rustc_codegen_ssa / src / mir / intrinsic.rs
CommitLineData
1b1a35ee
XL
1use super::operand::{OperandRef, OperandValue};
2use super::place::PlaceRef;
3use super::FunctionCx;
4use crate::common::{span_invalid_monomorphization_error, IntPredicate};
5use crate::glue;
6use crate::traits::*;
7use crate::MemFlags;
8
9use rustc_middle::ty::{self, Ty, TyCtxt};
10use rustc_span::{sym, Span};
11use rustc_target::abi::call::{FnAbi, PassMode};
12
13fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
14 bx: &mut Bx,
15 allow_overlap: bool,
16 volatile: bool,
17 ty: Ty<'tcx>,
18 dst: Bx::Value,
19 src: Bx::Value,
20 count: Bx::Value,
21) {
22 let layout = bx.layout_of(ty);
23 let size = layout.size;
24 let align = layout.align.abi;
25 let size = bx.mul(bx.const_usize(size.bytes()), count);
26 let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
27 if allow_overlap {
28 bx.memmove(dst, align, src, align, size, flags);
29 } else {
30 bx.memcpy(dst, align, src, align, size, flags);
31 }
32}
33
34fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
35 bx: &mut Bx,
36 volatile: bool,
37 ty: Ty<'tcx>,
38 dst: Bx::Value,
39 val: Bx::Value,
40 count: Bx::Value,
41) {
42 let layout = bx.layout_of(ty);
43 let size = layout.size;
44 let align = layout.align.abi;
45 let size = bx.mul(bx.const_usize(size.bytes()), count);
46 let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
47 bx.memset(dst, val, size, align, flags);
48}
49
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    /// Generates code for a call to a compiler intrinsic.
    ///
    /// `instance` must resolve to an intrinsic `FnDef`; anything else is a
    /// bug. Intrinsics with a backend-independent lowering are handled here;
    /// everything else falls through to the backend-specific
    /// `bx.codegen_intrinsic_call` in the final match arm.
    ///
    /// Any produced value is written to `llresult`, whose layout is given by
    /// `fn_abi.ret`. `span` is used for monomorphization-time error reporting.
    pub fn codegen_intrinsic_call(
        bx: &mut Bx,
        instance: ty::Instance<'tcx>,
        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[OperandRef<'tcx, Bx::Value>],
        llresult: Bx::Value,
        span: Span,
    ) {
        let callee_ty = instance.ty(bx.tcx(), ty::ParamEnv::reveal_all());

        let ty::FnDef(def_id, substs) = *callee_ty.kind() else {
            bug!("expected fn item type, found {}", callee_ty);
        };

        let sig = callee_ty.fn_sig(bx.tcx());
        let sig = bx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
        let arg_tys = sig.inputs();
        let ret_ty = sig.output();
        let name = bx.tcx().item_name(def_id);
        let name_str = name.as_str();

        let llret_ty = bx.backend_type(bx.layout_of(ret_ty));
        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);

        // Arms that fully handle the intrinsic (including storing any result)
        // `return` early; the remaining arms produce a backend value that is
        // stored into `result` after the match.
        let llval = match name {
            sym::assume => {
                bx.assume(args[0].immediate());
                return;
            }
            sym::abort => {
                bx.abort();
                return;
            }

            sym::va_start => bx.va_start(args[0].immediate()),
            sym::va_end => bx.va_end(args[0].immediate()),
            sym::size_of_val => {
                let tp_ty = substs.type_at(0);
                // A `Pair` operand is a fat pointer: the size of the unsized
                // tail must be computed at runtime from the metadata half.
                if let OperandValue::Pair(_, meta) = args[0].val {
                    let (llsize, _) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
                    llsize
                } else {
                    bx.const_usize(bx.layout_of(tp_ty).size.bytes())
                }
            }
            sym::min_align_of_val => {
                let tp_ty = substs.type_at(0);
                // Same fat-pointer handling as `size_of_val`, but for alignment.
                if let OperandValue::Pair(_, meta) = args[0].val {
                    let (_, llalign) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
                    llalign
                } else {
                    bx.const_usize(bx.layout_of(tp_ty).align.abi.bytes())
                }
            }
            sym::pref_align_of
            | sym::needs_drop
            | sym::type_id
            | sym::type_name
            | sym::variant_count => {
                // These intrinsics are compile-time constants: evaluate the
                // instance via const-eval and embed the resulting value.
                let value = bx
                    .tcx()
                    .const_eval_instance(ty::ParamEnv::reveal_all(), instance, None)
                    .unwrap();
                OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
            }
            sym::offset => {
                let ty = substs.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                // `offset` promises in-bounds arithmetic, hence `inbounds_gep`.
                bx.inbounds_gep(bx.backend_type(layout), ptr, &[offset])
            }
            sym::arith_offset => {
                let ty = substs.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                // Wrapping pointer arithmetic: plain `gep`, no `inbounds`.
                bx.gep(bx.backend_type(layout), ptr, &[offset])
            }
            sym::copy => {
                // `copy(src, dst, count)`: note args[1] is the destination and
                // args[0] the source. Overlap is allowed (memmove).
                copy_intrinsic(
                    bx,
                    true,
                    false,
                    substs.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::write_bytes => {
                memset_intrinsic(
                    bx,
                    false,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }

            sym::volatile_copy_nonoverlapping_memory => {
                // (dst, src, count), no overlap allowed, volatile.
                copy_intrinsic(
                    bx,
                    false,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_copy_memory => {
                // (dst, src, count), overlap allowed, volatile.
                copy_intrinsic(
                    bx,
                    true,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_set_memory => {
                memset_intrinsic(
                    bx,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.volatile_store(bx, dst);
                return;
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.unaligned_volatile_store(bx, dst);
                return;
            }
            sym::add_with_overflow
            | sym::sub_with_overflow
            | sym::mul_with_overflow
            | sym::unchecked_div
            | sym::unchecked_rem
            | sym::unchecked_shl
            | sym::unchecked_shr
            | sym::unchecked_add
            | sym::unchecked_sub
            | sym::unchecked_mul
            | sym::exact_div => {
                // All of these require a basic integer operand type; the
                // signedness decides which backend instruction is emitted.
                let ty = arg_tys[0];
                match int_type_width_signed(ty, bx.tcx()) {
                    Some((_width, signed)) => match name {
                        sym::add_with_overflow
                        | sym::sub_with_overflow
                        | sym::mul_with_overflow => {
                            let op = match name {
                                sym::add_with_overflow => OverflowOp::Add,
                                sym::sub_with_overflow => OverflowOp::Sub,
                                sym::mul_with_overflow => OverflowOp::Mul,
                                _ => bug!(),
                            };
                            let (val, overflow) =
                                bx.checked_binop(op, ty, args[0].immediate(), args[1].immediate());
                            // Convert `i1` to a `bool`, and write it to the out parameter
                            let val = bx.from_immediate(val);
                            let overflow = bx.from_immediate(overflow);

                            // Result is a (value, overflowed) pair; store both
                            // fields directly and return early.
                            let dest = result.project_field(bx, 0);
                            bx.store(val, dest.llval, dest.align);
                            let dest = result.project_field(bx, 1);
                            bx.store(overflow, dest.llval, dest.align);

                            return;
                        }
                        sym::exact_div => {
                            if signed {
                                bx.exactsdiv(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.exactudiv(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_div => {
                            if signed {
                                bx.sdiv(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.udiv(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_rem => {
                            if signed {
                                bx.srem(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.urem(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_shl => bx.shl(args[0].immediate(), args[1].immediate()),
                        sym::unchecked_shr => {
                            // Right shift is arithmetic for signed, logical for
                            // unsigned.
                            if signed {
                                bx.ashr(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.lshr(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_add => {
                            if signed {
                                bx.unchecked_sadd(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.unchecked_uadd(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_sub => {
                            if signed {
                                bx.unchecked_ssub(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.unchecked_usub(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_mul => {
                            if signed {
                                bx.unchecked_smul(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.unchecked_umul(args[0].immediate(), args[1].immediate())
                            }
                        }
                        _ => bug!(),
                    },
                    None => {
                        span_invalid_monomorphization_error(
                            bx.tcx().sess,
                            span,
                            &format!(
                                "invalid monomorphization of `{}` intrinsic: \
                                 expected basic integer type, found `{}`",
                                name, ty
                            ),
                        );
                        return;
                    }
                }
            }
            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
                // Fast-math float ops; operand must be a basic float type.
                match float_type_width(arg_tys[0]) {
                    Some(_width) => match name {
                        sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
                        sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
                        sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
                        sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                        _ => bug!(),
                    },
                    None => {
                        span_invalid_monomorphization_error(
                            bx.tcx().sess,
                            span,
                            &format!(
                                "invalid monomorphization of `{}` intrinsic: \
                                 expected basic float type, found `{}`",
                                name, arg_tys[0]
                            ),
                        );
                        return;
                    }
                }
            }

            sym::float_to_int_unchecked => {
                // Validate float argument type ...
                if float_type_width(arg_tys[0]).is_none() {
                    span_invalid_monomorphization_error(
                        bx.tcx().sess,
                        span,
                        &format!(
                            "invalid monomorphization of `float_to_int_unchecked` \
                             intrinsic: expected basic float type, \
                             found `{}`",
                            arg_tys[0]
                        ),
                    );
                    return;
                }
                // ... and integer return type before emitting the conversion.
                let Some((_width, signed)) = int_type_width_signed(ret_ty, bx.tcx()) else {
                    span_invalid_monomorphization_error(
                        bx.tcx().sess,
                        span,
                        &format!(
                            "invalid monomorphization of `float_to_int_unchecked` \
                             intrinsic: expected basic integer type, \
                             found `{}`",
                            ret_ty
                        ),
                    );
                    return;
                };
                if signed {
                    bx.fptosi(args[0].immediate(), llret_ty)
                } else {
                    bx.fptoui(args[0].immediate(), llret_ty)
                }
            }

            sym::discriminant_value => {
                if ret_ty.is_integral() {
                    args[0].deref(bx.cx()).codegen_get_discr(bx, ret_ty)
                } else {
                    span_bug!(span, "Invalid discriminant type for `{:?}`", arg_tys[0])
                }
            }

            sym::const_allocate => {
                // returns a null pointer at runtime.
                bx.const_null(bx.type_i8p())
            }

            sym::const_deallocate => {
                // nop at runtime.
                return;
            }

            // This requires that atomic intrinsics follow a specific naming pattern:
            // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
            name if name_str.starts_with("atomic_") => {
                use crate::common::AtomicOrdering::*;
                use crate::common::{AtomicRmwBinOp, SynchronizationScope};

                // split[1] is the operation, split[2..] the ordering suffix.
                let split: Vec<_> = name_str.split('_').collect();

                let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
                // cxchg variants carry a separate failure ordering; all other
                // ops use the same ordering for both slots.
                let (order, failorder) = match split.len() {
                    2 => (SequentiallyConsistent, SequentiallyConsistent),
                    3 => match split[2] {
                        "unordered" => (Unordered, Unordered),
                        "relaxed" => (Monotonic, Monotonic),
                        "acq" => (Acquire, Acquire),
                        "rel" => (Release, Monotonic),
                        "acqrel" => (AcquireRelease, Acquire),
                        "failrelaxed" if is_cxchg => (SequentiallyConsistent, Monotonic),
                        "failacq" if is_cxchg => (SequentiallyConsistent, Acquire),
                        _ => bx.sess().fatal("unknown ordering in atomic intrinsic"),
                    },
                    4 => match (split[2], split[3]) {
                        ("acq", "failrelaxed") if is_cxchg => (Acquire, Monotonic),
                        ("acqrel", "failrelaxed") if is_cxchg => (AcquireRelease, Monotonic),
                        _ => bx.sess().fatal("unknown ordering in atomic intrinsic"),
                    },
                    _ => bx.sess().fatal("Atomic intrinsic not in correct format"),
                };

                let invalid_monomorphization = |ty| {
                    span_invalid_monomorphization_error(
                        bx.tcx().sess,
                        span,
                        &format!(
                            "invalid monomorphization of `{}` intrinsic: \
                             expected basic integer type, found `{}`",
                            name, ty
                        ),
                    );
                };

                match split[1] {
                    "cxchg" | "cxchgweak" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let weak = split[1] == "cxchgweak";
                            let mut dst = args[0].immediate();
                            let mut cmp = args[1].immediate();
                            let mut src = args[2].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
                                dst = bx.pointercast(dst, ptr_llty);
                                cmp = bx.ptrtoint(cmp, bx.type_isize());
                                src = bx.ptrtoint(src, bx.type_isize());
                            }
                            let pair = bx.atomic_cmpxchg(dst, cmp, src, order, failorder, weak);
                            // Unpack the (old value, success flag) pair into
                            // the two fields of the result place.
                            let val = bx.extract_value(pair, 0);
                            let success = bx.extract_value(pair, 1);
                            let val = bx.from_immediate(val);
                            let success = bx.from_immediate(success);

                            let dest = result.project_field(bx, 0);
                            bx.store(val, dest.llval, dest.align);
                            let dest = result.project_field(bx, 1);
                            bx.store(success, dest.llval, dest.align);
                            return;
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "load" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let layout = bx.layout_of(ty);
                            let size = layout.size;
                            let mut source = args[0].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first...
                                let llty = bx.type_isize();
                                let ptr_llty = bx.type_ptr_to(llty);
                                source = bx.pointercast(source, ptr_llty);
                                let result = bx.atomic_load(llty, source, order, size);
                                // ... and then cast the result back to a pointer
                                bx.inttoptr(result, bx.backend_type(layout))
                            } else {
                                bx.atomic_load(bx.backend_type(layout), source, order, size)
                            }
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "store" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let size = bx.layout_of(ty).size;
                            let mut val = args[1].immediate();
                            let mut ptr = args[0].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
                                ptr = bx.pointercast(ptr, ptr_llty);
                                val = bx.ptrtoint(val, bx.type_isize());
                            }
                            bx.atomic_store(val, ptr, order, size);
                            return;
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "fence" => {
                        bx.atomic_fence(order, SynchronizationScope::CrossThread);
                        return;
                    }

                    "singlethreadfence" => {
                        bx.atomic_fence(order, SynchronizationScope::SingleThread);
                        return;
                    }

                    // These are all AtomicRMW ops
                    op => {
                        let atom_op = match op {
                            "xchg" => AtomicRmwBinOp::AtomicXchg,
                            "xadd" => AtomicRmwBinOp::AtomicAdd,
                            "xsub" => AtomicRmwBinOp::AtomicSub,
                            "and" => AtomicRmwBinOp::AtomicAnd,
                            "nand" => AtomicRmwBinOp::AtomicNand,
                            "or" => AtomicRmwBinOp::AtomicOr,
                            "xor" => AtomicRmwBinOp::AtomicXor,
                            "max" => AtomicRmwBinOp::AtomicMax,
                            "min" => AtomicRmwBinOp::AtomicMin,
                            "umax" => AtomicRmwBinOp::AtomicUMax,
                            "umin" => AtomicRmwBinOp::AtomicUMin,
                            _ => bx.sess().fatal("unknown atomic operation"),
                        };

                        let ty = substs.type_at(0);
                        // Pointer operands are only accepted for `xchg`.
                        if int_type_width_signed(ty, bx.tcx()).is_some()
                            || (ty.is_unsafe_ptr() && op == "xchg")
                        {
                            let mut ptr = args[0].immediate();
                            let mut val = args[1].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
                                ptr = bx.pointercast(ptr, ptr_llty);
                                val = bx.ptrtoint(val, bx.type_isize());
                            }
                            bx.atomic_rmw(atom_op, ptr, val, order)
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }
                }
            }

            sym::nontemporal_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.nontemporal_store(bx, dst);
                return;
            }

            sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
                let a = args[0].immediate();
                let b = args[1].immediate();
                if name == sym::ptr_guaranteed_eq {
                    bx.icmp(IntPredicate::IntEQ, a, b)
                } else {
                    bx.icmp(IntPredicate::IntNE, a, b)
                }
            }

            sym::ptr_offset_from => {
                let ty = substs.type_at(0);
                let pointee_size = bx.layout_of(ty).size;

                // This is the same sequence that Clang emits for pointer subtraction.
                // It can be neither `nsw` nor `nuw` because the input is treated as
                // unsigned but then the output is treated as signed, so neither works.
                let a = args[0].immediate();
                let b = args[1].immediate();
                let a = bx.ptrtoint(a, bx.type_isize());
                let b = bx.ptrtoint(b, bx.type_isize());
                let d = bx.sub(a, b);
                let pointee_size = bx.const_usize(pointee_size.bytes());
                // this is where the signed magic happens (notice the `s` in `exactsdiv`)
                bx.exactsdiv(d, pointee_size)
            }

            _ => {
                // Need to use backend-specific things in the implementation.
                bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
                return;
            }
        };

        // Store the produced value into the return place, honoring a `Cast`
        // return-pass-mode by bitcasting the destination pointer first.
        if !fn_abi.ret.is_ignore() {
            if let PassMode::Cast(ty) = fn_abi.ret.mode {
                let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(&ty));
                let ptr = bx.pointercast(result.llval, ptr_llty);
                bx.store(llval, ptr, result.align);
            } else {
                OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
                    .val
                    .store(bx, result);
            }
        }
    }
}
595
596// Returns the width of an int Ty, and if it's signed or not
597// Returns None if the type is not an integer
598// FIXME: there’s multiple of this functions, investigate using some of the already existing
599// stuffs.
600fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
601 match ty.kind() {
29967ef6
XL
602 ty::Int(t) => {
603 Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
604 }
605 ty::Uint(t) => {
606 Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
607 }
1b1a35ee
XL
608 _ => None,
609 }
610}
611
612// Returns the width of a float Ty
613// Returns None if the type is not a float
614fn float_type_width(ty: Ty<'_>) -> Option<u64> {
615 match ty.kind() {
616 ty::Float(t) => Some(t.bit_width()),
617 _ => None,
618 }
619}