use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::FunctionCx;
use crate::common::IntPredicate;
use crate::errors;
use crate::errors::InvalidMonomorphization;
use crate::glue;
use crate::meth;
use crate::traits::*;
use crate::MemFlags;

use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::{sym, Span};
use rustc_target::abi::{
    call::{FnAbi, PassMode},
    WrappingRange,
};

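// Shared lowering for the `copy`/`copy_nonoverlapping` family: the element
// count is scaled by the element size and emitted as a single memmove (when
// overlap is allowed) or memcpy. As a sketch of the arithmetic, copying 4
// elements of a type with size 8 and align 8 becomes `memmove(dst, src, 32)`
// at 8-byte alignment.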
fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    allow_overlap: bool,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    src: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    if allow_overlap {
        bx.memmove(dst, align, src, align, size, flags);
    } else {
        bx.memcpy(dst, align, src, align, size, flags);
    }
}

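// Shared lowering for `write_bytes` and `volatile_set_memory`: as in
// `copy_intrinsic` above, the byte length is `size_of::<T>() * count`, and the
// fill is emitted as a single memset.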
fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    val: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    bx.memset(dst, val, size, align, flags);
}

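// Backend-independent lowering of Rust intrinsics. Anything that needs
// backend-specific machinery falls through to `bx.codegen_intrinsic_call` in
// the catch-all arm at the bottom of the match.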
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn codegen_intrinsic_call(
        bx: &mut Bx,
        instance: ty::Instance<'tcx>,
        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[OperandRef<'tcx, Bx::Value>],
        llresult: Bx::Value,
        span: Span,
    ) {
        let callee_ty = instance.ty(bx.tcx(), ty::ParamEnv::reveal_all());

        let ty::FnDef(def_id, substs) = *callee_ty.kind() else {
            bug!("expected fn item type, found {}", callee_ty);
        };

        let sig = callee_ty.fn_sig(bx.tcx());
        let sig = bx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
        let arg_tys = sig.inputs();
        let ret_ty = sig.output();
        let name = bx.tcx().item_name(def_id);
        let name_str = name.as_str();

        let llret_ty = bx.backend_type(bx.layout_of(ret_ty));
        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);

        let llval = match name {
            sym::abort => {
                bx.abort();
                return;
            }

            sym::va_start => bx.va_start(args[0].immediate()),
            sym::va_end => bx.va_end(args[0].immediate()),
            sym::size_of_val => {
                let tp_ty = substs.type_at(0);
                if let OperandValue::Pair(_, meta) = args[0].val {
                    let (llsize, _) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
                    llsize
                } else {
                    bx.const_usize(bx.layout_of(tp_ty).size.bytes())
                }
            }
            sym::min_align_of_val => {
                let tp_ty = substs.type_at(0);
                if let OperandValue::Pair(_, meta) = args[0].val {
                    let (_, llalign) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
                    llalign
                } else {
                    bx.const_usize(bx.layout_of(tp_ty).align.abi.bytes())
                }
            }
            sym::vtable_size | sym::vtable_align => {
                let vtable = args[0].immediate();
                let idx = match name {
                    sym::vtable_size => ty::COMMON_VTABLE_ENTRIES_SIZE,
                    sym::vtable_align => ty::COMMON_VTABLE_ENTRIES_ALIGN,
                    _ => bug!(),
                };
                let value = meth::VirtualIndex::from_index(idx).get_usize(bx, vtable);
                match name {
                    // Size is always <= isize::MAX.
                    sym::vtable_size => {
                        let size_bound = bx.data_layout().ptr_sized_integer().signed_max() as u128;
                        bx.range_metadata(value, WrappingRange { start: 0, end: size_bound });
                    }
                    // Alignment is always nonzero.
                    sym::vtable_align => {
                        bx.range_metadata(value, WrappingRange { start: 1, end: !0 })
                    }
                    _ => {}
                }
                value
            }
            sym::pref_align_of
            | sym::needs_drop
            | sym::type_id
            | sym::type_name
            | sym::variant_count => {
                let value = bx
                    .tcx()
                    .const_eval_instance(ty::ParamEnv::reveal_all(), instance, None)
                    .unwrap();
                OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
            }
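            // `offset` requires the result to stay within the same allocation,
            // so it lowers to an inbounds GEP; `arith_offset` below has
            // wrapping semantics and uses a plain GEP.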
            sym::offset => {
                let ty = substs.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                bx.inbounds_gep(bx.backend_type(layout), ptr, &[offset])
            }
            sym::arith_offset => {
                let ty = substs.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                bx.gep(bx.backend_type(layout), ptr, &[offset])
            }
            sym::copy => {
                copy_intrinsic(
                    bx,
                    true,
                    false,
                    substs.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::write_bytes => {
                memset_intrinsic(
                    bx,
                    false,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }

            sym::volatile_copy_nonoverlapping_memory => {
                copy_intrinsic(
                    bx,
                    false,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_copy_memory => {
                copy_intrinsic(
                    bx,
                    true,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_set_memory => {
                memset_intrinsic(
                    bx,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.volatile_store(bx, dst);
                return;
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.unaligned_volatile_store(bx, dst);
                return;
            }
            sym::add_with_overflow
            | sym::sub_with_overflow
            | sym::mul_with_overflow
            | sym::unchecked_div
            | sym::unchecked_rem
            | sym::unchecked_shl
            | sym::unchecked_shr
            | sym::unchecked_add
            | sym::unchecked_sub
            | sym::unchecked_mul
            | sym::exact_div => {
                let ty = arg_tys[0];
                match int_type_width_signed(ty, bx.tcx()) {
                    Some((_width, signed)) => match name {
                        sym::add_with_overflow
                        | sym::sub_with_overflow
                        | sym::mul_with_overflow => {
                            let op = match name {
                                sym::add_with_overflow => OverflowOp::Add,
                                sym::sub_with_overflow => OverflowOp::Sub,
                                sym::mul_with_overflow => OverflowOp::Mul,
                                _ => bug!(),
                            };
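                            // A sketch of the semantics: `add_with_overflow::<u8>(200, 100)`
                            // yields `(44, true)`, since 300 wraps modulo 256.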
                            let (val, overflow) =
                                bx.checked_binop(op, ty, args[0].immediate(), args[1].immediate());
                            // Convert `i1` to a `bool`, and write it to the out parameter
                            let val = bx.from_immediate(val);
                            let overflow = bx.from_immediate(overflow);

                            let dest = result.project_field(bx, 0);
                            bx.store(val, dest.llval, dest.align);
                            let dest = result.project_field(bx, 1);
                            bx.store(overflow, dest.llval, dest.align);

                            return;
                        }
                        sym::exact_div => {
                            if signed {
                                bx.exactsdiv(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.exactudiv(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_div => {
                            if signed {
                                bx.sdiv(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.udiv(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_rem => {
                            if signed {
                                bx.srem(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.urem(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_shl => bx.shl(args[0].immediate(), args[1].immediate()),
                        sym::unchecked_shr => {
                            if signed {
                                bx.ashr(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.lshr(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_add => {
                            if signed {
                                bx.unchecked_sadd(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.unchecked_uadd(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_sub => {
                            if signed {
                                bx.unchecked_ssub(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.unchecked_usub(args[0].immediate(), args[1].immediate())
                            }
                        }
                        sym::unchecked_mul => {
                            if signed {
                                bx.unchecked_smul(args[0].immediate(), args[1].immediate())
                            } else {
                                bx.unchecked_umul(args[0].immediate(), args[1].immediate())
                            }
                        }
                        _ => bug!(),
                    },
                    None => {
                        bx.tcx().sess.emit_err(InvalidMonomorphization::BasicIntegerType { span, name, ty });
                        return;
                    }
                }
            }
            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
                match float_type_width(arg_tys[0]) {
                    Some(_width) => match name {
                        sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
                        sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
                        sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
                        sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                        _ => bug!(),
                    },
                    None => {
                        bx.tcx().sess.emit_err(InvalidMonomorphization::BasicFloatType { span, name, ty: arg_tys[0] });
                        return;
                    }
                }
            }

            sym::float_to_int_unchecked => {
                if float_type_width(arg_tys[0]).is_none() {
                    bx.tcx().sess.emit_err(InvalidMonomorphization::FloatToIntUnchecked { span, ty: arg_tys[0] });
                    return;
                }
                let Some((_width, signed)) = int_type_width_signed(ret_ty, bx.tcx()) else {
                    bx.tcx().sess.emit_err(InvalidMonomorphization::FloatToIntUnchecked { span, ty: ret_ty });
                    return;
                };
                if signed {
                    bx.fptosi(args[0].immediate(), llret_ty)
                } else {
                    bx.fptoui(args[0].immediate(), llret_ty)
                }
            }

            sym::discriminant_value => {
                if ret_ty.is_integral() {
                    args[0].deref(bx.cx()).codegen_get_discr(bx, ret_ty)
                } else {
                    span_bug!(span, "Invalid discriminant type for `{:?}`", arg_tys[0])
                }
            }

            sym::const_allocate => {
                // `const_allocate` is only meaningful during const evaluation;
                // at runtime it just returns a null pointer.
                bx.const_null(bx.type_i8p())
            }

            sym::const_deallocate => {
                // Likewise a no-op at runtime.
                return;
            }

            // This requires that atomic intrinsics follow a specific naming pattern:
            // "atomic_<operation>[_<ordering>]"
            name if let Some(atomic) = name_str.strip_prefix("atomic_") => {
                use crate::common::AtomicOrdering::*;
                use crate::common::{AtomicRmwBinOp, SynchronizationScope};

                let Some((instruction, ordering)) = atomic.split_once('_') else {
                    bx.sess().emit_fatal(errors::MissingMemoryOrdering);
                };

                let parse_ordering = |bx: &Bx, s| match s {
                    "unordered" => Unordered,
                    "relaxed" => Relaxed,
                    "acquire" => Acquire,
                    "release" => Release,
                    "acqrel" => AcquireRelease,
                    "seqcst" => SequentiallyConsistent,
                    _ => bx.sess().emit_fatal(errors::UnknownAtomicOrdering),
                };

                let invalid_monomorphization = |ty| {
                    bx.tcx().sess.emit_err(InvalidMonomorphization::BasicIntegerType { span, name, ty });
                };

                match instruction {
                    "cxchg" | "cxchgweak" => {
                        let Some((success, failure)) = ordering.split_once('_') else {
                            bx.sess().emit_fatal(errors::AtomicCompareExchange);
                        };
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let weak = instruction == "cxchgweak";
                            let mut dst = args[0].immediate();
                            let mut cmp = args[1].immediate();
                            let mut src = args[2].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
                                dst = bx.pointercast(dst, ptr_llty);
                                cmp = bx.ptrtoint(cmp, bx.type_isize());
                                src = bx.ptrtoint(src, bx.type_isize());
                            }
                            let pair = bx.atomic_cmpxchg(
                                dst,
                                cmp,
                                src,
                                parse_ordering(bx, success),
                                parse_ordering(bx, failure),
                                weak,
                            );
                            let val = bx.extract_value(pair, 0);
                            let success = bx.extract_value(pair, 1);
                            let val = bx.from_immediate(val);
                            let success = bx.from_immediate(success);

                            let dest = result.project_field(bx, 0);
                            bx.store(val, dest.llval, dest.align);
                            let dest = result.project_field(bx, 1);
                            bx.store(success, dest.llval, dest.align);
                            return;
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

425 "load" => {
426 let ty = substs.type_at(0);
fc512014
XL
427 if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
428 let layout = bx.layout_of(ty);
429 let size = layout.size;
430 let mut source = args[0].immediate();
431 if ty.is_unsafe_ptr() {
432 // Some platforms do not support atomic operations on pointers,
433 // so we cast to integer first...
136023e0
XL
434 let llty = bx.type_isize();
435 let ptr_llty = bx.type_ptr_to(llty);
fc512014 436 source = bx.pointercast(source, ptr_llty);
064997fb 437 let result = bx.atomic_load(llty, source, parse_ordering(bx, ordering), size);
fc512014
XL
438 // ... and then cast the result back to a pointer
439 bx.inttoptr(result, bx.backend_type(layout))
440 } else {
064997fb 441 bx.atomic_load(bx.backend_type(layout), source, parse_ordering(bx, ordering), size)
fc512014 442 }
1b1a35ee
XL
443 } else {
444 return invalid_monomorphization(ty);
445 }
446 }
447
448 "store" => {
449 let ty = substs.type_at(0);
fc512014 450 if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
1b1a35ee 451 let size = bx.layout_of(ty).size;
fc512014
XL
452 let mut val = args[1].immediate();
453 let mut ptr = args[0].immediate();
454 if ty.is_unsafe_ptr() {
455 // Some platforms do not support atomic operations on pointers,
456 // so we cast to integer first.
457 let ptr_llty = bx.type_ptr_to(bx.type_isize());
458 ptr = bx.pointercast(ptr, ptr_llty);
459 val = bx.ptrtoint(val, bx.type_isize());
460 }
064997fb 461 bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
1b1a35ee
XL
462 return;
463 } else {
464 return invalid_monomorphization(ty);
465 }
466 }
467
468 "fence" => {
064997fb 469 bx.atomic_fence(parse_ordering(bx, ordering), SynchronizationScope::CrossThread);
1b1a35ee
XL
470 return;
471 }
472
473 "singlethreadfence" => {
064997fb 474 bx.atomic_fence(parse_ordering(bx, ordering), SynchronizationScope::SingleThread);
1b1a35ee
XL
475 return;
476 }
477
                    // These are all AtomicRMW ops.
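                    // For example, `atomic_xadd_relaxed` lowers to a fetch-add
                    // (`AtomicRmwBinOp::AtomicAdd`) with `Relaxed` ordering.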
                    op => {
                        let atom_op = match op {
                            "xchg" => AtomicRmwBinOp::AtomicXchg,
                            "xadd" => AtomicRmwBinOp::AtomicAdd,
                            "xsub" => AtomicRmwBinOp::AtomicSub,
                            "and" => AtomicRmwBinOp::AtomicAnd,
                            "nand" => AtomicRmwBinOp::AtomicNand,
                            "or" => AtomicRmwBinOp::AtomicOr,
                            "xor" => AtomicRmwBinOp::AtomicXor,
                            "max" => AtomicRmwBinOp::AtomicMax,
                            "min" => AtomicRmwBinOp::AtomicMin,
                            "umax" => AtomicRmwBinOp::AtomicUMax,
                            "umin" => AtomicRmwBinOp::AtomicUMin,
                            _ => bx.sess().emit_fatal(errors::UnknownAtomicOperation),
                        };

                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let mut ptr = args[0].immediate();
                            let mut val = args[1].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
                                ptr = bx.pointercast(ptr, ptr_llty);
                                val = bx.ptrtoint(val, bx.type_isize());
                            }
                            bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }
                }
            }

            sym::nontemporal_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.nontemporal_store(bx, dst);
                return;
            }

            sym::ptr_guaranteed_cmp => {
                let a = args[0].immediate();
                let b = args[1].immediate();
                bx.icmp(IntPredicate::IntEQ, a, b)
            }

            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                let ty = substs.type_at(0);
                let pointee_size = bx.layout_of(ty).size;

                let a = args[0].immediate();
                let b = args[1].immediate();
                let a = bx.ptrtoint(a, bx.type_isize());
                let b = bx.ptrtoint(b, bx.type_isize());
                let pointee_size = bx.const_usize(pointee_size.bytes());
                if name == sym::ptr_offset_from {
                    // This is the same sequence that Clang emits for pointer subtraction.
                    // It can be neither `nsw` nor `nuw` because the input is treated as
                    // unsigned but then the output is treated as signed, so neither works.
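                    // A sketch: for `T` with size 4, `a = 0x1010`, `b = 0x1000`,
                    // this computes (0x1010 - 0x1000) / 4 = 4 elements.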
                    let d = bx.sub(a, b);
                    // This is where the signed magic happens (notice the `s` in `exactsdiv`).
                    bx.exactsdiv(d, pointee_size)
                } else {
                    // The `_unsigned` version knows the relative ordering of the pointers,
                    // so it can use `sub nuw` and `udiv exact` instead of dealing in signed.
                    let d = bx.unchecked_usub(a, b);
                    bx.exactudiv(d, pointee_size)
                }
            }

            _ => {
                // Need to use backend-specific things in the implementation.
                bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
                return;
            }
        };

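        // Write the computed value back into the return place, honoring any
        // ABI-mandated cast of the return type.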
        if !fn_abi.ret.is_ignore() {
            if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
                let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(ty));
                let ptr = bx.pointercast(result.llval, ptr_llty);
                bx.store(llval, ptr, result.align);
            } else {
                OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
                    .val
                    .store(bx, result);
            }
        }
    }
}

// Returns the width of an int Ty, and whether it's signed or not.
// Returns None if the type is not an integer.
// FIXME: there are multiple versions of this function; investigate reusing one
// of the existing ones.
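// For example, `i32` yields `Some((32, true))`; `usize` yields the target's
// pointer width (a sketch: `Some((64, false))` on a 64-bit target); any
// non-integer type yields `None`.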
fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
    match ty.kind() {
        ty::Int(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
        }
        ty::Uint(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
        }
        _ => None,
    }
}

// Returns the width of a float Ty
// Returns None if the type is not a float
fn float_type_width(ty: Ty<'_>) -> Option<u64> {
    match ty.kind() {
        ty::Float(t) => Some(t.bit_width()),
        _ => None,
    }
}