// compiler/rustc_codegen_ssa/src/mir/intrinsic.rs (upstream rustc 1.78.0+dfsg1)
use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::FunctionCx;
use crate::common::IntPredicate;
use crate::errors;
use crate::errors::InvalidMonomorphization;
use crate::meth;
use crate::size_of_val;
use crate::traits::*;
use crate::MemFlags;

use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::{sym, Span};
use rustc_target::abi::{
    call::{FnAbi, PassMode},
    WrappingRange,
};

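/// Lowers the `copy`-family intrinsics: computes the total byte count from the
/// element type and `count`, then emits a `memmove` when overlap is allowed or
/// a `memcpy` otherwise, optionally marked volatile.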
fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    allow_overlap: bool,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    src: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
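    // Total byte count is the element size times `count`.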
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    if allow_overlap {
        bx.memmove(dst, align, src, align, size, flags);
    } else {
        bx.memcpy(dst, align, src, align, size, flags);
    }
}

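/// Lowers `write_bytes` and `volatile_set_memory`: computes the total byte
/// count from the element type and `count`, then emits a (possibly volatile)
/// `memset`.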
fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: Bx::Value,
    val: Bx::Value,
    count: Bx::Value,
) {
    let layout = bx.layout_of(ty);
    let size = layout.size;
    let align = layout.align.abi;
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    bx.memset(dst, val, size, align, flags);
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    /// In the `Err` case, returns the instance that should be called instead.
    pub fn codegen_intrinsic_call(
        bx: &mut Bx,
        instance: ty::Instance<'tcx>,
        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[OperandRef<'tcx, Bx::Value>],
        llresult: Bx::Value,
        span: Span,
    ) -> Result<(), ty::Instance<'tcx>> {
        let callee_ty = instance.ty(bx.tcx(), ty::ParamEnv::reveal_all());

        let ty::FnDef(def_id, fn_args) = *callee_ty.kind() else {
            bug!("expected fn item type, found {}", callee_ty);
        };

        let sig = callee_ty.fn_sig(bx.tcx());
        let sig = bx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
        let arg_tys = sig.inputs();
        let ret_ty = sig.output();
        let name = bx.tcx().item_name(def_id);
        let name_str = name.as_str();

        let llret_ty = bx.backend_type(bx.layout_of(ret_ty));
        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);

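        // Each arm either produces a value (stored into `result` after the
        // match) or handles the call completely and returns early.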
        let llval = match name {
            sym::abort => {
                bx.abort();
                return Ok(());
            }

            sym::va_start => bx.va_start(args[0].immediate()),
            sym::va_end => bx.va_end(args[0].immediate()),
            sym::size_of_val => {
                let tp_ty = fn_args.type_at(0);
                let meta =
                    if let OperandValue::Pair(_, meta) = args[0].val { Some(meta) } else { None };
                let (llsize, _) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llsize
            }
            sym::min_align_of_val => {
                let tp_ty = fn_args.type_at(0);
                let meta =
                    if let OperandValue::Pair(_, meta) = args[0].val { Some(meta) } else { None };
                let (_, llalign) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llalign
            }
            sym::vtable_size | sym::vtable_align => {
                let vtable = args[0].immediate();
                let idx = match name {
                    sym::vtable_size => ty::COMMON_VTABLE_ENTRIES_SIZE,
                    sym::vtable_align => ty::COMMON_VTABLE_ENTRIES_ALIGN,
                    _ => bug!(),
                };
                let value = meth::VirtualIndex::from_index(idx).get_usize(bx, vtable);
                match name {
                    // Size is always <= isize::MAX.
                    sym::vtable_size => {
                        let size_bound = bx.data_layout().ptr_sized_integer().signed_max() as u128;
                        bx.range_metadata(value, WrappingRange { start: 0, end: size_bound });
                    }
                    // Alignment is always nonzero.
                    sym::vtable_align => {
                        bx.range_metadata(value, WrappingRange { start: 1, end: !0 })
                    }
                    _ => {}
                }
                value
            }
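            // These intrinsics are constant for a given monomorphization, so
            // evaluate them with the const evaluator and load the result.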
            sym::pref_align_of
            | sym::needs_drop
            | sym::type_id
            | sym::type_name
            | sym::variant_count => {
                let value = bx
                    .tcx()
                    .const_eval_instance(ty::ParamEnv::reveal_all(), instance, None)
                    .unwrap();
                OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
            }
            sym::arith_offset => {
                let ty = fn_args.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                bx.gep(bx.backend_type(layout), ptr, &[offset])
            }
            sym::copy => {
                copy_intrinsic(
                    bx,
                    true,
                    false,
                    fn_args.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::write_bytes => {
                memset_intrinsic(
                    bx,
                    false,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }

            sym::volatile_copy_nonoverlapping_memory => {
                copy_intrinsic(
                    bx,
                    false,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_copy_memory => {
                copy_intrinsic(
                    bx,
                    true,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_set_memory => {
                memset_intrinsic(
                    bx,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return Ok(());
            }
            sym::volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.volatile_store(bx, dst);
                return Ok(());
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.unaligned_volatile_store(bx, dst);
                return Ok(());
            }
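            // `exact_div` requires the division to have no remainder; pick the
            // signed or unsigned exact-division instruction based on the type.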
            sym::exact_div => {
                let ty = arg_tys[0];
                match int_type_width_signed(ty, bx.tcx()) {
                    Some((_width, signed)) => {
                        if signed {
                            bx.exactsdiv(args[0].immediate(), args[1].immediate())
                        } else {
                            bx.exactudiv(args[0].immediate(), args[1].immediate())
                        }
                    }
                    None => {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                            span,
                            name,
                            ty,
                        });
                        return Ok(());
                    }
                }
            }
            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
                match float_type_width(arg_tys[0]) {
                    Some(_width) => match name {
                        sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
                        sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
                        sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
                        sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                        _ => bug!(),
                    },
                    None => {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                            span,
                            name,
                            ty: arg_tys[0],
                        });
                        return Ok(());
                    }
                }
            }
            sym::fadd_algebraic
            | sym::fsub_algebraic
            | sym::fmul_algebraic
            | sym::fdiv_algebraic
            | sym::frem_algebraic => match float_type_width(arg_tys[0]) {
                Some(_width) => match name {
                    sym::fadd_algebraic => {
                        bx.fadd_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fsub_algebraic => {
                        bx.fsub_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fmul_algebraic => {
                        bx.fmul_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::fdiv_algebraic => {
                        bx.fdiv_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    sym::frem_algebraic => {
                        bx.frem_algebraic(args[0].immediate(), args[1].immediate())
                    }
                    _ => bug!(),
                },
                None => {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                        span,
                        name,
                        ty: arg_tys[0],
                    });
                    return Ok(());
                }
            },

            sym::float_to_int_unchecked => {
                if float_type_width(arg_tys[0]).is_none() {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: arg_tys[0],
                    });
                    return Ok(());
                }
                let Some((_width, signed)) = int_type_width_signed(ret_ty, bx.tcx()) else {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: ret_ty,
                    });
                    return Ok(());
                };
                if signed {
                    bx.fptosi(args[0].immediate(), llret_ty)
                } else {
                    bx.fptoui(args[0].immediate(), llret_ty)
                }
            }

            sym::discriminant_value => {
                if ret_ty.is_integral() {
                    args[0].deref(bx.cx()).codegen_get_discr(bx, ret_ty)
                } else {
                    span_bug!(span, "Invalid discriminant type for `{:?}`", arg_tys[0])
                }
            }

            // This requires that atomic intrinsics follow a specific naming pattern:
            // "atomic_<operation>[_<ordering>]"
            name if let Some(atomic) = name_str.strip_prefix("atomic_") => {
                use crate::common::AtomicOrdering::*;
                use crate::common::{AtomicRmwBinOp, SynchronizationScope};

                let Some((instruction, ordering)) = atomic.split_once('_') else {
                    bx.sess().dcx().emit_fatal(errors::MissingMemoryOrdering);
                };

                let parse_ordering = |bx: &Bx, s| match s {
                    "unordered" => Unordered,
                    "relaxed" => Relaxed,
                    "acquire" => Acquire,
                    "release" => Release,
                    "acqrel" => AcquireRelease,
                    "seqcst" => SequentiallyConsistent,
                    _ => bx.sess().dcx().emit_fatal(errors::UnknownAtomicOrdering),
                };

                let invalid_monomorphization = |ty| {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                        span,
                        name,
                        ty,
                    });
                };

                match instruction {
                    "cxchg" | "cxchgweak" => {
                        let Some((success, failure)) = ordering.split_once('_') else {
                            bx.sess().dcx().emit_fatal(errors::AtomicCompareExchange);
                        };
                        let ty = fn_args.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let weak = instruction == "cxchgweak";
                            let dst = args[0].immediate();
                            let cmp = args[1].immediate();
                            let src = args[2].immediate();
                            let (val, success) = bx.atomic_cmpxchg(
                                dst,
                                cmp,
                                src,
                                parse_ordering(bx, success),
                                parse_ordering(bx, failure),
                                weak,
                            );
                            let val = bx.from_immediate(val);
                            let success = bx.from_immediate(success);

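                            // The result is a pair: field 0 holds the previous
                            // value, field 1 the success flag.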
                            let dest = result.project_field(bx, 0);
                            bx.store(val, dest.llval, dest.align);
                            let dest = result.project_field(bx, 1);
                            bx.store(success, dest.llval, dest.align);
                        } else {
                            invalid_monomorphization(ty);
                        }
                        return Ok(());
                    }

                    "load" => {
                        let ty = fn_args.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let layout = bx.layout_of(ty);
                            let size = layout.size;
                            let source = args[0].immediate();
                            bx.atomic_load(
                                bx.backend_type(layout),
                                source,
                                parse_ordering(bx, ordering),
                                size,
                            )
                        } else {
                            invalid_monomorphization(ty);
                            return Ok(());
                        }
                    }

                    "store" => {
                        let ty = fn_args.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let size = bx.layout_of(ty).size;
                            let val = args[1].immediate();
                            let ptr = args[0].immediate();
                            bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
                        } else {
                            invalid_monomorphization(ty);
                        }
                        return Ok(());
                    }

                    "fence" => {
                        bx.atomic_fence(
                            parse_ordering(bx, ordering),
                            SynchronizationScope::CrossThread,
                        );
                        return Ok(());
                    }

                    "singlethreadfence" => {
                        bx.atomic_fence(
                            parse_ordering(bx, ordering),
                            SynchronizationScope::SingleThread,
                        );
                        return Ok(());
                    }

                    // These are all AtomicRMW ops
                    op => {
                        let atom_op = match op {
                            "xchg" => AtomicRmwBinOp::AtomicXchg,
                            "xadd" => AtomicRmwBinOp::AtomicAdd,
                            "xsub" => AtomicRmwBinOp::AtomicSub,
                            "and" => AtomicRmwBinOp::AtomicAnd,
                            "nand" => AtomicRmwBinOp::AtomicNand,
                            "or" => AtomicRmwBinOp::AtomicOr,
                            "xor" => AtomicRmwBinOp::AtomicXor,
                            "max" => AtomicRmwBinOp::AtomicMax,
                            "min" => AtomicRmwBinOp::AtomicMin,
                            "umax" => AtomicRmwBinOp::AtomicUMax,
                            "umin" => AtomicRmwBinOp::AtomicUMin,
                            _ => bx.sess().dcx().emit_fatal(errors::UnknownAtomicOperation),
                        };

                        let ty = fn_args.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let ptr = args[0].immediate();
                            let val = args[1].immediate();
                            bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
                        } else {
                            invalid_monomorphization(ty);
                            return Ok(());
                        }
                    }
                }
            }

            sym::nontemporal_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.nontemporal_store(bx, dst);
                return Ok(());
            }

            sym::ptr_guaranteed_cmp => {
                let a = args[0].immediate();
                let b = args[1].immediate();
                bx.icmp(IntPredicate::IntEQ, a, b)
            }

            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                let ty = fn_args.type_at(0);
                let pointee_size = bx.layout_of(ty).size;

                let a = args[0].immediate();
                let b = args[1].immediate();
                let a = bx.ptrtoint(a, bx.type_isize());
                let b = bx.ptrtoint(b, bx.type_isize());
                let pointee_size = bx.const_usize(pointee_size.bytes());
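                // Both intrinsics require the pointers to point into the same
                // allocated object, so the difference is an exact multiple of
                // the pointee size and exact division is valid.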
                if name == sym::ptr_offset_from {
                    // This is the same sequence that Clang emits for pointer subtraction.
                    // It can be neither `nsw` nor `nuw` because the input is treated as
                    // unsigned but then the output is treated as signed, so neither works.
                    let d = bx.sub(a, b);
                    // this is where the signed magic happens (notice the `s` in `exactsdiv`)
                    bx.exactsdiv(d, pointee_size)
                } else {
                    // The `_unsigned` version knows the relative ordering of the pointers,
                    // so can use `sub nuw` and `udiv exact` instead of dealing in signed.
                    let d = bx.unchecked_usub(a, b);
                    bx.exactudiv(d, pointee_size)
                }
            }

            _ => {
                // Need to use backend-specific things in the implementation.
                return bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
            }
        };

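        // Write the produced value into the return place. A `PassMode::Cast`
        // return is stored directly; otherwise it is rebuilt as an operand and
        // stored through the layout-aware path.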
        if !fn_abi.ret.is_ignore() {
            if let PassMode::Cast { .. } = &fn_abi.ret.mode {
                bx.store(llval, result.llval, result.align);
            } else {
                OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
                    .val
                    .store(bx, result);
            }
        }
        Ok(())
    }
}

// Returns the width of an int Ty, and whether it is signed.
// Returns None if the type is not an integer.
// FIXME: there are multiple versions of this function; investigate using one
// of the existing implementations instead.
fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
    match ty.kind() {
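        // `isize`/`usize` report no fixed bit width, so fall back to the
        // target's pointer width.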
        ty::Int(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
        }
        ty::Uint(t) => {
            Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
        }
        _ => None,
    }
}

// Returns the width of a float Ty.
// Returns None if the type is not a float.
fn float_type_width(ty: Ty<'_>) -> Option<u64> {
    match ty.kind() {
        ty::Float(t) => Some(t.bit_width()),
        _ => None,
    }
}