]> git.proxmox.com Git - rustc.git/blame - compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
New upstream version 1.68.2+dfsg1
[rustc.git] / compiler / rustc_codegen_cranelift / src / intrinsics / mod.rs
CommitLineData
29967ef6
XL
1//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
2//! and LLVM intrinsics that have symbol names starting with `llvm.`.
3
064997fb
FG
/// Destructures the operand slice of an intrinsic call into one named
/// [`CValue`] binding per expected argument.
///
/// Expands to a single `let` that pattern-matches `$args` against the exact
/// arity given in the parenthesized list; on an arity mismatch it calls
/// [`bug_on_incorrect_arg_count`], which ICEs (`-> !`), so the bindings are
/// always initialized on the fall-through path.
macro_rules! intrinsic_args {
    ($fx:expr, $args:expr => ($($arg:tt),*); $intrinsic:expr) => {
        // `unused_parens` fires for the single-argument case `(a)`.
        #[allow(unused_parens)]
        let ($($arg),*) = if let [$($arg),*] = $args {
            ($(codegen_operand($fx, $arg)),*)
        } else {
            $crate::intrinsics::bug_on_incorrect_arg_count($intrinsic);
        };
    }
}
14
5e7ed085
FG
15mod cpuid;
16mod llvm;
9c376795
FG
17mod llvm_aarch64;
18mod llvm_x86;
5e7ed085 19mod simd;
29967ef6 20
5e7ed085
FG
21pub(crate) use cpuid::codegen_cpuid_call;
22pub(crate) use llvm::codegen_llvm_intrinsic_call;
23
24use rustc_middle::ty::print::with_no_trimmed_paths;
25use rustc_middle::ty::subst::SubstsRef;
26use rustc_span::symbol::{kw, sym, Symbol};
27
28use crate::prelude::*;
29use cranelift_codegen::ir::AtomicRmwOp;
30
064997fb
FG
/// Reports an internal compiler error when an intrinsic is invoked with the
/// wrong number of arguments. Never returns; used as the failure arm of the
/// `intrinsic_args!` macro above.
fn bug_on_incorrect_arg_count(intrinsic: impl std::fmt::Display) -> ! {
    bug!("wrong number of args for intrinsic {}", intrinsic);
}
34
5e7ed085
FG
35fn report_atomic_type_validation_error<'tcx>(
36 fx: &mut FunctionCx<'_, '_, 'tcx>,
37 intrinsic: Symbol,
38 span: Span,
39 ty: Ty<'tcx>,
40) {
41 fx.tcx.sess.span_err(
42 span,
43 &format!(
44 "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
45 intrinsic, ty
46 ),
47 );
48 // Prevent verifier error
f2b60f7d 49 fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
29967ef6
XL
50}
51
29967ef6 52pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
c295e0f8
XL
53 let (element, count) = match layout.abi {
54 Abi::Vector { element, count } => (element, count),
29967ef6
XL
55 _ => unreachable!(),
56 };
57
f2b60f7d 58 match scalar_to_clif_type(tcx, element).by(u32::try_from(count).unwrap()) {
29967ef6
XL
59 // Cranelift currently only implements icmp for 128bit vectors.
60 Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
61 _ => None,
62 }
63}
64
6a06907d
XL
65fn simd_for_each_lane<'tcx>(
66 fx: &mut FunctionCx<'_, '_, 'tcx>,
29967ef6
XL
67 val: CValue<'tcx>,
68 ret: CPlace<'tcx>,
5e7ed085 69 f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value) -> Value,
29967ef6
XL
70) {
71 let layout = val.layout();
72
5869c6ff
XL
73 let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
74 let lane_layout = fx.layout_of(lane_ty);
75 let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
76 let ret_lane_layout = fx.layout_of(ret_lane_ty);
29967ef6
XL
77 assert_eq!(lane_count, ret_lane_count);
78
79 for lane_idx in 0..lane_count {
94222f64 80 let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
29967ef6 81
5e7ed085
FG
82 let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, lane);
83 let res_lane = CValue::by_val(res_lane, ret_lane_layout);
29967ef6 84
94222f64 85 ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
29967ef6
XL
86 }
87}
88
2b03887a
FG
89fn simd_pair_for_each_lane_typed<'tcx>(
90 fx: &mut FunctionCx<'_, '_, 'tcx>,
91 x: CValue<'tcx>,
92 y: CValue<'tcx>,
93 ret: CPlace<'tcx>,
94 f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, CValue<'tcx>, CValue<'tcx>) -> CValue<'tcx>,
95) {
96 assert_eq!(x.layout(), y.layout());
97 let layout = x.layout();
98
99 let (lane_count, _lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
100 let (ret_lane_count, _ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
101 assert_eq!(lane_count, ret_lane_count);
102
103 for lane_idx in 0..lane_count {
104 let x_lane = x.value_lane(fx, lane_idx);
105 let y_lane = y.value_lane(fx, lane_idx);
106
107 let res_lane = f(fx, x_lane, y_lane);
108
109 ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
110 }
111}
112
6a06907d
XL
113fn simd_pair_for_each_lane<'tcx>(
114 fx: &mut FunctionCx<'_, '_, 'tcx>,
29967ef6
XL
115 x: CValue<'tcx>,
116 y: CValue<'tcx>,
117 ret: CPlace<'tcx>,
5e7ed085 118 f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value, Value) -> Value,
29967ef6
XL
119) {
120 assert_eq!(x.layout(), y.layout());
121 let layout = x.layout();
122
5869c6ff
XL
123 let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
124 let lane_layout = fx.layout_of(lane_ty);
125 let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
126 let ret_lane_layout = fx.layout_of(ret_lane_ty);
29967ef6
XL
127 assert_eq!(lane_count, ret_lane_count);
128
94222f64
XL
129 for lane_idx in 0..lane_count {
130 let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
131 let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);
29967ef6 132
5e7ed085
FG
133 let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, x_lane, y_lane);
134 let res_lane = CValue::by_val(res_lane, ret_lane_layout);
29967ef6 135
94222f64 136 ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
29967ef6
XL
137 }
138}
139
6a06907d
XL
140fn simd_reduce<'tcx>(
141 fx: &mut FunctionCx<'_, '_, 'tcx>,
fc512014 142 val: CValue<'tcx>,
94222f64 143 acc: Option<Value>,
fc512014 144 ret: CPlace<'tcx>,
5e7ed085 145 f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Value, Value) -> Value,
fc512014 146) {
5869c6ff
XL
147 let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
148 let lane_layout = fx.layout_of(lane_ty);
fc512014
XL
149 assert_eq!(lane_layout, ret.layout());
150
94222f64
XL
151 let (mut res_val, start_lane) =
152 if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
153 for lane_idx in start_lane..lane_count {
154 let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
5e7ed085 155 res_val = f(fx, lane_layout.ty, res_val, lane);
fc512014
XL
156 }
157 let res = CValue::by_val(res_val, lane_layout);
158 ret.write_cvalue(fx, res);
159}
160
94222f64 161// FIXME move all uses to `simd_reduce`
6a06907d
XL
162fn simd_reduce_bool<'tcx>(
163 fx: &mut FunctionCx<'_, '_, 'tcx>,
fc512014
XL
164 val: CValue<'tcx>,
165 ret: CPlace<'tcx>,
5e7ed085 166 f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
fc512014 167) {
5869c6ff 168 let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
fc512014
XL
169 assert!(ret.layout().ty.is_bool());
170
94222f64 171 let res_val = val.value_lane(fx, 0).load_scalar(fx);
fc512014
XL
172 let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
173 for lane_idx in 1..lane_count {
94222f64 174 let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
fc512014
XL
175 let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
176 res_val = f(fx, res_val, lane);
177 }
94222f64
XL
178 let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
179 fx.bcx.ins().ireduce(types::I8, res_val)
180 } else {
181 res_val
182 };
fc512014
XL
183 let res = CValue::by_val(res_val, ret.layout());
184 ret.write_cvalue(fx, res);
185}
186
29967ef6 187fn bool_to_zero_or_max_uint<'tcx>(
6a06907d 188 fx: &mut FunctionCx<'_, '_, 'tcx>,
5e7ed085 189 ty: Ty<'tcx>,
29967ef6 190 val: Value,
5e7ed085
FG
191) -> Value {
192 let ty = fx.clif_type(ty).unwrap();
29967ef6
XL
193
194 let int_ty = match ty {
195 types::F32 => types::I32,
196 types::F64 => types::I64,
197 ty => ty,
198 };
199
9c376795 200 let mut res = fx.bcx.ins().bmask(int_ty, val);
29967ef6
XL
201
202 if ty.is_float() {
203 res = fx.bcx.ins().bitcast(ty, res);
204 }
205
5e7ed085 206 res
29967ef6
XL
207}
208
209pub(crate) fn codegen_intrinsic_call<'tcx>(
6a06907d 210 fx: &mut FunctionCx<'_, '_, 'tcx>,
29967ef6
XL
211 instance: Instance<'tcx>,
212 args: &[mir::Operand<'tcx>],
923072b8
FG
213 destination: CPlace<'tcx>,
214 target: Option<BasicBlock>,
215 source_info: mir::SourceInfo,
29967ef6 216) {
c295e0f8 217 let intrinsic = fx.tcx.item_name(instance.def_id());
29967ef6
XL
218 let substs = instance.substs;
219
923072b8
FG
220 let target = if let Some(target) = target {
221 target
222 } else {
223 // Insert non returning intrinsics here
224 match intrinsic {
225 sym::abort => {
226 fx.bcx.ins().trap(TrapCode::User(0));
29967ef6 227 }
923072b8
FG
228 sym::transmute => {
229 crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
230 }
f2b60f7d 231 _ => unimplemented!("unsupported intrinsic {}", intrinsic),
29967ef6 232 }
923072b8 233 return;
29967ef6
XL
234 };
235
17df50a5 236 if intrinsic.as_str().starts_with("simd_") {
923072b8
FG
237 self::simd::codegen_simd_intrinsic_call(
238 fx,
239 intrinsic,
240 substs,
241 args,
242 destination,
243 source_info.span,
244 );
245 let ret_block = fx.get_block(target);
29967ef6 246 fx.bcx.ins().jump(ret_block, &[]);
923072b8
FG
247 } else if codegen_float_intrinsic_call(fx, intrinsic, args, destination) {
248 let ret_block = fx.get_block(target);
5e7ed085
FG
249 fx.bcx.ins().jump(ret_block, &[]);
250 } else {
251 codegen_regular_intrinsic_call(
252 fx,
253 instance,
254 intrinsic,
255 substs,
256 args,
5e7ed085 257 destination,
923072b8
FG
258 Some(target),
259 source_info,
5e7ed085 260 );
29967ef6 261 }
5e7ed085 262}
29967ef6 263
5e7ed085
FG
264fn codegen_float_intrinsic_call<'tcx>(
265 fx: &mut FunctionCx<'_, '_, 'tcx>,
266 intrinsic: Symbol,
267 args: &[mir::Operand<'tcx>],
268 ret: CPlace<'tcx>,
269) -> bool {
270 let (name, arg_count, ty) = match intrinsic {
271 sym::expf32 => ("expf", 1, fx.tcx.types.f32),
272 sym::expf64 => ("exp", 1, fx.tcx.types.f64),
273 sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32),
274 sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64),
275 sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32),
276 sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64),
277 sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32), // compiler-builtins
278 sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64), // compiler-builtins
279 sym::powf32 => ("powf", 2, fx.tcx.types.f32),
280 sym::powf64 => ("pow", 2, fx.tcx.types.f64),
281 sym::logf32 => ("logf", 1, fx.tcx.types.f32),
282 sym::logf64 => ("log", 1, fx.tcx.types.f64),
283 sym::log2f32 => ("log2f", 1, fx.tcx.types.f32),
284 sym::log2f64 => ("log2", 1, fx.tcx.types.f64),
285 sym::log10f32 => ("log10f", 1, fx.tcx.types.f32),
286 sym::log10f64 => ("log10", 1, fx.tcx.types.f64),
287 sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32),
288 sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64),
289 sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32),
290 sym::fmaf64 => ("fma", 3, fx.tcx.types.f64),
291 sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32),
292 sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64),
293 sym::floorf32 => ("floorf", 1, fx.tcx.types.f32),
294 sym::floorf64 => ("floor", 1, fx.tcx.types.f64),
295 sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32),
296 sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64),
297 sym::truncf32 => ("truncf", 1, fx.tcx.types.f32),
298 sym::truncf64 => ("trunc", 1, fx.tcx.types.f64),
299 sym::roundf32 => ("roundf", 1, fx.tcx.types.f32),
300 sym::roundf64 => ("round", 1, fx.tcx.types.f64),
301 sym::sinf32 => ("sinf", 1, fx.tcx.types.f32),
302 sym::sinf64 => ("sin", 1, fx.tcx.types.f64),
303 sym::cosf32 => ("cosf", 1, fx.tcx.types.f32),
304 sym::cosf64 => ("cos", 1, fx.tcx.types.f64),
305 _ => return false,
306 };
29967ef6 307
5e7ed085
FG
308 if args.len() != arg_count {
309 bug!("wrong number of args for intrinsic {:?}", intrinsic);
29967ef6
XL
310 }
311
5e7ed085
FG
312 let (a, b, c);
313 let args = match args {
314 [x] => {
315 a = [codegen_operand(fx, x)];
316 &a as &[_]
317 }
318 [x, y] => {
319 b = [codegen_operand(fx, x), codegen_operand(fx, y)];
320 &b
321 }
322 [x, y, z] => {
323 c = [codegen_operand(fx, x), codegen_operand(fx, y), codegen_operand(fx, z)];
324 &c
325 }
326 _ => unreachable!(),
327 };
328
f2b60f7d
FG
329 let layout = fx.layout_of(ty);
330 let res = match intrinsic {
331 sym::fmaf32 | sym::fmaf64 => {
332 let a = args[0].load_scalar(fx);
333 let b = args[1].load_scalar(fx);
334 let c = args[2].load_scalar(fx);
335 CValue::by_val(fx.bcx.ins().fma(a, b, c), layout)
336 }
337 sym::copysignf32 | sym::copysignf64 => {
338 let a = args[0].load_scalar(fx);
339 let b = args[1].load_scalar(fx);
340 CValue::by_val(fx.bcx.ins().fcopysign(a, b), layout)
341 }
342 sym::fabsf32
343 | sym::fabsf64
344 | sym::floorf32
345 | sym::floorf64
346 | sym::ceilf32
347 | sym::ceilf64
348 | sym::truncf32
349 | sym::truncf64 => {
350 let a = args[0].load_scalar(fx);
351
352 let val = match intrinsic {
353 sym::fabsf32 | sym::fabsf64 => fx.bcx.ins().fabs(a),
354 sym::floorf32 | sym::floorf64 => fx.bcx.ins().floor(a),
355 sym::ceilf32 | sym::ceilf64 => fx.bcx.ins().ceil(a),
356 sym::truncf32 | sym::truncf64 => fx.bcx.ins().trunc(a),
357 _ => unreachable!(),
358 };
359
360 CValue::by_val(val, layout)
361 }
362 // These intrinsics aren't supported natively by Cranelift.
363 // Lower them to a libcall.
364 _ => fx.easy_call(name, &args, ty),
365 };
366
5e7ed085
FG
367 ret.write_cvalue(fx, res);
368
369 true
370}
371
372fn codegen_regular_intrinsic_call<'tcx>(
373 fx: &mut FunctionCx<'_, '_, 'tcx>,
374 instance: Instance<'tcx>,
375 intrinsic: Symbol,
376 substs: SubstsRef<'tcx>,
377 args: &[mir::Operand<'tcx>],
378 ret: CPlace<'tcx>,
923072b8
FG
379 destination: Option<BasicBlock>,
380 source_info: mir::SourceInfo,
5e7ed085
FG
381) {
382 let usize_layout = fx.layout_of(fx.tcx.types.usize);
383
064997fb 384 match intrinsic {
064997fb
FG
385 sym::likely | sym::unlikely => {
386 intrinsic_args!(fx, args => (a); intrinsic);
29967ef6 387
29967ef6 388 ret.write_cvalue(fx, a);
064997fb
FG
389 }
390 sym::breakpoint => {
391 intrinsic_args!(fx, args => (); intrinsic);
392
29967ef6 393 fx.bcx.ins().debugtrap();
064997fb
FG
394 }
395 sym::copy | sym::copy_nonoverlapping => {
396 intrinsic_args!(fx, args => (src, dst, count); intrinsic);
397 let src = src.load_scalar(fx);
398 let dst = dst.load_scalar(fx);
399 let count = count.load_scalar(fx);
400
5e7ed085 401 let elem_ty = substs.type_at(0);
29967ef6 402 let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
29967ef6 403 assert_eq!(args.len(), 3);
064997fb
FG
404 let byte_amount =
405 if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };
29967ef6 406
17df50a5 407 if intrinsic == sym::copy_nonoverlapping {
29967ef6 408 // FIXME emit_small_memcpy
a2a8927a 409 fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
29967ef6
XL
410 } else {
411 // FIXME emit_small_memmove
a2a8927a 412 fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
29967ef6 413 }
064997fb
FG
414 }
415 sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => {
416 // NOTE: the volatile variants have src and dst swapped
417 intrinsic_args!(fx, args => (dst, src, count); intrinsic);
418 let dst = dst.load_scalar(fx);
419 let src = src.load_scalar(fx);
420 let count = count.load_scalar(fx);
421
5e7ed085 422 let elem_ty = substs.type_at(0);
29967ef6 423 let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
29967ef6 424 assert_eq!(args.len(), 3);
064997fb
FG
425 let byte_amount =
426 if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };
29967ef6
XL
427
428 // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
17df50a5 429 if intrinsic == sym::volatile_copy_nonoverlapping_memory {
29967ef6 430 // FIXME emit_small_memcpy
a2a8927a 431 fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
29967ef6
XL
432 } else {
433 // FIXME emit_small_memmove
a2a8927a 434 fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
29967ef6 435 }
064997fb
FG
436 }
437 sym::size_of_val => {
438 intrinsic_args!(fx, args => (ptr); intrinsic);
439
5e7ed085 440 let layout = fx.layout_of(substs.type_at(0));
064997fb
FG
441 // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
442 // branch
443 let size = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
29967ef6
XL
444 let (_ptr, info) = ptr.load_scalar_pair(fx);
445 let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
446 size
447 } else {
064997fb 448 fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64)
29967ef6
XL
449 };
450 ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
064997fb
FG
451 }
452 sym::min_align_of_val => {
453 intrinsic_args!(fx, args => (ptr); intrinsic);
454
5e7ed085 455 let layout = fx.layout_of(substs.type_at(0));
064997fb
FG
456 // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
457 // branch
458 let align = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
29967ef6
XL
459 let (_ptr, info) = ptr.load_scalar_pair(fx);
460 let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
461 align
462 } else {
064997fb 463 fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
29967ef6
XL
464 };
465 ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
064997fb
FG
466 }
467
468 sym::vtable_size => {
469 intrinsic_args!(fx, args => (vtable); intrinsic);
470 let vtable = vtable.load_scalar(fx);
471
472 let size = crate::vtable::size_of_obj(fx, vtable);
473 ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
474 }
475
476 sym::vtable_align => {
477 intrinsic_args!(fx, args => (vtable); intrinsic);
478 let vtable = vtable.load_scalar(fx);
479
480 let align = crate::vtable::min_align_of_obj(fx, vtable);
481 ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
482 }
483
484 sym::unchecked_add
485 | sym::unchecked_sub
486 | sym::unchecked_mul
487 | sym::unchecked_div
488 | sym::exact_div
489 | sym::unchecked_rem
490 | sym::unchecked_shl
491 | sym::unchecked_shr => {
492 intrinsic_args!(fx, args => (x, y); intrinsic);
29967ef6 493
29967ef6
XL
494 // FIXME trap on overflow
495 let bin_op = match intrinsic {
17df50a5
XL
496 sym::unchecked_add => BinOp::Add,
497 sym::unchecked_sub => BinOp::Sub,
5e7ed085 498 sym::unchecked_mul => BinOp::Mul,
17df50a5
XL
499 sym::unchecked_div | sym::exact_div => BinOp::Div,
500 sym::unchecked_rem => BinOp::Rem,
501 sym::unchecked_shl => BinOp::Shl,
502 sym::unchecked_shr => BinOp::Shr,
503 _ => unreachable!(),
29967ef6
XL
504 };
505 let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
506 ret.write_cvalue(fx, res);
064997fb
FG
507 }
508 sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
509 intrinsic_args!(fx, args => (x, y); intrinsic);
510
29967ef6
XL
511 assert_eq!(x.layout().ty, y.layout().ty);
512 let bin_op = match intrinsic {
17df50a5
XL
513 sym::add_with_overflow => BinOp::Add,
514 sym::sub_with_overflow => BinOp::Sub,
515 sym::mul_with_overflow => BinOp::Mul,
516 _ => unreachable!(),
29967ef6
XL
517 };
518
064997fb 519 let res = crate::num::codegen_checked_int_binop(fx, bin_op, x, y);
29967ef6 520 ret.write_cvalue(fx, res);
064997fb
FG
521 }
522 sym::saturating_add | sym::saturating_sub => {
523 intrinsic_args!(fx, args => (lhs, rhs); intrinsic);
524
29967ef6
XL
525 assert_eq!(lhs.layout().ty, rhs.layout().ty);
526 let bin_op = match intrinsic {
17df50a5
XL
527 sym::saturating_add => BinOp::Add,
528 sym::saturating_sub => BinOp::Sub,
529 _ => unreachable!(),
29967ef6
XL
530 };
531
2b03887a 532 let res = crate::num::codegen_saturating_int_binop(fx, bin_op, lhs, rhs);
29967ef6 533 ret.write_cvalue(fx, res);
064997fb
FG
534 }
535 sym::rotate_left => {
536 intrinsic_args!(fx, args => (x, y); intrinsic);
537 let y = y.load_scalar(fx);
538
5e7ed085
FG
539 let layout = x.layout();
540 let x = x.load_scalar(fx);
29967ef6
XL
541 let res = fx.bcx.ins().rotl(x, y);
542 ret.write_cvalue(fx, CValue::by_val(res, layout));
064997fb
FG
543 }
544 sym::rotate_right => {
545 intrinsic_args!(fx, args => (x, y); intrinsic);
546 let y = y.load_scalar(fx);
547
5e7ed085
FG
548 let layout = x.layout();
549 let x = x.load_scalar(fx);
29967ef6
XL
550 let res = fx.bcx.ins().rotr(x, y);
551 ret.write_cvalue(fx, CValue::by_val(res, layout));
064997fb 552 }
29967ef6
XL
553
554 // The only difference between offset and arith_offset is regarding UB. Because Cranelift
555 // doesn't have UB both are codegen'ed the same way
064997fb
FG
556 sym::offset | sym::arith_offset => {
557 intrinsic_args!(fx, args => (base, offset); intrinsic);
558 let offset = offset.load_scalar(fx);
559
29967ef6
XL
560 let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
561 let pointee_size = fx.layout_of(pointee_ty).size.bytes();
fc512014
XL
562 let ptr_diff = if pointee_size != 1 {
563 fx.bcx.ins().imul_imm(offset, pointee_size as i64)
564 } else {
565 offset
566 };
29967ef6
XL
567 let base_val = base.load_scalar(fx);
568 let res = fx.bcx.ins().iadd(base_val, ptr_diff);
569 ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
064997fb
FG
570 }
571
f2b60f7d
FG
572 sym::ptr_mask => {
573 intrinsic_args!(fx, args => (ptr, mask); intrinsic);
574 let ptr = ptr.load_scalar(fx);
575 let mask = mask.load_scalar(fx);
576 fx.bcx.ins().band(ptr, mask);
577 }
578
064997fb
FG
579 sym::transmute => {
580 intrinsic_args!(fx, args => (from); intrinsic);
29967ef6 581
29967ef6 582 ret.write_cvalue_transmute(fx, from);
064997fb
FG
583 }
584 sym::write_bytes | sym::volatile_set_memory => {
585 intrinsic_args!(fx, args => (dst, val, count); intrinsic);
586 let val = val.load_scalar(fx);
587 let count = count.load_scalar(fx);
588
29967ef6
XL
589 let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
590 let pointee_size = fx.layout_of(pointee_ty).size.bytes();
fc512014
XL
591 let count = if pointee_size != 1 {
592 fx.bcx.ins().imul_imm(count, pointee_size as i64)
593 } else {
594 count
595 };
29967ef6
XL
596 let dst_ptr = dst.load_scalar(fx);
597 // FIXME make the memset actually volatile when switching to emit_small_memset
598 // FIXME use emit_small_memset
a2a8927a 599 fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
064997fb
FG
600 }
601 sym::ctlz | sym::ctlz_nonzero => {
602 intrinsic_args!(fx, args => (arg); intrinsic);
5e7ed085 603 let val = arg.load_scalar(fx);
064997fb 604
29967ef6 605 // FIXME trap on `ctlz_nonzero` with zero arg.
5e7ed085
FG
606 let res = fx.bcx.ins().clz(val);
607 let res = CValue::by_val(res, arg.layout());
29967ef6 608 ret.write_cvalue(fx, res);
064997fb
FG
609 }
610 sym::cttz | sym::cttz_nonzero => {
611 intrinsic_args!(fx, args => (arg); intrinsic);
5e7ed085 612 let val = arg.load_scalar(fx);
064997fb 613
29967ef6 614 // FIXME trap on `cttz_nonzero` with zero arg.
5e7ed085
FG
615 let res = fx.bcx.ins().ctz(val);
616 let res = CValue::by_val(res, arg.layout());
29967ef6 617 ret.write_cvalue(fx, res);
064997fb
FG
618 }
619 sym::ctpop => {
620 intrinsic_args!(fx, args => (arg); intrinsic);
5e7ed085 621 let val = arg.load_scalar(fx);
064997fb 622
5e7ed085
FG
623 let res = fx.bcx.ins().popcnt(val);
624 let res = CValue::by_val(res, arg.layout());
29967ef6 625 ret.write_cvalue(fx, res);
064997fb
FG
626 }
627 sym::bitreverse => {
628 intrinsic_args!(fx, args => (arg); intrinsic);
5e7ed085 629 let val = arg.load_scalar(fx);
064997fb 630
5e7ed085
FG
631 let res = fx.bcx.ins().bitrev(val);
632 let res = CValue::by_val(res, arg.layout());
29967ef6 633 ret.write_cvalue(fx, res);
064997fb
FG
634 }
635 sym::bswap => {
064997fb 636 intrinsic_args!(fx, args => (arg); intrinsic);
5e7ed085 637 let val = arg.load_scalar(fx);
064997fb 638
9c376795
FG
639 let res = if fx.bcx.func.dfg.value_type(val) == types::I8 {
640 val
641 } else {
642 fx.bcx.ins().bswap(val)
643 };
644 let res = CValue::by_val(res, arg.layout());
29967ef6 645 ret.write_cvalue(fx, res);
064997fb 646 }
9c376795 647 sym::assert_inhabited | sym::assert_zero_valid | sym::assert_mem_uninitialized_valid => {
064997fb
FG
648 intrinsic_args!(fx, args => (); intrinsic);
649
5e7ed085 650 let layout = fx.layout_of(substs.type_at(0));
29967ef6 651 if layout.abi.is_uninhabited() {
5e7ed085
FG
652 with_no_trimmed_paths!({
653 crate::base::codegen_panic(
654 fx,
655 &format!("attempted to instantiate uninhabited type `{}`", layout.ty),
923072b8 656 source_info,
5e7ed085
FG
657 )
658 });
29967ef6
XL
659 return;
660 }
661
064997fb 662 if intrinsic == sym::assert_zero_valid && !fx.tcx.permits_zero_init(layout) {
5e7ed085
FG
663 with_no_trimmed_paths!({
664 crate::base::codegen_panic(
665 fx,
064997fb
FG
666 &format!(
667 "attempted to zero-initialize type `{}`, which is invalid",
668 layout.ty
669 ),
923072b8 670 source_info,
5e7ed085
FG
671 );
672 });
29967ef6
XL
673 return;
674 }
675
9c376795
FG
676 if intrinsic == sym::assert_mem_uninitialized_valid
677 && !fx.tcx.permits_uninit_init(layout)
678 {
5e7ed085
FG
679 with_no_trimmed_paths!({
680 crate::base::codegen_panic(
681 fx,
064997fb
FG
682 &format!(
683 "attempted to leave type `{}` uninitialized, which is invalid",
684 layout.ty
685 ),
923072b8 686 source_info,
5e7ed085
FG
687 )
688 });
29967ef6
XL
689 return;
690 }
064997fb
FG
691 }
692
693 sym::volatile_load | sym::unaligned_volatile_load => {
694 intrinsic_args!(fx, args => (ptr); intrinsic);
29967ef6 695
29967ef6 696 // Cranelift treats loads as volatile by default
29967ef6 697 // FIXME correctly handle unaligned_volatile_load
064997fb 698 let inner_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
29967ef6
XL
699 let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
700 ret.write_cvalue(fx, val);
064997fb
FG
701 }
702 sym::volatile_store | sym::unaligned_volatile_store => {
703 intrinsic_args!(fx, args => (ptr, val); intrinsic);
704 let ptr = ptr.load_scalar(fx);
705
29967ef6 706 // Cranelift treats stores as volatile by default
29967ef6
XL
707 // FIXME correctly handle unaligned_volatile_store
708 let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
709 dest.write_cvalue(fx, val);
064997fb
FG
710 }
711
712 sym::pref_align_of
713 | sym::needs_drop
714 | sym::type_id
715 | sym::type_name
716 | sym::variant_count => {
717 intrinsic_args!(fx, args => (); intrinsic);
29967ef6 718
29967ef6
XL
719 let const_val =
720 fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
064997fb 721 let val = crate::constant::codegen_const_value(fx, const_val, ret.layout().ty);
29967ef6 722 ret.write_cvalue(fx, val);
064997fb 723 }
29967ef6 724
064997fb
FG
725 sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
726 intrinsic_args!(fx, args => (ptr, base); intrinsic);
727 let ptr = ptr.load_scalar(fx);
728 let base = base.load_scalar(fx);
5e7ed085 729 let ty = substs.type_at(0);
29967ef6 730
5e7ed085 731 let pointee_size: u64 = fx.layout_of(ty).size.bytes();
04454e1e 732 let diff_bytes = fx.bcx.ins().isub(ptr, base);
29967ef6 733 // FIXME this can be an exact division.
923072b8
FG
734 let val = if intrinsic == sym::ptr_offset_from_unsigned {
735 let usize_layout = fx.layout_of(fx.tcx.types.usize);
04454e1e
FG
736 // Because diff_bytes ULE isize::MAX, this would be fine as signed,
737 // but unsigned is slightly easier to codegen, so might as well.
923072b8 738 CValue::by_val(fx.bcx.ins().udiv_imm(diff_bytes, pointee_size as i64), usize_layout)
04454e1e 739 } else {
923072b8
FG
740 let isize_layout = fx.layout_of(fx.tcx.types.isize);
741 CValue::by_val(fx.bcx.ins().sdiv_imm(diff_bytes, pointee_size as i64), isize_layout)
04454e1e 742 };
29967ef6 743 ret.write_cvalue(fx, val);
064997fb
FG
744 }
745
f2b60f7d 746 sym::ptr_guaranteed_cmp => {
064997fb 747 intrinsic_args!(fx, args => (a, b); intrinsic);
29967ef6 748
2b03887a
FG
749 let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b).load_scalar(fx);
750 ret.write_cvalue(fx, CValue::by_val(val, fx.layout_of(fx.tcx.types.u8)));
064997fb
FG
751 }
752
064997fb
FG
753 sym::caller_location => {
754 intrinsic_args!(fx, args => (); intrinsic);
29967ef6 755
923072b8 756 let caller_location = fx.get_caller_location(source_info);
29967ef6 757 ret.write_cvalue(fx, caller_location);
064997fb
FG
758 }
759
760 _ if intrinsic.as_str().starts_with("atomic_fence") => {
761 intrinsic_args!(fx, args => (); intrinsic);
29967ef6 762
6a06907d 763 fx.bcx.ins().fence();
064997fb
FG
764 }
765 _ if intrinsic.as_str().starts_with("atomic_singlethreadfence") => {
766 intrinsic_args!(fx, args => (); intrinsic);
767
6a06907d
XL
768 // FIXME use a compiler fence once Cranelift supports it
769 fx.bcx.ins().fence();
064997fb
FG
770 }
771 _ if intrinsic.as_str().starts_with("atomic_load") => {
772 intrinsic_args!(fx, args => (ptr); intrinsic);
773 let ptr = ptr.load_scalar(fx);
774
5e7ed085
FG
775 let ty = substs.type_at(0);
776 match ty.kind() {
777 ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
778 // FIXME implement 128bit atomics
779 if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
780 // special case for compiler-builtins to avoid having to patch it
781 crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
5e7ed085
FG
782 return;
783 } else {
064997fb
FG
784 fx.tcx
785 .sess
786 .span_fatal(source_info.span, "128bit atomics not yet supported");
5e7ed085
FG
787 }
788 }
789 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
790 _ => {
923072b8 791 report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
5e7ed085
FG
792 return;
793 }
794 }
795 let clif_ty = fx.clif_type(ty).unwrap();
29967ef6 796
5e7ed085 797 let val = fx.bcx.ins().atomic_load(clif_ty, MemFlags::trusted(), ptr);
29967ef6 798
5e7ed085 799 let val = CValue::by_val(val, fx.layout_of(ty));
6a06907d 800 ret.write_cvalue(fx, val);
064997fb
FG
801 }
802 _ if intrinsic.as_str().starts_with("atomic_store") => {
803 intrinsic_args!(fx, args => (ptr, val); intrinsic);
804 let ptr = ptr.load_scalar(fx);
805
5e7ed085
FG
806 let ty = substs.type_at(0);
807 match ty.kind() {
808 ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
809 // FIXME implement 128bit atomics
810 if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
811 // special case for compiler-builtins to avoid having to patch it
812 crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
5e7ed085
FG
813 return;
814 } else {
064997fb
FG
815 fx.tcx
816 .sess
817 .span_fatal(source_info.span, "128bit atomics not yet supported");
5e7ed085
FG
818 }
819 }
820 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
821 _ => {
923072b8 822 report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
5e7ed085
FG
823 return;
824 }
825 }
29967ef6 826
6a06907d 827 let val = val.load_scalar(fx);
29967ef6 828
6a06907d 829 fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
064997fb
FG
830 }
831 _ if intrinsic.as_str().starts_with("atomic_xchg") => {
832 intrinsic_args!(fx, args => (ptr, new); intrinsic);
833 let ptr = ptr.load_scalar(fx);
834
6a06907d 835 let layout = new.layout();
5e7ed085
FG
836 match layout.ty.kind() {
837 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
838 _ => {
923072b8 839 report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
5e7ed085
FG
840 return;
841 }
842 }
6a06907d 843 let ty = fx.clif_type(layout.ty).unwrap();
29967ef6 844
6a06907d 845 let new = new.load_scalar(fx);
29967ef6 846
6a06907d 847 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);
29967ef6 848
6a06907d
XL
849 let old = CValue::by_val(old, layout);
850 ret.write_cvalue(fx, old);
064997fb
FG
851 }
852 _ if intrinsic.as_str().starts_with("atomic_cxchg") => {
853 // both atomic_cxchg_* and atomic_cxchgweak_*
854 intrinsic_args!(fx, args => (ptr, test_old, new); intrinsic);
855 let ptr = ptr.load_scalar(fx);
856
6a06907d 857 let layout = new.layout();
5e7ed085
FG
858 match layout.ty.kind() {
859 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
860 _ => {
923072b8 861 report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
5e7ed085
FG
862 return;
863 }
864 }
29967ef6
XL
865
866 let test_old = test_old.load_scalar(fx);
867 let new = new.load_scalar(fx);
868
6a06907d 869 let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
29967ef6 870 let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
29967ef6 871
9c376795 872 let ret_val = CValue::by_val_pair(old, is_eq, ret.layout());
6a06907d 873 ret.write_cvalue(fx, ret_val)
064997fb
FG
874 }
875
876 _ if intrinsic.as_str().starts_with("atomic_xadd") => {
877 intrinsic_args!(fx, args => (ptr, amount); intrinsic);
878 let ptr = ptr.load_scalar(fx);
29967ef6 879
6a06907d 880 let layout = amount.layout();
5e7ed085
FG
881 match layout.ty.kind() {
882 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
883 _ => {
923072b8 884 report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
5e7ed085
FG
885 return;
886 }
887 }
6a06907d
XL
888 let ty = fx.clif_type(layout.ty).unwrap();
889
29967ef6 890 let amount = amount.load_scalar(fx);
6a06907d 891
064997fb
FG
892 let old =
893 fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);
6a06907d
XL
894
895 let old = CValue::by_val(old, layout);
896 ret.write_cvalue(fx, old);
064997fb
FG
897 }
898 _ if intrinsic.as_str().starts_with("atomic_xsub") => {
899 intrinsic_args!(fx, args => (ptr, amount); intrinsic);
900 let ptr = ptr.load_scalar(fx);
901
6a06907d 902 let layout = amount.layout();
5e7ed085
FG
903 match layout.ty.kind() {
904 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
905 _ => {
923072b8 906 report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
5e7ed085
FG
907 return;
908 }
909 }
6a06907d
XL
910 let ty = fx.clif_type(layout.ty).unwrap();
911
29967ef6 912 let amount = amount.load_scalar(fx);
6a06907d 913
064997fb
FG
914 let old =
915 fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);
6a06907d
XL
916
917 let old = CValue::by_val(old, layout);
918 ret.write_cvalue(fx, old);
064997fb
FG
919 }
920 _ if intrinsic.as_str().starts_with("atomic_and") => {
921 intrinsic_args!(fx, args => (ptr, src); intrinsic);
922 let ptr = ptr.load_scalar(fx);
923
6a06907d 924 let layout = src.layout();
5e7ed085
FG
925 match layout.ty.kind() {
926 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
927 _ => {
923072b8 928 report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
5e7ed085
FG
929 return;
930 }
931 }
6a06907d
XL
932 let ty = fx.clif_type(layout.ty).unwrap();
933
29967ef6 934 let src = src.load_scalar(fx);
6a06907d
XL
935
936 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);
937
938 let old = CValue::by_val(old, layout);
939 ret.write_cvalue(fx, old);
064997fb
FG
940 }
941 _ if intrinsic.as_str().starts_with("atomic_or") => {
942 intrinsic_args!(fx, args => (ptr, src); intrinsic);
943 let ptr = ptr.load_scalar(fx);
944
6a06907d 945 let layout = src.layout();
5e7ed085
FG
946 match layout.ty.kind() {
947 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
948 _ => {
923072b8 949 report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
5e7ed085
FG
950 return;
951 }
952 }
6a06907d 953 let ty = fx.clif_type(layout.ty).unwrap();
29967ef6
XL
954
955 let src = src.load_scalar(fx);
956
6a06907d 957 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);
29967ef6 958
6a06907d
XL
959 let old = CValue::by_val(old, layout);
960 ret.write_cvalue(fx, old);
064997fb
FG
961 }
962 _ if intrinsic.as_str().starts_with("atomic_xor") => {
963 intrinsic_args!(fx, args => (ptr, src); intrinsic);
964 let ptr = ptr.load_scalar(fx);
965
6a06907d 966 let layout = src.layout();
5e7ed085
FG
967 match layout.ty.kind() {
968 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
969 _ => {
923072b8 970 report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
5e7ed085
FG
971 return;
972 }
973 }
6a06907d
XL
974 let ty = fx.clif_type(layout.ty).unwrap();
975
29967ef6 976 let src = src.load_scalar(fx);
6a06907d
XL
977
978 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);
979
980 let old = CValue::by_val(old, layout);
981 ret.write_cvalue(fx, old);
064997fb
FG
982 }
983 _ if intrinsic.as_str().starts_with("atomic_nand") => {
984 intrinsic_args!(fx, args => (ptr, src); intrinsic);
985 let ptr = ptr.load_scalar(fx);
986
6a06907d 987 let layout = src.layout();
5e7ed085
FG
988 match layout.ty.kind() {
989 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
990 _ => {
923072b8 991 report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
5e7ed085
FG
992 return;
993 }
994 }
6a06907d
XL
995 let ty = fx.clif_type(layout.ty).unwrap();
996
29967ef6 997 let src = src.load_scalar(fx);
6a06907d
XL
998
999 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);
1000
1001 let old = CValue::by_val(old, layout);
1002 ret.write_cvalue(fx, old);
064997fb
FG
1003 }
1004 _ if intrinsic.as_str().starts_with("atomic_max") => {
1005 intrinsic_args!(fx, args => (ptr, src); intrinsic);
1006 let ptr = ptr.load_scalar(fx);
1007
6a06907d 1008 let layout = src.layout();
5e7ed085
FG
1009 match layout.ty.kind() {
1010 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
1011 _ => {
923072b8 1012 report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
5e7ed085
FG
1013 return;
1014 }
1015 }
6a06907d 1016 let ty = fx.clif_type(layout.ty).unwrap();
29967ef6 1017
29967ef6 1018 let src = src.load_scalar(fx);
6a06907d
XL
1019
1020 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);
1021
1022 let old = CValue::by_val(old, layout);
1023 ret.write_cvalue(fx, old);
064997fb
FG
1024 }
1025 _ if intrinsic.as_str().starts_with("atomic_umax") => {
1026 intrinsic_args!(fx, args => (ptr, src); intrinsic);
1027 let ptr = ptr.load_scalar(fx);
1028
6a06907d 1029 let layout = src.layout();
5e7ed085
FG
1030 match layout.ty.kind() {
1031 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
1032 _ => {
923072b8 1033 report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
5e7ed085
FG
1034 return;
1035 }
1036 }
6a06907d
XL
1037 let ty = fx.clif_type(layout.ty).unwrap();
1038
29967ef6 1039 let src = src.load_scalar(fx);
6a06907d
XL
1040
1041 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);
1042
1043 let old = CValue::by_val(old, layout);
1044 ret.write_cvalue(fx, old);
064997fb
FG
1045 }
1046 _ if intrinsic.as_str().starts_with("atomic_min") => {
1047 intrinsic_args!(fx, args => (ptr, src); intrinsic);
1048 let ptr = ptr.load_scalar(fx);
1049
6a06907d 1050 let layout = src.layout();
5e7ed085
FG
1051 match layout.ty.kind() {
1052 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
1053 _ => {
923072b8 1054 report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
5e7ed085
FG
1055 return;
1056 }
1057 }
6a06907d
XL
1058 let ty = fx.clif_type(layout.ty).unwrap();
1059
29967ef6 1060 let src = src.load_scalar(fx);
6a06907d
XL
1061
1062 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);
1063
1064 let old = CValue::by_val(old, layout);
1065 ret.write_cvalue(fx, old);
064997fb
FG
1066 }
1067 _ if intrinsic.as_str().starts_with("atomic_umin") => {
1068 intrinsic_args!(fx, args => (ptr, src); intrinsic);
1069 let ptr = ptr.load_scalar(fx);
1070
6a06907d 1071 let layout = src.layout();
5e7ed085
FG
1072 match layout.ty.kind() {
1073 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
1074 _ => {
923072b8 1075 report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
5e7ed085
FG
1076 return;
1077 }
1078 }
6a06907d
XL
1079 let ty = fx.clif_type(layout.ty).unwrap();
1080
29967ef6 1081 let src = src.load_scalar(fx);
6a06907d
XL
1082
1083 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);
1084
1085 let old = CValue::by_val(old, layout);
1086 ret.write_cvalue(fx, old);
064997fb
FG
1087 }
1088
1089 sym::minnumf32 => {
1090 intrinsic_args!(fx, args => (a, b); intrinsic);
1091 let a = a.load_scalar(fx);
1092 let b = b.load_scalar(fx);
29967ef6 1093
04454e1e 1094 let val = crate::num::codegen_float_min(fx, a, b);
29967ef6
XL
1095 let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
1096 ret.write_cvalue(fx, val);
064997fb
FG
1097 }
1098 sym::minnumf64 => {
1099 intrinsic_args!(fx, args => (a, b); intrinsic);
1100 let a = a.load_scalar(fx);
1101 let b = b.load_scalar(fx);
1102
04454e1e 1103 let val = crate::num::codegen_float_min(fx, a, b);
29967ef6
XL
1104 let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
1105 ret.write_cvalue(fx, val);
064997fb
FG
1106 }
1107 sym::maxnumf32 => {
1108 intrinsic_args!(fx, args => (a, b); intrinsic);
1109 let a = a.load_scalar(fx);
1110 let b = b.load_scalar(fx);
1111
04454e1e 1112 let val = crate::num::codegen_float_max(fx, a, b);
29967ef6
XL
1113 let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
1114 ret.write_cvalue(fx, val);
064997fb
FG
1115 }
1116 sym::maxnumf64 => {
1117 intrinsic_args!(fx, args => (a, b); intrinsic);
1118 let a = a.load_scalar(fx);
1119 let b = b.load_scalar(fx);
1120
04454e1e 1121 let val = crate::num::codegen_float_max(fx, a, b);
29967ef6
XL
1122 let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
1123 ret.write_cvalue(fx, val);
064997fb
FG
1124 }
1125
1126 kw::Try => {
1127 intrinsic_args!(fx, args => (f, data, catch_fn); intrinsic);
1128 let f = f.load_scalar(fx);
1129 let data = data.load_scalar(fx);
1130 let _catch_fn = catch_fn.load_scalar(fx);
29967ef6 1131
29967ef6
XL
1132 // FIXME once unwinding is supported, change this to actually catch panics
1133 let f_sig = fx.bcx.func.import_signature(Signature {
a2a8927a 1134 call_conv: fx.target_config.default_call_conv,
2b03887a 1135 params: vec![AbiParam::new(pointer_ty(fx.tcx))],
29967ef6
XL
1136 returns: vec![],
1137 });
1138
1139 fx.bcx.ins().call_indirect(f_sig, f, &[data]);
1140
1141 let layout = ret.layout();
1142 let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
1143 ret.write_cvalue(fx, ret_val);
064997fb 1144 }
29967ef6 1145
064997fb
FG
1146 sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
1147 intrinsic_args!(fx, args => (x, y); intrinsic);
1148
1149 let res = crate::num::codegen_float_binop(
1150 fx,
1151 match intrinsic {
1152 sym::fadd_fast => BinOp::Add,
1153 sym::fsub_fast => BinOp::Sub,
1154 sym::fmul_fast => BinOp::Mul,
1155 sym::fdiv_fast => BinOp::Div,
1156 sym::frem_fast => BinOp::Rem,
1157 _ => unreachable!(),
1158 },
1159 x,
1160 y,
1161 );
29967ef6 1162 ret.write_cvalue(fx, res);
064997fb
FG
1163 }
1164 sym::float_to_int_unchecked => {
1165 intrinsic_args!(fx, args => (f); intrinsic);
1166 let f = f.load_scalar(fx);
1167
29967ef6
XL
1168 let res = crate::cast::clif_int_or_float_cast(
1169 fx,
1170 f,
1171 false,
1172 fx.clif_type(ret.layout().ty).unwrap(),
1173 type_sign(ret.layout().ty),
1174 );
1175 ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
064997fb
FG
1176 }
1177
1178 sym::raw_eq => {
1179 intrinsic_args!(fx, args => (lhs_ref, rhs_ref); intrinsic);
1180 let lhs_ref = lhs_ref.load_scalar(fx);
1181 let rhs_ref = rhs_ref.load_scalar(fx);
136023e0 1182
5e7ed085 1183 let size = fx.layout_of(substs.type_at(0)).layout.size();
94222f64 1184 // FIXME add and use emit_small_memcmp
064997fb
FG
1185 let is_eq_value = if size == Size::ZERO {
1186 // No bytes means they're trivially equal
1187 fx.bcx.ins().iconst(types::I8, 1)
1188 } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
1189 // Can't use `trusted` for these loads; they could be unaligned.
1190 let mut flags = MemFlags::new();
1191 flags.set_notrap();
1192 let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
1193 let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
9c376795 1194 fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val)
064997fb
FG
1195 } else {
1196 // Just call `memcmp` (like slices do in core) when the
1197 // size is too large or it's not a power-of-two.
1198 let signed_bytes = i64::try_from(size.bytes()).unwrap();
1199 let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
1200 let params = vec![AbiParam::new(fx.pointer_type); 3];
1201 let returns = vec![AbiParam::new(types::I32)];
1202 let args = &[lhs_ref, rhs_ref, bytes_val];
1203 let cmp = fx.lib_call("memcmp", params, returns, args)[0];
9c376795 1204 fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0)
064997fb 1205 };
136023e0 1206 ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
064997fb
FG
1207 }
1208
1209 sym::const_allocate => {
1210 intrinsic_args!(fx, args => (_size, _align); intrinsic);
94222f64 1211
5e7ed085
FG
1212 // returns a null pointer at runtime.
1213 let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
1214 ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
064997fb 1215 }
5e7ed085 1216
064997fb
FG
1217 sym::const_deallocate => {
1218 intrinsic_args!(fx, args => (_ptr, _size, _align); intrinsic);
5e7ed085 1219 // nop at runtime.
064997fb
FG
1220 }
1221
1222 sym::black_box => {
1223 intrinsic_args!(fx, args => (a); intrinsic);
5e7ed085 1224
94222f64
XL
1225 // FIXME implement black_box semantics
1226 ret.write_cvalue(fx, a);
064997fb
FG
1227 }
1228
1229 // FIXME implement variadics in cranelift
1230 sym::va_copy | sym::va_arg | sym::va_end => {
1231 fx.tcx.sess.span_fatal(
1232 source_info.span,
1233 "Defining variadic functions is not yet supported by Cranelift",
1234 );
1235 }
1236
1237 _ => {
1238 fx.tcx
1239 .sess
1240 .span_fatal(source_info.span, &format!("unsupported intrinsic {}", intrinsic));
1241 }
29967ef6
XL
1242 }
1243
923072b8 1244 let ret_block = fx.get_block(destination.unwrap());
5e7ed085 1245 fx.bcx.ins().jump(ret_block, &[]);
29967ef6 1246}