//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
//! and LLVM intrinsics that have symbol names starting with `llvm.`.
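// Destructures the intrinsic's operand slice into named `CValue`s, calling
// `bug_on_incorrect_arg_count` when the arity doesn't match. A rough sketch of what
// an invocation like the ones later in this file expands to:
//
//     intrinsic_args!(fx, args => (x, y); intrinsic);
//     // ... roughly becomes:
//     let (x, y) = if let [x, y] = args {
//         (codegen_operand(fx, x), codegen_operand(fx, y))
//     } else {
//         crate::intrinsics::bug_on_incorrect_arg_count(intrinsic);
//     };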
macro_rules! intrinsic_args {
    ($fx:expr, $args:expr => ($($arg:tt),*); $intrinsic:expr) => {
        #[allow(unused_parens)]
        let ($($arg),*) = if let [$($arg),*] = $args {
            ($(codegen_operand($fx, $arg)),*)
        } else {
            $crate::intrinsics::bug_on_incorrect_arg_count($intrinsic);
        };
    }
}
mod cpuid;
mod llvm;
mod llvm_aarch64;
mod llvm_x86;
mod simd;

pub(crate) use cpuid::codegen_cpuid_call;
pub(crate) use llvm::codegen_llvm_intrinsic_call;

use rustc_middle::ty;
use rustc_middle::ty::layout::{HasParamEnv, ValidityRequirement};
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::subst::SubstsRef;
use rustc_span::symbol::{kw, sym, Symbol};

use crate::prelude::*;
use cranelift_codegen::ir::AtomicRmwOp;
fn bug_on_incorrect_arg_count(intrinsic: impl std::fmt::Display) -> ! {
    bug!("wrong number of args for intrinsic {}", intrinsic);
}
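// Emits a compile-time error for an atomic intrinsic instantiated at an unsupported
// type, then traps so the generated (never executed) code still passes the Cranelift
// verifier.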
fn report_atomic_type_validation_error<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    span: Span,
    ty: Ty<'tcx>,
) {
    fx.tcx.sess.span_err(
        span,
        &format!(
            "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
            intrinsic, ty
        ),
    );
    // Prevent verifier error
    fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
}
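// Computes the Cranelift vector type for a SIMD layout. Returns `None` for vectors
// that are not exactly 128 bits wide, since (per the comment below) Cranelift only
// implements `icmp` for 128-bit vectors.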
pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
    let (element, count) = match layout.abi {
        Abi::Vector { element, count } => (element, count),
        _ => unreachable!(),
    };

    match scalar_to_clif_type(tcx, element).by(u32::try_from(count).unwrap()) {
        // Cranelift currently only implements icmp for 128bit vectors.
        Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
        _ => None,
    }
}
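// Applies `f` to every lane of `val`, writing each result lane to `ret`. `f` receives
// the input and output lane types plus the loaded lane value, and must return a value
// of the output lane type. Input and output vectors must have the same lane count.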
fn simd_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value) -> Value,
) {
    let layout = val.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}
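// Like `simd_pair_for_each_lane` below, but `f` works on whole `CValue` lanes instead
// of loaded scalars, so lanes need not be representable as a single Cranelift value.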
fn simd_pair_for_each_lane_typed<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, CValue<'tcx>, CValue<'tcx>) -> CValue<'tcx>,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, _lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let (ret_lane_count, _ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let x_lane = x.value_lane(fx, lane_idx);
        let y_lane = y.value_lane(fx, lane_idx);

        let res_lane = f(fx, x_lane, y_lane);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}
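// Binary-operator counterpart of `simd_for_each_lane`: loads matching lanes of `x`
// and `y` as scalars, applies `f`, and writes the result lane to `ret`.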
fn simd_pair_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
        let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, x_lane, y_lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}
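// Folds all lanes of `val` into a single scalar with `f`. When `acc` is given it
// seeds the fold over all lanes; otherwise lane 0 is the seed and the fold runs over
// the remaining lanes. E.g. for two lanes with an accumulator this computes, roughly,
// `f(f(acc, lane0), lane1)`.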
fn simd_reduce<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    acc: Option<Value>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    assert_eq!(lane_layout, ret.layout());

    let (mut res_val, start_lane) =
        if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
    for lane_idx in start_lane..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        res_val = f(fx, lane_layout.ty, res_val, lane);
    }
    let res = CValue::by_val(res_val, lane_layout);
    ret.write_cvalue(fx, res);
}
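// Boolean reduction over a mask vector: each lane is masked to its low bit before
// being combined with `f`, and the final result is narrowed to an `i8` boolean.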
// FIXME move all uses to `simd_reduce`
fn simd_reduce_bool<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
) {
    let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    assert!(ret.layout().ty.is_bool());

    let res_val = val.value_lane(fx, 0).load_scalar(fx);
    let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
    for lane_idx in 1..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
        res_val = f(fx, res_val, lane);
    }
    let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
        fx.bcx.ins().ireduce(types::I8, res_val)
    } else {
        res_val
    };
    let res = CValue::by_val(res_val, ret.layout());
    ret.write_cvalue(fx, res);
}
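// Expands a boolean into 0 or !0 (all ones) of the given type via `bmask`,
// bitcasting through the same-sized integer type for floats. SIMD comparison
// results use this "true is all bits set" representation.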
fn bool_to_zero_or_max_uint<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    ty: Ty<'tcx>,
    val: Value,
) -> Value {
    let ty = fx.clif_type(ty).unwrap();

    let int_ty = match ty {
        types::F32 => types::I32,
        types::F64 => types::I64,
        ty => ty,
    };

    let mut res = fx.bcx.ins().bmask(int_ty, val);

    if ty.is_float() {
        res = codegen_bitcast(fx, ty, res);
    }

    res
}
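// Entry point for intrinsic calls: SIMD and float intrinsics get their own lowering
// paths; everything else goes through `codegen_regular_intrinsic_call`, which is also
// responsible for emitting the jump to the return block.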
pub(crate) fn codegen_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    args: &[mir::Operand<'tcx>],
    destination: CPlace<'tcx>,
    target: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let intrinsic = fx.tcx.item_name(instance.def_id());
    let substs = instance.substs;

    if intrinsic.as_str().starts_with("simd_") {
        self::simd::codegen_simd_intrinsic_call(
            fx,
            intrinsic,
            substs,
            args,
            destination,
            target.expect("target for simd intrinsic"),
            source_info.span,
        );
    } else if codegen_float_intrinsic_call(fx, intrinsic, args, destination) {
        let ret_block = fx.get_block(target.expect("target for float intrinsic"));
        fx.bcx.ins().jump(ret_block, &[]);
    } else {
        codegen_regular_intrinsic_call(
            fx,
            instance,
            intrinsic,
            substs,
            args,
            destination,
            target,
            source_info,
        );
    }
}
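// Lowers the float intrinsics either to a native Cranelift instruction (fma,
// copysign, fabs, floor, ceil, trunc) or to a libcall with the matching C name
// ("sqrt", "powf", ...). Returns false if `intrinsic` is not a float intrinsic, so
// the caller can try the other lowering paths.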
fn codegen_float_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
) -> bool {
    let (name, arg_count, ty) = match intrinsic {
        sym::expf32 => ("expf", 1, fx.tcx.types.f32),
        sym::expf64 => ("exp", 1, fx.tcx.types.f64),
        sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32),
        sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64),
        sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32),
        sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64),
        sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32), // compiler-builtins
        sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64), // compiler-builtins
        sym::powf32 => ("powf", 2, fx.tcx.types.f32),
        sym::powf64 => ("pow", 2, fx.tcx.types.f64),
        sym::logf32 => ("logf", 1, fx.tcx.types.f32),
        sym::logf64 => ("log", 1, fx.tcx.types.f64),
        sym::log2f32 => ("log2f", 1, fx.tcx.types.f32),
        sym::log2f64 => ("log2", 1, fx.tcx.types.f64),
        sym::log10f32 => ("log10f", 1, fx.tcx.types.f32),
        sym::log10f64 => ("log10", 1, fx.tcx.types.f64),
        sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32),
        sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64),
        sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32),
        sym::fmaf64 => ("fma", 3, fx.tcx.types.f64),
        sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32),
        sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64),
        sym::floorf32 => ("floorf", 1, fx.tcx.types.f32),
        sym::floorf64 => ("floor", 1, fx.tcx.types.f64),
        sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32),
        sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64),
        sym::truncf32 => ("truncf", 1, fx.tcx.types.f32),
        sym::truncf64 => ("trunc", 1, fx.tcx.types.f64),
        sym::roundf32 => ("roundf", 1, fx.tcx.types.f32),
        sym::roundf64 => ("round", 1, fx.tcx.types.f64),
        sym::sinf32 => ("sinf", 1, fx.tcx.types.f32),
        sym::sinf64 => ("sin", 1, fx.tcx.types.f64),
        sym::cosf32 => ("cosf", 1, fx.tcx.types.f32),
        sym::cosf64 => ("cos", 1, fx.tcx.types.f64),
        _ => return false,
    };

    if args.len() != arg_count {
        bug!("wrong number of args for intrinsic {:?}", intrinsic);
    }

    let (a, b, c);
    let args = match args {
        [x] => {
            a = [codegen_operand(fx, x)];
            &a as &[_]
        }
        [x, y] => {
            b = [codegen_operand(fx, x), codegen_operand(fx, y)];
            &b
        }
        [x, y, z] => {
            c = [codegen_operand(fx, x), codegen_operand(fx, y), codegen_operand(fx, z)];
            &c
        }
        _ => unreachable!(),
    };

    let layout = fx.layout_of(ty);
    let res = match intrinsic {
        sym::fmaf32 | sym::fmaf64 => {
            let a = args[0].load_scalar(fx);
            let b = args[1].load_scalar(fx);
            let c = args[2].load_scalar(fx);
            CValue::by_val(fx.bcx.ins().fma(a, b, c), layout)
        }
        sym::copysignf32 | sym::copysignf64 => {
            let a = args[0].load_scalar(fx);
            let b = args[1].load_scalar(fx);
            CValue::by_val(fx.bcx.ins().fcopysign(a, b), layout)
        }
        sym::fabsf32
        | sym::fabsf64
        | sym::floorf32
        | sym::floorf64
        | sym::ceilf32
        | sym::ceilf64
        | sym::truncf32
        | sym::truncf64 => {
            let a = args[0].load_scalar(fx);

            let val = match intrinsic {
                sym::fabsf32 | sym::fabsf64 => fx.bcx.ins().fabs(a),
                sym::floorf32 | sym::floorf64 => fx.bcx.ins().floor(a),
                sym::ceilf32 | sym::ceilf64 => fx.bcx.ins().ceil(a),
                sym::truncf32 | sym::truncf64 => fx.bcx.ins().trunc(a),
                _ => unreachable!(),
            };

            CValue::by_val(val, layout)
        }
        // These intrinsics aren't supported natively by Cranelift.
        // Lower them to a libcall.
        _ => fx.easy_call(name, &args, ty),
    };

    ret.write_cvalue(fx, res);

    true
}
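// Catch-all lowering for the remaining intrinsics. Every arm either writes its result
// to `ret` and falls through to the jump to `destination` emitted after the match, or
// diverges (trap/panic/fatal error) and returns early.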
fn codegen_regular_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    intrinsic: Symbol,
    substs: SubstsRef<'tcx>,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
    destination: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let usize_layout = fx.layout_of(fx.tcx.types.usize);

    match intrinsic {
        sym::abort => {
            fx.bcx.ins().trap(TrapCode::User(0));
            return;
        }
        sym::likely | sym::unlikely => {
            intrinsic_args!(fx, args => (a); intrinsic);

            ret.write_cvalue(fx, a);
        }
        sym::breakpoint => {
            intrinsic_args!(fx, args => (); intrinsic);

            fx.bcx.ins().debugtrap();
        }
        sym::copy | sym::copy_nonoverlapping => {
            intrinsic_args!(fx, args => (src, dst, count); intrinsic);
            let src = src.load_scalar(fx);
            let dst = dst.load_scalar(fx);
            let count = count.load_scalar(fx);

            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount =
                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };

            if intrinsic == sym::copy_nonoverlapping {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        }
        sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => {
            // NOTE: the volatile variants have src and dst swapped
            intrinsic_args!(fx, args => (dst, src, count); intrinsic);
            let dst = dst.load_scalar(fx);
            let src = src.load_scalar(fx);
            let count = count.load_scalar(fx);

            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount =
                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };

            // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
            if intrinsic == sym::volatile_copy_nonoverlapping_memory {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        }
        sym::size_of_val => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
            // branch
            let size = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                size
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        }
        sym::min_align_of_val => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
            // branch
            let align = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                align
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        }

        sym::vtable_size => {
            intrinsic_args!(fx, args => (vtable); intrinsic);
            let vtable = vtable.load_scalar(fx);

            let size = crate::vtable::size_of_obj(fx, vtable);
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        }

        sym::vtable_align => {
            intrinsic_args!(fx, args => (vtable); intrinsic);
            let vtable = vtable.load_scalar(fx);

            let align = crate::vtable::min_align_of_obj(fx, vtable);
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        }

        sym::unchecked_add
        | sym::unchecked_sub
        | sym::unchecked_mul
        | sym::unchecked_div
        | sym::exact_div
        | sym::unchecked_rem
        | sym::unchecked_shl
        | sym::unchecked_shr => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            // FIXME trap on overflow
            let bin_op = match intrinsic {
                sym::unchecked_add => BinOp::Add,
                sym::unchecked_sub => BinOp::Sub,
                sym::unchecked_mul => BinOp::Mul,
                sym::unchecked_div | sym::exact_div => BinOp::Div,
                sym::unchecked_rem => BinOp::Rem,
                sym::unchecked_shl => BinOp::Shl,
                sym::unchecked_shr => BinOp::Shr,
                _ => unreachable!(),
            };
            let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        }
        sym::saturating_add | sym::saturating_sub => {
            intrinsic_args!(fx, args => (lhs, rhs); intrinsic);

            assert_eq!(lhs.layout().ty, rhs.layout().ty);
            let bin_op = match intrinsic {
                sym::saturating_add => BinOp::Add,
                sym::saturating_sub => BinOp::Sub,
                _ => unreachable!(),
            };

            let res = crate::num::codegen_saturating_int_binop(fx, bin_op, lhs, rhs);
            ret.write_cvalue(fx, res);
        }
        sym::rotate_left => {
            intrinsic_args!(fx, args => (x, y); intrinsic);
            let y = y.load_scalar(fx);

            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotl(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        }
        sym::rotate_right => {
            intrinsic_args!(fx, args => (x, y); intrinsic);
            let y = y.load_scalar(fx);

            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotr(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        }

        // The only difference between offset and arith_offset is regarding UB. Because Cranelift
        // doesn't have UB, both are codegen'ed the same way.
        sym::offset | sym::arith_offset => {
            intrinsic_args!(fx, args => (base, offset); intrinsic);
            let offset = offset.load_scalar(fx);

            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let ptr_diff = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(offset, pointee_size as i64)
            } else {
                offset
            };
            let base_val = base.load_scalar(fx);
            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
            ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
        }
        sym::ptr_mask => {
            intrinsic_args!(fx, args => (ptr, mask); intrinsic);
            let ptr_layout = ptr.layout();
            let ptr = ptr.load_scalar(fx);
            let mask = mask.load_scalar(fx);
            // The masked pointer has to be written back to the return place; the
            // original code discarded the `band` result and left `ret` unwritten.
            let res = fx.bcx.ins().band(ptr, mask);
            ret.write_cvalue(fx, CValue::by_val(res, ptr_layout));
        }

        sym::transmute => {
            intrinsic_args!(fx, args => (from); intrinsic);

            if ret.layout().abi.is_uninhabited() {
                crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
                return;
            }

            ret.write_cvalue_transmute(fx, from);
        }
        sym::write_bytes | sym::volatile_set_memory => {
            intrinsic_args!(fx, args => (dst, val, count); intrinsic);
            let val = val.load_scalar(fx);
            let count = count.load_scalar(fx);

            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let count = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(count, pointee_size as i64)
            } else {
                count
            };
            let dst_ptr = dst.load_scalar(fx);
            // FIXME make the memset actually volatile when switching to emit_small_memset
            // FIXME use emit_small_memset
            fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
        }
        sym::ctlz | sym::ctlz_nonzero => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            // FIXME trap on `ctlz_nonzero` with zero arg.
            let res = fx.bcx.ins().clz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::cttz | sym::cttz_nonzero => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            // FIXME trap on `cttz_nonzero` with zero arg.
            let res = fx.bcx.ins().ctz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::ctpop => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = fx.bcx.ins().popcnt(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::bitreverse => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = fx.bcx.ins().bitrev(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::bswap => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = if fx.bcx.func.dfg.value_type(val) == types::I8 {
                val
            } else {
                fx.bcx.ins().bswap(val)
            };
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::assert_inhabited | sym::assert_zero_valid | sym::assert_mem_uninitialized_valid => {
            intrinsic_args!(fx, args => (); intrinsic);

            let ty = substs.type_at(0);

            let requirement = ValidityRequirement::from_intrinsic(intrinsic);

            if let Some(requirement) = requirement {
                let do_panic = !fx
                    .tcx
                    .check_validity_requirement((requirement, fx.param_env().and(ty)))
                    .expect("expect to have layout during codegen");

                if do_panic {
                    let layout = fx.layout_of(ty);

                    with_no_trimmed_paths!({
                        crate::base::codegen_panic_nounwind(
                            fx,
                            &if layout.abi.is_uninhabited() {
                                format!("attempted to instantiate uninhabited type `{}`", layout.ty)
                            } else if requirement == ValidityRequirement::Zero {
                                format!(
                                    "attempted to zero-initialize type `{}`, which is invalid",
                                    layout.ty
                                )
                            } else {
                                format!(
                                    "attempted to leave type `{}` uninitialized, which is invalid",
                                    layout.ty
                                )
                            },
                            source_info,
                        )
                    });
                    return;
                }
            }
        }

        sym::volatile_load | sym::unaligned_volatile_load => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            // Cranelift treats loads as volatile by default
            // FIXME correctly handle unaligned_volatile_load
            let inner_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
            ret.write_cvalue(fx, val);
        }
        sym::volatile_store | sym::unaligned_volatile_store => {
            intrinsic_args!(fx, args => (ptr, val); intrinsic);
            let ptr = ptr.load_scalar(fx);

            // Cranelift treats stores as volatile by default
            // FIXME correctly handle unaligned_volatile_store
            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
            dest.write_cvalue(fx, val);
        }

        sym::pref_align_of
        | sym::needs_drop
        | sym::type_id
        | sym::type_name
        | sym::variant_count => {
            intrinsic_args!(fx, args => (); intrinsic);

            let const_val =
                fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
            let val = crate::constant::codegen_const_value(fx, const_val, ret.layout().ty);
            ret.write_cvalue(fx, val);
        }

        sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
            intrinsic_args!(fx, args => (ptr, base); intrinsic);
            let ptr = ptr.load_scalar(fx);
            let base = base.load_scalar(fx);
            let ty = substs.type_at(0);

            let pointee_size: u64 = fx.layout_of(ty).size.bytes();
            let diff_bytes = fx.bcx.ins().isub(ptr, base);
            // FIXME this can be an exact division.
            let val = if intrinsic == sym::ptr_offset_from_unsigned {
                let usize_layout = fx.layout_of(fx.tcx.types.usize);
                // Because diff_bytes ULE isize::MAX, this would be fine as signed,
                // but unsigned is slightly easier to codegen, so might as well.
                CValue::by_val(fx.bcx.ins().udiv_imm(diff_bytes, pointee_size as i64), usize_layout)
            } else {
                let isize_layout = fx.layout_of(fx.tcx.types.isize);
                CValue::by_val(fx.bcx.ins().sdiv_imm(diff_bytes, pointee_size as i64), isize_layout)
            };
            ret.write_cvalue(fx, val);
        }

        sym::ptr_guaranteed_cmp => {
            intrinsic_args!(fx, args => (a, b); intrinsic);

            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b).load_scalar(fx);
            ret.write_cvalue(fx, CValue::by_val(val, fx.layout_of(fx.tcx.types.u8)));
        }

        sym::caller_location => {
            intrinsic_args!(fx, args => (); intrinsic);

            let caller_location = fx.get_caller_location(source_info);
            ret.write_cvalue(fx, caller_location);
        }
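        // Atomic intrinsics are matched by prefix, so e.g. atomic_load_seqcst and
        // atomic_load_relaxed both land in the atomic_load arm: the requested ordering
        // is never inspected (Cranelift's atomic instructions appear to be sequentially
        // consistent, which is at least as strong as any requested ordering). Each arm
        // first validates that the operand type is a basic integer or raw pointer,
        // reporting anything else via `report_atomic_type_validation_error`.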
        _ if intrinsic.as_str().starts_with("atomic_fence") => {
            intrinsic_args!(fx, args => (); intrinsic);

            fx.bcx.ins().fence();
        }
        _ if intrinsic.as_str().starts_with("atomic_singlethreadfence") => {
            intrinsic_args!(fx, args => (); intrinsic);

            // FIXME use a compiler fence once Cranelift supports it
            fx.bcx.ins().fence();
        }
        _ if intrinsic.as_str().starts_with("atomic_load") => {
            intrinsic_args!(fx, args => (ptr); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let ty = substs.type_at(0);
            match ty.kind() {
                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                    // FIXME implement 128bit atomics
                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                        // special case for compiler-builtins to avoid having to patch it
                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
                        return;
                    } else {
                        fx.tcx
                            .sess
                            .span_fatal(source_info.span, "128bit atomics not yet supported");
                    }
                }
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
                    return;
                }
            }
            let clif_ty = fx.clif_type(ty).unwrap();

            let val = fx.bcx.ins().atomic_load(clif_ty, MemFlags::trusted(), ptr);

            let val = CValue::by_val(val, fx.layout_of(ty));
            ret.write_cvalue(fx, val);
        }
        _ if intrinsic.as_str().starts_with("atomic_store") => {
            intrinsic_args!(fx, args => (ptr, val); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let ty = substs.type_at(0);
            match ty.kind() {
                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                    // FIXME implement 128bit atomics
                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                        // special case for compiler-builtins to avoid having to patch it
                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
                        return;
                    } else {
                        fx.tcx
                            .sess
                            .span_fatal(source_info.span, "128bit atomics not yet supported");
                    }
                }
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
                    return;
                }
            }

            let val = val.load_scalar(fx);

            fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
        }
        _ if intrinsic.as_str().starts_with("atomic_xchg") => {
            intrinsic_args!(fx, args => (ptr, new); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = new.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_cxchg") => {
            // both atomic_cxchg_* and atomic_cxchgweak_*
            intrinsic_args!(fx, args => (ptr, test_old, new); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = new.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }

            let test_old = test_old.load_scalar(fx);
            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);

            let ret_val = CValue::by_val_pair(old, is_eq, ret.layout());
            ret.write_cvalue(fx, ret_val)
        }

        _ if intrinsic.as_str().starts_with("atomic_xadd") => {
            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = amount.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old =
                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_xsub") => {
            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = amount.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old =
                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_and") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_or") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_xor") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_nand") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_max") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_umax") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_min") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_umin") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }

        sym::minnumf32 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_min(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        }
        sym::minnumf64 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_min(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        }
        sym::maxnumf32 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_max(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        }
        sym::maxnumf64 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_max(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        }
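        // `catch_unwind` lowering: since this backend does not support unwinding yet,
        // the closure `f` is called directly, `catch_fn` is ignored, and the all-zero
        // "no panic occurred" value is returned unconditionally.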
        kw::Try => {
            intrinsic_args!(fx, args => (f, data, catch_fn); intrinsic);
            let f = f.load_scalar(fx);
            let data = data.load_scalar(fx);
            let _catch_fn = catch_fn.load_scalar(fx);

            // FIXME once unwinding is supported, change this to actually catch panics
            let f_sig = fx.bcx.func.import_signature(Signature {
                call_conv: fx.target_config.default_call_conv,
                params: vec![AbiParam::new(pointer_ty(fx.tcx))],
                returns: vec![],
            });

            fx.bcx.ins().call_indirect(f_sig, f, &[data]);

            let layout = ret.layout();
            let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
            ret.write_cvalue(fx, ret_val);
        }

        sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            let res = crate::num::codegen_float_binop(
                fx,
                match intrinsic {
                    sym::fadd_fast => BinOp::Add,
                    sym::fsub_fast => BinOp::Sub,
                    sym::fmul_fast => BinOp::Mul,
                    sym::fdiv_fast => BinOp::Div,
                    sym::frem_fast => BinOp::Rem,
                    _ => unreachable!(),
                },
                x,
                y,
            );
            ret.write_cvalue(fx, res);
        }
        sym::float_to_int_unchecked => {
            intrinsic_args!(fx, args => (f); intrinsic);
            let f = f.load_scalar(fx);

            let res = crate::cast::clif_int_or_float_cast(
                fx,
                f,
                false,
                fx.clif_type(ret.layout().ty).unwrap(),
                type_sign(ret.layout().ty),
            );
            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
        }
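        // `raw_eq` compares two values byte-for-byte. Three strategies, cheapest
        // first: zero-sized values are trivially equal, values that fit in a single
        // integer register are compared with one load each plus `icmp`, and
        // everything else falls back to a `memcmp` libcall.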
        sym::raw_eq => {
            intrinsic_args!(fx, args => (lhs_ref, rhs_ref); intrinsic);
            let lhs_ref = lhs_ref.load_scalar(fx);
            let rhs_ref = rhs_ref.load_scalar(fx);

            let size = fx.layout_of(substs.type_at(0)).layout.size();
            // FIXME add and use emit_small_memcmp
            let is_eq_value = if size == Size::ZERO {
                // No bytes means they're trivially equal
                fx.bcx.ins().iconst(types::I8, 1)
            } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
                // Can't use `trusted` for these loads; they could be unaligned.
                let mut flags = MemFlags::new();
                flags.set_notrap();
                let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
                let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
                fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val)
            } else {
                // Just call `memcmp` (like slices do in core) when the
                // size is too large or it's not a power-of-two.
                let signed_bytes = i64::try_from(size.bytes()).unwrap();
                let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
                let params = vec![AbiParam::new(fx.pointer_type); 3];
                let returns = vec![AbiParam::new(types::I32)];
                let args = &[lhs_ref, rhs_ref, bytes_val];
                let cmp = fx.lib_call("memcmp", params, returns, args)[0];
                fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0)
            };
            ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
        }

        sym::const_allocate => {
            intrinsic_args!(fx, args => (_size, _align); intrinsic);

            // returns a null pointer at runtime.
            let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
            ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
        }

        sym::const_deallocate => {
            intrinsic_args!(fx, args => (_ptr, _size, _align); intrinsic);
            // nop at runtime.
        }

        sym::black_box => {
            intrinsic_args!(fx, args => (a); intrinsic);

            // FIXME implement black_box semantics
            ret.write_cvalue(fx, a);
        }

        // FIXME implement variadics in cranelift
        sym::va_copy | sym::va_arg | sym::va_end => {
            fx.tcx.sess.span_fatal(
                source_info.span,
                "Defining variadic functions is not yet supported by Cranelift",
            );
        }

        _ => {
            fx.tcx
                .sess
                .span_fatal(source_info.span, &format!("unsupported intrinsic {}", intrinsic));
        }
    }

    let ret_block = fx.get_block(destination.unwrap());
    fx.bcx.ins().jump(ret_block, &[]);
}